From: Jeremy Mordkoff Date: Thu, 31 Mar 2016 23:37:12 +0000 (-0400) Subject: Initial submission of RIFT.ware 4.1.1.0 to ETSI/OSM X-Git-Tag: v0.0~22 X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=59178d705894364069e2a2cb1ab6af68af1c1249;p=osm%2Friftware.git Initial submission of RIFT.ware 4.1.1.0 to ETSI/OSM Signed-off-by: Jeremy Mordkoff --- diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ce8509f --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +.build +*.tgz +.install +cmake/ +etc/ +modules/automation/ +modules/core/enablement/ +modules/core/mgmt/ +modules/core/rwvx/ +modules/core/util/ +modules/ext/ +modules/yang_composite/ +rwbase/ +scripts/cloud/ +scripts/env/ +scripts/install +scripts/ldap +scripts/nagios +scripts/packaging/ +scripts/rift-scripts.sh.in +scripts/rpm/ +scripts/system +scripts/test/ +scripts/util/ +.gitmodules.deps.orig +.gitmodules.orig +modules/toolchain/ +scripts/CMakeLists.txt diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..691f254 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,45 @@ +[submodule "modules/toolchain"] + path = modules/toolchain + url = ../modules/toolchain.git +[submodule "modules/ext/lib"] + path = modules/ext/lib + url = ../modules/ext/lib.git +[submodule "modules/core/util"] + path = modules/core/util + url = ../modules/core/util.git +[submodule "modules/ext/gnome"] + path = modules/ext/gnome + url = ../modules/ext/gnome.git +[submodule "modules/ext/ipc"] + path = modules/ext/ipc + url = ../modules/ext/ipc.git +[submodule "modules/ext/util"] + path = modules/ext/util + url = ../modules/ext/util.git +[submodule "modules/ext/yang"] + path = modules/ext/yang + url = ../modules/ext/yang.git +[submodule "modules/core/mgmt"] + path = modules/core/mgmt + url = ../modules/core/mgmt.git +[submodule "modules/core/rwvx"] + path = modules/core/rwvx + url = ../modules/core/rwvx.git +[submodule "modules/ext/mgmt"] + path = modules/ext/mgmt + url = ../modules/ext/mgmt.git +[submodule "modules/automation/core"] + path = modules/automation/core + url = ../modules/automation/core.git +[submodule "modules/ext/go"] + path = modules/ext/go + url = ../modules/ext/go.git +[submodule "modules/ui/rw.ui"] + path = modules/ui/rw.ui + url = ../modules/ui/rw.ui.git +[submodule "modules/ui/composer"] + path = modules/ui/composer + url = ../modules/ui/composer.git +[submodule "modules/core/mano"] + path = modules/core/mano + url = ../modules/core/mano.git diff --git a/.gitmodules.deps b/.gitmodules.deps new file mode 100644 index 0000000..3fd5eb9 --- /dev/null +++ b/.gitmodules.deps @@ -0,0 +1,24 @@ +// specify the sub"modules that each submodule depends on +// for example +// "modules/core/util" -> "modules/ext/gnome" + +strict digraph dependencies { + // modules/core/util dependencies + "modules/core/util" -> "modules/ext/mgmt" + "modules/core/util" -> "modules/ext/yang" + "modules/core/util" -> "modules/ext/go" + "modules/core/util" -> "modules/ext/cloud" + "modules/core/util" -> "modules/automation/core" + + "modules/ext/util" -> "modules/ext/gnome" + "modules/ext/util" -> "modules/ext/ipc" + "modules/ext/yang" -> "modules/ext/util" + + "modules/core/rwvx" -> "modules/core/util" + + "modules/core/mgmt" -> "modules/core/rwvx" + + "modules/core/mano" -> "modules/core/mgmt" + "modules/core/mano" -> "modules/ui/rw.ui" + "modules/core/mano" -> "modules/ui/composer" +} diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..26e045e --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,496 @@ +# +# (c) 
Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 8/29/2013
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+
+# this block should be at the top of every CMakeLists.txt
+# it sets up the project root dir and the module path
+get_filename_component(PROJECT_TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR} ABSOLUTE)
+message("PROJECT_TOP_DIR = ${PROJECT_TOP_DIR}")
+set(CMAKE_MODULE_PATH ${PROJECT_TOP_DIR}/cmake/modules)
+include(rift_globals)
+include(rift_build_utils)
+include(rift_externalproject)
+cmake_policy(SET CMP0017 NEW)
+
+
+##
+# Fetch the module dependencies
+# For each submodule that is checked out, this target will determine the
+# dependencies. For each dependency, this target tries to populate the cache.
+# If the cache doesn't exist for the dependency, the submodule is checked out
+# to build locally.
+##
+configure_file(
+  ${PROJECT_TOP_DIR}/cmake/modules/rift_fetch_dependencies.cmake.in
+  ${CMAKE_CURRENT_BINARY_DIR}/rift_fetch_dependencies.cmake
+  ESCAPE_QUOTES @ONLY
+  )
+
+# Custom target for fetching the dependencies
+add_custom_target(fetch-dependencies
+  ${CMAKE_COMMAND}
+  -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}
+  -DPROJECT_TOP_DIR=${PROJECT_TOP_DIR}
+  -P ${CMAKE_CURRENT_BINARY_DIR}/rift_fetch_dependencies.cmake
+  )
+
+# Fetch the dependencies
+# This eliminates the "make fetch_dependencies" step during the build process
+execute_process(
+  COMMAND
+    ${CMAKE_COMMAND} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}
+    -DPROJECT_TOP_DIR=${PROJECT_TOP_DIR}
+    -P ${CMAKE_CURRENT_BINARY_DIR}/rift_fetch_dependencies.cmake
+  RESULT_VARIABLE result
+  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+if(result)
+  message("Failed to get dependencies for submodule ${submodule}")
+  message(FATAL_ERROR "Error: ${result}")
+endif(result)
+
+##
+# function to convert a submodule path into its build target name
+# rift_submodule_to_target_name(
+#   var         # output variable
+#   submodule)  # submodule path
+#
+# Example: rift_submodule_to_target_name(var modules/core/fpath)
+#
+# Result: $(var) == core_fpath
+##
+function(rift_submodule_to_target_name var submodule)
+  set(retval)
+  string(REPLACE "/" "_" retval ${submodule})
+  string(REPLACE "modules_" "" retval ${retval})
+  set(${var} "${retval}" PARENT_SCOPE)
+
+endfunction(rift_submodule_to_target_name)
+
+
+function(add_submodule_targets
+    submodule
+    submodule_target
+    clean_targets
+    unittest_targets
+    unittest_long_targets
+    systemtest_targets
+    coverage_targets
+    doxygen_targets
+    package_targets
+    bcache_targets)
+  ##
+  # Wipe out the external project and build it again
+  ##
+  list(APPEND clean_targets clean_${submodule_target})
+  add_custom_target(clean_${submodule_target}
+    rm -rf ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/
+    )
+
+  ##
+  # Add custom targets for running quick unittests in each submodule
+  ##
+  list(APPEND unittest_targets rw.unittest.${submodule_target})
+  add_custom_target(rw.unittest.${submodule_target}
+    $(MAKE) rw.unittest
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build
+    DEPENDS ${submodule_target}
+    )
+
+  ##
+  # Add custom targets for running long unittests in each submodule
+  ##
+  list(APPEND unittest_long_targets rw.unittest_long.${submodule_target})
+  add_custom_target(rw.unittest_long.${submodule_target}
+    $(MAKE) rw.unittest_long
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build
+    DEPENDS ${submodule_target}
+    )
+
+  ##
+  # Add custom targets for running systemtests in each
submodule + ## + list(APPEND systemtest_targets rw.systemtest.${submodule_target}) + add_custom_target(rw.systemtest.${submodule_target} + $(MAKE) rw.systemtest + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + + ## + # Add custom targets for running coverage analysis in each submodule + ## + list(APPEND coverage_targets rw.coverage.${submodule_target}) + add_custom_target(rw.coverage.${submodule_target} + $(MAKE) rw.coverage + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for generating doxygen documentation in each submodule + ## + list(APPEND doxygen_targets rw.doxygen.${submodule_target}) + add_custom_target(rw.doxygen.${submodule_target} + $(MAKE) rw.doxygen + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for generating packages in each submodule + ## + list(APPEND package_targets rw.package.${submodule_target}) + add_custom_target(rw.package.${submodule_target} + $(MAKE) rw.package + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + ## + # Add custom targets for caching packages in each submodule + ## + list(APPEND bcache_targets rw.bcache.${submodule_target}) + add_custom_target(rw.bcache.${submodule_target} + $(MAKE) rw.bcache + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + DEPENDS ${submodule_target} + ) + + set(clean_targets "${clean_targets}" PARENT_SCOPE) + set(unittest_targets "${unittest_targets}" PARENT_SCOPE) + set(unittest_long_targets "${unittest_long_targets}" PARENT_SCOPE) + set(systemtest_targets "${systemtest_targets}" PARENT_SCOPE) + set(coverage_targets "${coverage_targets}" PARENT_SCOPE) + set(doxygen_targets "${doxygen_targets}" PARENT_SCOPE) + set(package_targets "${package_targets}" PARENT_SCOPE) + set(bcache_targets "${bcache_targets}" PARENT_SCOPE) +endfunction() + +## +# rwbase is a special project which sets up the environment for +# the rest of Riftware to build. As such, every other project +# will depend on rwbase. As this will never change, rather than +# use .gitmodules.deps, we can just create this special project +# here and set the dependency for the rest of the projects below. +# +# This also allows us to have rwbase be directly included in this +# git tree rather than in a submodule which is preferable as +# rwbase is tiny. 
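+# For illustration (hypothetical target name): every submodule external
+# project created further below is declared roughly as
+#   externalproject_add(core_util ... DEPENDS rwbase)
+# so rwbase is always configured and installed before anything else builds.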
+## +externalproject_add( + rwbase + DOWNLOAD_COMMAND "" + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/rwbase + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/rwbase + CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX="${CMAKE_INSTALL_PREFIX}" + -DNOT_DEVELOPER_BUILD="${NOT_DEVELOPER_BUILD}" + -DCOVERAGE_BUILD="${COVERAGE_BUILD}" + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCONFD_FLAVOUR=${CONFD_FLAVOUR} + INSTALL_DIR ${CMAKE_INSTALL_PREFIX} +) + +rift_externalproject_sha_check(rwbase + BINARY_DIR + ${CMAKE_CURRENT_BINARY_DIR}/rwbase/src/rwbase-build + SOURCE_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/rwbase + STAMP_DIR + ${CMAKE_CURRENT_BINARY_DIR}/rwbase/src/rwbase-stamp) + +set(clean_targets) +set(unittest_targets) +set(unittest_long_targets) +set(systemtest_targets) +set(coverage_targets) +set(doxygen_targets) +set(package_targets) +set(bcache_targets) + +add_submodule_targets(rwbase rwbase + "${clean_targets}" + "${unittest_targets}" + "${unittest_long_targets}" + "${systemtest_targets}" + "${coverage_targets}" + "${doxygen_targets}" + "${package_targets}" + "${bcache_targets}") + +set(package_targets) +set(bcache_targets) + +## +# Find the list of checked out submodules +# The user may check out one or more submodules to build +# For example user may check out modules/core/schema using +# - git submodule init modules/core/schema +# - git submodule update modules/core/schema +# - cd modules/core/schema && git checkout master +# Missing cache for submodule dependencies will also cause the submodule +# to be checked-out +## +rift_find_checkedout_submodules( + PROJECT_TOP_DIR ${PROJECT_TOP_DIR} + OUT_SUBMODULES submodules) + +## +# Add external project for building individual submodules +## +foreach(submodule ${submodules}) + rift_submodule_to_target_name(submodule_target ${submodule}) + + ## + # Add the externalproject targets for each submodule + ## + + # RIFT-3266 - Prevent docs from running too much in parallel + # due to out of memory situation with "fop". 
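+  # Sketch of the effect (target names are illustrative): a submodule whose
+  # target name contains "docs" is built with "$(MAKE) -j1"; every other
+  # submodule keeps the fully parallel $(MAKE) default.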
+ set (submodule_build_cmd $(MAKE)) + if(submodule_target MATCHES "(.*)docs(.*)") + set (submodule_build_cmd $(MAKE) -j1) + endif() + + externalproject_add( + ${submodule_target} + DOWNLOAD_COMMAND "" + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/${submodule} + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${submodule} + CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX="${CMAKE_INSTALL_PREFIX}" + -DNOT_DEVELOPER_BUILD="${NOT_DEVELOPER_BUILD}" + -DCOVERAGE_BUILD="${COVERAGE_BUILD}" + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCONFD_FLAVOUR=${CONFD_FLAVOUR} + BUILD_COMMAND ${submodule_build_cmd} + DEPENDS rwbase + ) + + # Get the submodule dependents instead of its dependencies + rift_find_submodule_deps( + PROJECT_TOP_DIR ${PROJECT_TOP_DIR} + SUBMODULE ${submodule} + OUT_DEPS dep_submodules + GET_DEPENDENTS + ) + + # Calculate all dependent submodule targets + set(dep_submodule_targets) + foreach(dep_submodule ${dep_submodules}) + rift_submodule_to_target_name(dep_submodule_target ${dep_submodule}) + list(APPEND dep_submodule_targets ${dep_submodule_target}) + endforeach(dep_submodule) + + rift_externalproject_sha_check(${submodule_target} + BINARY_DIR + ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-build + SOURCE_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/${submodule} + STAMP_DIR + ${CMAKE_CURRENT_BINARY_DIR}/${submodule}/src/${submodule_target}-stamp + GIT_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/${submodule}/.git + DEPENDENT_EXTERNAL_TARGETS + ${dep_submodule_targets} + ) + + add_submodule_targets(${submodule} ${submodule_target} + "${clean_targets}" + "${unittest_targets}" + "${unittest_long_targets}" + "${systemtest_targets}" + "${coverage_targets}" + "${doxygen_targets}" + "${package_targets}" + "${bcache_targets}") +endforeach(submodule) + +## +# Create a list which contains all submodule targets +## +set(submodule_targets) +foreach(submodule ${submodules}) + rift_submodule_to_target_name(submodule_target ${submodule}) + list(APPEND submodule_targets ${submodule_target}) +endforeach(submodule) + +## +# Each submodule may be dependent on other submodules +# Add dependencies for each submodule target +## +foreach(submodule ${submodules}) + rift_submodule_to_target_name(submodule_target ${submodule}) + + # create a list of targets for dependent submodules + unset(dep_targets) + rift_find_submodule_deps( + PROJECT_TOP_DIR ${PROJECT_TOP_DIR} + SUBMODULE ${submodule} + OUT_DEPS deps) + if(deps) + foreach(dep ${deps}) + rift_submodule_to_target_name(dep_target ${dep}) + add_dependencies(${submodule_target} ${dep_target}) + #add_dependencies(externalproject_${submodule_target}_sha externalproject_${dep_target}_sha) + endforeach(dep) + endif(deps) +endforeach(submodule) + +## +# A target which writes solib search path configuration for gdb into a file +# +if(NOT "${submodules}" STREQUAL "") + add_custom_target(rw.gdbinit ALL + COMMAND ${PROJECT_TOP_DIR}/scripts/util/generate_gdbinit.sh ${CMAKE_INSTALL_PREFIX} + # This is an utter hack, the gdbinit ends up not included in rpm installs. 
For bcache rpms we
+    # don't mind; for real rpm installs there may well not be symbols anyway so it'll do for now
+    )
+endif()
+
+##
+# Create a target which combines all installed foss.txt files into a single
+# output
+##
+add_custom_target(rw.foss ALL
+  COMMAND rm -rf ${CMAKE_INSTALL_PREFIX}/foss/foss.html
+  COMMAND mkdir -p ${CMAKE_INSTALL_PREFIX}/foss
+  COMMAND ${PROJECT_TOP_DIR}/scripts/util/generate_foss.py
+    --foss-dir ${CMAKE_INSTALL_PREFIX}/foss
+    --output-file ${CMAKE_INSTALL_PREFIX}/foss/foss.html
+  DEPENDS ${submodule_targets}
+  )
+
+##
+# Add a top level target for forcing the clean on all submodules
+##
+#??this doesn't work as clean isn't a target it's a freebie from the Makefile generator: add_dependencies(clean ${clean_targets})
+#??this doesn't work as two cleans come out?? add_custom_target(clean
+#  DEPENDS ${clean_targets}
+#  )
+
+##
+# Add a top level target for running long unittests
+##
+add_custom_target(rw.unittest_long
+  DEPENDS ${unittest_long_targets}
+  )
+
+##
+# Add a top level target for running unittests
+##
+add_custom_target(rw.unittest
+  DEPENDS ${unittest_targets}
+  )
+
+##
+# Add a top level target for running systemtests
+##
+add_custom_target(rw.systemtest
+  DEPENDS ${systemtest_targets}
+  )
+
+##
+# Add a top level target for running coverage
+##
+add_custom_target(rw.coverage
+  DEPENDS ${coverage_targets}
+  )
+
+##
+# Add a top level target for generating doxygen documentation
+##
+add_custom_target(rw.doxygen
+  DEPENDS ${doxygen_targets}
+  )
+
+##
+# Add a top level target for generating packages
+##
+add_custom_target(rw.package
+  DEPENDS ${package_targets}
+  )
+
+##
+# Add a top level target for creating build cache
+##
+add_custom_target(rw.bcache
+  DEPENDS ${bcache_targets}
+  )
+
+if(NOT submodules)
+  message("No submodules are checked out")
+endif()
+
+##
+# Generate the dependency graph for pictorial viewing
+##
+add_custom_target(rw.dependency_graph
+  COMMAND
+    sed s,modules/,,g ${PROJECT_TOP_DIR}/.gitmodules.deps | dot -Tpng -odependency.png
+  WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+  )
+
+##
+# Generate cscope symbols
+##
+add_custom_target(rw.cscope
+  cscope-indexer -r
+  WORKING_DIRECTORY ${PROJECT_TOP_DIR}
+  )
+
+##
+# Generate pycscope symbols
+##
+add_custom_target(rw.pycscope
+  ./scripts/cloud/pycscope-indexer -v
+  WORKING_DIRECTORY ${PROJECT_TOP_DIR}
+  )
+
+##
+# Perform compilation check on python scripts
+##
+add_custom_target(rw.pycheck
+  COMMAND python ./bin/rift-lint.py -c -t modules -verbose
+  WORKING_DIRECTORY ${PROJECT_TOP_DIR}
+  )
+
+add_subdirectory(scripts)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..2b2e329
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+   Copyright 2016 RIFT.IO Inc
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..aec746b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,591 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf / Anil Gunturu
+# Creation Date: 11/18/2013
+#
+
+.DEFAULT_GOAL := rw
+
+##
+# Set a variable for the top level directory
+##
+
+makefile.top := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+
+TOP_SRC_PATH := $(makefile.top)
+RIFT_BUILD = $(TOP_SRC_PATH)/.build
+RIFT_ARTIFACTS = $(TOP_SRC_PATH)/.artifacts
+RIFT_MODULE_TEST = $(TOP_SRC_PATH)/.artifacts/moduletest
+RIFT_INSTALL = $(TOP_SRC_PATH)/.install
+RIFT_SHELL_EXE = $(TOP_SRC_PATH)/rift-shell -e --
+# Force rift-shell to reload env
+RIFT_SHELL =
+
+HARNESS_EXE = $(RIFT_INSTALL)/usr/rift/systemtest/harness/harness
+
+CONFD = FULL
+
+##
+# Function to get build type
+##
+ifeq ($(CMAKE_BUILD_TYPE),)
+  get_build_type=Debug
+else
+  get_build_type=$(CMAKE_BUILD_TYPE)
+endif
+
+##
+# Function to get coverage build type
+##
+ifeq ($(COVERAGE_BUILD),)
+  is_coverage=FALSE
+else
+  is_coverage=$(COVERAGE_BUILD)
+endif
+
+##
+# Function to get developer build type
+##
+ifeq ($(NOT_DEVELOPER_BUILD),)
+  is_not_developer=FALSE
+else
+  is_not_developer=$(NOT_DEVELOPER_BUILD)
+endif
+
+##
+# Function to get commit revision to checkout
+##
+ifeq ($(COMMIT),)
+  get_commit=
+else
+  get_commit=$(COMMIT)
+endif
+
+##
+# Function to get whether to abort on first unit test failure
+##
+ifeq ($(ABORT_ON_TEST_FAILURE),)
+  get_abort_on_test_failure=1
+else
+  get_abort_on_test_failure=0
+endif
+
+##
+# Function to get whether to use the Confd basic version
+# or the Confd licensed version
+##
+ifeq ($(CONFD),)
+  get_confd_flavour=FULL
+else
+  get_confd_flavour=$(CONFD)
+endif
+
+
+##
+# Function to lookup submodules
+##
+ifeq ($(SUBMODULE),)
+  lookup_submodule=$(error ${newline}ERROR: SUBMODULE=XYZ missing on command line:${newline})
+else ifeq ($(findstring modules/, $(SUBMODULE)), modules/)
+  lookup_submodule=$(SUBMODULE)
+else
+  lookup_submodule=$(error ${newline}ERROR: Invalid SUBMODULE=XYZ specifier on command line${newline})
+endif
+
+.PHONY: all
+all: rw
+
+##
+#
+##
+rw.list:
+	@echo ''
+	@echo '================================================================================'
+	@echo ' List of Make targets'
+	@echo '================================================================================'
+	@echo ''
+	@echo ' make rw.checkout SUBMODULE=<submodule>'
+	@echo '      - Generic target to checkout a specific submodule'
+	@echo '        e.g. make rw.checkout SUBMODULE=modules/core/util'
+	@echo ''
+	@echo ' make rw.submodule SUBMODULE=<submodule>'
+	@echo '      - Generic target to checkout & build a submodule'
+	@echo '        e.g. 
make rw.submodule SUBMODULE=modules/core/util' + @echo '' + @echo ' make rw.checkout.world COMMIT=' + @echo ' - Check out the entire tree at a particular superproject hash-state (or branch-tag)' + @echo '' + @echo '' + @echo 'Shortcuts:' + @echo ' make rw - Just want an incremental build' + @echo ' make rw.app.rwbin - Application RW.Bin (checkout & build)' + @echo ' make rw.bcache - Populate the build cache' + @echo ' make rw.checkout.world - Checkout ALL submodules (whole world)' + @echo ' make rw.checkout.stack - Checkout Openstack submodules' + @echo ' make rw.core.fpath - Core FastPath (checkout & build)' + @echo ' make rw.core.ipc - Core IPC packages (checkout & build)' + @echo ' make rw.core.mgmt - Core mgmt packages (checkout & build)' + @echo ' make rw.core.schema - Core management packages (checkout & build)' + @echo ' make rw.core.util - Core utilities (checkout & build)' + @echo ' make rw.cscope - Generate cscope symbols' + @echo ' (in top directory)' + @echo ' make rw.pycscope - Generate pycscope symbols' + @echo ' (in top directory)' + @echo ' make rw.coverage - Run coverage' + @echo ' (results in ${top}/.artifacts/coverage)' + @echo ' make rw.fix_perms - Fix root ownership in .install' + @echo ' make rw.pycheck - Run simple Python compile-check for scripts under modules/*' + @echo ' make rw.doxygen - Generate doxygen documentation' + @echo ' (in ${top}/.install/documentation dir)' + @echo ' make rw.docs - Documentation (checkout & build)' + @echo ' make rw.dependency_graph - Generate submodule dependency dot graph' + @echo ' (in ${top}/.build/dependency.png)' + @echo ' make rw.package - Generate RPM packages' + @echo ' make rw.unittest - Run the unittests' + @echo ' (results in ${top}/.artifacts/unittest)' + @echo ' make rw.unittest_long - Run long unittests' + @echo ' (results in ${top}/.artifacts/unittest)' + @echo ' make rw.automation.systemtest - Checkout modules/automation/systemtest but do not run the systemtests' + @echo ' make rw.sanity - Run a single harness smoke test (default: trafgen)' + @echo ' (takes optional TEST=[trafgen, seagull, ltesim] parameter)' + @echo ' make rw.systemtest - Run the harness smoke tests' + @echo ' make rw.systemtest_local - Run the local systemtest' + @echo ' (results in ${top}/.artifacts/systemtest)' + @echo ' make rw.rift - Checkout & build rift (no ext)' + @echo ' make rw.world - Checkout & build' + @echo ' make CONFD=BASIC - Checkout & build using Confd BASIC version' + @echo ' make CONFD=FULL - Checkout & build using Confd FULL version. This is the default option.' + @echo + @echo 'Examples w/misc. 
options:'
+	@echo '   make rw VERBOSE=1 CMAKE_BUILD_TYPE=Release'
+	@echo '   make rw VERBOSE=1 NOT_DEVELOPER_BUILD=TRUE CMAKE_BUILD_TYPE=Release COVERAGE_BUILD=TRUE'
+	@echo ''
+	@echo ''
+	@echo ''
+	@echo 'Image building commands:'
+	@echo '   NOTE: Images require root access and a fully built tree that used NOT_DEVELOPER_BUILD=TRUE'
+	@echo '         As we do not want to build the entire tree as root, they do not depend on rw.world; it'
+	@echo '         remains up to the caller to first call "make rw.world NOT_DEVELOPER_BUILD=TRUE"'
+	@echo '   make rw.ec2-image   - Image suitable for uploading to EC2'
+	@echo '   make rw.kvm-image   - Image suitable for uploading to OpenStack'
+	@echo ''
+	@echo 'Instructions to run the trafgen simulation (as of 04/15/2015):'
+	@echo '   cd top-of-your-build-dir'
+	@echo '   ./rift-shell'
+	@echo '   ./modules/automation/systemtest/fpath/demos/trafgen_111.py -c -m ethsim --configure --start-traffic'
+	@echo ''
+	@echo '   ## To see port statistics:'
+	@echo '   show colony trafsink port-state trafsink/5/1 counters'
+	@echo '   ## [Here you should see port rx/tx counters, if the test is running successfully...]'
+	@echo ''
+	@echo 'Smoke-Test Instructions:'
+	@echo '   Wiki: http://confluence.eng.riftio.com/display/AUT/Fpath+smoke+test'
+	@echo '   Example: ./modules/automation/systemtest/fpath/fp_smoke'
+	@echo ''
+	@echo ''
+
+##
+# Make rule to display help for all targets
+##
+
+help:
+	@echo '================================================================================'
+	@echo 'Makefile targets - the default target is "help"'
+	@echo '================================================================================'
+	@echo ''
+	@echo ' primer  - help message to build source code for the first time'
+	@echo ' help    - this message'
+	@echo ' cmake   - invoke cmake for the module this directory is in'
+	@echo ' rw.list - list of make targets and usage'
+	$(RIFT_SHELL_EXE) $(MAKE) rw.list
+
+
+
+##
+# Make rule to display a primer on how to easily checkout/build the software
+##
+
+primer:
+	@echo '================================================================================'
+	@echo 'RiftWare software build primer'
+	@echo '================================================================================'
+	@echo ''
+	@echo 'Step #1 -- First checkout the software module that you wish to build'
+	@echo '--------------------------------------------------------------------------------'
+	@echo 'Assuming this is the "modules/core/util" submodule, then:'
+	@echo ''
+	@echo '$$ make rw.checkout SUBMODULE=modules/core/util'
+	@echo ''
+	@echo 'There is also a shortcut target that checks out and builds it:'
+	@echo ''
+	@echo '$$ make rw.core.util'
+	@echo ''
+	@echo 'Step #2 -- Now run the cmake target'
+	@echo '--------------------------------------------------------------------------------'
+	@echo 'This makes a build directory, runs cmake, and runs make on the generated files'
+	@echo ''
+	@echo '$$ make cmake'
+
+
+##
+# Clean up all generated files from previous builds.
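+# A sketch of what this removes (the defaults set at the top of this
+# Makefile): .artifacts, .install and .build under the source tree.
+# "clean.fast" below gives the same result but first moves the directories
+# aside and deletes them in the background, so a new build can start
+# immediately.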
+## +clean: + rm -rf $(RIFT_ARTIFACTS) + rm -rf $(RIFT_INSTALL) + rm -rf $(RIFT_BUILD) + +rw.clean: clean + +clean.fast: + @touch $(RIFT_ARTIFACTS) + @touch $(RIFT_INSTALL) + @touch $(RIFT_BUILD) + @$(eval DELETE := $(shell mktemp -d --tmpdir=$(RIFT_ROOT) .deleteXXXXXX)) + @mv -f $(RIFT_ARTIFACTS) $(DELETE) + @mv -f $(RIFT_INSTALL) $(DELETE) + @mv -f $(RIFT_BUILD) $(DELETE) + @(rm -rf $(DELETE) &>/dev/null &) + +## +# Rule to invoke cmake +## +cmake:: BUILD_TYPE=$(call get_build_type) +cmake:: COVERAGE_TYPE=$(call is_coverage) +cmake:: NOT_DEVELOPER_TYPE=$(call is_not_developer) +cmake:: CONFD_FLAVOUR=$(call get_confd_flavour) +cmake:: + mkdir -p $(RIFT_BUILD) + mkdir -p $(RIFT_ARTIFACTS) + mkdir -p $(RIFT_MODULE_TEST) + mkdir -p $(RIFT_INSTALL) + cd $(RIFT_BUILD) && $(RIFT_SHELL_EXE) cmake ../ -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DNOT_DEVELOPER_BUILD=$(NOT_DEVELOPER_TYPE) -DCOVERAGE_BUILD=$(COVERAGE_TYPE) -DCONFD_FLAVOUR=$(CONFD_FLAVOUR) + +## +# Rule to checkout non-external components +## +rw.checkout.rift: CHANGESET=$(call get_commit) +rw.checkout.rift: + git xinit -e modules/ext/* + git xcheckout $(CHANGESET) + +## +# Rule to checkout all components +## +rw.checkout.world: CHANGESET=$(call get_commit) +rw.checkout.world: + git xinit + git xcheckout $(CHANGESET) + +## +# Rule for rw.checkout +# +# This is done with a "git submodule init" followed by a "git submodule update" +# Then checkout the master branch of the source code +## +rw.checkout:: SUBMODULE_DIR=$(call lookup_submodule) +rw.checkout:: CHANGESET=$(call get_commit) +rw.checkout:: + git xinit -s $(SUBMODULE_DIR) + git xcheckout $(CHANGESET) + +## +# Generic code to checkout submodule and make it +## +rw.submodule:: SUBMODULE_DIR=$(call lookup_submodule) +rw.submodule:: CHANGESET=$(call get_commit) +rw.submodule: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=$(SUBMODULE_DIR) COMMIT=$(CHANGESET) + $(RIFT_SHELL_EXE) $(MAKE) rw + +## +# Shortcut checkout/make rules for various modules +# +# These commands are shortcuts to checkout and build the specified submodule +## +rw.app.rwbin: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/app/rwbin + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.util: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/util + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.fpath: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/fpath + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/automation/systemtest + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.docs: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/docs + $(RIFT_SHELL_EXE) $(MAKE) rw + +## +# SAMPLE target for making a tar file of the exportable-docs +# as of this moment, no documents are exportable, so this is just a placeholder +## +rw.export_docs: rw.docs + tar -c -h -C $(RIFT_INSTALL)/documentation -f $(RIFT_INSTALL)/documents.tar riftio/pdf/riftio_distributed_fpath.pdf config + +rw.core.mgmt: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/mgmt + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.ipc: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/ipc + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.core.rwvx: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/core/rwvx + $(RIFT_SHELL_EXE) $(MAKE) rw + +rw.automation.systemtest: + -$(RIFT_SHELL_EXE) $(MAKE) rw.checkout SUBMODULE=modules/automation/systemtest + $(RIFT_SHELL_EXE) $(MAKE) rw + +core_fpath: cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@ + +core_mgmt: cmake + $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@ + +core_util: 
cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@
+
+core_rwvx: cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) $@
+
+
+#
+# Rule to checkout and build rift
+#
+rw.world: rw.checkout.world
+	$(RIFT_SHELL_EXE) $(MAKE) rw
+
+
+#
+# Rule to checkout and build rift without external packages
+#
+rw.rift: rw.checkout.rift
+	$(RIFT_SHELL_EXE) $(MAKE) rw
+
+#
+# Rule to run the systemtest smoke test via the harness
+#
+rw.systemtest:
+	$(RIFT_SHELL_EXE) $(HARNESS_EXE) run -i smoke_stable --serial --stdout
+
+
+#
+# Get the harness test name from the TEST= make argument (default is trafgen)
+# This will convert a simple test name (trafgen, ltesim, seagull)
+# into the corresponding harness test name (passed to harness via --name parameter)
+# These test names are found in the respective .racfg test configuration files
+#
+ifeq ($(TEST),)
+  get_sanity_test=^TC_TRAFGEN111_0100$$
+else ifeq ($(TEST), trafgen)
+  get_sanity_test=^TC_TRAFGEN111_0100$$
+else ifeq ($(TEST), ltesim)
+  get_sanity_test=^TC_LTESIMCOMBINED_0101$$
+else ifeq ($(TEST), seagull)
+  get_sanity_test=^TC_SEAGULL_0001$$
+endif
+
+#
+# Rule to run a single test via the harness
+#
+rw.sanity:: HARNESS_TEST=$(call get_sanity_test)
+rw.sanity::
+	$(RIFT_SHELL_EXE) $(HARNESS_EXE) run --no-user --serial --stdout --name $(HARNESS_TEST)
+
+##
+# Rule to invoke systemtest locally
+##
+rw.systemtest_local: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) RIFT_NO_SUDO_REAPER=1 rw.systemtest
+
+
+#
+# Rule to fix the permissions in the .install directory after running a demo as root
+#
+rw.fix_perms:
+	$(RIFT_SHELL_EXE) $(HARNESS_EXE) run --serial --stdout --name FIX_INSTALL_PERMISSIONS_9001
+
+##
+# Rule to create the combined foss.html from all foss.txt files in
+# installed submodules.
+##
+rw.foss: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.foss
+
+
+##
+# This target runs the cmake step.
+# The cmake is invoked under the following two conditions:
+#   - code was checked out and the cmake step was never invoked
+#   - cmake step was invoked once, however a new submodule
+#     was checked out since then
+##
+rw.cmake:
+	if [[ ! -f .build/Makefile ]] ; then \
+	  $(RIFT_SHELL_EXE) $(MAKE) cmake ; \
+	else \
+	  grep "path = " .gitmodules | awk '{print $$3}' | \
+	  while read submodule; do \
+	    if [[ -f $$submodule/CMakeLists.txt ]] ; then \
+	      if [[ ! -d .build/$$submodule ]] ; then \
+	        cd $(RIFT_BUILD) && cmake ../ ; \
+	        break; \
+	      fi; \
+	    fi; \
+	  done; \
+	fi; \
+	env RIFT_ROOT=$(TOP_SRC_PATH) python3 rift_env.py
+
+##
+# rule to download and install the non-OSM source code
+#
+modules/core/util/Makefile:
+	wget http://repo.riftio.com/releases/open.riftio.com/4.1.1/ext_4_1_1.tgz
+	tar xzf ext_4_1_1.tgz
+
+rw.ext: modules/core/util/Makefile
+
+
+# Rule to invoke the incremental build
+# This should be invoked after a target that already invoked "make cmake"
+# For example after the "make rw.core.util" is invoked first, one can just
+# invoke "make rw"
+# NOTE: This will not rebuild the external projects in submodules
+##
+rw: rw.ext rw.cmake
+	git rev-parse --abbrev-ref HEAD >$(RIFT_INSTALL)/.git_status
+	git rev-parse HEAD >>$(RIFT_INSTALL)/.git_status
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD)
+
+##
+# Rule to invoke the incremental build
+# This should be invoked after a target that already invoked "make cmake"
+# For example after the "make rw.core.util" is invoked first, one can just
+# invoke "make rw.dammit"
+# NOTE: This will rebuild the external projects in submodules.
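+# Usage sketch: after any target that has already run the cmake step,
+#   make rw.dammit VERBOSE=1
+# rebuilds everything, including the externalproject steps that a plain
+# "make rw" skips.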
+##
+rw.dammit: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.dammit
+
+##
+# Rule to invoke the clean build
+# This should be invoked after a target that already invoked "make cmake"
+# For example after the "make rw.core.util" is invoked first, one can just
+# invoke "make rw.clean_and_rebuild"
+# NOTE: This will remove the current install directory and submodule
+# build directories and build everything from scratch
+##
+rw.clean_and_rebuild: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.clean_and_rebuild
+
+##
+# Rule to generate doxygen documentation
+##
+rw.doxygen: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.doxygen
+
+##
+# Rule to generate dependency graph
+##
+rw.dependency_graph: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.dependency_graph
+
+##
+# Rule to invoke unittest
+##
+rw.unittest: ABORT_ON_TEST_FAILURE=$(call get_abort_on_test_failure)
+rw.unittest: rw.cmake
+	@if [ "$(shell ulimit -c)" == "0" ]; then \
+	  ulimit -S -c unlimited; \
+	fi && \
+	ABORT_ON_TEST_FAILURE=$(ABORT_ON_TEST_FAILURE) $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.unittest
+
+##
+# Rule to invoke long unittests
+##
+rw.unittest_long: ABORT_ON_TEST_FAILURE=$(call get_abort_on_test_failure)
+rw.unittest_long: rw.cmake
+	@if [ "$(shell ulimit -c)" == "0" ]; then \
+	  ulimit -S -c unlimited; \
+	fi && \
+	ABORT_ON_TEST_FAILURE=$(ABORT_ON_TEST_FAILURE) $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.unittest_long
+##
+# Rule to invoke python checks
+##
+rw.pycheck:
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.pycheck
+
+##
+# Rule to invoke coverage target
+##
+rw.coverage: ABORT_ON_TEST_FAILURE=$(call get_abort_on_test_failure)
+rw.coverage: export COVERAGE_BUILD = TRUE
+rw.coverage: rw.cmake
+	@if [ "$(shell ulimit -c)" == "0" ]; then \
+	  ulimit -S -c unlimited; \
+	fi && \
+	ABORT_ON_TEST_FAILURE=$(ABORT_ON_TEST_FAILURE) $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.coverage
+
+##
+# Rule to generate cscope symbols
+##
+rw.cscope:
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.cscope
+
+##
+# Rule to generate pycscope symbols
+##
+rw.pycscope:
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.pycscope
+
+##
+# Rule to generate ctags
+##
+rw.ctags:
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.ctags
+
+##
+# Rule for rw.package
+##
+rw.package: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.package
+
+##
+# Rule for generating build cache
+##
+rw.bcache: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.bcache
+
+##
+# Rule for generating EC2 images
+##
+rw.ec2-image: cmake
+	@if [ "$(NOT_DEVELOPER_BUILD)" != "TRUE" ]; then \
+	  echo; \
+	  echo "ERROR: Images must be built with NOT_DEVELOPER_BUILD=TRUE"; \
+	  exit 1; \
+	fi
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) ec2-image
+
+##
+# Rule for generating KVM images
+##
+rw.kvm-image: cmake
+	@if [ "$(NOT_DEVELOPER_BUILD)" != "TRUE" ]; then \
+	  echo; \
+	  echo "ERROR: Images must be built with NOT_DEVELOPER_BUILD=TRUE"; \
+	  exit 1; \
+	fi
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) kvm-image
+#
+rw.rpmbuild: rw.cmake
+	$(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rpmbuild
+
+
diff --git a/Makefile.env b/Makefile.env
new file mode 100644
index 0000000..9642724
--- /dev/null
+++ b/Makefile.env
@@ -0,0 +1,50 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 05/22/2014
+#
+
+##
+# This makefile sets up environment variables
+##
+
+##
+# Function to find the top of the RiftWare distribution tree
+##
+
+find_upward = $(word 1, $(shell while [ `pwd` != 
/ ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# Set a variable for the top level directory
+##
+
+top.src.path := $(abspath $(dir $(makefile.top)))
+top.build.path := $(abspath $(top.src.path)/.build)
+top.install.path := $(abspath $(top.src.path)/.install)
+
+##
+# Set the LD_LIBRARY_PATH to include the local install paths
+##
+export LD_LIBRARY_PATH := $(top.install.path)/usr/local/lib:$(top.install.path)/usr/local/pyang-1.4.1/lib:$(top.install.path)/usr/lib:$(top.install.path)/usr/lib64:$(top.install.path)/usr/lib/rift/plugins
+
+##
+# Set the PKG_CONFIG_PATH to include the local install paths
+##
+export PKG_CONFIG_PATH := $(top.install.path)/usr/lib/pkgconfig:$(top.install.path)/usr/lib64/pkgconfig:$(top.install.path)/usr/share/pkgconfig
+
+##
+# Set the PATH to include the local install paths
+##
+export PATH := $(top.install.path)/usr/local/bin:$(top.install.path)/usr/local/pyang-1.4.1/bin:$(top.install.path)/usr/bin:$(top.install.path)/usr/sbin:${PATH}
+
+##
+# Set the GI_TYPELIB_PATH to include the local install paths
+##
+export GI_TYPELIB_PATH := $(top.install.path)/usr/lib/girepository-1.0:$(top.install.path)/usr/lib/rift/girepository-1.0
+
+##
+# Needed to find the gir files
+##
+export XDG_DATA_DIRS := $(top.install.path)/usr/share:$(XDG_DATA_DIRS)
diff --git a/Makefile.top b/Makefile.top
new file mode 100644
index 0000000..c28cc10
--- /dev/null
+++ b/Makefile.top
@@ -0,0 +1,175 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/18/2013
+#
+
+##
+# Set a variable for the top level directory
+##
+
+top.src.path := $(abspath $(dir $(makefile.top)))
+top.build.path := $(abspath $(top.src.path)/.build)
+top.install.path := $(abspath $(top.src.path)/.install)
+
+##
+# From the top level directory and the current directory, determine the module directory
+##
+
+module.src.path := $(abspath $(dir $(call find_upward, "manifest")))
+module.src.subdir := $(subst $(top.src.path),,$(module.src.path))
+ifeq ($(wildcard $(top.build.path)/$(module.src.subdir)/src/*-build),)
+  module.build.path := $(abspath $(top.build.path)/$(module.src.subdir))
+else
+  module.build.path = $(abspath $(wildcard $(top.build.path)/$(module.src.subdir)/src/*-build)/)
+endif
+
+##
+# From the module directory, determine the final build directory
+##
+
+here.src.path := $(abspath $(PWD))
+here.src.subdir := $(subst $(module.src.path),,$(here.src.path))
+here.build.path := $(abspath $(module.build.path)/$(here.src.subdir))
+
+makefile.env := $(call find_upward, "Makefile.env")
+include $(makefile.env)
+
+##
+# Define a variable for newline
+##
+
+define newline
+
+
+endef
+
+##
+# Default rule is to invoke the "compile" target
+##
+
+all:: compile
+
+##
+# Rule to clean from a particular directory
+##
+
+clean::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS)
+endif
+
+##
+# Rule to compile from a particular directory
+##
+
+compile::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS)
+endif
+
+##
+# Rule to install from a particular directory
+##
+
+install::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd 
$(here.build.path) && $(MAKE) $(MAKECMDGOALS)
+endif
+
+##
+# Rule to create a symbolic link to the build directory for a particular directory
+##
+
+link::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	ln -s $(here.build.path) .build
+endif
+
+
+##
+# This is a VERY temporary rule to get rid of things in .install that OVERRIDE the local things
+# The correct fix for this is in the CMakefiles so we do not need to remove these in the first place
+##
+
+localize:
+	rm -f $(top.src.path)/.install/usr/include/*rwsched*
+	rm -f $(top.src.path)/.install/usr/lib/*rwsched*
+
+##
+# Rule to invoke ctest from a particular directory
+##
+
+test::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd $(here.build.path) && ctest --verbose
+endif
+
+unittest::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS)
+endif
+
+##
+# Rule to invoke cmake
+##
+
+cmake::
+	echo $(top.build.path)
+	rm -rf $(top.build.path)
+	mkdir $(top.build.path)
+	cd $(top.build.path) && cmake ..
+	cd $(top.build.path) && $(MAKE)
+
+
+##
+# Generic code to run rwmain
+##
+
+run_rwmain::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS)
+endif
+
+run_ravcs_mtest::
+ifeq ($(wildcard $(here.build.path)),)
+	$(error ${newline}The build directory does not exist:${newline}-----> ${here.build.path})
+else
+	cd $(here.build.path) && $(MAKE) $(MAKECMDGOALS)
+endif
+
+##
+# Make rule to display help for all targets
+##
+
+help::
+	@echo '======================================================================'
+	@echo 'Makefile targets - the default target is "compile"'
+	@echo '======================================================================'
+	@echo ''
+	@echo ' compile         - compile for this directory (default target)'
+	@echo ''
+	@echo ' clean           - invoke clean in the current sub-directory'
+	@echo ' cmake           - invoke cmake for the module this directory is in'
+	@echo ' link            - create symbolic links to the cmake .build directory for the current sub-directory'
+	@echo ' test            - invoke the cmake "test" target in the current sub-directory'
+	@echo ' run_rwmain      - invoke the cmake "run_rwmain" target in the current sub-directory'
+	@echo ' run_ravcs_mtest - invoke the cmake "run_ravcs_mtest" target in the current sub-directory'
+
+
+
diff --git a/RELEASE b/RELEASE
new file mode 100644
index 0000000..cb1d6ed
--- /dev/null
+++ b/RELEASE
@@ -0,0 +1 @@
+4.1.1.0
diff --git a/RIFTWARE_COMPILATION_LICENSE b/RIFTWARE_COMPILATION_LICENSE
new file mode 100644
index 0000000..5ceb211
--- /dev/null
+++ b/RIFTWARE_COMPILATION_LICENSE
@@ -0,0 +1,244 @@
+License
+-------
+RIFT.ware is a compilation of software packages, each distributed under
+its own license. The compilation itself is released under the Apache 2.0
+license (See copy below OR http://www.apache.org/licenses/LICENSE-2.0).
+However, the RIFT.ware compilation license does not supersede the
+licenses of code and content contained in RIFT.ware.
+
+
+Source Availability
+-------------------
+A complete machine-readable copy of the source code corresponding to portions
+of the accompanying RIFT.ware release is available upon request.
This offer +is valid to anyone in receipt of this information and shall +expire three years following the date of the final distribution of this +release by RIFT.IO, Inc. + +To obtain such source code, send a check or money order in the amount of +US $20.00 to: +Vice President, Intellectual Property +RIFT.IO, Inc. +77 South Bedford Street Suite 450 +Burlington, MA 01803 USA + +Please write "source for RIFT.ware $VERSION" +(replacing $VERSION for the version of RIFT.ware you want the source for) +in the memo line of your payment. + +You may also access a copy of this source code at: +https://open.riftio.com/download + + +Export Regulations +------------------ +By downloading or installing RIFT.ware software, you acknowledge that you +understand all of the following: RIFT.ware software and technical information +may be subject to the U.S. Export Administration Regulations (the EAR) and +other U.S. and foreign laws and may not be exported, re-exported or +transferred (a) to any country listed in Country Group E:1 in Supplement +No. 1 to part 740 of the EAR (currently, Cuba, Iran, North Korea, Sudan +& Syria); (b) to any prohibited destination or to any end user who has been +prohibited from participating in U.S. export transactions by any federal +agency of the U.S. government; or (c) for use in connection with the design, +development or production of nuclear, chemical or biological weapons, or +rocket systems, space launch vehicles, or sounding rockets, or unmanned +air vehicle systems. You may not download RIFT.ware software or technical +information if you are located in one of these countries or otherwise subject +to these restrictions. You may not provide RIFT.ware software or technical +information to individuals or entities located in one of these countries or +otherwise subject to these restrictions. You are also responsible for +compliance with foreign law requirements applicable to the import, export +and use of RIFT.ware software and technical information. + + + + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. diff --git a/bin/build_ladder.sh b/bin/build_ladder.sh new file mode 100755 index 0000000..9aa3a02 --- /dev/null +++ b/bin/build_ladder.sh @@ -0,0 +1,228 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# /bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/03 +# +# The build ladder gets the topological sorted list of submodules and builds +# the submodules one at a time using a custom build cache location. +# +# REQUIRES ALL SUBMODULES IN UNINITIALIZED STATE +# +# 1. Create an empty directory to use as the build cache location. +# 3. Generate list of sorted submodules (using dependency_sort.sh). +# 4. For each submodule in the sorted list: +# 1. Build only that submodule (make rw.submodule SUBMODULE= BUILDCACHE_DIR=) +# 2. If the submodule build fails, stop. +# 3. Make submodule package. +# 4. Make submodule build cache. +# 5. Deinitialize submodule. + +# These steps should verify that all submodule dependencies are correct and the +# artifact packaging is complete. 
If any submodule fails, then the required
+# dependencies are somehow incomplete or incorrect.
+
+set -o nounset
+
+THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Set some vars if not defined by env variables. Used for testing.
+DEPENDENCY_SORT_BIN=${DEPENDENCY_SORT_BIN:-"$THIS_DIR/dependency_sort.py"}
+MAKE_CMD=${MAKE_CMD:-"make VERBOSE=1"}
+
+function verify_cwd_at_root(){
+    if [ ! -f ".gitmodules.deps" ]; then
+        echo "ERROR: This script should be run at the top-level"
+        exit 1
+    fi
+}
+
+function verify_submodules_uninitialized(){
+    while read line; do
+        if [[ $line != -* ]]; then
+            echo "ERROR: Found an initialized submodule: $line"
+            exit 1
+        fi
+    done < <(git submodule)
+}
+
+function verify_single_submodule_initialized(){
+    local submodule=$1
+    # Read the submodule list via process substitution so that "exit"
+    # aborts the whole script instead of a pipeline subshell.
+    while read line; do
+        if [[ $line != -* ]]; then
+            if [[ $line != *$submodule* ]]; then
+                echo "ERROR: Found an initialized submodule: $line"
+                exit 1
+            fi
+        fi
+    done < <(git submodule)
+}
+
+# Capture stdout to store cache directory.
+function generate_build_cache_dir(){
+    local dir
+    dir=$(mktemp -d -t "build_ladder_XXX") || exit 1
+
+    echo $dir
+}
+
+function get_sorted_submodule_list(){
+    local sorted_submodules
+    sorted_submodules=$($DEPENDENCY_SORT_BIN)
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Could not get list of sorted submodules."
+        exit 1
+    fi
+
+    echo "$sorted_submodules"
+}
+
+# Log the command and run it.
+function log_and_run_cmd(){
+    echo "INFO: Running command: $@"
+    $@
+}
+
+##
+# Build the submodule using the top-level rw.submodule target.
+#
+# Arguments:
+#   $1 - submodule
+#   $2 - build cache location
+##
+function submodule_build(){
+    local submodule="$1"
+    local build_cache="$2"
+
+    # Build only this submodule
+    build_cmd="$MAKE_CMD rw.submodule SUBMODULE=$submodule BUILDCACHE_DIR=$build_cache"
+
+    log_and_run_cmd $build_cmd
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Building submodule '$submodule' failed. (command: $build_cmd)"
+        exit 1
+    fi
+
+    verify_single_submodule_initialized "$submodule" || exit 1
+}
+
+
+##
+# Package the submodule using the top-level rw.package target.
+#
+# Arguments:
+#   $1 - submodule
+##
+function submodule_package(){
+    local submodule="$1"
+
+    # SUBMODULE argument is NOT necessary but doesn't hurt to include it.
+    local package_cmd="$MAKE_CMD rw.package SUBMODULE=$submodule"
+
+    log_and_run_cmd $package_cmd
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Packaging submodule '$submodule' failed. (command: $package_cmd)"
+        exit 1
+    fi
+}
+
+##
+# Create the build cache using the packaged submodule artifacts
+#
+# Arguments:
+#   $1 - submodule
+#   $2 - build cache location
+##
+function submodule_bcache(){
+    local submodule="$1"
+    local build_cache="$2"
+
+    # SUBMODULE argument is NOT necessary.
+    local bcache_cmd="$MAKE_CMD rw.bcache SUBMODULE=$submodule BUILDCACHE_DIR=$build_cache"
+
+    BCACHE_IGNORE_FAILED_SUBMODULE_TESTS=1 log_and_run_cmd $bcache_cmd
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Bcaching submodule '$submodule' failed. (command: $bcache_cmd)"
+        exit 1
+    fi
+}
+
+##
+# Deinitialize the submodule so submodule builds past this point will not have
+# access to the submodule's sources but only the pre-built artifacts.
+#
+# Arguments:
+#   $1 - submodule
+##
+function submodule_deinit(){
+    local submodule="$1"
+
+    local deinit_cmd="git submodule deinit $submodule"
+
+    $deinit_cmd
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Deinitializing submodule failed. (command: $deinit_cmd)"
+        exit 1
+    fi
+}
+
+##
+# Make clean to clear out everything previously generated in the workspace.
+# This ensures only artifacts from build cache are retrieved. +## +function make_clean(){ + [ -d .build ] && rm -rf .build + [ -d .install ] && rm -rf .install +} + +## +# +## +function make_unittests(){ + make rw.unittest ABORT_ON_TEST_FAILURE=0 VERBOSE=1 +} + +verify_cwd_at_root +verify_submodules_uninitialized || exit 1 + +build_cache=$(generate_build_cache_dir) +# Set up a trap to automatically clean up build cache directory on exit or catchable signal +trap "[ -d $build_cache ] && rm -rf $build_cache" EXIT SIGINT SIGTERM + +echo "INFO: Created new build cache ($build_cache)" + +sorted_submodules=$(get_sorted_submodule_list) +echo "INFO: Got list of sorted submodules ($sorted_submodules)" + +# Convert the string into an array using the default IFS of ' ' +read -a sorted_submodules_array <<< "$sorted_submodules" + +for submodule in "${sorted_submodules_array[@]}"; do + if [ $submodule == "modules/ext/lib" ]; then + continue + fi + + submodule_build "$submodule" "$build_cache" || (submodule_deinit "$submodule"; exit 1) + make_unittests || echo "Unit tests in submodule $submodule failed, continuing." + submodule_package "$submodule" || (submodule_deinit "$submodule"; exit 1) + submodule_bcache $submodule "$build_cache" || (submodule_deinit "$submodule"; exit 1) + submodule_deinit "$submodule" || exit 1 + + make_clean +done + +echo "INFO: Build ladder was successful!" +exit 0 \ No newline at end of file diff --git a/bin/catchsegv.sh b/bin/catchsegv.sh new file mode 100755 index 0000000..e82bc82 --- /dev/null +++ b/bin/catchsegv.sh @@ -0,0 +1,151 @@ +#! /bin/sh +# Copyright (C) 1998,1999,2001,2003,2004,2006,2007,2008,2009 +# Free Software Foundation, Inc. +# This file is part of the GNU C Library. +# Contributed by Ulrich Drepper , 1998. + +# The GNU C Library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. + +# The GNU C Library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. + +# You should have received a copy of the GNU Lesser General Public +# License along with the GNU C Library; if not, write to the Free +# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +# 02111-1307 USA. + +if test $# -eq 0; then + echo "$0: missing program name" >&2 + echo "Try \`$0 --help' for more information." >&2 + exit 1 +fi + +prog="$1" +shift + +if test $# -eq 0; then + case "$prog" in + --h | --he | --hel | --help) + echo 'Usage: catchsegv PROGRAM ARGS...' + echo ' --help print this help, then exit' + echo ' --version print version number, then exit' + echo "For bug reporting instructions, please see:" + echo "." + exit 0 + ;; + --v | --ve | --ver | --vers | --versi | --versio | --version) + echo 'catchsegv (Ubuntu EGLIBC 2.11.1-0ubuntu7.8) 2.11.1' + echo 'Copyright (C) 2009 Free Software Foundation, Inc. +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +Written by Ulrich Drepper.' + exit 0 + ;; + *) + ;; + esac +fi + +segv_output=`mktemp ${TMPDIR:-/tmp}/segv_output.XXXXXX` || exit + +# Redirect stderr to avoid termination message from shell. 
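+# (Sketch of the redirections that follow: fd 3 is first made a copy of the
+# original stderr; the subshell's own stderr then goes to /dev/null, which is
+# what suppresses the shell's "Segmentation fault" notice; the program gets
+# its real stderr back via 2>&3 before fd 3 is closed with 3>&-.)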
+(exec 3>&2 2>/dev/null +LD_PRELOAD=${LD_PRELOAD:+${LD_PRELOAD}:}/\$LIB/libSegFault.so \ +SEGFAULT_USE_ALTSTACK=1 \ +SEGFAULT_OUTPUT_NAME=$segv_output \ +"$prog" ${1+"$@"} 2>&3 3>&-) +exval=$? + +# Check for output. Even if the program terminated correctly it might +# be that a minor process (clone) failed. Therefore we do not check the +# exit code. +if test -s "$segv_output"; then + # The program caught a signal. The output is in the file with the + # name we have in SEGFAULT_OUTPUT_NAME. In the output the names of + # functions in shared objects are available, but names in the static + # part of the program are not. We use addr2line to get this information. + case $prog in + */*) ;; + *) + old_IFS=$IFS + IFS=: + for p in $PATH; do + test -n "$p" || p=. + if test -f "$p/$prog"; then + prog=$p/$prog + break + fi + done + IFS=$old_IFS + ;; + esac + sed '/Backtrace/q' "$segv_output" + sed '1,/Backtrace/d' "$segv_output" | sed '/Memory map:/q' | + (while read line; do + if echo -n "$line" | grep -Fq "$prog"; then + exe=`echo -n "$line" | sed 's/([^(]\{1,\})//' | sed 's/\[0x[[:xdigit:]]\{1,\}\]//'` + exe=`readlink --canonicalize-existing "$exe" 2>/dev/null` + if test $? -eq 0; then + if test -f "$exe" -a -e "$exe"; then + addr=`echo -n "$line" | sed 's/.*\[\(0x[[:xdigit:]]\{1,\}\)\]$/\1/'` + addr2line=`addr2line --exe="$exe" --functions --demangle $addr 2>/dev/null` + if test $? -eq 0; then + if echo -n "$addr2line" | grep -Eq '^\?\?:0'; then + : + else + func=`echo "$addr2line" | head --lines=1` + fileline=`echo "$addr2line" | tail --lines=1` + file=`echo -n "$fileline" | sed 's/:[[:digit:]]\{1,\}$//'` + if test -f "$file"; then + line="$fileline: $func" + fi + fi + fi + fi + fi + else + exe=`echo -n "$line" | sed 's/([^(]\{1,\})//' | sed 's/\[0x[[:xdigit:]]\{1,\}\]//'` + exe=`readlink --canonicalize-existing "$exe" 2>/dev/null` + if test $? -eq 0; then + if test -f "$exe" -a -e "$exe"; then + addr=`echo -n "$line" | sed 's/.*\[\(0x[[:xdigit:]]\{1,\}\)\]$/\1/'` + addr=`printf '%d' $addr 2>/dev/null` + if test $? -eq 0; then + mmap=`grep -F "$exe" "$segv_output" | grep -E ' 0+ '` + if test $? -eq 0; then + baseaddr=`echo -n "$mmap" | grep -Eo '^[[:xdigit:]]+'` + baseaddr=`printf '%d' 0x$baseaddr 2>/dev/null` + if test $? -eq 0; then + addr=`expr $addr - $baseaddr` + addr=`printf '%x' $addr` + addr2line=`addr2line --exe="$exe" --functions --demangle $addr 2>/dev/null` + if test $? -eq 0; then + if echo -n "$addr2line" | grep -Eq '^\?\?:0'; then + : + else + func=`echo "$addr2line" | head --lines=1` + fileline=`echo "$addr2line" | tail --lines=1` + file=`echo -n "$fileline" | sed 's/:[[:digit:]]\{1,\}$//'` + if test -f "$file"; then + line="$fileline: $func" + fi + fi + fi + fi + fi + fi + fi + fi + fi + echo "$line" + done) + sed '1,/Memory map:/d' "$segv_output" +fi +rm -f "$segv_output" + +exit $exval diff --git a/bin/dependency_parser.py b/bin/dependency_parser.py new file mode 100755 index 0000000..ad1cf74 --- /dev/null +++ b/bin/dependency_parser.py @@ -0,0 +1,219 @@ +#!/usr/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+# This program parses the dependency digraph to get the
+# list of dependencies recursively
+#
+# This is used by cmake to get the submodule dependencies
+# CMake has terrible support for recursive functions,
+# and hence this is written in python
+
+import sys
+import re
+import os
+from hashlib import sha1
+import argparse
+import shlex
+import subprocess
+import StringIO
+
+THIS_DIR = os.path.dirname(os.path.realpath(__file__))
+
+def get_supermodule_hash():
+    """ Call the generate_supermodule_hash.sh script in the current directory.
+    This script prints out a hash that represents all files in the superproject
+    that could affect the output of what a submodule produces.
+    """
+    supermodule_hash_cmd = os.path.join(THIS_DIR, "generate_supermodule_hash.sh")
+    supermodule_hash = subprocess.check_output(supermodule_hash_cmd, shell=True)
+
+    return supermodule_hash
+
+def get_dependencies(dep_file, submodule):
+    """Parse the dependency file and determine all the dependencies
+    recursively.
+    """
+    deps = []
+    # Rewind the file so every call (including recursive ones) scans it
+    # from the beginning
+    dep_file.seek(0)
+
+    for line in dep_file.readlines():
+        line = line.strip()
+        regex = r'"' + re.escape(submodule) + '"[ ]*->[ ]*"(.*)"'
+        m = re.match(regex, line)
+        if not m:
+            continue
+
+        if m.group(1) not in deps:
+            deps.append(m.group(1))
+            temp = get_dependencies(dep_file, m.group(1))
+            for i in temp:
+                if i not in deps:
+                    deps.append(i)
+
+    return deps
+
+def reverse_dependency_file(dep_file):
+    """ Create an in-memory reversed dependency file
+
+    For all lines that match the "sub_a" -> "sub_b" regex,
+    reverse the dependency such that "sub_b" -> "sub_a". This allows
+    us to reverse the dependency search to get dependents.
+    """
+
+    reversed_file = StringIO.StringIO()
+    for line in dep_file:
+        line = line.strip()
+        if line.startswith("//"):
+            continue
+
+        regex = r'"(.*)"[ ]*->[ ]*"(.*)"'
+        m = re.match(regex, line)
+        if m is None:
+            reversed_file.write(line + "\n")
+            continue
+
+        reversed_line = '"{}" -> "{}"\n'.format(m.group(2), m.group(1))
+        reversed_file.write(reversed_line)
+
+    reversed_file.flush()
+
+    reversed_file.seek(0)
+
+    return reversed_file
+
+
+def submodule_hash(filename, supermodule_hash, submodule, outdir):
+    """This function calculates the submodule hash to be
+    used when caching the artifacts. The submodule hash
+    depends on the hash of the submodule, as well as on the
+    hashes of all the submodules that the current submodule
+    depends on. For example, if a submodule B depends on A,
+    the hash for submodule B should include the hashes
+    of both submodules A and B. In other words, if submodule
+    A changes, submodule B needs recompilation even though
+    the contents of submodule B didn't change.
+
+    Since a submodule's output also depends on cmake/build files in the supermodule,
+    use a supermodule hash when calculating the submodule hash. This ensures that
+    the submodule caches are flushed when a file that could affect its output is changed.
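+
+    For illustration, if a hypothetical modules/foo depended only on
+    modules/core/util, the foo.hash file written below would hold one
+    "git submodule status" line per module (hypothetical SHAs):
+
+        0a1b2c3d modules/core/util
+        4e5f6a7b modules/foo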
+ """ + deps = get_dependencies(filename, submodule) + out_fname = outdir + "/" + submodule.replace("/", "_").replace("modules_", "") + ".hash" + out_f = open(out_fname, "w") + + mylist = deps + mylist.append(submodule) + for i in mylist: + cmd = "git submodule status " + i + args = shlex.split(cmd) + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + out_f.write(out.split()[0].lstrip(" +-") + " " + out.split()[1] + "\n") + + out_f.close() + filesize_bytes = os.path.getsize(out_f.name) + s = sha1() + s.update(("blob %u\0" % filesize_bytes).encode('utf-8')) + + with open(out_fname, 'rb') as f: + s.update(f.read()) + + # Finally, input the supermodule hash into the hash function + s.update(supermodule_hash) + + return s.hexdigest() + +def get_output_dir(): + if os.path.exists(".build"): + return ".build" + + return "/tmp" + + +def main(): + ## + # Command line argument specification + ## + desc= """This script helps in getting the submodule dependency information""" + parser = argparse.ArgumentParser(description=desc) + + parser.add_argument('-o', '--output-dir', + default=get_output_dir(), + help='Directory for the output files (default: %(default)s)' + ) + + parser.add_argument('-f', '--dependency-file', + type=argparse.FileType('r'), + required=True, + default='.gitmodules.deps', + help='Name of the file with dependencies in DAG format (default: %(default)s)' + ) + + parser.add_argument('-s', '--submodule', + default='modules/core/util', + help='Name of the submodule (default: %(default)s' + ) + + parser.add_argument('-d', '--print-dependency', + action='store_true', + help='Print the dependency information for the submodule' + ) + + parser.add_argument('--print-dependents', + action='store_true', + help='Print the dependent information for the submodule' + ) + + parser.add_argument('-x', '--print-hash', + dest='print_hash', + action='store_true', + help='Print the combined hash for the submodule and its dependencies' + ) + + cmdargs = parser.parse_args() + + supermodule_hash = get_supermodule_hash() + + if cmdargs.print_dependency: + deps = get_dependencies(cmdargs.dependency_file, cmdargs.submodule) + # output as a list for cmake + for i in deps: + sys.stdout.write(i+";") + + elif cmdargs.print_dependents: + # In order to get the dependents (instead of dependencies), reverse + # the dependency file (a -> b becomes b -> a) and reuse the + # get_dependencies() function. + reverse_dep_file = reverse_dependency_file(cmdargs.dependency_file) + deps = get_dependencies(reverse_dep_file, cmdargs.submodule) + # output as a list for cmake + for i in deps: + sys.stdout.write(i+";") + + if cmdargs.print_hash: + h=submodule_hash(cmdargs.dependency_file, supermodule_hash, cmdargs.submodule, cmdargs.output_dir) + sys.stdout.write(h) + + +if __name__ == "__main__": + main() diff --git a/bin/dependency_sort.py b/bin/dependency_sort.py new file mode 100755 index 0000000..c8bddb3 --- /dev/null +++ b/bin/dependency_sort.py @@ -0,0 +1,105 @@ +#!/usr/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# +# +# This program takes as arguments a list of submodules and a submodule +# dependency file then prints the submodules in topological order. + +from os.path import join, dirname, relpath, realpath +from dependency_parser import get_dependencies +import argparse +import re + +SCRIPT_DIR=dirname(realpath(__file__)) +ROOT_DIR=join(SCRIPT_DIR, "..") +GITMODULES_DEPS_FILEPATH=relpath(join(ROOT_DIR, ".gitmodules.deps")) +GITMODULES_FILEPATH=join(ROOT_DIR, ".gitmodules") + + +def get_all_submodules(): + submodules = [] + with open(GITMODULES_FILEPATH) as gitmodules_hdl: + for line in gitmodules_hdl: + match = re.match('\[submodule "(?P.*)"\]', line) + if not match: + continue + + submodule = match.group("submodule") + submodules.append(submodule) + + return submodules + +def generate_submodules_depends(submodules, dependency_file): + submodule_depends = [] + for submodule in submodules: + depends = get_dependencies(dependency_file, submodule) + entry = (submodule, depends) + submodule_depends.append(entry) + + return submodule_depends + + +def sort_submodules(submodule_depends): + sorted_depends = [] + unsorted_depends = dict(submodule_depends) + + while unsorted_depends: + acyclic = False + for node, edges in unsorted_depends.items(): + for edge in edges: + if edge in unsorted_depends: + break + else: + acyclic = True + del unsorted_depends[node] + sorted_depends.append((node, edges)) + + assert acyclic + + return [submodule[0] for submodule in sorted_depends] + + +def main(): + ## + # Command line argument specification + ## + desc= """Submodule Topological Sort. Submodules are written to stdout seperated by spaces.""" + parser = argparse.ArgumentParser(description=desc) + + # User can provide an alternate dependency file. Default is the .gitmodules.dep at the root of the repo. + parser.add_argument('-f', '--dependency-file', dest='dependency_file', + type=argparse.FileType('r'), default=GITMODULES_DEPS_FILEPATH, + help='Name of the file with dependencies in DAG format (default: %(default)s)') + + # User can provide a list of submodules. Default is all of the submodules. + parser.add_argument('-s', '--submodule_list', dest='submodule_list', default=get_all_submodules(), + choices=get_all_submodules(), type=str, nargs='+', + help='Names of the submodule to sort (default: %(default)s') + + parsed_args = parser.parse_args() + + submodule_depends = generate_submodules_depends(parsed_args.submodule_list, parsed_args.dependency_file) + sorted_submodules = sort_submodules(submodule_depends) + + # print the sorted list of submodules seperated by spaces. + print " ".join(sorted_submodules) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/bin/dev.sh b/bin/dev.sh new file mode 100755 index 0000000..9817f8b --- /dev/null +++ b/bin/dev.sh @@ -0,0 +1,165 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
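+#
+# This script converts .install symlinks that point into .build into real
+# file copies ("undev") and back again ("redev"). Each conversion is logged
+# to .install/.undev as a "dest@src" pair, e.g. (illustrative paths only):
+#
+#   /home/me/ws/.install/usr/bin/foo@/home/me/ws/.build/core/util/src/util-build/foo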
+# +# +# +# +# + +root=$(realpath $(dirname $0)/../) +log=${root}/.install/.undev +verbose=false + +undev() { + local found_dirs=false + local links=$(find ${root}/.install/ -type l) + local f + + for f in ${links}; do + fp=$(readlink ${f}) + if [[ ${fp} = *.build* ]]; then + if [ -d ${fp} ]; then + echo "ERROR: ${f} points to a directory" + found_dirs=true + fi + fi + + if [[ ${fp} = *@* || ${f} = *@* ]]; then + echo "Cannot deal with @ in paths" + exit 1 + fi + done + + if ${found_dirs}; then + echo "Cannot proceed with linked directories" + exit 1 + fi + + if [ -s ${log} ]; then + echo "Previous undev found, appending" + echo > ${log} + fi + + for f in ${links}; do + fp=$(readlink ${f}) + if [[ ${fp} = *.build* ]]; then + rm -f ${f} + + pushd $(dirname ${f}) >/dev/null + cp -p ${fp} ${f} + popd >/dev/null + + echo "${f}@${fp}" >> ${log} + if ${verbose}; then + echo "Replacing ${f} with ${fp}" + fi + fi + done + + echo 'Converted all symlinks to real files' +} + +redev() { + local line + local src + local dest + + if [ ! -s "${log}" ]; then + echo "Cannot redev something that was not undeved" + exit 1 + fi + + for line in $(<${log}); do + dest=${line%@*} + src=${line#*@} + + rm -f ${dest} + ln -s ${src} ${dest} + if ${verbose}; then + echo "Linking ${src} at ${dest}" + fi + done + + rm -f ${log} +} + +_chksum() { + echo $(md5sum ${1} | cut -d' ' -f1) +} + +verify() { + local line + local src + local dest + + if [ ! -s "${log}" ]; then + echo "Nothing to verify" + exit 1 + fi + + for line in $(<${log}); do + dest=${line%@*} + src=${line#*@} + + pushd $(dirname ${dest}) >/dev/null + if [ "$(_chksum ${dest})" != "$(_chksum ${src})" ]; then + echo "${dest} does not match ${src}" + fi + popd > /dev/null + done +} + +usage() { + echo "$(basename $0) ARGUMENTS" + echo + echo "ARGUMENTS:" + echo " -r,--redev re-dev the install tree" + echo " -u,--undev un-dev the install tree" + echo " -V,--verify verify an un-deved tree" + echo " -v,--verbose verbose logging" + echo " -h,--help this screen" +} + +action= + +while [ $# -gt 0 ]; do + case $1 in + -r|--redev) + action='redev' + ;; + -u|--undev) + action='undev' + ;; + -V|--verify) + action='verify' + ;; + -v|--verbose) + verbose=true; + ;; + -h|--help) + usage + exit 0 + ;; + esac + shift +done + +if [ -z "${action}" ]; then + echo "No action specified" + exit 1 +fi + +${action} \ No newline at end of file diff --git a/bin/extract_rpm.sh b/bin/extract_rpm.sh new file mode 100755 index 0000000..7cc3cda --- /dev/null +++ b/bin/extract_rpm.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# +# +# + +# this script extracts the rpm files +# Usage: +# extract_rpm.sh + +function extract_rpm() +{ + rpm --dbpath $rootdir/usr/lib/rpm --relocate /=$rootdir --nodeps -ivh $rpmfile +} + +function strip_rpaths() +{ + rpm -qlp $rpmfile | while read line; do + # If the RPM command failed (no files or whatever) then get out. 
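+        # (Intent: bail out if "rpm -qlp" on the left side of the pipe failed.
+        # Note the loop body runs in the pipe's subshell, so this PIPESTATUS
+        # guard is best-effort rather than a reliable check of rpm itself.)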
+ if [ ${PIPESTATUS[0]} -ne 0 ]; then + return + fi + + # dest_file will replace the / with (install path) + dest_file=$rootdir/${line#"/"} + if [ ! -e "$dest_file" ]; then + echo "****Cannot find dest file: $dest_file" + continue + fi + + # Skip installed directories + if [ ! -f "$dest_file" ] ; then + continue + fi + + # Get the list of rpaths for this particular file. + # If the command fails, then move on to the next file + file_rpath=$(chrpath --list "$dest_file" 2>/dev/null) + if [ $? -ne 0 ]; then + continue + fi + + # Extract the beginning of chrpath --list command + # that has no value + rpath_string=${file_rpath##*RPATH=} + + # If there is an empty RPATH then move on to next file + if [ "$rpath_string" == "" ]; then + continue + fi + + # Split the RPATH into the array of directories + IFS=':' read -a rpath_array <<< "$rpath_string" + + for index in "${!rpath_array[@]}" + do + rpath_element=${rpath_array[index]} + # If this rpath element contains a .install then readjust it to match + # the current install path. + if [[ $rpath_element == *".install"* ]]; then + unset "rpath_array[index]" + # This logic below is to replace the .install path with the workspace's + # .install path. Unfortunately becuase of the chrpath limitation of the + # replacement rpath being <= source rpath, it is difficult to pull off. + # + #cache_install_path=${rpath_element%\.install*}".install" + #new_rpath_element=${rpath_element//$cache_install_path/$rootdir} + #rpath_array[index]=$new_rpath_element + fi + + # If the rpath entry contains a .build directory then strip it out completely + if [[ $rpath_element == *".build"* ]]; then + unset "rpath_array[index]" + fi + done + + # Reassemble the rpath string + new_rpath_string=$(IFS=$':'; echo "${rpath_array[*]}") + + # Replace the existing rpath with our newly contructed one. + chrpath --replace "$new_rpath_string" $dest_file + if [ $? -ne 0 ]; then + echo "ERROR: ***Failed to replace rpath in $dest_file****" + echo "****************************************************" + echo "Please see RIFT-3498. If the chrpath fails due to path length issues" + echo "A solution is to increase Jenkins/CI build path length" + echo "or shorten your workspace path length." + exit 1 + fi + + done +} + +# Make an RPM database in this directory +rootdir=$1 +# rm -rf $rootdir +mkdir -p $rootdir +rpm --initdb --dbpath $rootdir/usr/lib/rpm + +# Set which rpm file to work on +rpmfile=$2 + +# Extract the RPM file +extract_rpm + +strip_rpaths \ No newline at end of file diff --git a/bin/generate_descriptor_pkg.sh b/bin/generate_descriptor_pkg.sh new file mode 100755 index 0000000..1cbdbdc --- /dev/null +++ b/bin/generate_descriptor_pkg.sh @@ -0,0 +1,38 @@ +#! /usr/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +# +# Author(s): Anil Gunturu +# Creation Date: 2015/10/09 +# +# This shell script is used to create a descriptor package +# The main functions of this script include: +# - Generate checksums.txt file +# - Generate a tar.gz file + +# Usage: generate_descriptor_pkg.sh + +basedir=${1%/} +dir=${2%/} +cd ${basedir}/${dir} +rm -rf checksums.txt +find * -type f | + while read file; do + md5sum $file >> checksums.txt + done +cd .. +tar -zcvf ${dir}.tar.gz ${dir} --remove-files \ No newline at end of file diff --git a/bin/generate_protopy.sh b/bin/generate_protopy.sh new file mode 100755 index 0000000..08eec24 --- /dev/null +++ b/bin/generate_protopy.sh @@ -0,0 +1,38 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# /bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/03 +# +# Generate all .py implementations of .proto files using the +# protoc compiler. Temporary solution until integrating +# into build process is solved. This must be run after +# a make. + +RIFT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +RIFT_ROOT="${RIFT_ROOT%/bin}" + +INSTALL_DIR="$RIFT_ROOT/.install" +PROTO_DIR="$INSTALL_DIR/usr/data/proto" +PROTOC=$INSTALL_DIR/usr/bin/protoc + +cd $PROTO_DIR + +find . -name "*.proto" | while read line; do + $PROTOC --python_out=. $line +done \ No newline at end of file diff --git a/bin/generate_supermodule_hash.sh b/bin/generate_supermodule_hash.sh new file mode 100755 index 0000000..aa79839 --- /dev/null +++ b/bin/generate_supermodule_hash.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# +# Author(s): Austin Cormier +# Creation Date: 2015/01/26 +# +# This script generates a supermodule hash which is used as part of +# the submodule hash. This ensures that when any of the following +# files/folders change in the supermodule all of the submodule +# build caches are flushed. 
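+#
+# Conceptually the script does something equivalent to (illustrative only;
+# the real file list is read into CACHE_FILE_FIND_LIST below):
+#
+#   find cmake bin -type f | sort | xargs cat | sha1sum
+#
+# so that a change to shared build machinery flushes every submodule cache.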
+ +cd "$(dirname ${BASH_SOURCE[0]})" + +read -r -d '\n' CACHE_FILE_FIND_LIST <&2 + exit 1 +fi + +rm -f /root/.pip/pip.conf +$PIP3 install --use-wheel --no-index --find-links=https://wheel.riftio.com/mirrors/python3_wheelhouse "$@" + + diff --git a/bin/pip3-kilo-install b/bin/pip3-kilo-install new file mode 100755 index 0000000..c03266a --- /dev/null +++ b/bin/pip3-kilo-install @@ -0,0 +1,19 @@ +#!/usr/bin/bash + + +progs="/bin/pip3 /bin/python3-pip" +for prog in $progs; do + if [ -f $prog ]; then + PIP3=$prog + break + fi +done + +if [ -z "$PIP3" ]; then + echo "FATAL ERROR cannot locate pip3 installer -- tried $progs" >&2 + exit 1 +fi + +$PIP3 install --use-wheel --no-index --trusted-host wheel.riftio.com --find-links=http://wheel.riftio.com/mirrors/python3_kilo_wheelhouse "$@" + + diff --git a/bin/rift-lint.py b/bin/rift-lint.py new file mode 100755 index 0000000..17ede2e --- /dev/null +++ b/bin/rift-lint.py @@ -0,0 +1,429 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# Author(s): Joshua Downer +# Creation Date: 2014/07/17 + +import argparse +import compileall +import concurrent.futures +import contextlib +import cStringIO +import functools +import os +import re +import shlex +import subprocess +import sys + +class CommandError(Exception): + pass + + +@contextlib.contextmanager +def redirect_stdout(buf): + """A context manager that switches stdout for a buffer""" + tmp, sys.stdout = sys.stdout, buf + yield sys.stdout + sys.stdout = tmp + + +@contextlib.contextmanager +def pushd(path): + """A context manager that acts like pushd one enter and popd on exit + + Using this context manager will change the current working directory to the + specified path. On exit, the context manager will change back to the + original directory. + + """ + cwd = os.getcwd() + os.chdir(path) + yield + os.chdir(cwd) + + +def command(cmd): + """Executes a command in a separate process and returned the output + + The command is executed on a process and the output from the command is + returned as a list of strings. Note that empty strings are not returned. + + """ + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + output, error = process.communicate() + if process.returncode != 0: + raise CommandError(error) + + return [s for s in output.split('\n') if s] + + +def top_level(): + """Returns the path of the top level directory of the repository.""" + git_dir = command("git rev-parse --git-dir")[0] + git_dir = re.match('.*(?=\.git/?)', git_dir).group().rstrip('/') + return git_dir if git_dir else '.' 
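+# (For illustration, with a hypothetical git-dir of "/home/me/ws/.git" the
+# function returns "/home/me/ws"; with the bare ".git" printed at the repo
+# root it returns ".".)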
+ + +def list_submodules(): + """Returns a list of the submodules in the current repository.""" + return command("git submodule -q foreach 'echo $path'") + + +def list_remote(): + """Returns a list of files that differ from the remote master.""" + return command('git diff remotes/origin/master --name-only') + + +def list_untracked(): + """Returns a list of untracked files.""" + return [f[3:] for f in command('git st --porcelain') if f.startswith('?? ')] + + +def list_added(): + """Returns a list of added files.""" + def added(path): + try: + return 'A' in path[:2] + except: + return False + + return [f[3:] for f in command('git st --porcelain') if added(f)] + + +def list_modified(): + """Returns a list of modified files.""" + def modified(path): + try: + return 'M' in path[:2] + except: + return False + + return [f[3:] for f in command('git st --porcelain') if modified(f)] + + +def list_range(commits): + """Returns a list of files changed over the specified range of commits""" + try: + return command('git diff {commits} --name-only'.format(commits=commits)) + except CommandError: + pass + return [] + + +def list_submodule(func, path): + """Applies a function from within a submodule and returns the result.""" + with pushd(path): + return [os.path.join(path, f) for f in func()] + + +class Repository(object): + def __init__(self, root): + """Create an object to represent the repository + + :root: the root of the repository + + """ + self._root = os.path.abspath(root) + with pushd(self.root): + self._submodules = list_submodules() + + @property + def root(self): + """The path to the root of the repository""" + return self._root + + @property + def submodules(self): + """A list of submodules in the repository""" + return self._submodules + + def foreach_submodule(self, func): + """Applies a function to each of the submodules in the repository. + + :func: a function that returns a list of file paths + + The result of the provided function is required to be a list of paths to + files within each of the submodules. The paths are relative to the root + of the repository. + + """ + with concurrent.futures.ProcessPoolExecutor() as executor: + paths = [os.path.join(self.root, s) for s in self.submodules] + results = executor.map(func, paths) + results = [u for v in results for u in v] + return [os.path.relpath(f, self.root) for f in results] + + def forall(self, func): + """Applies a function to the submodules and the top-level repo. + + :func: a function that returns a list of file paths + + The result of the provided function is required to be a list of paths to + files within each of the submodules or the top-level repo. The paths are + relative to the root of the repository. 
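+
+        For example (hypothetical paths), self.forall(list_untracked) walks
+        the top-level repo and every submodule and returns one sorted list
+        such as ['bin/scratch.sh', 'modules/core/mano/notes.txt'].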
+ + """ + files = [] + with pushd(self.root): + files = [f for f in func() if os.path.isfile(f)] + + list_submodule_func = functools.partial(list_submodule, func) + files.extend(self.foreach_submodule(list_submodule_func)) + + return sorted(files) + + def remote(self): + """Returns a list of files that differ from the remote/origin/master.""" + return self.forall(list_remote) + + def modified(self): + """Returns a list of files have been modified in the repo and submodules""" + return self.forall(list_modified) + + def untracked(self): + """Returns a list of all the untracked files in the repo and submodules""" + return self.forall(list_untracked) + + def range(self, commits): + """Returns a list of files modified over the specified range""" + return self.forall(functools.partial(list_range, commits)) + + +class Lint(object): + command = "pylint -E --rcfile={rcfile}" + + def __init__(self, exclude=None, rcfile=None): + """ + Create a lint object. + + :exclude: a list of regular expressions used to exclude files + :rcfile: a path to a pylintrc file + + """ + self._exclude = exclude + self._command = Lint.command.format(rcfile=rcfile) + + def should_exclude(self, path): + """Returns TRUE if this specified path should be excluded + + :path: the path to test + + """ + if not path.endswith('.py'): + return True + + if not os.path.isfile(path): + return True + + try: + for rule in self._exclude: + if rule.match(path) is not None: + return True + except: + pass + + return False + + def evaluate(self, path): + """Applies pylint to the specified file. + + Pylint will only be applied to python scripts that have a '.py' suffix + and that do not match any excluded paths. + + :path: the path of the file that is to be evaluated + + """ + if self.should_exclude(path): + return [] + + results = command('{cmd} {path}'.format(cmd=self._command, path=path)) + return [line for line in results if not line.startswith("***")] + + +def compile_target(target): + """Generate bytecode for the specified target + + The :target: is a python script that get compiled into byte code. + + Returns a tuple (result, details), where result is a string that with one of + the values: SKIPPED, SUCCESS, or FAILURE. The details provide information + about any failure that has occurred. + + If there is already bytecode in the same directory as the :target:, the + :target: is not compile and returns a result of 'SKIPPED'. If there is a + syntax error in the script, 'FAILURE' is returned with the details of the + compilation error. Otherwise, 'SUCCESS' is returned. + + """ + result = 'SUCCESS' + details = [] + + # The output to stdout is redirected to a buffer so that it can + # be optionally reported in the case of a failure. + with redirect_stdout(cStringIO.StringIO()): + bytecode = target + 'c' + if os.path.isfile(bytecode): + return target, 'SKIPPED', details + + if compileall.compile_file(target, quiet=True): + os.remove(bytecode) + else: + result = 'FAILURE' + sys.stdout.seek(0) + details = [line.rstrip() for line in sys.stdout.readlines() if line] + + return target, result, details + + # If there are any error messages, write them to stdout at this + # time and then exit. Or, in verbose mode, write out a + # success/failure mode for each file. 
+
+
+def main(argv=sys.argv[1:]):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-c', '--compile',
+        action='store_true',
+        help="compile the python scripts to detect syntax errors")
+    parser.add_argument('-e', '--exclude',
+        help="a list of rules for excluding file paths")
+    parser.add_argument('-l', '--list',
+        action='store_true',
+        help="list the files to be processed")
+    parser.add_argument('-m', '--modified',
+        action='store_true',
+        help="list files that have been modified")
+    parser.add_argument('-r', '--remote',
+        action='store_true',
+        help="list files that differ from the remote")
+    parser.add_argument('-t', '--target',
+        default=None,
+        type=str,
+        help="a directory to search (recursively) for python files")
+    parser.add_argument('--range',
+        help="list files that have changed over a specified range \
+              of commits (as understood by git)")
+    parser.add_argument('-u', '--untracked',
+        action='store_true',
+        help="list files that are untracked")
+    parser.add_argument('--rcfile',
+        default=os.path.join(top_level(), 'etc/pylintrc'),
+        help="specifies the path to a pylintrc file to use")
+    parser.add_argument('-v', '--verbose',
+        action='store_true',
+        help="print out additional diagnostic information")
+    parser.add_argument('files',
+        nargs=argparse.REMAINDER,
+        default=[],
+        help="a list of additional files to process")
+
+    args = parser.parse_args(argv)
+
+    # If pylint is required, check that it is available
+    if not args.compile:
+        try:
+            command('command -v pylint')
+        except CommandError:
+            print('Unable to find pylint on the PATH')
+            exit(1)
+        except Exception as e:
+            print(str(e))
+            exit(2)
+
+    repo = Repository(top_level())
+
+    # Construct the lint object using any rules provided by the caller
+    exclude = args.exclude.split(":") if args.exclude else []
+    lint = Lint(rcfile=args.rcfile, exclude=[re.compile(e) for e in exclude])
+
+    # Construct a list of the required files
+    files = args.files
+    if args.modified:
+        files.extend(repo.modified())
+    if args.untracked:
+        files.extend(repo.untracked())
+    if args.remote:
+        files.extend(repo.remote())
+    if args.range:
+        files.extend(repo.range(args.range))
+
+    # If a target directory has been specified, recursively search for python
+    # files
+    if args.target is not None:
+        if not os.path.isdir(args.target):
+            print("The specified target directory does not exist!")
+            exit(1)
+
+        for root, _, names in os.walk(args.target):
+            files.extend(os.path.join(root, n) for n in names if n.endswith('.py'))
+
+    # Simply print out the paths of all of the files
+    if args.list:
+        for f in files:
+            print(f)
+
+    # Compile each of the specified files to determine if there are any syntax
+    # errors.
+ elif args.compile: + files = [f for f in files if f.endswith('.py')] + with pushd(repo.root): + with concurrent.futures.ProcessPoolExecutor() as executor: + futures = [executor.submit(compile_target, f) for f in files] + concurrent.futures.wait(futures) + + results = [f.result() for f in futures] + + if args.verbose: + failed = False + for target, result, details in results: + failed = (failed or result == 'FAILURE') + print('PYCHECK {result} {target}'.format(result=result, target=target)) + for line in details: + print(line) + + if failed: + exit(1) + + else: + failures = [target for target, result, _ in results if result == 'FAILURE'] + for target in failures: + print('PYCHECK FAILURE {target}'.format(target=target)) + + if failures: + exit(1) + + # Apply pylint to each of the files and report the result + else: + with pushd(repo.root): + for f in files: + for line in lint.evaluate(f): + print(line) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/bin/submodule_has_failed_tests.sh b/bin/submodule_has_failed_tests.sh new file mode 100755 index 0000000..3a7500a --- /dev/null +++ b/bin/submodule_has_failed_tests.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/05 +# +# Script to be invoked by CMake to determine whether a submodule has failed tests. +# Currently, test_wrapper.sh will create a _FAILED file for each unit test +# that lives within a submodule tree. We can seach this tree for any *_FAILED file +# to determine if any test failed. +# +# submodule_has_failed_test.sh +# +# Arguments: +# - Submodule path relative to the root of the repository. +# +# Returns 0 if failed tests were found and 1 otherwise. + +rift_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +rift_root="${rift_root%/bin}" + +unittest_dir="$RIFT_UNIT_TEST" + +if [ ! $# -eq 1 ]; then + echo "ERROR: A submodule prefix argument is expected." + exit 1 +fi + +submodule_prefix="$1" + +if [ ! -d "$rift_root/$submodule_prefix" ]; then + echo "ERROR: Submodule doesn't exist?: $rift_root/$submodule_prefix" + exit 1 +fi + +submodule_unittest_dir="$unittest_dir/$submodule_prefix" +if [ ! -d "$submodule_unittest_dir" ]; then + echo "WARNING: Submodule unittest output directory doesn't exist: $submodule_unittest_dir" + exit 1 +fi + +found_files=$(find "$submodule_unittest_dir" -name "*_FAILED" | wc -l) +if [ $found_files == "0" ]; then + echo "INFO: Did not find any failed unittests in: $submodule_unittest_dir" + exit 1 +fi + +# There a certain cases (build_ladder) where we still want to cache the submodule even +# if there are failures. This was the easiest place to inject that logic. +if [ $BCACHE_IGNORE_FAILED_SUBMODULE_TESTS -eq 1 ]; then + echo 'WARNING: $BCACHE_IGNORE_FAILED_SUBMODULE_TESTS env var set, caching submodule regardless of failed unit tests.' 
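+    # (Here exit 1 means "treat as no failed tests"; per the header comment,
+    # this script's callers read 0 as "failures found" and 1 as "none".)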
+ exit 1 +fi + +echo "INFO: Found $found_files failed unit tests in: $submodule_unittest_dir" +exit 0 \ No newline at end of file diff --git a/bin/uninitialize_cached_submodules.sh b/bin/uninitialize_cached_submodules.sh new file mode 100755 index 0000000..cdb4ee0 --- /dev/null +++ b/bin/uninitialize_cached_submodules.sh @@ -0,0 +1,131 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# /bin/bash +# +# +# Author(s): Austin Cormier +# Creation Date: 2014/06/03 +# +# This script is meant to assist Jenkins in performing incremental +# builds by deinitializing submodules if the build cache exists for the +# submodule hash. +# +# uninitialize_cached_submodules.sh +# +# Arguments: +# - Debug, Debug_Coverage, Release +# + +set -o nounset +set -u + +THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +DEPENDENCY_PARSER_CMD="$THIS_DIR/dependency_parser.py" + +# Set some vars if not defined by env variables. Used for testing. +GET_SUBMODULES_CMD="$THIS_DIR/dependency_sort.py" + +function verify_cwd_at_root(){ + if [ ! -f ".gitmodules.deps" ]; then + echo "ERROR: This script should be run at the top-level" + exit 1 + fi +} + +## +# Calculate and return submodule hash. Capture stdout to get hash. +# $1 - submodule to calculate hash for +## +function get_submodule_hash(){ + local submodule="$1" + + set -x + local parser_cmd="$DEPENDENCY_PARSER_CMD --dependency-file=.gitmodules.deps "--submodule=$submodule" --print-hash" + local hash=$($parser_cmd) + set +x + if [ $? -ne 0 ]; then + echo "ERROR: Command failed to retrieve submodule hash (command: $parser_cmd)" + exit 1 + fi + + echo $hash +} + +## +# Gets list of submodules available in workspace. Capture stdout to get list. +## +function get_submodules(){ + local sorted_submodules=$($GET_SUBMODULES_CMD) + if [ $? -ne 0 ]; then + echo "ERROR: Could not get list of submodules." + exit 1 + fi + + echo "$sorted_submodules" +} + +## +# Builds the full build cache path. Capture stdout to get the path. +# $1 - Submodule +# $2 - Submodule Hash +# $3 - Build Type +## +function get_full_cache_path(){ + local submodule="$1" + local hash="$2" + local build_type="$3" + + local cache_path="$RIFT_BUILD_CACHE_DIR/$build_type/$submodule/$hash" + + echo $cache_path +} + +if [ $# -ne 1 ]; then + echo "ERROR: Expecting a single build_type argument" + exit 1 +fi + +if [[ "$1" != "Debug_FILES" &&"$1" != "Debug" && "$1" != "Debug_Coverage" && $1 != "Release" ]]; then + echo "ERROR: Build type should be in the set (Debug, Debug_Coverage, Release)." + exit 1 +fi + +build_type=$1 + +submodules="$(get_submodules)" +# Convert the string into an array using the default IFS of ' ' +read -a submodules_array <<< "$submodules" + +for submodule in "${submodules_array[@]}"; do + if [ ! -e "$submodule" ]; then + echo "WARNING: Could not find $submodule path." 
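+        # (A deinitialized submodule still leaves an empty directory behind,
+        # so a missing path here is unexpected; skip it rather than abort.)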
+ continue + fi + + hash="$(get_submodule_hash $submodule)" + echo "INFO: Calculated submodule hash for $submodule: $hash" + + full_cache_path="$(get_full_cache_path $submodule $hash $build_type)" + echo "INFO: Checking if submodule cache path exists for $submodule: $full_cache_path" + if [ -e "$full_cache_path" ]; then + echo "INFO: Build cache exists for submodule $submodule. Deinitializing." + git submodule deinit $submodule + if [ $? -ne 0 ]; then + echo "ERROR: Could not deinitialize submodule: $submodule" + continue + fi + fi +done \ No newline at end of file diff --git a/modules/core/mano/.cpack-workaround b/modules/core/mano/.cpack-workaround new file mode 100644 index 0000000..e69de29 diff --git a/modules/core/mano/CMakeLists.txt b/modules/core/mano/CMakeLists.txt new file mode 100644 index 0000000..7c6b207 --- /dev/null +++ b/modules/core/mano/CMakeLists.txt @@ -0,0 +1,72 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 03/26/2014 +# + +## +# DEPENDENCY ALERT +# The submodule dependencies must be specified in the +# .gitmodules.dep file at the top level (supermodule) directory +# If this submodule depends other submodules remember to update +# the .gitmodules.dep +## + +cmake_minimum_required(VERSION 2.8) + +## +# Set the path to the top level cmake modules directory +## +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/modules") + +## +# DO NOT add any code before this and DO NOT +# include this file anywhere else +## +include(rift_submodule) +include(rift_python) + +## +# Submodule specific includes will go here, +# These are specified here, since these variables are accessed +# from multiple sub directories. If the variable is subdirectory +# specific it must be declared in the subdirectory. +## + +## +# Include the subdirs +## +set(subdirs + models + common + rwmc + rwlaunchpad + confd_client + rwcm + ) + +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +## +# This macro adds targets for documentaion, unittests, code coverage and packaging +## +rift_add_submodule_targets(SUBMODULE_PACKAGE_NAME "rw.core.mc") + +# Workaround whatever mess rw.package is doing as it can't seem +# to figure out that it should make a directory -before- making +# symlinks.. +set(dir usr/lib64/python${RIFT_PYTHON3}/site-packages/gi/overrides) +install(FILES + ${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround + DESTINATION ${dir}) + +if (RIFT_SUPPORT_PYTHON2) + set(dir usr/lib64/python${RIFT_PYTHON2}/site-packages/gi/overrides) + + install(FILES + ${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround + DESTINATION ${dir}) +endif() + + diff --git a/modules/core/mano/Makefile b/modules/core/mano/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/modules/core/mano/README b/modules/core/mano/README
new file mode 100644
index 0000000..ed6a799
--- /dev/null
+++ b/modules/core/mano/README
@@ -0,0 +1,9 @@
+This submodule contains the MANO subsystem from RIFT.ware. The following
+section describes the directory structure of the MANO subsystem:
+
+common: contains code shared by mission-control and launchpad
+examples: contains a ping/pong NS example
+models: contains YANG based information models
+rwlaunchpad: contains software for RIFT.ware launchpad
+rwmc: contains software for RIFT.ware mission control
+rwcm: contains software for RIFT.ware configuration manager
diff --git a/modules/core/mano/common/CMakeLists.txt b/modules/core/mano/common/CMakeLists.txt
new file mode 100644
index 0000000..2cb4f42
--- /dev/null
+++ b/modules/core/mano/common/CMakeLists.txt
@@ -0,0 +1,30 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME common)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  plugins
+  python
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+install(
+  FILES
+    rw_gen_package.py
+  DESTINATION usr/rift/mano/common
+  COMPONENT ${PKG_LONG_NAME})
diff --git a/modules/core/mano/common/plugins/CMakeLists.txt b/modules/core/mano/common/plugins/CMakeLists.txt
new file mode 100644
index 0000000..95fb6af
--- /dev/null
+++ b/modules/core/mano/common/plugins/CMakeLists.txt
@@ -0,0 +1,19 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  rwcntmgrtasklet
+  yang
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt b/modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt
new file mode 100644
index 0000000..18eb9c1
--- /dev/null
+++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/CMakeLists.txt
@@ -0,0 +1,26 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+#
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwcntmgrtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile b/modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py new file mode 100644 index 0000000..f7b0ab3 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwcntmgrtasklet import ContainerManager diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py new file mode 100755 index 0000000..3a8b437 --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py @@ -0,0 +1,331 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
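+
+# This tasklet provisions an LVM backing store and a set of LXC-backed VMs
+# through the "cloudsim_proxy" CAL plugin once DTS reaches its INIT state;
+# stop() tears the containers and the backing store down again.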
+ + +# +# + +import asyncio +import logging +import os +import shlex +import subprocess +import time +import uuid + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + RwDts as rwdts, + RwcalYang, +) + +import rift.rwcal.cloudsim.lvm as lvm +import rift.rwcal.cloudsim.lxc as lxc +import rift.tasklets +import rw_peas + + +class SaltConnectionTimeoutError(Exception): + pass + + +class ContainerManager(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(ContainerManager, self).__init__(*args, **kwargs) + self.lvm = None + self.resources = None + self.dts_api = None + + def start(self): + super(ContainerManager, self).start() + self.log.info("Starting ContainerManager") + self.log.setLevel(logging.DEBUG) + ResourceProvisioning.log_hdl = self.log_hdl + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS( + self.tasklet_info, + RwcalYang.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + def stop(self): + super(ContainerManager, self).stop() + self.resources.destroy() + self.lvm.destroy() + + @asyncio.coroutine + def init(self): + # Create the LVM backing store with the 'rift' volume group + self.lvm = LvmProvisioning() + self.resources = ResourceProvisioning(self.loop, self.log) + + # Tear down any container resources left over from a previous run + yield from self.loop.run_in_executor( + None, + self.resources.destroy, + ) + + if "REUSE_LXC" not in os.environ: + # Destroy any existing lvm partition + yield from self.loop.run_in_executor( + None, + self.lvm.destroy, + ) + + # Create the lvm partition + yield from self.loop.run_in_executor( + None, + self.lvm.create, + ) + + # Create an initial set of VMs + yield from self.loop.run_in_executor( + None, + self.resources.create, + ) + + yield from self.loop.run_in_executor( + None, + self.resources.wait_ready, + ) + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) + + +class LvmProvisioning(object): + """ + This class represents LVM provisioning. + """ + + def create(self): + """Creates an LVM backing store""" + lvm.create('rift') + + def destroy(self): + """Destroys the existing LVM backing store""" + lvm.destroy('rift') + + +class ResourceProvisioning(object): + """ + This is a placeholder class that is used to represent the provisioning of
    container resources.
+ """ + + cal_interface = None + log_hdl = None + + def __init__(self, loop, log): + # Initialize the CAL interface if it has not already been initialized + if ResourceProvisioning.cal_interface is None: + plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0') + engine, info, extension = plugin() + + ResourceProvisioning.cal_interface = plugin.get_interface("Cloud") + ResourceProvisioning.cal_interface.init(ResourceProvisioning.log_hdl) + + self.account = RwcalYang.CloudAccount() + self.account.account_type = "cloudsim_proxy" + self.account.cloudsim_proxy.host = "192.168.122.1" + + self.log = log + self.loop = loop + self.nvms = 1 + + self._vms = [] + + @property + def cal(self): + return ResourceProvisioning.cal_interface + + def create(self): + """Create all of the necessary resources""" + + rift_root = os.environ['RIFT_ROOT'] + image = self.create_image("%s/images/rift-root-latest.qcow2" % (rift_root)) + + # Create a VM + for index in range(self.nvms): + self._vms.append(self.create_vm(image, index)) + + # Start the VMs + for vm in self._vms: + self.cal.start_vm(self.account, vm.vm_id) + + def destroy(self): + """Destroy all of the provided resources""" + + for container in lxc.containers(): + lxc.stop(container) + + for container in lxc.containers(): + if not ("REUSE_LXC" in os.environ and container == "rwm0"): + lxc.destroy(container) + + def create_image(self, location): + """Creates and returns a CAL image""" + + image = RwcalYang.ImageInfoItem() + image.name = "rift-lxc-image" + image.location = location + image.disk_format = "qcow2" + rc, image.id = self.cal.create_image(self.account, image) + return image + + def create_network(self, network_name, subnet): + """Creates and returns a CAL network""" + + network = RwcalYang.NetworkInfoItem( + network_name=network_name, + subnet=subnet, + ) + rc, network.network_id = self.cal.create_network(self.account, network) + return network + + def create_vm(self, image, index): + """Returns a VM + + Arguments: + image - the image used to create the VM + index - an index used to label the VM + + Returns: + A VM object + + """ + vm = RwcalYang.VMInfoItem() + vm.vm_name = 'rift-s{}'.format(index + 1) + vm.image_id = image.id + vm.user_tags.node_id = str(uuid.uuid4()) + + user_data_template_str = open( + os.path.join( + os.environ['RIFT_INSTALL'], + 'etc/userdata-template', + ) + ).read() + + # Get the interface ip address of the mgmt network + # This is where the salt master is accessible on + mgmt_interface_ip = "192.168.122.1" + + # Create salt-stack userdata + vm.cloud_init.userdata = user_data_template_str.format( + master_ip=mgmt_interface_ip, + lxcname=vm.user_tags.node_id, + ) + + rc, vm.vm_id = self.cal.create_vm(self.account, vm) + + return vm + + def wait_vm_salt_connection(self, vm, timeout_secs=600): + """ Wait for vm salt minion to reach up state with master """ + + vm_node_id = vm.user_tags.node_id + start_time = time.time() + self.log.debug("Waiting up to %s seconds for node id %s", + timeout_secs, vm_node_id) + while (time.time() - start_time) < timeout_secs: + try: + stdout = subprocess.check_output( + shlex.split('salt %s test.ping' % vm_node_id), + universal_newlines=True, + ) + except subprocess.CalledProcessError: + continue + + up_minions = stdout.splitlines() + for line in up_minions: + if "True" in line: + return + + raise SaltConnectionTimeoutError( + "Salt id %s did not enter UP state in %s seconds" % ( + vm_node_id, timeout_secs + ) + ) + + def wait_ready(self): + """ Wait for all resources to become ready """ 
+ + self.log.info("Waiting for all VM's to make a salt minion connection") + for i, vm in enumerate(self._vms): + self.wait_vm_salt_connection(vm) + self.log.debug( + "Node id %s came up (%s/%s)", + vm.user_tags.node_id, i + 1, len(self._vms) + ) + + def create_port(self, network, vm, index): + """Returns a port + + Arguments: + network - a network object + vm - a VM object + index - an index to label the port + + Returns: + Returns a port object + + """ + port = RwcalYang.PortInfoItem() + port.port_name = "eth1" + port.network_id = network.network_id + port.vm_id = vm.vm_id + + rc, port.port_id = self.cal.create_port(self.account, port) + return port \ No newline at end of file diff --git a/modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py b/modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py new file mode 100755 index 0000000..8d517ea --- /dev/null +++ b/modules/core/mano/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+ +import rift.tasklets.rwcntmgrtasklet + +class Tasklet(rift.tasklets.rwcntmgrtasklet.ContainerManager): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/common/plugins/yang/CMakeLists.txt b/modules/core/mano/common/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..8e29677 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 2015/11/20 +# + +## +# Yang targets +## +rift_add_yang_target( + TARGET rwcloud_yang + YANG_FILES rw-cloud.yang rw-sdn.yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwsdn_yang_gen +) + +rift_add_yang_target( + TARGET rwconfig_agent_yang + YANG_FILES rw-config-agent.yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + mano_yang_gen + DEPENDS + mano_yang +) diff --git a/modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang b/modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang new file mode 100644 index 0000000..d7dc559 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-cloud.tailf.yang @@ -0,0 +1,29 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-cloud-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-cloud-annotation"; + prefix "rw-cloud-ann"; + + import rw-cloud { + prefix rw-cloud; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-cloud:update-cloud-status" { + tailf:actionpoint rw_action; + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-cloud.yang b/modules/core/mano/common/plugins/yang/rw-cloud.yang new file mode 100755 index 0000000..c3fb1c7 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-cloud.yang @@ -0,0 +1,81 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-cloud +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-cloud"; + prefix "rw-cloud"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import rw-sdn { + prefix "rw-sdn"; + } + + revision 2015-09-14 { + description + "Initial revision."; + } + + container cloud { + rwpb:msg-new CloudConfig; + list account { + rwpb:msg-new CloudAccount; + description "Configure Cloud Accounts"; + + max-elements 16; + key "name"; + + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + leaf sdn-account { + description "Configured SDN account associated with this cloud account"; + type leafref { + path "/rw-sdn:sdn-account/rw-sdn:name"; + } + } + + uses rwcal:provider-auth; + uses rwcal:connection-status; + } + } + + rpc update-cloud-status { + description "Begin cloud account connection status"; + input { + leaf cloud-account { + mandatory true; + description + "The cloud account name to update connection status for"; + type string; + } + } + } + +} + diff --git a/modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang b/modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang new file mode 100644 index 0000000..cd72eea --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-config-agent.taif.yang @@ -0,0 +1,17 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-config-agent-annotation +{ + namespace 
"http://riftio.com/ns/riftware-1.0/rw-config-agent-annotation"; + prefix "rw-config-agent-ann"; + + import rw-config-agent { + prefix rw-config-agent; + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-config-agent.yang b/modules/core/mano/common/plugins/yang/rw-config-agent.yang new file mode 100755 index 0000000..e97d419 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-config-agent.yang @@ -0,0 +1,83 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-config-agent +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-config-agent"; + prefix "rw-config-agent"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + revision 2016-02-04 { + description + "Initial revision."; + } + + typedef config-agent-account-type { + description "config agent account type"; + type enumeration { + enum juju; + } + } + + container config-agent { + rwpb:msg-new ConfigAgent; + + list account { + rwpb:msg-new ConfigAgentAccount; + key "name"; + + description "List of configuration agent accounts"; + + leaf name { + description "Name of this config agent account"; + type string; + } + + leaf account-type { + type config-agent-account-type; + } + + choice config-agent-account-type { + case juju { + description + "Configure the VNF through Juju."; + container juju { + leaf ip-address { + description "Juju host IP address."; + type inet:ip-address; + } + leaf port { + description + "Juju host port number. Default 17070."; + type inet:port-number; + default 17070; + } + leaf user { + description + "User name to connect to Juju host. Default user-admin."; + type string; + default "user-admin" ; + } + leaf secret { + description + "Admin secret or password for Juju host."; + type string; + } + } + } + } + } + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang b/modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang new file mode 100644 index 0000000..3cf4beb --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-sdn.tailf.yang @@ -0,0 +1,17 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-sdn-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-sdn-annotation"; + prefix "rw-sdn-ann"; + + import rw-sdn { + prefix rw-sdn; + } +} diff --git a/modules/core/mano/common/plugins/yang/rw-sdn.yang b/modules/core/mano/common/plugins/yang/rw-sdn.yang new file mode 100644 index 0000000..41bc4a8 --- /dev/null +++ b/modules/core/mano/common/plugins/yang/rw-sdn.yang @@ -0,0 +1,47 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-sdn +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-sdn"; + prefix "rw-sdn"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwsdn { + prefix "rwsdn"; + } + + revision 2015-09-14 { + description + "Initial revision."; + } + + list sdn-account { + rwpb:msg-new SDNAccountConfig; + + key "name"; + leaf name { + type string; + } + + uses rwsdn:sdn-provider-auth; + } +} + diff --git a/modules/core/mano/common/python/CMakeLists.txt b/modules/core/mano/common/python/CMakeLists.txt new file mode 100644 index 0000000..a390627 --- /dev/null +++ b/modules/core/mano/common/python/CMakeLists.txt @@ -0,0 +1,18 @@ +# Creation Date: 2016/1/12 +# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END) + +cmake_minimum_required(VERSION 2.8) + + +rift_python_install_tree( + FILES + 
rift/mano/cloud/__init__.py + rift/mano/cloud/accounts.py + rift/mano/cloud/config.py + rift/mano/cloud/operdata.py + rift/mano/config_agent/operdata.py + rift/mano/config_agent/__init__.py + rift/mano/config_agent/config.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY + ) diff --git a/modules/core/mano/common/python/rift/mano/cloud/__init__.py b/modules/core/mano/common/python/rift/mano/cloud/__init__.py new file mode 100644 index 0000000..4c1191d --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/__init__.py @@ -0,0 +1,28 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .accounts import ( + CloudAccount, + CloudAccountCalError, + ) + +from .config import ( + CloudAccountConfigSubscriber, + CloudAccountConfigCallbacks + ) + +from .operdata import ( + CloudAccountDtsOperdataHandler, +) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/cloud/accounts.py b/modules/core/mano/common/python/rift/mano/cloud/accounts.py new file mode 100644 index 0000000..908f8c8 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/accounts.py @@ -0,0 +1,174 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
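The __init__.py above re-exports the package's public surface, so consumers only import rift.mano.cloud. A hypothetical wiring from a tasklet (dts, log, rwlog_hdl, and loop are assumed to come from the surrounding tasklet; the callback bodies are illustrative, and the apply callbacks must be plain functions, not coroutines):

    import rift.mano.cloud as rwcloud

    callbacks = rwcloud.CloudAccountConfigCallbacks(
        on_add_apply=lambda account: log.info("cloud account added: %s", account.name),
        on_delete_apply=lambda name: log.info("cloud account deleted: %s", name),
    )
    subscriber = rwcloud.CloudAccountConfigSubscriber(dts, log, rwlog_hdl, callbacks)
    subscriber.register()

    operdata = rwcloud.CloudAccountDtsOperdataHandler(dts, log, loop)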
+ +import sys +import asyncio + +from gi.repository import ( + RwTypes, + RwcalYang, + RwCloudYang, + ) +import rw_peas + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class PluginLoadingError(Exception): + pass + + +class CloudAccountCalError(Exception): + pass + + +class CloudAccount(object): + def __init__(self, log, rwlog_hdl, account_msg): + self._log = log + self._account_msg = account_msg.deep_copy() + + self._cal_plugin = None + self._engine = None + + self._cal = self.plugin.get_interface("Cloud") + self._cal.init(rwlog_hdl) + + self._status = RwCloudYang.CloudAccount_ConnectionStatus( + status="unknown", + details="Connection status lookup not started" + ) + + self._validate_task = None + + @property + def plugin(self): + if self._cal_plugin is None: + try: + self._cal_plugin = rw_peas.PeasPlugin( + getattr(self._account_msg, self.account_type).plugin_name, + 'RwCal-1.0', + ) + + except AttributeError as e: + raise PluginLoadingError(str(e)) + + self._engine, _, _ = self._cal_plugin() + + return self._cal_plugin + + def _wrap_status_fn(self, fn, *args, **kwargs): + ret = fn(*args, **kwargs) + rw_status = ret[0] + if rw_status != RwTypes.RwStatus.SUCCESS: + msg = "%s returned %s" % (fn.__name__, str(rw_status)) + self._log.error(msg) + raise CloudAccountCalError(msg) + + # If there was only one other return value besides rw_status, then just + # return that element. Otherwise return the rest of the return values + # as a list. + return ret[1] if len(ret) == 2 else ret[1:] + + @property + def cal(self): + return self._cal + + @property + def name(self): + return self._account_msg.name + + @property + def account_msg(self): + return self._account_msg + + @property + def cal_account_msg(self): + return RwcalYang.CloudAccount.from_dict( + self.account_msg.as_dict(), + ignore_missing_keys=True, + ) + + def cloud_account_msg(self, account_dict): + self._account_msg = RwCloudYang.CloudAccount.from_dict(account_dict) + + @property + def account_type(self): + return self._account_msg.account_type + + @property + def connection_status(self): + return self._status + + def update_from_cfg(self, cfg): + self._log.debug("Updating parent CloudAccount to %s", cfg) + + # Hack to catch updates triggered from apply_callback when a sdn-account is removed + # from a cloud-account. 
To be fixed properly when updates are handled + if (self.account_msg.name == cfg.name + and self.account_msg.account_type == cfg.account_type): + return + + if cfg.has_field("sdn_account"): + self.account_msg.sdn_account = cfg.sdn_account + else: + raise NotImplementedError("Update cloud account not yet supported") + + def create_image(self, filename): + image_id = self._wrap_status_fn( + self.cal.create_image, self.cal_account_msg, filename + ) + + return image_id + + def get_image_list(self): + self._log.debug("Getting image list from account: %s", self.name) + resources = self._wrap_status_fn( + self.cal.get_image_list, self.cal_account_msg + ) + + return resources.imageinfo_list + + @asyncio.coroutine + def validate_cloud_account_credentials(self, loop): + self._log.debug("Validating Cloud Account credentials %s", self._account_msg) + self._status = RwCloudYang.CloudAccount_ConnectionStatus( + status="validating", + details="Cloud account connection validation in progress" + ) + rwstatus, status = yield from loop.run_in_executor( + None, + self._cal.validate_cloud_creds, + self.cal_account_msg, + ) + if rwstatus == RwTypes.RwStatus.SUCCESS: + self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict()) + else: + self._status = RwCloudYang.CloudAccount_ConnectionStatus( + status="failure", + details="Error when calling CAL validate cloud creds" + ) + + self._log.info("Got cloud account validation response: %s", self._status) + + def start_validate_credentials(self, loop): + if self._validate_task is not None: + self._validate_task.cancel() + self._validate_task = None + + self._validate_task = asyncio.ensure_future( + self.validate_cloud_account_credentials(loop), + loop=loop + ) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/cloud/config.py b/modules/core/mano/common/python/rift/mano/cloud/config.py new file mode 100644 index 0000000..a495d16 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/config.py @@ -0,0 +1,256 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import rw_peas + +import gi +gi.require_version('RwDts', '1.0') +import rift.tasklets + +from gi.repository import ( + RwcalYang as rwcal, + RwDts as rwdts, + ProtobufC, + ) + +from . 
import accounts + +class CloudAccountNotFound(Exception): + pass + + +class CloudAccountError(Exception): + pass + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class CloudAccountConfigCallbacks(object): + def __init__(self, + on_add_apply=None, on_add_prepare=None, + on_update_apply=None, on_update_prepare=None, + on_delete_apply=None, on_delete_prepare=None): + + @asyncio.coroutine + def prepare_noop(*args, **kwargs): + pass + + def apply_noop(*args, **kwargs): + pass + + self.on_add_apply = on_add_apply + self.on_add_prepare = on_add_prepare + self.on_update_apply = on_update_apply + self.on_update_prepare = on_update_prepare + self.on_delete_apply = on_delete_apply + self.on_delete_prepare = on_delete_prepare + + for f in ('on_add_apply', 'on_update_apply', 'on_delete_apply'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, apply_noop) + continue + + if asyncio.iscoroutinefunction(ref): + raise ValueError('%s cannot be a coroutine' % (f,)) + + for f in ('on_add_prepare', 'on_update_prepare', 'on_delete_prepare'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, prepare_noop) + continue + + if not asyncio.iscoroutinefunction(ref): + raise ValueError("%s must be a coroutine" % f) + + +class CloudAccountConfigSubscriber(object): + XPATH = "C,/rw-cloud:cloud/rw-cloud:account" + + def __init__(self, dts, log, rwlog_hdl, cloud_callbacks): + self._dts = dts + self._log = log + self._rwlog_hdl = rwlog_hdl + self._reg = None + + self.accounts = {} + + self._cloud_callbacks = cloud_callbacks + + def add_account(self, account_msg): + self._log.info("adding cloud account: {}".format(account_msg)) + + account = accounts.CloudAccount(self._log, self._rwlog_hdl, account_msg) + self.accounts[account.name] = account + + self._cloud_callbacks.on_add_apply(account) + + def delete_account(self, account_name): + self._log.info("deleting cloud account: {}".format(account_name)) + del self.accounts[account_name] + + self._cloud_callbacks.on_delete_apply(account_name) + + def update_account(self, account_msg): + self._log.info("updating cloud account: {}".format(account_msg)) + account = accounts.CloudAccount(self._log, self._rwlog_hdl, account_msg) + self.accounts[account.name].update_from_cfg(account_msg) + + # Block update callbacks for cloud accounts if due to SDN account changes + # If there are other cloud-account fields that are also updated at the same time, + # in addition to sdn-account, this update will not be triggered. 
+ # The logic to detect this might not be worth it since this won't happen through UI + if not account_msg.has_field("sdn_account"): + self._cloud_callbacks.on_update_apply(account) + + def register(self): + @asyncio.coroutine + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action) + + if xact.xact is None: + if action == rwdts.AppconfAction.INSTALL: + curr_cfg = self._reg.elements + for cfg in curr_cfg: + self._log.debug("Cloud account being re-added after restart.") + if not cfg.has_field('account_type'): + raise CloudAccountError("New cloud account must contain account_type field.") + self._log.debug("Adding account: %s", cfg) + self.add_account(cfg) + return + else: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actually persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self._reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_account(cfg.name) + + # Handle Adds + for cfg in add_cfgs: + self.add_account(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_account(cfg) + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for Cloud Account """ + + action = xact_info.query_action + self._log.debug("Cloud account on_prepare config received (action: %s): %s", + xact_info.query_action, msg) + + if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]: + # If the account already exists, then this is an update. Update the + # cloud account and invoke the on_update_prepare callback + if msg.name in self.accounts: + self._log.debug("Cloud account already exists. Invoking on_prepare update request") + if msg.has_field("account_type"): + raise CloudAccountError("Cannot change cloud's account-type") + + account = self.accounts[msg.name] + account.update_from_cfg(msg) + + # Block update callbacks for cloud accounts if due to SDN account changes + # If there are other cloud-account fields that are also updated at the same time, + # in addition to sdn-account, this update will not be triggered. + # The logic to detect this might not be worth it since this won't happen through UI + if not msg.has_field("sdn_account"): + yield from self._cloud_callbacks.on_update_prepare(account) + + else: + self._log.debug("Cloud account does not already exist. 
Invoking on_prepare add request") + if not msg.has_field('account_type'): + raise CloudAccountError("New cloud account must contain account_type field.") + + account = accounts.CloudAccount(self._log, self._rwlog_hdl, msg) + yield from self._cloud_callbacks.on_add_prepare(account) + + elif action == rwdts.QueryAction.DELETE: + # Check if the entire cloud account got deleted + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + if fref.is_field_deleted(): + yield from self._cloud_callbacks.on_delete_prepare(msg.name) + else: + fref.goto_proto_name(msg.to_pbcm(), "sdn_account") + if fref.is_field_deleted(): + # SDN account disassociated from cloud account + account = self.accounts[msg.name] + dict_account = account.account_msg.as_dict() + del dict_account["sdn_account"] + account.cloud_account_msg(dict_account) + else: + self._log.error("Deleting individual fields for cloud account not supported") + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Cloud Account config using xpath: %s", + CloudAccountConfigSubscriber.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + self._reg = acg.register( + xpath=CloudAccountConfigSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare, + ) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/cloud/operdata.py b/modules/core/mano/common/python/rift/mano/cloud/operdata.py new file mode 100644 index 0000000..b4db9b3 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/cloud/operdata.py @@ -0,0 +1,114 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
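The operdata handler defined next publishes each account's connection-status and serves the update-cloud-status RPC. A sketch of how a manager tasklet might drive it, assuming dts, log, and loop come from the surrounding tasklet and account is a CloudAccount instance (register() is a coroutine):

    handler = CloudAccountDtsOperdataHandler(dts, log, loop)
    yield from handler.register()

    # Adding an account kicks off credential validation in the background
    # via start_validate_credentials() on the event loop.
    handler.add_cloud_account(account)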
+ +import asyncio +import rift.tasklets + +from gi.repository import( + RwCloudYang, + RwDts as rwdts, + ) + +class CloudAccountNotFound(Exception): + pass + + +class CloudAccountDtsOperdataHandler(object): + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self.cloud_accounts = {} + + def add_cloud_account(self, account): + self.cloud_accounts[account.name] = account + account.start_validate_credentials(self._loop) + + def delete_cloud_account(self, account_name): + del self.cloud_accounts[account_name] + + def _register_show_status(self): + def get_xpath(cloud_name=None): + return "D,/rw-cloud:cloud/account{}/connection-status".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + self._log.debug("Got show cloud connection status request: %s", ks_path.create_string()) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + account = self.cloud_accounts[cloud_account_name] + except KeyError: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + connection_status = account.connection_status + self._log.debug("Responding to cloud connection status request: %s", connection_status) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=account.connection_status, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + def _register_validate_rpc(self): + def get_xpath(): + return "/rw-cloud:update-cloud-status" + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + if not msg.has_field("cloud_account"): + raise CloudAccountNotFound("Cloud account name not provided") + + cloud_account_name = msg.cloud_account + try: + account = self.cloud_accounts[cloud_account_name] + except KeyError: + raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name) + + account.start_validate_credentials(self._loop) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare + ), + flags=rwdts.Flag.PUBLISHER, + ) + + @asyncio.coroutine + def register(self): + yield from self._register_show_status() + yield from self._register_validate_rpc() \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/config_agent/__init__.py b/modules/core/mano/common/python/rift/mano/config_agent/__init__.py new file mode 100644 index 0000000..02dd8ff --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/config_agent/__init__.py @@ -0,0 +1,24 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .config import ( + ConfigAgentCallbacks, + ConfigAgentSubscriber + ) + +from .operdata import ( + ConfigAgentJobManager, + CfgAgentJobDtsHandler + ) diff --git a/modules/core/mano/common/python/rift/mano/config_agent/config.py b/modules/core/mano/common/python/rift/mano/config_agent/config.py new file mode 100644 index 0000000..f2be62e --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/config_agent/config.py @@ -0,0 +1,219 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import rw_peas + +import gi +gi.require_version('RwDts', '1.0') +import rift.tasklets + +from gi.repository import ( + RwcalYang as rwcal, + RwDts as rwdts, + RwConfigAgentYang as rwcfg_agent, + ProtobufC, + ) + +class ConfigAccountNotFound(Exception): + pass + +class ConfigAccountError(Exception): + pass + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class ConfigAgentCallbacks(object): + def __init__(self, + on_add_apply=None, on_add_prepare=None, + on_update_apply=None, on_update_prepare=None, + on_delete_apply=None, on_delete_prepare=None): + + @asyncio.coroutine + def prepare_noop(*args, **kwargs): + pass + + def apply_noop(*args, **kwargs): + pass + + self.on_add_apply = on_add_apply + self.on_add_prepare = on_add_prepare + self.on_update_apply = on_update_apply + self.on_update_prepare = on_update_prepare + self.on_delete_apply = on_delete_apply + self.on_delete_prepare = on_delete_prepare + + for f in ('on_add_apply', 'on_update_apply', 'on_delete_apply'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, apply_noop) + continue 
+ + if asyncio.iscoroutinefunction(ref): + raise ValueError('%s cannot be a coroutine' % (f,)) + + for f in ('on_add_prepare', 'on_update_prepare', 'on_delete_prepare'): + ref = getattr(self, f) + if ref is None: + setattr(self, f, prepare_noop) + continue + + if not asyncio.iscoroutinefunction(ref): + raise ValueError("%s must be a coroutine" % f) + + +class ConfigAgentSubscriber(object): + XPATH = "C,/rw-config-agent:config-agent/account" + + def __init__(self, dts, log, config_callbacks): + self._dts = dts + self._log = log + self._reg = None + + self.accounts = {} + + self._config_callbacks = config_callbacks + + def add_account(self, account_msg): + self._log.info("adding config account: {}".format(account_msg)) + + self.accounts[account_msg.name] = account_msg + + self._config_callbacks.on_add_apply(account_msg) + + def delete_account(self, account_name): + self._log.info("deleting config account: {}".format(account_name)) + del self.accounts[account_name] + + self._config_callbacks.on_delete_apply(account_name) + + def update_account(self, account_msg): + self._log.info("updating config account: {}".format(account_msg)) + self.accounts[account_msg.name] = account_msg + + self._config_callbacks.on_update_apply(account_msg) + + def register(self): + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actually persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self._reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_account(cfg.name) + + # Handle Adds + for cfg in add_cfgs: + self.add_account(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_account(cfg) + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for Config Account """ + + action = xact_info.handle.query_action + self._log.debug("Config account on_prepare config received (action: %s): %s", + xact_info.handle.query_action, msg) + + if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]: + # If the account already exists, then this is an update. Update the + # config account and invoke the on_update_prepare callback + if msg.name in self.accounts: + self._log.debug("Config account already exists. Invoking on_prepare update request") + if msg.has_field("account_type"): + raise ConfigAccountError("Cannot change config's account-type") + + account = self.accounts[msg.name] + yield from self._config_callbacks.on_update_prepare(account) + + else: + self._log.debug("Config account does not already exist. 
Invoking on_prepare add request") + if not msg.has_field('account_type'): + raise ConfigAccountError("New Config account must contain account_type field.") + + account = msg + yield from self._config_callbacks.on_add_prepare(account) + + elif action == rwdts.QueryAction.DELETE: + # Check if the entire cloud account got deleted + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + if fref.is_field_deleted(): + yield from self._config_callbacks.on_delete_prepare(msg.name) + else: + self._log.error("Deleting individual fields for config account not supported") + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Config Account config using xpath: %s", + ConfigAgentSubscriber.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + self._reg = acg.register( + xpath=ConfigAgentSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + on_prepare=on_prepare, + ) \ No newline at end of file diff --git a/modules/core/mano/common/python/rift/mano/config_agent/operdata.py b/modules/core/mano/common/python/rift/mano/config_agent/operdata.py new file mode 100644 index 0000000..dcb0b22 --- /dev/null +++ b/modules/core/mano/common/python/rift/mano/config_agent/operdata.py @@ -0,0 +1,461 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import concurrent.futures +import time + +from gi.repository import ( + NsrYang, + RwNsrYang, + RwDts as rwdts) + +import rift.tasklets + + +class ConfigAgentJob(object): + """A wrapper over the config agent job object, providing some + convenience functions. + + YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains + || + ==> VNFRS + || + ==> Primitives + + """ + # The normalizes the state terms from Juju to our yang models + # Juju : Yang model + STATUS_MAP = {"completed": "success", + "pending" : "pending", + "running" : "pending", + "failed" : "failure"} + + def __init__(self, nsr_id, job, tasks=None): + """ + Args: + nsr_id (uuid): ID of NSR record + job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object + tasks: List of asyncio.tasks. 
If provided the job monitor will + use it to monitor the tasks instead of the execution IDs + """ + self._job = job + self.nsr_id = nsr_id + self.tasks = tasks + + @property + def id(self): + """Job id""" + return self._job.job_id + + @property + def name(self): + """Job name""" + return self._job.job_name + + @property + def job_status(self): + """Status of the job (success|pending|failure)""" + return self._job.job_status + + @job_status.setter + def job_status(self, value): + """Setter for job status""" + self._job.job_status = value + + @property + def job(self): + """Gi object""" + return self._job + + @property + def xpath(self): + """Xpath of the job""" + return ("D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref='{}']" + + "/nsr:config-agent-job[nsr:job-id='{}']" + ).format(self.nsr_id, self.id) + + @staticmethod + def convert_rpc_input_to_job(nsr_id, rpc_output, tasks): + """A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive + to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang) + + Args: + nsr_id (uuid): NSR ID + rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): RPC output + tasks (list): A list of asyncio.Tasks + + Returns: + ConfigAgentJob + """ + # Shortcuts to prevent the HUUGE names. + CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob + CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr + CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive + + job = CfgAgentJob.from_dict({ + "job_id": rpc_output.job_id, + "job_name" : rpc_output.name, + "job_status": "pending", + }) + + for vnfr in rpc_output.vnf_out_list: + vnfr_job = CfgAgentVnfr.from_dict({ + "id": vnfr.vnfr_id_ref, + "vnf_job_status": "pending", + }) + + for primitive in vnfr.vnf_out_primitive: + vnf_primitive = CfgAgentPrimitive.from_dict({ + "name": primitive.name, + "execution_status": ConfigAgentJob.STATUS_MAP[primitive.execution_status], + "execution_id": primitive.execution_id + }) + vnfr_job.primitive.append(vnf_primitive) + + job.vnfr.append(vnfr_job) + + return ConfigAgentJob(nsr_id, job, tasks) + + +class ConfigAgentJobMonitor(object): + """Job monitor: Polls the Juju controller and get the status. + Rules: + If all Primitive are success, then vnf & nsr status will be "success" + If any one Primitive reaches a failed state then both vnf and nsr will fail. + """ + POLLING_PERIOD = 2 + + def __init__(self, dts, log, job, executor, loop, config_plugin): + """ + Args: + dts : DTS handle + log : log handle + job (ConfigAgentJob): ConfigAgentJob instance + executor (concurrent.futures): Executor for juju status api calls + loop (eventloop): Current event loop instance + config_plugin : Config plugin to be used. 
+ """ + self.job = job + self.log = log + self.loop = loop + self.executor = executor + self.polling_period = ConfigAgentJobMonitor.POLLING_PERIOD + self.config_plugin = config_plugin + self.dts = dts + + @asyncio.coroutine + def _monitor_processes(self, registration_handle): + result = 0 + for process in self.job.tasks: + rc = yield from process + self.log.debug("Process {} returned rc: {}".format(process, rc)) + result |= rc + + if result == 0: + self.job.job_status = "success" + else: + self.job.job_status = "failure" + + registration_handle.update_element(self.job.xpath, self.job.job) + + + @asyncio.coroutine + def publish_action_status(self): + """ + Starts publishing the status for jobs/primitives + """ + registration_handle = yield from self.dts.register( + xpath=self.job.xpath, + handler=rift.tasklets.DTS.RegistrationHandler(), + flags=(rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ), + ) + + self.log.debug('preparing to publish job status for {}'.format(self.job.xpath)) + + try: + registration_handle.create_element(self.job.xpath, self.job.job) + + # If the config is done via a user defined script + if self.job.tasks is not None: + yield from self._monitor_processes(registration_handle) + return + + prev = time.time() + # Run until pending moves to either failure/success + while self.job.job_status == "pending": + curr = time.time() + + if curr - prev < self.polling_period: + pause = self.polling_period - (curr - prev) + yield from asyncio.sleep(pause, loop=self.loop) + + prev = time.time() + + tasks = [] + for vnfr in self.job.job.vnfr: + task = self.loop.create_task(self.get_vnfr_status(vnfr)) + tasks.append(task) + + # Exit, if no tasks are found + if not tasks: + break + + yield from asyncio.wait(tasks, loop=self.loop) + + job_status = [task.result() for task in tasks] + + if "failure" in job_status: + self.job.job_status = "failure" + elif "pending" in job_status: + self.job.job_status = "pending" + else: + self.job.job_status = "success" + + # self.log.debug("Publishing job status: {} at {} for nsr id: {}".format( + # self.job.job_status, + # self.job.xpath, + # self.job.nsr_id)) + + registration_handle.update_element(self.job.xpath, self.job.job) + + + except Exception as e: + self.log.exception(e) + raise + + + @asyncio.coroutine + def get_vnfr_status(self, vnfr): + """Schedules tasks for all containing primitives and updates it's own + status. + + Args: + vnfr : Vnfr job record containing primitives. + + Returns: + (str): "success|failure|pending" + """ + tasks = [] + job_status = [] + + for primitive in vnfr.primitive: + if primitive.execution_id == "": + # TODO: For some config data, the id will be empty, check if + # mapping is needed. + job_status.append(primitive.execution_status) + continue + + task = self.loop.create_task(self.get_primitive_status(primitive)) + tasks.append(task) + + if tasks: + yield from asyncio.wait(tasks, loop=self.loop) + + job_status.extend([task.result() for task in tasks]) + if "failure" in job_status: + vnfr.vnf_job_status = "failure" + return "failure" + + elif "pending" in job_status: + vnfr.vnf_job_status = "pending" + return "pending" + + else: + vnfr.vnf_job_status = "success" + return "success" + + @asyncio.coroutine + def get_primitive_status(self, primitive): + """ + Queries the juju api and gets the status of the execution id. + + Args: + primitive : Primitive containing the execution ID. 
+ """ + + try: + status = yield from self.loop.run_in_executor( + self.executor, + self.config_plugin.get_action_status, + primitive.execution_id + ) + # self.log.debug("Got {} for execution id: {}".format( + # status, + # primitive.execution_id)) + except Exception as e: + self.log.exception(e) + status = "failed" + + # Handle case status is None + if status: + primitive.execution_status = ConfigAgentJob.STATUS_MAP[status] + else: + primitive.execution_status = "failure" + + return primitive.execution_status + + +class CfgAgentJobDtsHandler(object): + """Dts Handler for CfgAgent""" + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr/nsr:config-agent-job" + + def __init__(self, dts, log, loop, nsm, cfgm): + """ + Args: + dts : Dts Handle. + log : Log handle. + loop : Event loop. + nsm : NsmManager. + cfgm : ConfigManager. + """ + self._dts = dts + self._log = log + self._loop = loop + self._cfgm = cfgm + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NSManager manager instance """ + return self._nsm + + @property + def cfgm(self): + """ Return the ConfigManager manager instance """ + return self._cfgm + + @staticmethod + def cfg_job_xpath(nsr_id, job_id): + return ("D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" + + "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id) + + @asyncio.coroutine + def register(self): + """ Register for NS monitoring read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + if action == rwdts.QueryAction.READ: + schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + try: + nsr_id = path_entry.key00.ns_instance_config_ref + + nsr_ids = [] + if nsr_id is None or nsr_id == "": + nsrs = list(self.nsm.nsrs.values()) + nsr_ids = [nsr.id for nsr in nsrs] + else: + nsr_ids = [nsr_id] + + for nsr_id in nsr_ids: + job = self.cfgm.get_job(nsr_id) + + # If no jobs are queued for the NSR + if job is None: + continue + + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.job_id), + job) + + except Exception as e: + self._log.exception("Caught exception:", str(e)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + else: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER, + ) + + +class ConfigAgentJobManager(object): + """A central class that manager all the Config Agent related data, + Including updating the status + + TODO: Needs to support multiple config agents. 
+ """ + def __init__(self, dts, log, loop, nsm): + """ + Args: + dts : Dts handle + log : Log handler + loop : Event loop + nsm : NsmTasklet instance + """ + self.jobs = {} + self.dts = dts + self.log = log + self.loop = loop + self.nsm = nsm + self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self) + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + + def add_job(self, rpc_output, tasks=None): + """Once an RPC is trigger add a now job + + Args: + rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output + tasks(list) A list of asyncio.Tasks + + """ + nsr_id = rpc_output.nsr_id_ref + self.jobs[nsr_id] = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks) + + self.log.debug("Creating a job monitor for Job id: {}".format( + rpc_output.job_id)) + + # For every Job we will schedule a new monitoring process. + job_monitor = ConfigAgentJobMonitor( + self.dts, + self.log, + self.jobs[nsr_id], + self.executor, + self.loop, + self.nsm.config_agent_plugins[0] # Hack + ) + task = self.loop.create_task(job_monitor.publish_action_status()) + + def get_job(self, nsr_id): + """Get the job associated with the NSR Id, if present.""" + try: + return self.jobs[nsr_id].job + except KeyError: + return None + + @asyncio.coroutine + def register(self): + yield from self.handler.register() \ No newline at end of file diff --git a/modules/core/mano/common/rw_gen_package.py b/modules/core/mano/common/rw_gen_package.py new file mode 100755 index 0000000..b9a0d8c --- /dev/null +++ b/modules/core/mano/common/rw_gen_package.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import os +import subprocess +import argparse +import shutil +import xml.etree.ElementTree as etree + +from gi.repository import ( + RwYang, + NsdYang, + RwNsdYang, + VnfdYang, + RwVnfdYang, + VldYang, + RwVldYang +) + +def read_from_file(module_list, infile, input_format, descr_type): + model = RwYang.Model.create_libncx() + for module in module_list: + model.load_module(module) + + descr = None + if descr_type == "nsd": + descr = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd() + else: + descr = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd() + + if input_format == 'json': + json_str = open(infile).read() + descr.from_json(model, json_str) + + elif input_format.strip() == 'xml': + tree = etree.parse(infile) + root = tree.getroot() + xmlstr = etree.tostring(root, encoding="unicode") + descr.from_xml_v2(model, xmlstr) + else: + raise ValueError("Invalid input format for the descriptor") + + return descr + +def write_to_file(name, outdir, infile, descr_type): + dirpath = os.path.join(outdir, name, descr_type) + if not os.path.exists(dirpath): + os.makedirs(dirpath) + shutil.copy(infile, dirpath) + +def main(argv=sys.argv[1:]): + global outdir, output_format + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--infile', required=True, + type=lambda x: os.path.isfile(x) and x or parser.error("INFILE does not exist")) + parser.add_argument('-o', '--outdir', default=".", + type=lambda x: os.path.isdir(x) and x or parser.error("OUTDIR does not exist")) + parser.add_argument('-f', '--format', choices=['json', 'xml'], required=True) + parser.add_argument('-t', '--descriptor-type', choices=['nsd', 'vnfd'], required=True) + + args = parser.parse_args() + infile = args.infile + input_format = args.format + outdir = args.outdir + dtype = args.descriptor_type + + print('Reading file {} in format {}'.format(infile, input_format)) + module_list = ['vld', 'rw-vld'] + if dtype == 'nsd': + module_list.extend(['nsd', 'rw-nsd']) + else: + module_list.extend(['vnfd', 'rw-vnfd']) + + descr = read_from_file(module_list, args.infile, args.format, dtype) + + print("Creating {} descriptor for {}".format(dtype.upper(), descr.name)) + write_to_file(descr.name, outdir, infile, dtype) + status = subprocess.call(os.path.join(os.environ["RIFT_ROOT"], + "bin/generate_descriptor_pkg.sh %s %s" % (outdir, descr.name)), shell=True) + print("Status of %s descriptor package creation is: %s" % (dtype.upper(), status)) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/confd_client/CMakeLists.txt b/modules/core/mano/confd_client/CMakeLists.txt new file mode 100644 index 0000000..5de8f0a --- /dev/null +++ b/modules/core/mano/confd_client/CMakeLists.txt @@ -0,0 +1,16 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/04/30 +# + +cmake_minimum_required(VERSION 2.8) + +# confd_client executable +add_executable(confd_client confd_client.c) + +target_link_libraries(confd_client + ${CMAKE_INSTALL_PREFIX}/usr/local/confd/lib/libconfd.so + pthread + ) diff --git a/modules/core/mano/confd_client/Makefile b/modules/core/mano/confd_client/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/confd_client/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., 
../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/confd_client/README b/modules/core/mano/confd_client/README new file mode 100644 index 0000000..aa711c0 --- /dev/null +++ b/modules/core/mano/confd_client/README @@ -0,0 +1,8 @@ +This is barebones confd client test program. This is useful for confd module testing. To use this program follow these steps: + +1. Reserve and login to a VM a root +2. cd ${RIFT_ROOT} +3. ./rift-shell -e +4. cd modules/core/mc/confd_client +4. ./confd_client_opdata.sh (will measure the rate for fetching operational data) +5. ./confd_client_config.sh (will measure the rate of config writes and reads) diff --git a/modules/core/mano/confd_client/confd_client.c b/modules/core/mano/confd_client/confd_client.c new file mode 100644 index 0000000..9c0613e --- /dev/null +++ b/modules/core/mano/confd_client/confd_client.c @@ -0,0 +1,436 @@ +/* * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+/*
+ *
+ *
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <confd_lib.h>
+#include "confd_cdb.h"
+#include "confd_dp.h"
+
+static struct confd_daemon_ctx *dctx;
+static int ctlsock;
+static int workersock;
+
+typedef struct _foodata {
+    char *name;
+    struct _foodata *next;
+} foodata_t;
+
+typedef struct _opdata {
+    foodata_t *foo;
+} opdata_t;
+
+opdata_t *g_opdata = NULL;
+
+int process_confd_subscription(int subsock)
+{
+    int confd_result, flags, length, *subscription_points, i, j, nvalues;
+    enum cdb_sub_notification type;
+    confd_tag_value_t *values;
+
+    confd_result = cdb_read_subscription_socket2(subsock,
+                                                 &type,
+                                                 &flags,
+                                                 &subscription_points,
+                                                 &length);
+
+    if (confd_result != CONFD_OK) {
+        confd_fatal("Failed to read subscription data\n");
+    }
+
+    switch (type) {
+    case CDB_SUB_PREPARE:
+        for (i = 0; i < length; i++) {
+            printf("i = %d, point = %d\n", i, subscription_points[i]);
+            if (cdb_get_modifications(subsock, subscription_points[i], flags, &values, &nvalues,
+                                      "/") == CONFD_OK) {
+                for (j = 0; j < nvalues; j++) {
+                    printf("j = %d\n", j);
+                    confd_free_value(CONFD_GET_TAG_VALUE(&values[j]));
+                }
+            }
+        }
+        cdb_sync_subscription_socket(subsock, CDB_DONE_PRIORITY);
+        fprintf(stdout, "CDB_SUB_PREPARE\n");
+        break;
+
+    case CDB_SUB_COMMIT:
+        cdb_sync_subscription_socket(subsock, CDB_DONE_PRIORITY);
+        fprintf(stdout, "CDB_SUB_COMMIT\n");
+        break;
+
+    case CDB_SUB_ABORT:
+        fprintf(stdout, "CDB_SUB_ABORT\n");
+        break;
+
+    default:
+        confd_fatal("Invalid type %d in cdb_read_subscription_socket2\n", type);
+    }
+
+    return 0;
+}
+
+static int do_init_action(struct confd_user_info *uinfo)
+{
+    int ret = CONFD_OK;
+    // fprintf(stdout, "init_action called\n");
+    confd_action_set_fd(uinfo, workersock);
+    return ret;
+}
+
+static int do_rw_action(struct confd_user_info *uinfo,
+                        struct xml_tag *name,
+                        confd_hkeypath_t *kp,
+                        confd_tag_value_t *params,
+                        int nparams)
+{
+    // confd_tag_value_t reply[2];
+    // int status;
+    // char *ret_status;
+    int i;
+    char buf[BUFSIZ];
+
+    /* Just print the parameters and return */
+
+    for (i = 0; i < nparams; i++) {
+        confd_pp_value(buf, sizeof(buf), CONFD_GET_TAG_VALUE(&params[i]));
+        printf("param %2d: %9u:%-9u, %s\n", i, CONFD_GET_TAG_NS(&params[i]),
+               CONFD_GET_TAG_TAG(&params[i]), buf);
+    }
+
+    i = 0;
+    // CONFD_SET_TAG_INT32(&reply[i], NULL, 0); i++;
+    // CONFD_SET_TAG_STR(&reply[i], NULL, "success"); i++;
+    confd_action_reply_values(uinfo, NULL, i);
+
+    return CONFD_OK;
+}
+
+static int get_next(struct confd_trans_ctx *tctx,
+                    confd_hkeypath_t *keypath,
+                    long next)
+{
+    opdata_t *opdata = tctx->t_opaque;
+    foodata_t *curr;
+    confd_value_t v[2];
+
+    if (next == -1) { /* first call */
+        curr = opdata->foo;
+    } else {
+        curr = (foodata_t *)next;
+    }
+
+    if (curr == NULL) {
+        confd_data_reply_next_key(tctx, NULL, -1, -1);
+        return CONFD_OK;
+    }
+
+    CONFD_SET_STR(&v[0], curr->name);
+    confd_data_reply_next_key(tctx, &v[0], 1, (long)curr->next);
+    return CONFD_OK;
+}
+
+static foodata_t *find_foo(confd_hkeypath_t *keypath, opdata_t *dp)
+{
+    char *name = (char*)CONFD_GET_BUFPTR(&keypath->v[1][0]);
+    foodata_t *foo = dp->foo;
+    while (foo != NULL) {
+        if (strcmp(foo->name, name) == 0) {
+            return foo;
+        }
+        foo = foo->next;
+    }
+    return NULL;
+}
+
+/* Keypath example */
+/* /arpentries/arpe{192.168.1.1 eth0}/hwaddr */
+/*      3          2                   1   0 */
+static int get_elem(struct confd_trans_ctx *tctx,
+                    confd_hkeypath_t *keypath)
+{
+    confd_value_t v;
+    foodata_t *foo = find_foo(keypath, tctx->t_opaque);
+    if (foo == NULL) {
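+        /* No list entry matches this key: reply "not found" so ConfD
+         * reports the element as non-existent. */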
confd_data_reply_not_found(tctx); + return CONFD_OK; + } + + CONFD_SET_STR(&v, foo->name); + confd_data_reply_value(tctx, &v); + + return CONFD_OK; +} + +static foodata_t *create_dummy_foodata_list(int count) +{ + foodata_t *head, *curr, *prev; + int i; + char buf[64]; + + head = prev = curr = NULL; + for (i = 0; i < count; ++i) { + curr = malloc(sizeof(foodata_t)); + memset(curr, 0, sizeof(foodata_t)); + snprintf(buf, 64, "foo%d", i); + curr->name = strdup(buf); + if (prev) { + prev->next = curr; + } else { + head = curr; + } + prev = curr; + } + + return head; +} + +static void free_foodata_list(foodata_t *foo) +{ + foodata_t *curr, *next; + curr = foo; + while (curr) { + next = curr->next; + if (curr->name) { + free(curr->name); + } + free(curr); + curr = next; + } +} + +static void print_foodata_list(foodata_t *foo) +{ + foodata_t *curr = foo; + while (curr) { + // fprintf(stdout, "%s\n", curr->name); + curr = curr->next; + } +} + +static int s_init(struct confd_trans_ctx *tctx) +{ + opdata_t *opdata; + if ((opdata = malloc(sizeof(opdata_t))) == NULL) { + return CONFD_ERR; + } + + memset(opdata, 0, sizeof(opdata_t)); + opdata->foo = create_dummy_foodata_list(10); + print_foodata_list(opdata->foo); + tctx->t_opaque = opdata; + confd_trans_set_fd(tctx, workersock); + return CONFD_OK; +} + +static int s_finish(struct confd_trans_ctx *tctx) +{ + opdata_t *opdata = tctx->t_opaque; + if (opdata != NULL) { + free_foodata_list(opdata->foo); + free(opdata); + } + + return CONFD_OK; +} + +int main(int argc, char **argv) +{ + struct sockaddr_in addr; + int debuglevel = CONFD_TRACE; + struct confd_trans_cbs trans; + struct confd_data_cbs data; + struct confd_action_cbs action; + int i; + + int subsock, datasock; + int status; + int spoint; + + addr.sin_addr.s_addr = inet_addr("127.0.0.1"); + addr.sin_family = AF_INET; + addr.sin_port = htons(CONFD_PORT); + + /** + * Setup CDB subscription socket + */ + confd_init(argv[0], stderr, CONFD_DEBUG); + if ((subsock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open subscription socket\n"); + } + + printf("Subscription socket: %d\n", subsock); + + for (i = 1; i < 10; ++i) { + if (cdb_connect(subsock, CDB_SUBSCRIPTION_SOCKET, + (struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + sleep(2); + fprintf(stdout, "Failed in confd_connect() {attempt: %d}\n", i); + } else { + fprintf(stdout, "confd_connect succeeded\n"); + break; + } + } + + if ((status = cdb_subscribe2(subsock, CDB_SUB_RUNNING_TWOPHASE, 0, 0, &spoint, 0, "/")) + != CONFD_OK) { + fprintf(stderr, "Terminate: subscribe %d\n", status); + exit(1); + } + + if (cdb_subscribe_done(subsock) != CONFD_OK) { + confd_fatal("cdb_subscribe_done() failed"); + } + + /** + * Setup CBD data socket + */ + + if ((datasock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open data socket\n"); + } + + if (cdb_connect(datasock, CDB_DATA_SOCKET, + (struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + confd_fatal("Failed to confd_connect() to confd \n"); + } + + memset(&trans, 0, sizeof (struct confd_trans_cbs)); + trans.init = s_init; + trans.finish = s_finish; + + memset(&data, 0, sizeof (struct confd_data_cbs)); + data.get_elem = get_elem; + data.get_next = get_next; + strcpy(data.callpoint, "base_show"); + + memset(&action, 0, sizeof (action)); + strcpy(action.actionpoint, "rw_action"); + action.init = do_init_action; + action.action = do_rw_action; + + + /* initialize confd library */ + confd_init("confd_client_op_data_daemon", stderr, debuglevel); + + + 
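+    /* Retry schema loading a few times: confd may still be starting up
+     * when this client launches, so each failed attempt sleeps and retries. */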
for (i = 1; i < 10; ++i) { + if (confd_load_schemas((struct sockaddr*)&addr, + sizeof(struct sockaddr_in)) != CONFD_OK) { + fprintf(stdout, "Failed to load schemas from confd {attempt: %d}\n", i); + sleep(2); + } else { + fprintf(stdout, "confd_load_schemas succeeded\n"); + break; + } + } + + if ((dctx = confd_init_daemon("confd_client_op_data_daemon")) == NULL) { + confd_fatal("Failed to initialize confdlib\n"); + } + + /* Create the first control socket, all requests to */ + /* create new transactions arrive here */ + if ((ctlsock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open ctlsocket\n"); + } + + if (confd_connect(dctx, ctlsock, CONTROL_SOCKET, (struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + confd_fatal("Failed to confd_connect() to confd \n"); + } + + /* Also establish a workersocket, this is the most simple */ + /* case where we have just one ctlsock and one workersock */ + if ((workersock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { + confd_fatal("Failed to open workersocket\n"); + } + + if (confd_connect(dctx, workersock, WORKER_SOCKET,(struct sockaddr*)&addr, + sizeof (struct sockaddr_in)) < 0) { + confd_fatal("Failed to confd_connect() to confd \n"); + } + + if (confd_register_trans_cb(dctx, &trans) == CONFD_ERR) { + confd_fatal("Failed to register trans cb \n"); + } + + if (confd_register_data_cb(dctx, &data) == CONFD_ERR) { + confd_fatal("Failed to register data cb \n"); + } + + if (confd_register_action_cbs(dctx, &action) == CONFD_ERR) { + confd_fatal("Failed to register action cb \n"); + } + + if (confd_register_done(dctx) != CONFD_OK) { + confd_fatal("Failed to complete registration \n"); + } + + while(1) { + struct pollfd set[3]; + int ret; + set[0].fd = ctlsock; + set[0].events = POLLIN; + set[0].revents = 0; + set[1].fd = workersock; + set[1].events = POLLIN; + set[1].revents = 0; + set[2].fd = subsock; + set[2].events = POLLIN; + set[2].revents = 0; + if (poll(set, sizeof(set)/sizeof(*set), -1) < 0) { + perror("Poll failed:"); + continue; + } + /* Check for I/O */ + if (set[0].revents & POLLIN) { + if ((ret = confd_fd_ready(dctx, ctlsock)) == CONFD_EOF) { + confd_fatal("Control socket closed\n"); + } else if (ret == CONFD_ERR && confd_errno != CONFD_ERR_EXTERNAL) { + confd_fatal("Error on control socket request: %s (%d): %s\n", + confd_strerror(confd_errno), confd_errno, confd_lasterr()); + } + } + if (set[1].revents & POLLIN) { + if ((ret = confd_fd_ready(dctx, workersock)) == CONFD_EOF) { + confd_fatal("Worker socket closed\n"); + } else if (ret == CONFD_ERR && confd_errno != CONFD_ERR_EXTERNAL) { + confd_fatal("Error on worker socket request: %s (%d): %s\n", + confd_strerror(confd_errno), confd_errno, confd_lasterr()); + } + } + if (set[2].revents & POLLIN) { + process_confd_subscription(set[2].fd); + } + } + + return 0; +} diff --git a/modules/core/mano/confd_client/confd_client.py b/modules/core/mano/confd_client/confd_client.py new file mode 100755 index 0000000..7aa52e5 --- /dev/null +++ b/modules/core/mano/confd_client/confd_client.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python2 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import subprocess
+import contextlib
+import rift.auto.proxy
+import sys
+import os
+import time
+import rw_peas
+import requests
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+gi.require_version('YangModelPlugin', '1.0')
+from gi.repository import RwMcYang
+
+# NOTE: This script measures single-threaded performance,
+# which also gives an idea of latency.
+# To get throughput numbers, multiple parallel clients may be needed.
+
+
+yang = rw_peas.PeasPlugin('yangmodel_plugin-c', 'YangModelPlugin-1.0')
+yang_model_api = yang.get_interface('Model')
+yang_model = yang_model_api.alloc()
+mc_module = yang_model_api.load_module(yang_model, 'rw-mc')
+
+@contextlib.contextmanager
+def start_confd():
+    print("Starting confd")
+    proc = subprocess.Popen(["./usr/bin/rw_confd"])
+    try:
+        yield
+    finally:
+        print("Killing confd")
+        proc.kill()
+
+@contextlib.contextmanager
+def start_confd_client():
+    print("Starting confd_client")
+    proc = subprocess.Popen(["{}/.build/modules/core/mc/src/core_mc-build/confd_client/confd_client".format(
+        os.environ["RIFT_ROOT"])
+        ])
+    try:
+        yield
+    finally:
+        proc.kill()
+        print("Killed confd_client")
+
+def run_rpc_perf_test(proxy, num_rpcs=1):
+    start_time = time.time()
+
+    for i in range(1, num_rpcs + 1):
+        start = RwMcYang.StartLaunchpadInput()
+        start.federation_name = "lp_%s" % i
+        print(proxy.rpc(start.to_xml(yang_model)))
+
+    stop_time = time.time()
+
+    print("Executed %s rpcs in %s seconds" % (num_rpcs, stop_time - start_time))
+    return (stop_time - start_time)
+
+
+def run_federation_config_http_perf_test(num_federations=1):
+    session = requests.Session()
+
+    start_time = time.time()
+    for i in range(1, num_federations + 1):
+        req = session.post(
+            url="http://localhost:8008/api/config",
+            json={"federation": {"name": "foo_%s" % i}},
+            headers={'Content-Type': 'application/vnd.yang.data+json'},
+            auth=('admin', 'admin')
+        )
+        req.raise_for_status()
+    stop_time = time.time()
+
+    print("Configured %s federations using restconf in %s seconds" % (num_federations, stop_time - start_time))
+    return (stop_time - start_time)
+
+def run_opdata_get_opdata_perf_test(proxy, num_gets=1):
+    start_time = time.time()
+
+    for i in range(1, num_gets + 1):
+        print(proxy.get_from_xpath(filter_xpath="/opdata"))
+
+    stop_time = time.time()
+    print("Retrieved %s opdata in %s seconds" % (num_gets, stop_time - start_time))
+    return (stop_time - start_time)
+
+def run_federation_config_perf_test(proxy, num_federations=1):
+    start_time = time.time()
+
+    for i in range(1, num_federations + 1):
+        fed = RwMcYang.FederationConfig()
+        fed.name = "foobar_%s" % i
+        proxy.merge_config(fed.to_xml(yang_model))
+
+    stop_time = time.time()
+
+    print("Configured %s federations using netconf in %s seconds" % (num_federations, stop_time - start_time))
+    return (stop_time - start_time)
+
+def run_federation_get_config_perf_test(proxy, num_gets=1):
+    start_time = time.time()
+
+    for i in range(1, num_gets + 1):
+        proxy.get_config(filter_xpath="/federation")
+
+    stop_time = time.time()
+
+    print("Retrieved %s federations in %s seconds" % (num_gets, stop_time - start_time))
+    return (stop_time - start_time)
+
+def main():
+    with start_confd():
+        with start_confd_client():
+            nc_proxy = rift.auto.proxy.NetconfProxy()
+            nc_proxy.connect()
+            n_fed = 10
+            n_fed_get = 100
+            n_opdata_get = 100
+            n_rpc = 100
+            config_time = run_federation_config_perf_test(nc_proxy, num_federations=n_fed)
+            config_get_time = run_federation_get_config_perf_test(nc_proxy, num_gets=n_fed_get)
+            opdata_get_time = run_opdata_get_opdata_perf_test(nc_proxy, num_gets=n_opdata_get)
+            rpc_time = run_rpc_perf_test(nc_proxy, num_rpcs=n_rpc)
+
+            print("")
+            print("..............................................")
+            print("CONFD Performance Results Using Netconf Client")
+            print("..............................................")
+            print("Rate of config writes: %d" % (n_fed/config_time))
+            print("Rate of config reads : %d" % (n_fed_get/config_get_time))
+            print("Rate of opdata reads : %d" % (n_opdata_get/opdata_get_time))
+            print("Rate of rpc calls : %d" % (n_rpc/rpc_time))
+            print("* Config read is reading a list with %d entries" % n_fed)
+            print("* Opdata read is reading a list with 5 entries")
+            print("..............................................")
+
+if __name__ == "__main__":
+    if "RIFT_ROOT" not in os.environ:
+        print("Must be in rift shell to run.")
+        sys.exit(1)
+
+    os.chdir(os.environ["RIFT_INSTALL"])
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/confd_client/confd_client.sh b/modules/core/mano/confd_client/confd_client.sh
new file mode 100755
index 0000000..aaa1638
--- /dev/null
+++ b/modules/core/mano/confd_client/confd_client.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+#
+
+echo "Starting confd"
+cd $RIFT_ROOT/.install
+pwd
+./usr/bin/rw_confd&
+
+
+echo "Starting the confd_client"
+cd $RIFT_ROOT/.build/modules/core/mc/src/core_mc-build/confd_client/
+./confd_client&
+
+echo "sleeping for 20 secs for confd to complete initialization"
+sleep 20
+
+cd $RIFT_ROOT/modules/core/mc/confd_client
+time ./test.sh
+
+
+
+# echo "Testing confd config write performance"
+# echo "Sending 20 create federation requests..."
+
+# time for i in `seq 1 20`; do curl -d '{"federation": {"name": "foobar'$i'"}}' -H 'Content-Type: application/vnd.yang.data+json' http://localhost:8008/api/running -uadmin:admin -v; done
+
+# echo "Testing confd config read performance"
+# echo "Sending 50 read federation requests..."
+# time for i in `seq 1 50`; do curl -s -H 'Content-Type: application/vnd.yang.data+json' http://localhost:8008/api/running/federation -uadmin:admin -v -X GET; done
+
+# echo "Testing confd operational data get performance"
+# echo "Sending 200 opdata read requests..."
+ +# time for i in `seq 1 200`; do curl -s -H "Content-Type: application/vnd.yang.data+json" http://localhost:8008/api/operational/opdata -uadmin:admin -v -X GET; done + +killall confd +trap 'kill $(jobs -pr)' SIGINT SIGTERM EXIT \ No newline at end of file diff --git a/modules/core/mano/confd_client/test.sh b/modules/core/mano/confd_client/test.sh new file mode 100755 index 0000000..938328c --- /dev/null +++ b/modules/core/mano/confd_client/test.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# + +# This script tests the throughput of get operations. +# change iter and loop variables + +NETCONF_CONSOLE_DIR=${RIFT_ROOT}/.install/usr/local/confd/bin + +iter=100 +loop=30 + +for i in `seq 1 $loop`; +do + echo "Background script $i" + ${NETCONF_CONSOLE_DIR}/netconf-console-tcp -s all --iter=$iter --get -x /opdata& +done + +wait + +total=$(($iter * $loop)) +echo "Total number of netconf operations=$total" \ No newline at end of file diff --git a/modules/core/mano/examples/CMakeLists.txt b/modules/core/mano/examples/CMakeLists.txt new file mode 100644 index 0000000..ce7be69 --- /dev/null +++ b/modules/core/mano/examples/CMakeLists.txt @@ -0,0 +1,23 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 03/26/2014 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwmano_examples) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + + +## +# Include the subdirs +## +set(subdirs + ping_pong_ns + ) + +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/examples/Makefile b/modules/core/mano/examples/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/examples/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/examples/ping_pong_ns/CMakeLists.txt b/modules/core/mano/examples/ping_pong_ns/CMakeLists.txt new file mode 100644 index 0000000..5a2d6f9 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/CMakeLists.txt @@ -0,0 +1,49 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 03/26/2014 +# + +cmake_minimum_required(VERSION 2.8) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/generate_packages.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh + ESCAPE_QUOTES @ONLY + ) + +set(PACKAGE_OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_image.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_image.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_epa.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_epa.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_epa.tar.gz) + +add_custom_command( + OUTPUT ${PACKAGE_OUTPUT} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/ping_pong_nsd.py + ) + +add_custom_target(ping_pong_pkg_gen ALL + DEPENDS ${PACKAGE_OUTPUT} + mano_yang + ) + +install( + FILES ${PACKAGE_OUTPUT} + DESTINATION + usr/rift/mano/examples/ping_pong_ns + COMPONENT ${PKG_LONG_NAME} + ) + +rift_python_install_tree( + COMPONENT ${PKG_LONG_NAME} + FILES + rift/mano/examples/ping_pong_nsd.py + ) + diff --git a/modules/core/mano/examples/ping_pong_ns/Makefile b/modules/core/mano/examples/ping_pong_ns/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/examples/ping_pong_ns/config_desc.py b/modules/core/mano/examples/ping_pong_ns/config_desc.py new file mode 100755 index 0000000..2afc170 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/config_desc.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
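+
+# config_desc.py pushes VLD, VNFD, and NSD descriptor XML files to a running
+# confd instance over netconf (VNFDs first, then VLDs, then NSDs; see main()).
+#
+# A hypothetical invocation (file names are illustrative only):
+#
+#   ./config_desc.py --confd-host 127.0.0.1 \
+#       --vnfd-xml-file ping_vnfd.xml --vnfd-xml-file pong_vnfd.xml \
+#       --nsd-xml-file ping_pong_nsd.xml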
+
+
+#
+#
+import argparse
+import logging
+import rift.auto.proxy
+import rift.vcs
+import sys
+
+import gi
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import NsdYang, VldYang, VnfdYang, RwYang
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+model = RwYang.Model.create_libncx()
+model.load_schema_ypbc(VldYang.get_schema())
+model.load_schema_ypbc(NsdYang.get_schema())
+model.load_schema_ypbc(VnfdYang.get_schema())
+
+
+def configure_vld(proxy, vld_xml_hdl):
+    vld_xml = vld_xml_hdl.read()
+    logger.debug("Attempting to deserialize XML into VLD protobuf: %s", vld_xml)
+    vld = VldYang.YangData_Vld_VldCatalog_Vld()
+    vld.from_xml_v2(model, vld_xml)
+
+    logger.debug("Sending VLD to netconf: %s", vld)
+    proxy.merge_config(vld.to_xml_v2(model))
+
+
+def configure_vnfd(proxy, vnfd_xml_hdl):
+    vnfd_xml = vnfd_xml_hdl.read()
+    logger.debug("Attempting to deserialize XML into VNFD protobuf: %s", vnfd_xml)
+    vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+    vnfd.from_xml_v2(model, vnfd_xml)
+
+    logger.debug("Sending VNFD to netconf: %s", vnfd)
+    proxy.merge_config(vnfd.to_xml_v2(model))
+
+
+def configure_nsd(proxy, nsd_xml_hdl):
+    nsd_xml = nsd_xml_hdl.read()
+    logger.debug("Attempting to deserialize XML into NSD protobuf: %s", nsd_xml)
+    nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+    nsd.from_xml_v2(model, nsd_xml)
+
+    logger.debug("Sending NSD to netconf: %s", nsd)
+    proxy.merge_config(nsd.to_xml_v2(model))
+
+
+def parse_args(argv=sys.argv[1:]):
+    """Create a parser which includes all generic demo arguments and parse args
+
+    Arguments:
+        argv - arguments to be parsed
+
+    Returns: Parsed argument namespace
+    """
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--confd-host',
+        default="127.0.0.1",
+        help="Hostname or IP where the confd netconf server is running.")
+
+    parser.add_argument(
+        '--vld-xml-file',
+        action="append",
+        default=[],
+        type=argparse.FileType(),
+        help="VLD XML File Path",
+        )
+
+    parser.add_argument(
+        '--vnfd-xml-file',
+        action="append",
+        default=[],
+        type=argparse.FileType(),
+        help="VNFD XML File Path",
+        )
+
+    parser.add_argument(
+        '--nsd-xml-file',
+        action="append",
+        default=[],
+        type=argparse.FileType(),
+        help="NSD XML File Path",
+        )
+
+    parser.add_argument(
+        '-v', '--verbose',
+        action='store_true',
+        help="Logging is normally set to an INFO level. When this flag "
+             "is used logging is set to DEBUG.")
+
+    args = parser.parse_args(argv)
+
+    return args
+
+
+def connect(args):
+    # Initialize Netconf Management Proxy
+    mgmt_proxy = rift.auto.proxy.NetconfProxy(args.confd_host)
+    mgmt_proxy.connect()
+
+    # Ensure system started
+    vcs_component_info = rift.vcs.mgmt.VcsComponentInfo(mgmt_proxy)
+    vcs_component_info.wait_until_system_started()
+
+    return mgmt_proxy
+
+
+def main():
+    args = parse_args()
+    proxy = connect(args)
+    for xml_file in args.vnfd_xml_file:
+        configure_vnfd(proxy, xml_file)
+
+    for xml_file in args.vld_xml_file:
+        configure_vld(proxy, xml_file)
+
+    for xml_file in args.nsd_xml_file:
+        configure_nsd(proxy, xml_file)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in b/modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in
new file mode 100755
index 0000000..d214b69
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/generate_packages.sh.in
@@ -0,0 +1,78 @@
+#!
/usr/bin/bash + +set -e +set -x + +SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@ +BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@ +PROJECT_TOP_DIR=@PROJECT_TOP_DIR@ +QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda.qcow2 +RIFT_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda.qcow2 +PONG_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2 +PING_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2 + +# These paths are needed for finding the overrides and so files +PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang +PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang +LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang + +# Remove any old directories +rm -rf ${BINARY_DIR}/ping_vnfd +rm -rf ${BINARY_DIR}/pong_vnfd +rm -rf ${BINARY_DIR}/ping_pong_nsd + +rm -rf ${BINARY_DIR}/ping_vnfd_with_image +rm -rf ${BINARY_DIR}/pong_vnfd_with_image + +rm -rf ${BINARY_DIR}/ping_vnfd_with_epa +rm -rf ${BINARY_DIR}/pong_vnfd_with_epa +rm -rf ${BINARY_DIR}/ping_pong_nsd_with_epa + + +# Generate image md5sum +ping_md5sum="$(md5sum ${PING_QCOW_IMAGE} | cut -f1 -d" ")" +pong_md5sum="$(md5sum ${PONG_QCOW_IMAGE} | cut -f1 -d" ")" + +# Generate the descriptors +${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR} --format=json --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum} + +# create directories for packages with images +cp -r ${BINARY_DIR}/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_image +cp -r ${BINARY_DIR}/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_image +mkdir -p ${BINARY_DIR}/ping_vnfd_with_image/images +mkdir -p ${BINARY_DIR}/pong_vnfd_with_image/images + +### Generate descriptors with EPA +${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_epa --format=json --epa --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum} + +### Move the generated artifacts to appropriate directories +mv ${BINARY_DIR}/with_epa/ping_vnfd_with_epa ${BINARY_DIR}/ping_vnfd_with_epa +mv ${BINARY_DIR}/with_epa/pong_vnfd_with_epa ${BINARY_DIR}/pong_vnfd_with_epa +mv ${BINARY_DIR}/with_epa/ping_pong_nsd_with_epa ${BINARY_DIR}/ping_pong_nsd_with_epa + +### ReMove the original directories +rm -rf ${BINARY_DIR}/with_epa + +# copy a dummy image for now +if [ -e ${PING_QCOW_IMAGE} ]; then + cp ${PING_QCOW_IMAGE} ${BINARY_DIR}/ping_vnfd_with_image/images/ + ${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_image +else + echo >&2 "Warn: Skipped creating ping_vnfd_with_image due to missing image: ${PING_QCOW_IMAGE}" +fi + +if [ -e ${PONG_QCOW_IMAGE} ]; then + cp ${PONG_QCOW_IMAGE} ${BINARY_DIR}/pong_vnfd_with_image/images/ + ${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_image +else + echo >&2 "Warn: Skipped creating pong_vnfd_with_image due to missing image: ${PONG_QCOW_IMAGE}" +fi + +# Generate the tar files +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd + +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_epa +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_epa +${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_epa diff --git 
a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py new file mode 100644 index 0000000..e57e943 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py new file mode 100644 index 0000000..9892643 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.py @@ -0,0 +1,314 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +from datetime import date +import logging +import json +import socket +import threading +import time + +import tornado.web + +from util.util import get_url_target + +class Ping(object): + def __init__(self): + self._log = logging.getLogger("ping") + self._log.setLevel(logging.DEBUG) + + self._ping_count = 0; + self._request_count = 0; + self._response_count = 0; + + self._pong_ip = None + self._pong_port = None + + self._send_rate = 1 # per second + + self._close_lock = threading.Lock() + + self._enabled = False + self._socket = None + + @property + def rate(self): + return self._send_rate + + @rate.setter + def rate(self, value): + self._log.debug("new rate: %s" % value) + self._send_rate = value + + @property + def pong_port(self): + return self._pong_port + + @pong_port.setter + def pong_port(self, value): + self._log.debug("new pong port: %s" % value) + self._pong_port = value + + @property + def pong_ip(self): + return self._pong_ip + + @pong_ip.setter + def pong_ip(self, value): + + self._log.debug("new pong ip: %s" % value) + self._pong_ip = value + + @property + def enabled(self): + return self._enabled + + @property + def request_count(self): + return self._request_count + + @property + def response_count(self): + return self._response_count + + def start(self): + self._log.debug("starting") + self._enabled = True + # self.open_socket() + self.send_thread = threading.Thread(target=self.send_ping) + self.recv_thread = threading.Thread(target=self.recv_resp) + self.send_thread.start() + self.recv_thread.start() + + def stop(self): + self._log.debug("stopping") + self._enabled = False + self.close_socket("stopping") + + def close_socket(self, msg): + self._close_lock.acquire() + if self._socket != None: + self._socket.close() + self._socket = None + 
self._log.info("Closed socket with msg={}".format(msg)) + self._close_lock.release() + + def open_socket(self): + try: + self._log.debug("construct socket") + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._socket.settimeout(1) + except socket.error as msg: + self._log.error("error constructing socket %s" % msg) + self._socket = None + + while self._enabled: + try: + self._log.info("Trying to connect....") + self._socket.connect((self.pong_ip, self.pong_port)) + self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + self._log.info("Socket connected") + break + except socket.error as msg: + time.sleep(1) + + + def send_ping(self): + self.open_socket() + + while self._enabled: + if self._socket != None: + req = "rwpingpong-{}".format(self._ping_count) + try: + self._log.info("sending: %s" %req) + self._socket.sendall(req) + self._ping_count += 1 + self._request_count += 1 + except socket.error as msg: + self._log.error("Error({}) sending data".format(msg)) + self.close_socket(msg) + return + + time.sleep(1.0/self._send_rate) + + self._log.info("Stopping send_ping") + + def recv_resp(self): + while self._enabled: + respb = None + if self._socket != None: + try: + respb = self._socket.recv(1024) + except socket.timeout: + continue + except socket.error as msg: + self._log.error("Error({}) receiving data".format(msg)) + time.sleep(1) + continue + # self.close_socket(msg) + # return + + if not respb: + continue + + resp = respb.decode('UTF-8') + self._response_count += 1 + self._log.info("receive: %s" % resp) + + self._log.info("Stopping recv_resp") + +class PingServerHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self, args): + response = {'ip': self._ping_instance.pong_ip, + 'port': self._ping_instance.pong_port} + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.write("Content-Type must be some kind of json 2") + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.write("Content-Type must be some kind of json 1") + self.set_status(405) + return + + if target == "server": + if type(json_dicts['port']) is not int: + self.set_status(405) + return + + if type(json_dicts['ip']) not in (str, unicode): + self.set_status(405) + return + + self._ping_instance.pong_ip = json_dicts['ip'] + self._ping_instance.pong_port = json_dicts['port'] + + else: + self.set_status(404) + return + + self.set_status(200) + +class PingAdminStatusHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self, args): + target = get_url_target(self.request.uri) + if target == "state": + value = "enabled" if self._ping_instance.enabled else "disabled" + + response = { 'adminstatus': value } + else: + self.set_status(404) + return + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.write("Content-Type must be some kind of json 2") + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.write("Content-Type must be some kind of json 1") + self.set_status(405) + return + + if target == "state": + if 
type(json_dicts['enable']) is not bool: + self.set_status(405) + return + + if json_dicts['enable']: + if not self._ping_instance.enabled: + self._ping_instance.start() + else: + self._ping_instance.stop() + + else: + self.set_status(404) + return + + self.set_status(200) + +class PingStatsHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self): + response = {'ping-request-tx-count': self._ping_instance.request_count, + 'ping-response-rx-count': self._ping_instance.response_count} + + self.write(response) + +class PingRateHandler(tornado.web.RequestHandler): + def initialize(self, ping_instance): + self._ping_instance = ping_instance + + def get(self, args): + response = { 'rate': self._ping_instance.rate } + + self.write(response) + + def post(self, args): + target = get_url_target(self.request.uri) + body = self.request.body.decode("utf-8") + body_header = self.request.headers.get("Content-Type") + + if "json" not in body_header: + self.set_status(405) + return + + try: + json_dicts = json.loads(body) + except: + self.set_status(405) + return + + if target == "rate": + if type(json_dicts['rate']) is not int: + self.set_status(405) + return + + self._ping_instance.rate = json_dicts['rate'] + else: + self.set_status(404) + return + + self.set_status(200) \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service new file mode 100644 index 0000000..cd0ac65 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/ping.service @@ -0,0 +1,12 @@ +[Unit] +Description=Ping Client +After=syslog.target network.target + +[Service] +Type=simple +ExecStart=/opt/rift/ping_pong_ns/start_ping + +[Install] +WantedBy=multi-user.target + + diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py new file mode 100644 index 0000000..ea8f552 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.py @@ -0,0 +1,334 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
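+
+# Design sketch: Pong runs one listener thread plus a small pool of Worker
+# threads. The listener accepts TCP connections and places them on a shared
+# Queue; each Worker pops a connection, reads one request, echoes it back,
+# and returns the connection to the Queue. Stats guards its counters with a
+# lock since several Workers update them concurrently.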
+
+#
+#
+
+from datetime import date
+from Queue import Queue
+import logging
+import json
+import socket
+import threading
+import time
+
+import tornado.web
+
+from util.util import get_url_target
+
+class Stats(object):
+    def __init__(self):
+        self._request_count = 0
+        self._response_count = 0
+
+        self._lock = threading.Lock()
+
+    @property
+    def request_count(self):
+        with self._lock:
+            return self._request_count
+
+    @request_count.setter
+    def request_count(self, value):
+        with self._lock:
+            self._request_count = value
+
+    @property
+    def response_count(self):
+        with self._lock:
+            return self._response_count
+
+    @response_count.setter
+    def response_count(self, value):
+        with self._lock:
+            self._response_count = value
+
+class Worker(threading.Thread):
+    def __init__(self, log, connections, stats):
+        super(Worker, self).__init__()
+        self._log = log
+        self._connections = connections
+        self._stats = stats
+
+        self._running = True
+
+        self._lock = threading.Lock()
+
+    @property
+    def running(self):
+        return self._running
+
+    @running.setter
+    def running(self, value):
+        self._running = value
+
+    def run(self):
+        while self.running:
+            try:
+                connection = self._connections.get_nowait()
+            except:
+                continue
+
+            try:
+                req = connection.recv(1024)
+            except socket.error as msg:
+                self._log.error("error with connection read: %s" % msg)
+                self._connections.put(connection)
+                continue
+
+            if not req:
+                self._connections.put(connection)
+                continue
+
+            resp = req.decode('UTF-8')
+            self._log.debug("got: %s", resp)
+
+            self._stats.request_count += 1
+
+            try:
+                connection.sendall(resp)
+                self._stats.response_count += 1
+            except socket.error as msg:
+                self._log.error("error with connection write: %s" % msg)
+                self._connections.put(connection)
+                continue
+
+            self._connections.put(connection)
+
+class Pong(object):
+    def __init__(self, worker_count=5):
+        self._log = logging.getLogger("pong")
+        self._log.setLevel(logging.DEBUG)
+
+        self.listen_ip = None
+        self.listen_port = None
+
+        self._lock = threading.Lock()
+
+        self._connections = Queue()
+
+        self._stats = Stats()
+
+        self._workers = list()
+
+        self._enabled = False
+
+        for _ in range(worker_count):
+            self._workers.append(Worker(self._log, self._connections, self._stats))
+
+    @property
+    def listen_port(self):
+        return self._listen_port
+
+    @listen_port.setter
+    def listen_port(self, value):
+        self._log.debug("new listen port: %s" % value)
+        self._listen_port = value
+
+    @property
+    def listen_ip(self):
+        return self._listen_ip
+
+    @listen_ip.setter
+    def listen_ip(self, value):
+        self._log.debug("new listen ip: %s" % value)
+        self._listen_ip = value
+
+
+    @property
+    def enabled(self):
+        with self._lock:
+            return self._enabled
+
+    @property
+    def request_count(self):
+        return self._stats.request_count
+
+    @property
+    def response_count(self):
+        return self._stats.response_count
+
+    def start(self):
+        self._log.debug("starting")
+        self._enabled = True
+        self.listener_thread = threading.Thread(target=self._listen)
+        self.listener_thread.start()
+        for worker in self._workers:
+            worker.start()
+
+    def stop(self):
+        with self._lock:
+            self._enabled = False
+
+        self._log.debug("stopping workers")
+        for worker in self._workers:
+            worker.running = False
+
+        self._log.debug("joining on workers")
+        for worker in self._workers:
+            if worker.is_alive():
+                worker.join()
+
+        while not self._connections.empty():
+            try:
+                connection = self._connections.get_nowait()
+                connection.close()
+            except:
+                pass
+
+    def close_socket(self, msg):
+        with self._lock:
+            if self._socket != None:
+                self._socket.shutdown(socket.SHUT_RD)
+                self._socket.close()
+                self._socket = None
+                self._log.info("Closed socket with msg={}".format(msg))
+
+    def _listen(self):
+        if self._listen_ip is None or self.listen_port is None:
+            self._log.error("address not properly configured to listen")
+            return
+
+        self._log.info("listen for incoming connections")
+        try:
+            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+            # self._socket.bind((self.listen_ip, self.listen_port))
+            self._socket.bind(("0.0.0.0", self.listen_port))
+            self._socket.settimeout(1)
+
+            while self.enabled:
+
+                try:
+                    self._socket.listen(1)
+                    connection, address = self._socket.accept()
+                except socket.timeout:
+                    continue
+                self._log.info("Accepted connection from {}".format(address))
+
+                self._connections.put(connection)
+            else:
+                self.stop()
+        except socket.error as msg:
+            self.close_socket(msg)
+
+class PongStatsHandler(tornado.web.RequestHandler):
+    def initialize(self, pong_instance):
+        self._pong_instance = pong_instance
+
+    def get(self):
+        response = {'ping-request-rx-count': self._pong_instance.request_count,
+                    'ping-response-tx-count': self._pong_instance.response_count}
+
+        self.write(response)
+
+
+class PongServerHandler(tornado.web.RequestHandler):
+    def initialize(self, pong_instance):
+        self._pong_instance = pong_instance
+
+    def get(self, args):
+        response = {'ip': self._pong_instance.listen_ip,
+                    'port': self._pong_instance.listen_port}
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
+        if "json" not in body_header:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)
+            return
+
+        try:
+            json_dicts = json.loads(body)
+        except:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)
+            return
+
+        if target == "server":
+
+            if type(json_dicts['port']) is not int:
+                self.set_status(405)
+                return
+
+            if type(json_dicts['ip']) not in (str, unicode):
+                self.set_status(405)
+                return
+
+            self._pong_instance.listen_ip = json_dicts['ip']
+            self._pong_instance.listen_port = json_dicts['port']
+
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
+
+class PongAdminStatusHandler(tornado.web.RequestHandler):
+    def initialize(self, pong_instance):
+        self._pong_instance = pong_instance
+
+    def get(self, args):
+        target = get_url_target(self.request.uri)
+
+        if target == "state":
+            value = "enabled" if self._pong_instance.enabled else "disabled"
+
+            response = { 'adminstatus': value }
+        else:
+            self.set_status(404)
+            return
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
+        if "json" not in body_header:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)
+            return
+
+        try:
+            json_dicts = json.loads(body)
+        except:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)
+            return
+
+        if target == "state":
+            if type(json_dicts['enable']) is not bool:
+                self.set_status(405)
+                return
+
+            if json_dicts['enable']:
+                if not self._pong_instance.enabled:
+                    self._pong_instance.start()
+            else:
+                self._pong_instance.stop()
+
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
\ No newline at end of file
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service
new file mode 100644
index 0000000..7d94836
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/pong.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Pong Server
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/opt/rift/ping_pong_ns/start_pong
+
+[Install]
+WantedBy=multi-user.target
+
+
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh
new file mode 100755
index 0000000..fc34710
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh
@@ -0,0 +1,139 @@
+#! /bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+#
+# Author(s): Anil Gunturu
+# Creation Date: 07/24/2014
+#
+
+##
+# This script is used to copy the riftware software into the qcow image.
+# It must be run on the grunt machine as root.
+##
+
+set -x
+set -e
+
+if ! [ $# -eq 1 ]; then
+    echo "Usage: $0 <ping_pong_ns directory>"
+    echo " Example:"
+    echo " $0 /net/boson/home1/agunturu/lepton/atg/modules/core/mc/examples/ping_pong_ns"
+    exit 1
+fi
+
+# Currently returning 0 on error as this script fails in Bangalore
+# systems and causes the jenkins spot_debug to fail
+function cleanup {
+    if [ "$(ls -A $MOUNT_PT)" ]; then
+        guestunmount $MOUNT_PT
+    fi
+    exit 0
+}
+trap cleanup EXIT
+
+MOUNT_PT=ping_pong/mnt$$
+
+if [ -d $MOUNT_PT ]; then
+    echo "ping_pong_mnt directory exists - deleting..!!"
+    guestunmount $MOUNT_PT || true
+    rm -rf ping_pong
+fi
+
+mkdir -p $MOUNT_PT
+FC20QCOW=Fedora-x86_64-20-20131211.1-sda.qcow2
+PINGQCOW=Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+PONGQCOW=Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+
+if [ ! -e ${RIFT_ROOT}/images/${FC20QCOW} ]; then
+    echo >&2 "Warn: Cannot prepare ping_pong qcow due to missing FC20 image: ${RIFT_ROOT}/images/${FC20QCOW}"
+    exit 0
+fi
+
+echo "Copying $FC20QCOW"
+cp ${RIFT_ROOT}/images/${FC20QCOW} ping_pong/${PINGQCOW}
+chmod +w ping_pong/${PINGQCOW}
+cp ${RIFT_ROOT}/images/${FC20QCOW} ping_pong/${PONGQCOW}
+chmod +w ping_pong/${PONGQCOW}
+
+CURRENT_DIR=$PWD
+echo "Mounting guestfs for $PINGQCOW"
+guestmount -a ping_pong/$PINGQCOW -m /dev/sda1 $MOUNT_PT
+
+echo "Setting up resolv.conf"
+# removed RIFT.io lab-centric setup in RIFT-11991
+#echo "search lab.riftio.com eng.riftio.com riftio.com" > $MOUNT_PT/etc/resolv.conf
+#echo "nameserver 10.64.1.3" >> $MOUNT_PT/etc/resolv.conf
+#echo "PEERDNS=no" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+
+# add a valid DNS server just in case
+echo "nameserver 8.8.8.8" > $MOUNT_PT/etc/resolv.conf
+echo "DEFROUTE=yes" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+
+for i in 1 2
+do
+    cat <<EOF >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth$i
+DEVICE="eth$i"
+BOOTPROTO="dhcp"
+ONBOOT="no"
+TYPE="Ethernet"
+DEFROUTE=no
+PEERDNS=no
+EOF
+done
+
+
+echo "Copying ping/pong ns..."
+cd $MOUNT_PT/opt
+mkdir rift
+cd rift
+cp -r $1 .
+cd $CURRENT_DIR
+mv $MOUNT_PT/opt/rift/ping_pong_ns/ping.service $MOUNT_PT/etc/systemd/system
+cp -ar /usr/lib/python2.7/site-packages/tornado $MOUNT_PT/usr/lib/python2.7/site-packages/
+guestunmount $MOUNT_PT
+
+echo "Mounting guestfs for $PONGQCOW"
+guestmount -a ping_pong/$PONGQCOW -m /dev/sda1 $MOUNT_PT
+
+echo "Setting up resolv.conf"
+echo "search lab.riftio.com eng.riftio.com riftio.com" > $MOUNT_PT/etc/resolv.conf
+echo "nameserver 10.64.1.3" >> $MOUNT_PT/etc/resolv.conf
+echo "PEERDNS=no" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+echo "DEFROUTE=yes" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+
+for i in 1 2
+do
+    cat <<EOF >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth$i
+DEVICE="eth$i"
+BOOTPROTO="dhcp"
+ONBOOT="no"
+DEFROUTE=no
+TYPE="Ethernet"
+PEERDNS=no
+EOF
+done
+
+echo "Copying ping/pong ns..."
+cd $MOUNT_PT/opt
+mkdir rift
+cd rift
+cp -r $1 .
+cd $CURRENT_DIR
+cp -ar /usr/lib/python2.7/site-packages/tornado $MOUNT_PT/usr/lib/python2.7/site-packages/
+mv $MOUNT_PT/opt/rift/ping_pong_ns/pong.service $MOUNT_PT/etc/systemd/system
+guestunmount $MOUNT_PT
\ No newline at end of file
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping
new file mode 100755
index 0000000..fb29422
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping
@@ -0,0 +1,5 @@
+#!/bin/bash
+ulimit -c 0
+#yum install -y python-tornado
+python /opt/rift/ping_pong_ns/start_ping.py 2>&1 | logger
+
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py
new file mode 100644
index 0000000..90400dd
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_ping.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+
+import argparse
+import signal
+import logging
+
+import tornado
+import tornado.httpserver
+
+from ping import (
+    Ping,
+    PingAdminStatusHandler,
+    PingServerHandler,
+    PingRateHandler,
+    PingStatsHandler,
+)
+from util.util import (
+    VersionHandler,
+)
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='(%(threadName)-10s) %(name)-8s :: %(message)s',
+)
+
+def main():
+    log = logging.getLogger("main")
+
+    # parse arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--ping-manager-port",
+        required=False,
+        default="18888",
+        help="port number for ping")
+
+    arguments = parser.parse_args()
+
+    # setup application
+    log.debug("setup application")
+    ping_instance = Ping()
+    ping_application_arguments = {'ping_instance': ping_instance}
+    ping_application = tornado.web.Application([
+        (r"/api/v1/ping/stats", PingStatsHandler, ping_application_arguments),
+        (r"/api/v1/ping/adminstatus/([a-z]+)", PingAdminStatusHandler, ping_application_arguments),
+        (r"/api/v1/ping/server/?([0-9a-z\.]*)", PingServerHandler, ping_application_arguments),
+        (r"/api/v1/ping/rate/?([0-9]*)", PingRateHandler, ping_application_arguments),
+        (r"/version", VersionHandler, ping_application_arguments)
+    ])
+    ping_server = tornado.httpserver.HTTPServer(
+        ping_application)
+
+    # setup SIGINT handler
+    log.debug("setup SIGINT handler")
+    def signal_handler(signal, frame):
+        print("")  # print newline to clear user input
+        log.info("Exiting")
+        ping_instance.stop()
+        ping_server.stop()
+        log.info("Sayonara!")
+        quit()
+
+    signal.signal(signal.SIGINT, signal_handler)
+
+    # start
+    log.debug("start")
+    try:
+        ping_server.listen(arguments.ping_manager_port)
+    except OSError:
+        print("port %s is already in use, exiting" % arguments.ping_manager_port)
+        return
+
+    tornado.ioloop.IOLoop.instance().start()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong
new file mode 100755
index 0000000..af46646
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong
@@ -0,0 +1,5 @@
+#!/bin/bash
+ulimit -c 0
+#yum install -y python-tornado
+python /opt/rift/ping_pong_ns/start_pong.py 2>&1 | logger
+
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py
new file mode 100644
index 0000000..ba0c9b7
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/start_pong.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+
+import argparse
+import signal
+import logging
+
+import tornado
+import tornado.httpserver
+
+from pong import (
+    Pong,
+    PongAdminStatusHandler,
+    PongServerHandler,
+    PongStatsHandler,
+)
+from util.util import (
+    VersionHandler,
+)
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='(%(threadName)-10s) %(name)-8s :: %(message)s',
+)
+
+def main():
+    log = logging.getLogger("main")
+
+    # parse arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--pong-manager-port",
+        required=False,
+        default="18889",
+        help="port number for pong")
+    parser.add_argument(
+        "--worker-count",
+        required=False,
+        type=int,
+        default=5,
+        help="number of pong workers")
+
+    arguments = parser.parse_args()
+
+    # setup application
+    log.debug("setup application")
+    pong_instance = Pong(arguments.worker_count)
+    pong_application_arguments = {'pong_instance': pong_instance}
+    pong_application = tornado.web.Application([
+        (r"/version", VersionHandler, pong_application_arguments),
+        (r"/api/v1/pong/stats", PongStatsHandler, pong_application_arguments),
+        (r"/api/v1/pong/server/?([0-9a-z\.]*)", PongServerHandler, pong_application_arguments),
+        (r"/api/v1/pong/adminstatus/([a-z]+)", PongAdminStatusHandler, pong_application_arguments)
+    ])
+    pong_server = tornado.httpserver.HTTPServer(
+        pong_application)
+
+    # setup SIGINT handler
+    log.debug("setup SIGINT handler")
+    def signal_handler(signal, frame):
+        print("")  # print newline to clear user input
+        log.info("Exiting")
+        pong_instance.stop()
+        pong_server.stop()
+        log.info("Sayonara!")
+        quit()
+
+    signal.signal(signal.SIGINT, signal_handler)
+
+    # start
+    log.debug("pong application listening on %s" % arguments.pong_manager_port)
+    try:
+        pong_server.listen(arguments.pong_manager_port)
+    except OSError:
+        print("port %s is already in use, exiting" % arguments.pong_manager_port)
+        return
+    tornado.ioloop.IOLoop.instance().start()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh
new file mode 100644
index 0000000..8bd480f
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/test/test.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
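+#
+# Example invocations (a usage sketch, assuming the default localhost
+# ports configured below; the server IP and port values are placeholders):
+#   ./test.sh ping enable
+#   ./test.sh pong server 10.0.0.2 5555
+#   ./test.sh stats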
+#
+
+#
+#
+
+
+pong_ip='localhost'
+pong_port=18889
+
+ping_ip='localhost'
+ping_port=18888
+
+if [ "$1" == "pong" ];
+then
+    if [ "$2" == "enable" ];
+    then
+        echo "enable pong"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":true}" \
+            http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state
+    fi
+    if [ "$2" == "disable" ];
+    then
+        echo "disable pong"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":false}" \
+            http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state
+    fi
+
+    if [ "$2" == "server" ];
+    then
+        echo "set server"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"ip\":\"$3\", \"port\":$4}" \
+            http://${pong_ip}:${pong_port}/api/v1/pong/server
+    fi
+
+    echo ""
+fi
+
+if [ "$1" == "ping" ];
+then
+    if [ "$2" == "enable" ];
+    then
+        echo "enable ping"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":true}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state
+    fi
+    if [ "$2" == "disable" ];
+    then
+        echo "disable ping"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":false}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state
+    fi
+    echo ""
+
+    if [ "$2" == "rate" ];
+    then
+        echo "set ping rate"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"rate\":$3}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/rate
+    fi
+    echo ""
+
+    if [ "$2" == "server" ];
+    then
+        echo "set server"
+
+        curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"ip\":\"$3\", \"port\":$4}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/server
+    fi
+    echo ""
+
+
+fi
+
+if [ "$1" == "stats" ];
+then
+    echo "ping stats:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/stats
+    echo ""
+
+    echo "pong stats:"
+    curl http://${pong_ip}:${pong_port}/api/v1/pong/stats
+    echo ""
+fi
+
+if [ "$1" == "config" ];
+then
+    echo "ping server:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/server
+    echo ""
+    echo "ping rate:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/rate
+    echo ""
+    echo "ping admin status:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state
+    echo ""
+    echo "pong server:"
+    curl http://${pong_ip}:${pong_port}/api/v1/pong/server
+    echo ""
+    echo "pong admin status:"
+    curl http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state
+    echo ""
+fi
\ No newline at end of file
diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data
new file mode 100644
index 0000000..9bf1d5b
--- /dev/null
+++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/user-data
@@ -0,0 +1,8 @@
+#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+ - [ systemctl, daemon-reload ]
+ - [ systemctl, enable, ping.service ]
+ - [ systemctl, start, --no-block, ping.service ]
diff --git
a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py new file mode 100644 index 0000000..e57e943 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py new file mode 100644 index 0000000..1d35ae5 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_ns/util/util.py @@ -0,0 +1,40 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +from datetime import date +import urlparse + +import tornado.web + +class VersionHandler(tornado.web.RequestHandler): + def initialize(self, instance): + self._instance = instance + + def get(self): + response = { 'version': '3.5.1', + 'last_build': date.today().isoformat() } + self.write(response) + +def get_url_target(url): + is_operation = False + url_parts = urlparse.urlsplit(url) + whole_url = url_parts[2] + + url_pieces = whole_url.split("/") + + return url_pieces[-1] \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py b/modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py new file mode 120000 index 0000000..3147ac8 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/ping_pong_nsd.py @@ -0,0 +1 @@ +rift/mano/examples/ping_pong_nsd.py \ No newline at end of file diff --git a/modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py b/modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py new file mode 100644 index 0000000..00f74ea --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/rift/mano/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py new file mode 100644 index 0000000..00f74ea --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py new file mode 100755 index 0000000..e7bd172 --- /dev/null +++ b/modules/core/mano/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py @@ -0,0 +1,588 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
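+#
+# Example invocation (a sketch based on the argparse options in main()
+# below; the output directory is arbitrary):
+#   ./ping_pong_nsd.py --outdir /tmp/descriptors --format json --epa
+# With --epa this writes ping_vnfd_with_epa, pong_vnfd_with_epa and
+# ping_pong_nsd_with_epa descriptor trees under /tmp/descriptors.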
+
+
+#
+#
+
+
+import sys
+import os
+import argparse
+import uuid
+import rift.vcs.component as vcs
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+
+
+
+from gi.repository import (
+    NsdYang,
+    VldYang,
+    VnfdYang,
+    RwNsdYang,
+    RwVnfdYang,
+    RwYang,
+    )
+
+NUM_PING_INSTANCES = 1
+MAX_VNF_INSTANCES_PER_NS = 10
+use_epa = False
+pingcount = NUM_PING_INSTANCES
+
+PING_USERDATA_FILE = '''#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+  - [ systemctl, daemon-reload ]
+  - [ systemctl, enable, ping.service ]
+  - [ systemctl, start, --no-block, ping.service ]
+  - [ ifup, eth1 ]
+'''
+
+PONG_USERDATA_FILE = '''#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+  - [ systemctl, daemon-reload ]
+  - [ systemctl, enable, pong.service ]
+  - [ systemctl, start, --no-block, pong.service ]
+  - [ ifup, eth1 ]
+'''
+
+
+class UnknownVNFError(Exception):
+    pass
+
+class ManoDescriptor(object):
+    def __init__(self, name):
+        self.name = name
+        self.descriptor = None
+
+    def write_to_file(self, module_list, outdir, output_format):
+        model = RwYang.Model.create_libncx()
+        for module in module_list:
+            model.load_module(module)
+
+        if output_format == 'json':
+            with open('%s/%s.json' % (outdir, self.name), "w") as fh:
+                fh.write(self.descriptor.to_json(model))
+        elif output_format.strip() == 'xml':
+            with open('%s/%s.xml' % (outdir, self.name), "w") as fh:
+                fh.write(self.descriptor.to_xml_v2(model, pretty_print=True))
+        else:
+            raise ValueError("Invalid output format for the descriptor")
+
+class VirtualNetworkFunction(ManoDescriptor):
+    def __init__(self, name, instance_count=1):
+        self.vnfd_catalog = None
+        self.vnfd = None
+        self.instance_count = instance_count
+        super(VirtualNetworkFunction, self).__init__(name)
+
+    def compose(self, image_name, cloud_init="", endpoint=None, mon_params=[],
+                mon_port=8888, mgmt_port=8888, num_vlr_count=1, num_ivlr_count=1,
+                num_vms=1, image_md5sum=None):
+        self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+        self.id = str(uuid.uuid1())
+        vnfd = self.descriptor.vnfd.add()
+        vnfd.id = self.id
+        vnfd.name = self.name
+        vnfd.short_name = self.name
+        vnfd.vendor = 'RIFT.io'
+        vnfd.description = 'This is an example RIFT.ware VNF'
+        vnfd.version = '1.0'
+
+        self.vnfd = vnfd
+
+        internal_vlds = []
+        for i in range(num_ivlr_count):
+            internal_vld = vnfd.internal_vld.add()
+            internal_vld.id = str(uuid.uuid1())
+            internal_vld.name = 'fabric%s' % i
+            internal_vld.short_name = 'fabric%s' % i
+            internal_vld.description = 'Virtual link for internal fabric%s' % i
+            internal_vld.type_yang = 'ELAN'
+            internal_vlds.append(internal_vld)
+
+        for i in range(num_vlr_count):
+            cp = vnfd.connection_point.add()
+            cp.type_yang = 'VPORT'
+            cp.name = '%s/cp%d' % (self.name, i)
+
+        if endpoint is not None:
+            endp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_HttpEndpoint(
+                path=endpoint, port=mon_port, polling_interval_secs=2
+            )
+            vnfd.http_endpoint.append(endp)
+
+        # Monitoring params
+        for monp_dict in mon_params:
+            monp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_MonitoringParam.from_dict(monp_dict)
+            monp.http_endpoint_ref = endpoint
+            vnfd.monitoring_param.append(monp)
+
+        for i in range(num_vms):
+            # VDU Specification
+            vdu = vnfd.vdu.add()
+            vdu.id = str(uuid.uuid1())
+            vdu.name = 'iovdu_%s' % i
+            vdu.count = 1
+            #vdu.mgmt_vpci = '0000:00:20.0'
+
+            # specify the VM flavor
+            if use_epa:
+                vdu.vm_flavor.vcpu_count = 4
+                vdu.vm_flavor.memory_mb = 1024
+                vdu.vm_flavor.storage_gb = 4
+            else:
+                vdu.vm_flavor.vcpu_count = 1
+                vdu.vm_flavor.memory_mb = 512
+                vdu.vm_flavor.storage_gb = 4
+
+            # Management interface
+            mgmt_intf = vnfd.mgmt_interface
+            mgmt_intf.vdu_id = vdu.id
+            mgmt_intf.port = mgmt_port
+            mgmt_intf.dashboard_params.path = "/api/v1/pong/stats"
+
+            vdu.cloud_init = cloud_init
+
+            # specify the guest EPA
+            if use_epa:
+                vdu.guest_epa.trusted_execution = False
+                vdu.guest_epa.mempage_size = 'LARGE'
+                vdu.guest_epa.cpu_pinning_policy = 'DEDICATED'
+                vdu.guest_epa.cpu_thread_pinning_policy = 'PREFER'
+                vdu.guest_epa.numa_node_policy.node_cnt = 2
+                vdu.guest_epa.numa_node_policy.mem_policy = 'STRICT'
+
+                node = vdu.guest_epa.numa_node_policy.node.add()
+                node.id = 0
+                node.memory_mb = 512
+                node.vcpu = [0, 1]
+
+                node = vdu.guest_epa.numa_node_policy.node.add()
+                node.id = 1
+                node.memory_mb = 512
+                node.vcpu = [2, 3]
+
+                # specify the vswitch EPA
+                vdu.vswitch_epa.ovs_acceleration = 'DISABLED'
+                vdu.vswitch_epa.ovs_offload = 'DISABLED'
+
+                # Specify the hypervisor EPA
+                vdu.hypervisor_epa.type_yang = 'PREFER_KVM'
+
+                # Specify the host EPA
+                vdu.host_epa.cpu_model = 'PREFER_SANDYBRIDGE'
+                vdu.host_epa.cpu_arch = 'PREFER_X86_64'
+                vdu.host_epa.cpu_vendor = 'PREFER_INTEL'
+                vdu.host_epa.cpu_socket_count = 'PREFER_TWO'
+                vdu.host_epa.cpu_feature = ['PREFER_AES', 'PREFER_CAT']
+
+            vdu.image = image_name
+            if image_md5sum is not None:
+                vdu.image_checksum = image_md5sum
+
+            for i in range(num_ivlr_count):
+                internal_cp = vdu.internal_connection_point.add()
+                internal_cp.id = str(uuid.uuid1())
+                internal_cp.type_yang = 'VPORT'
+                internal_vlds[i].internal_connection_point_ref.append(internal_cp.id)
+
+                internal_interface = vdu.internal_interface.add()
+                internal_interface.name = 'fab%d' % i
+                internal_interface.vdu_internal_connection_point_ref = internal_cp.id
+                internal_interface.virtual_interface.type_yang = 'VIRTIO'
+
+                #internal_interface.virtual_interface.vpci = '0000:00:1%d.0'%i
+
+            for i in range(num_vlr_count):
+                external_interface = vdu.external_interface.add()
+                external_interface.name = 'eth%d' % i
+                external_interface.vnfd_connection_point_ref = '%s/cp%d' % (self.name, i)
+                if use_epa:
+                    external_interface.virtual_interface.type_yang = 'VIRTIO'
+                else:
+                    external_interface.virtual_interface.type_yang = 'VIRTIO'
+                #external_interface.virtual_interface.vpci = '0000:00:2%d.0'%i
+
+    def write_to_file(self, outdir, output_format):
+        dirpath = "%s/%s/vnfd" % (outdir, self.name)
+        if not os.path.exists(dirpath):
+            os.makedirs(dirpath)
+        super(VirtualNetworkFunction, self).write_to_file(['vnfd', 'rw-vnfd'],
+                                                          "%s/%s/vnfd" % (outdir, self.name),
+                                                          output_format)
+
+class NetworkService(ManoDescriptor):
+    def __init__(self, name):
+        super(NetworkService, self).__init__(name)
+
+    def ping_config(self):
+        suffix = ''
+        if use_epa:
+            suffix = '_with_epa'
+        ping_cfg = r'''
+#!/usr/bin/bash
+
+# Rest API config
+ping_mgmt_ip='<rw_mgmt_ip>'
+ping_mgmt_port=18888
+
+# VNF specific configuration
+pong_server_ip='<rw_connection_point_name pong_vnfd%s/cp0>'
+ping_rate=5
+server_port=5555
+
+# Make rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server info for ping!"
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"rate\":$ping_rate}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set ping rate!"
+    exit $rc
+fi
+
+output=$(curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
+if [[ $output == *"Internal Server Error"* ]]
+then
+    echo $output
+    exit 3
+else
+    echo $output
+fi
+
+
+exit 0
+    ''' % suffix
+        return ping_cfg
+
+    def pong_config(self):
+        suffix = ''
+        if use_epa:
+            suffix = '_with_epa'
+        pong_cfg = r'''
+#!/usr/bin/bash
+
+# Rest API configuration
+pong_mgmt_ip='<rw_mgmt_ip>'
+pong_mgmt_port=18889
+# username=
+# password=
+
+# VNF specific configuration
+pong_server_ip='<rw_connection_point_name pong_vnfd%s/cp0>'
+server_port=5555
+
+# Make Rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server(own) info for pong!"
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to enable pong service!"
+    exit $rc
+fi
+
+exit 0
+    ''' % suffix
+        return pong_cfg
+
+    def default_config(self, const_vnfd, vnfd):
+        vnf_config = const_vnfd.vnf_configuration
+
+        vnf_config.input_params.config_priority = 0
+        vnf_config.input_params.config_delay = 0
+
+        # Select "script" configuration
+        vnf_config.config_type = 'script'
+        vnf_config.script.script_type = 'bash'
+
+        if vnfd.name == 'pong_vnfd' or vnfd.name == 'pong_vnfd_with_epa':
+            vnf_config.input_params.config_priority = 1
+            # First priority config delay will delay the entire NS config delay
+            vnf_config.input_params.config_delay = 60
+            vnf_config.config_template = self.pong_config()
+        if vnfd.name == 'ping_vnfd' or vnfd.name == 'ping_vnfd_with_epa':
+            vnf_config.input_params.config_priority = 2
+            vnf_config.config_template = self.ping_config()
+            ## Remove this - test only
+            ## vnf_config.config_access.mgmt_ip_address = '1.1.1.1'
+
+        print("### TBR ###", vnfd.name, "vnf_config = ", vnf_config)
+
+    def compose(self, vnfd_list, cpgroup_list):
+        self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog()
+        self.id = str(uuid.uuid1())
+        nsd = self.descriptor.nsd.add()
+        nsd.id = self.id
+        nsd.name = self.name
+        nsd.short_name = self.name
+        nsd.vendor = 'RIFT.io'
+        nsd.description = 'Toy NS'
+        nsd.version = '1.0'
+        nsd.input_parameter_xpath.append(
+            NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:vendor",
+            )
+        )
+
+        for cpgroup in cpgroup_list:
+            vld = nsd.vld.add()
+            vld.id = str(uuid.uuid1())
+            vld.name = 'ping_pong_vld'  #hard coded
+            vld.short_name = vld.name
+            vld.vendor = 'RIFT.io'
+            vld.description = 'Toy VL'
+            vld.version = '1.0'
+            vld.type_yang = 'ELAN'
+
+            for cp in cpgroup:
+                cpref = vld.vnfd_connection_point_ref.add()
+                cpref.member_vnf_index_ref = cp[0]
+                cpref.vnfd_id_ref = cp[1]
+                cpref.vnfd_connection_point_ref = cp[2]
+
+        member_vnf_index = 1
+        for vnfd in vnfd_list:
+            for i in range(vnfd.instance_count):
+                constituent_vnfd = nsd.constituent_vnfd.add()
+                constituent_vnfd.member_vnf_index = member_vnf_index
+
+                constituent_vnfd.vnfd_id_ref = vnfd.descriptor.vnfd[0].id
+                self.default_config(constituent_vnfd, vnfd)
+                member_vnf_index += 1
+
+    def write_to_file(self, outdir, output_format):
+        dirpath = "%s/%s/nsd" % (outdir, self.name)
+        if not os.path.exists(dirpath):
+            os.makedirs(dirpath)
+        super(NetworkService, self).write_to_file(["nsd", "rw-nsd"],
+                                                  "%s/%s/nsd" % (outdir, self.name),
+                                                  output_format)
+
+
+def get_ping_mon_params(path):
+    return [
+        {
+            'id': '1',
+            'name': 'ping-request-tx-count',
+            'json_query_method': "NAMEKEY",
+            'value_type': "INT",
+            'description': 'no of ping requests',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+        },
+
+        {
+            'id': '2',
+            'name': 'ping-response-rx-count',
+            'json_query_method': "NAMEKEY",
+            'value_type': "INT",
+            'description': 'no of ping responses',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+        },
+    ]
+
+
+def get_pong_mon_params(path):
+    return [
+        {
+            'id': '1',
+            'name': 'ping-request-rx-count',
+            'json_query_method': "NAMEKEY",
+            'value_type': "INT",
+            'description': 'no of ping requests',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+        },
+
+        {
+            'id': '2',
+            'name': 'ping-response-tx-count',
+            'json_query_method': "NAMEKEY",
+            'value_type': "INT",
+            'description': 'no of ping responses',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+        },
+    ]
+
+def generate_ping_pong_descriptors(fmt="json",
+                                   write_to_file=False,
+                                   out_dir="./",
+                                   pingcount=NUM_PING_INSTANCES,
+                                   external_vlr_count=1,
+                                   internal_vlr_count=0,
+                                   num_vnf_vms=1,
+                                   ping_md5sum=None,
+                                   pong_md5sum=None,
+                                   ):
+    # List of connection point groups
+    # Each connection point group refers to a virtual link
+    # the CP group consists of tuples of connection points
+    cpgroup_list = []
+    for i in range(external_vlr_count):
+        cpgroup_list.append([])
+
+    if use_epa:
+        suffix = '_with_epa'
+    else:
+        suffix = ''
+
+    ping = VirtualNetworkFunction("ping_vnfd%s" % (suffix), pingcount)
+    #ping = VirtualNetworkFunction("ping_vnfd", pingcount)
+    ping.compose(
+        "Fedora-x86_64-20-20131211.1-sda-ping.qcow2",
+        PING_USERDATA_FILE,
+        "api/v1/ping/stats",
+        get_ping_mon_params("api/v1/ping/stats"),
+        mon_port=18888,
+        mgmt_port=18888,
+        num_vlr_count=external_vlr_count,
+        num_ivlr_count=internal_vlr_count,
+        num_vms=num_vnf_vms,
+        image_md5sum=ping_md5sum,
+        )
+
+    pong = VirtualNetworkFunction("pong_vnfd%s" % (suffix))
+    #pong = VirtualNetworkFunction("pong_vnfd")
+    pong.compose(
+        "Fedora-x86_64-20-20131211.1-sda-pong.qcow2",
+        PONG_USERDATA_FILE,
+        "api/v1/pong/stats",
+        get_pong_mon_params("api/v1/pong/stats"),
+        mon_port=18889,
+        mgmt_port=18889,
+        num_vlr_count=external_vlr_count,
+        num_ivlr_count=internal_vlr_count,
+        num_vms=num_vnf_vms,
+        image_md5sum=pong_md5sum,
+        )
+
+    # Initialize the member VNF index
+    member_vnf_index = 1
+
+    # define the connection point groups
+    for index, cp_group in enumerate(cpgroup_list):
+        desc_id = ping.descriptor.vnfd[0].id
+        filename = 'ping_vnfd{}/cp{}'.format(suffix, index)
+
+        for idx in range(pingcount):
+            cp_group.append((
+                member_vnf_index,
+                desc_id,
+                filename,
+                ))
+
+            member_vnf_index += 1
+
+        desc_id = pong.descriptor.vnfd[0].id
+        filename = 'pong_vnfd{}/cp{}'.format(suffix, index)
+
+        cp_group.append((
+            member_vnf_index,
+            desc_id,
+            filename,
+            ))
+
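+        # Worked example (an illustration, assuming the defaults
+        # pingcount=1 and external_vlr_count=1, so no EPA suffix):
+        #   cpgroup_list == [[(1, <ping vnfd id>, 'ping_vnfd/cp0'),
+        #                     (2, <pong vnfd id>, 'pong_vnfd/cp0')]]
+        # i.e. one group per external virtual link, one (member index,
+        # vnfd id, connection point name) tuple per endpoint on the link.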
+        member_vnf_index += 1
+
+    vnfd_list = [ping, pong]
+    nsd_catalog = NetworkService("ping_pong_nsd%s" % (suffix))
+    #nsd_catalog = NetworkService("ping_pong_nsd")
+    nsd_catalog.compose(vnfd_list, cpgroup_list)
+
+    if write_to_file:
+        ping.write_to_file(out_dir, fmt)
+        pong.write_to_file(out_dir, fmt)
+        nsd_catalog.write_to_file(out_dir, fmt)
+
+    return (ping, pong, nsd_catalog)
+
+def main(argv=sys.argv[1:]):
+    global outdir, output_format, use_epa
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-o', '--outdir', default='.')
+    parser.add_argument('-f', '--format', default='json')
+    parser.add_argument('-e', '--epa', action="store_true", default=False)
+    parser.add_argument('-n', '--pingcount', type=int, default=NUM_PING_INSTANCES)
+    parser.add_argument('--ping-image-md5')
+    parser.add_argument('--pong-image-md5')
+    args = parser.parse_args()
+    outdir = args.outdir
+    output_format = args.format
+    use_epa = args.epa
+    pingcount = args.pingcount
+
+    generate_ping_pong_descriptors(args.format, True, args.outdir, pingcount,
+                                   ping_md5sum=args.ping_image_md5, pong_md5sum=args.pong_image_md5)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/foss.txt b/modules/core/mano/foss.txt
new file mode 100644
index 0000000..6d1ef78
--- /dev/null
+++ b/modules/core/mano/foss.txt
@@ -0,0 +1 @@
+RIFT.core, rwmc/mock/node_modules/autobahn, AutobahnJS, MIT, https://github.com/crossbario/autobahn-js
diff --git a/modules/core/mano/manifest/LICENSE b/modules/core/mano/manifest/LICENSE
new file mode 100644
index 0000000..e69de29
diff --git a/modules/core/mano/models/CMakeLists.txt b/modules/core/mano/models/CMakeLists.txt
new file mode 100644
index 0000000..cc9bed6
--- /dev/null
+++ b/modules/core/mano/models/CMakeLists.txt
@@ -0,0 +1,19 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME models)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  plugins
+  openmano
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/modules/core/mano/models/openmano/CMakeLists.txt b/modules/core/mano/models/openmano/CMakeLists.txt
new file mode 100644
index 0000000..296dc6d
--- /dev/null
+++ b/modules/core/mano/models/openmano/CMakeLists.txt
@@ -0,0 +1,15 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  bin
+  src
+  python
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/modules/core/mano/models/openmano/bin/CMakeLists.txt b/modules/core/mano/models/openmano/bin/CMakeLists.txt
new file mode 100644
index 0000000..07472ff
--- /dev/null
+++ b/modules/core/mano/models/openmano/bin/CMakeLists.txt
@@ -0,0 +1,15 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Austin Cormier
+# Creation Date: 1/11/2015
+#
+
+install(
+  PROGRAMS
+    openmano
+    openmano_cleanup.sh
+    add_corporation.py
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+)
diff --git a/modules/core/mano/models/openmano/bin/add_corporation.py b/modules/core/mano/models/openmano/bin/add_corporation.py
new file mode 100755
index 0000000..511369a
--- /dev/null
+++ b/modules/core/mano/models/openmano/bin/add_corporation.py
@@ -0,0 +1,528 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import ipaddress
+import itertools
+import jujuclient
+import logging
+import sys
+import time
+import yaml
+import hashlib
+
+
+logging.basicConfig(filename="/tmp/rift_ns_add_corp.log", level=logging.DEBUG)
+logger = logging.getLogger()
+
+ch = logging.StreamHandler()
+ch.setLevel(logging.INFO)
+
+# create formatter and add it to the handlers
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+
+dry_run = False
+
+class JujuActionError(Exception):
+    pass
+
+
+class JujuClient(object):
+    """Class for executing Juju actions """
+    def __init__(self, ip, port, user, passwd):
+        self._ip = ip
+        self._port = port
+        self._user = user
+        self._passwd = passwd
+
+        endpoint = 'wss://%s:%d' % (ip, port)
+        logger.debug("Using endpoint=%s", endpoint)
+        if dry_run:
+            return
+        self.env = jujuclient.Environment(endpoint)
+        self.env.login(passwd, user)
+
+    def get_service(self, name):
+        return self.env.get_service(name)
+
+    def _get_units(self, name):
+        """
+        Get the units associated with service
+        """
+        units = self.env.status(name)['Services'][name]['Units']
+        units = list(units.keys())
+
+        # convert to a friendly format for juju-python-client
+        units[:] = [('unit-%s' % u).replace('/', '-') for u in units]
+        return units
+
+    def exec_action(self, name, action_name, params, block=False):
+        logger.debug("execute action %s using params %s", action_name, params)
+        if dry_run:
+            return
+
+        actions = jujuclient.Actions(self.env)
+        results = actions.enqueue_units(self._get_units(name),
+                                        action_name,
+                                        params)
+        if not block:
+            return results
+
+        if 'error' in results['results'][0].keys():
+            raise JujuActionError("Juju action error: %s" % results['results'][0])
+
+        action = results['results'][0]['action']
+        info = actions.info([action])
+        i = 0
+        logging.debug("Initial action results: %s", results['results'][0])
+        while info['results'][0]['status'] not in ['completed', 'failed']:
+            time.sleep(1)
+            info = actions.info([action])
+
+            # break out if the action doesn't complete in 10 secs
+            i += 1
+            if i == 10:
+                raise JujuActionError("Juju action timed out after 10 seconds")
+
+        if info['results'][0]['status'] != 'completed':
+            raise JujuActionError("Action %s failure: %s" % (action_name, info['results'][0]))
+
+        return info
+
+
+class CharmAction(object):
+    def __init__(self, deployed_name, action_name, action_params=None):
+        self._deployed_name = deployed_name
+        self._action_name = action_name
+        self._params = action_params if action_params is not None else []
+
+    def execute(self, juju_client):
+        logger.info("Executing charm (%s) action (%s) with params (%s)",
+                    self._deployed_name, self._action_name, self._params)
+        try:
+            info = juju_client.exec_action(
+                name=self._deployed_name,
+                action_name=self._action_name,
+                params=self._params,
+                block=True
+            )
+
+        except JujuActionError as e:
+            logger.error("Juju charm (%s) action (%s) failed: %s",
+                         self._deployed_name, self._action_name, str(e))
+            raise
+
+        logger.debug("Juju charm (%s) action (%s) success.",
+                     self._deployed_name, self._action_name)
+
+
+class DeployedProxyCharm(object):
+    def __init__(self, juju_client, service_name, mgmt_ip=None, charm_name=None):
+        self._juju_client = juju_client
+        self.service_name = service_name
+        self.mgmt_ip = mgmt_ip
+        self.charm_name = charm_name
+
+    def do_action(self, action_name, action_params={}):
+        action = CharmAction(self.service_name, action_name, action_params)
+        action.execute(self._juju_client)
+
+
+class SixWindPEProxyCharm(DeployedProxyCharm):
+    USER = "root"
+    PASSWD = "6windos"
+
+    def configure_interface(self, iface_name, ipv4_interface_str=None):
+        action = "configure-interface"
+        params = {'iface-name': iface_name}
+
+        if ipv4_interface_str is not None:
+            # Use ipaddress module to validate ipv4 interface string
+            ip_intf = ipaddress.IPv4Interface(ipv4_interface_str)
+            params["cidr"] = ip_intf.with_prefixlen
+
+        self.do_action(action, params)
+
+    def add_corporation(self, domain_name, user_iface_name, vlan_id, corp_gw,
+                        corp_net, local_net="10.255.255.0/24", local_net_area="0"):
+        logger.debug("Add corporation called with params: %s", locals())
+
+        action = "add-corporation"
+        params = {
+            "domain-name": domain_name,
+            "iface-name": user_iface_name,
+            "vlan-id": int(vlan_id),
+            "cidr": corp_net,
+            "area": corp_gw,
+            "subnet-cidr": local_net,
+            "subnet-area": local_net_area,
+        }
+
+        self.do_action(action, params)
+
+    def connect_domains(self, domain_name, core_iface_name, local_ip, remote_ip,
+                        internal_local_ip, internal_remote_ip, tunnel_name,
+                        tunnel_key, tunnel_type="gre"):
+
+        logger.debug("Connect domains called with params: %s", locals())
+
+        action = "connect-domains"
+        params = {
+            "domain-name": domain_name,
+            "iface-name": core_iface_name,
+            "tunnel-name": tunnel_name,
+            "local-ip": local_ip,
+            "remote-ip": remote_ip,
+            "tunnel-key": tunnel_key,
+            "internal-local-ip": internal_local_ip,
+            "internal-remote-ip": internal_remote_ip,
+            "tunnel-type": tunnel_type,
+        }
+
+        self.do_action(action, params)
+
+
+class PEGroupConfig(object):
+    def __init__(self, pe_group_cfg):
+        self._pe_group_cfg = pe_group_cfg
+
+    def _get_param_value(self, param_name):
+        for param in self._pe_group_cfg["parameter"]:
+            if param["name"] == param_name:
+                return param["value"]
+
+        raise ValueError("PE param not found: %s" % param_name)
+
+    @property
+    def vlan_id(self):
+        return self._get_param_value("Vlan ID")
+
+    @property
+    def interface_name(self):
+        return self._get_param_value("Interface Name")
+
+    @property
+    def corp_network(self):
+        return self._get_param_value("Corp. Network")
+
+    @property
+    def corp_gateway(self):
+        return self._get_param_value("Corp. Gateway")
Gateway") + + +class AddCorporationRequest(object): + def __init__(self, add_corporation_rpc): + self._add_corporation_rpc = add_corporation_rpc + + @property + def name(self): + return self._add_corporation_rpc["name"] + + @property + def param_groups(self): + return self._add_corporation_rpc["parameter_group"] + + @property + def params(self): + return self._add_corporation_rpc["parameter"] + + @property + def corporation_name(self): + for param in self.params: + if param["name"] == "Corporation Name": + return param["value"] + + raise ValueError("Could not find 'Corporation Name' field") + + @property + def tunnel_key(self): + for param in self.params: + if param["name"] == "Tunnel Key": + return param["value"] + + raise ValueError("Could not find 'Tunnel Key' field") + + def get_pe_parameter_group_map(self): + group_name_map = {} + for group in self.param_groups: + group_name_map[group["name"]] = group + + return group_name_map + + def get_parameter_name_map(self): + name_param_map = {} + for param in self.params: + name_param_map[param["name"]] = param + + return name_param_map + + @classmethod + def from_yaml_cfg(cls, yaml_hdl): + config = yaml.load(yaml_hdl) + return cls( + config["rpc_ip"], + ) + + +class JujuVNFConfig(object): + def __init__(self, vnfr_index_map, vnf_name_map, vnf_init_config_map): + self._vnfr_index_map = vnfr_index_map + self._vnf_name_map = vnf_name_map + self._vnf_init_config_map = vnf_name_map + + def get_service_name(self, vnf_index): + for vnfr_id, index in self._vnfr_index_map.items(): + if index != vnf_index: + continue + + return self._vnf_name_map[vnfr_id] + + raise ValueError("VNF Index not found: %s" % vnf_index) + + def get_vnfr_id(self, vnf_index): + for vnfr_id, index in self._vnfr_index_map.items(): + if index != vnf_index: + continue + + return vnfr_id + + raise ValueError("VNF Index not found: %s" % vnf_index) + + @classmethod + def from_yaml_cfg(cls, yaml_hdl): + config = yaml.load(yaml_hdl) + return cls( + config["vnfr_index_map"], + config["unit_names"], + config["init_config"], + ) + + +class JujuClientConfig(object): + def __init__(self, juju_ctrl_cfg): + self._juju_ctrl_cfg = juju_ctrl_cfg + + @property + def name(self): + return self._juju_ctrl_cfg["name"] + + @property + def host(self): + return self._juju_ctrl_cfg["host"] + + @property + def port(self): + return self._juju_ctrl_cfg["port"] + + @property + def user(self): + return self._juju_ctrl_cfg["user"] + + @property + def secret(self): + return self._juju_ctrl_cfg["secret"] + + @classmethod + def from_yaml_cfg(cls, yaml_hdl): + config = yaml.load(yaml_hdl) + return cls( + config["config_agent"], + ) + + +class OSM_MWC_Demo(object): + VNF_INDEX_NAME_MAP = { + "PE1": 1, + "PE2": 2, + "PE3": 3, + } + + CORE_PE_CONN_MAP = { + "PE1": { + "PE2": { + "ifacename": "eth1", + "ip": "10.10.10.9", + "mask": "30", + "internal_local_ip": "10.255.255.1" + }, + "PE3": { + "ifacename": "eth2", + "ip": "10.10.10.1", + "mask": "30", + "internal_local_ip": "10.255.255.1" + }, + }, + "PE2": { + "PE1": { + "ifacename": "eth1", + "ip": "10.10.10.10", + "mask": "30", + "internal_local_ip": "10.255.255.2" + }, + "PE3": { + "ifacename": "eth2", + "ip": "10.10.10.6", + "mask": "30", + "internal_local_ip": "10.255.255.2" + } + }, + "PE3": { + "PE1": { + "ifacename": "eth1", + "ip": "10.10.10.2", + "mask": "30", + "internal_local_ip": "10.255.255.3" + }, + "PE2": { + "ifacename": "eth2", + "ip": "10.10.10.5", + "mask": "30", + "internal_local_ip": "10.255.255.3" + } + } + } + + @staticmethod + def 
get_pe_vnf_index(pe_name): + if pe_name not in OSM_MWC_Demo.VNF_INDEX_NAME_MAP: + raise ValueError("Could not find PE name: %s", pe_name) + + return OSM_MWC_Demo.VNF_INDEX_NAME_MAP[pe_name] + + @staticmethod + def get_src_core_iface(src_pe_name, dest_pe_name): + return OSM_MWC_Demo.CORE_PE_CONN_MAP[src_pe_name][dest_pe_name]["ifacename"] + + @staticmethod + def get_local_ip(src_pe_name, dest_pe_name): + return OSM_MWC_Demo.CORE_PE_CONN_MAP[src_pe_name][dest_pe_name]["ip"] + + @staticmethod + def get_remote_ip(src_pe_name, dest_pe_name): + return OSM_MWC_Demo.CORE_PE_CONN_MAP[dest_pe_name][src_pe_name]["ip"] + + @staticmethod + def get_internal_local_ip(src_pe_name, dest_pe_name): + return OSM_MWC_Demo.CORE_PE_CONN_MAP[src_pe_name][dest_pe_name]["internal_local_ip"] + + @staticmethod + def get_internal_remote_ip(src_pe_name, dest_pe_name): + return OSM_MWC_Demo.CORE_PE_CONN_MAP[dest_pe_name][src_pe_name]["internal_local_ip"] + + +def add_pe_corporation(src_pe_name, src_pe_charm, src_pe_group_cfg, corporation_name): + domain_name = corporation_name + vlan_id = src_pe_group_cfg.vlan_id + corp_gw = src_pe_group_cfg.corp_gateway + corp_net = src_pe_group_cfg.corp_network + + user_iface = src_pe_group_cfg.interface_name + + src_pe_charm.add_corporation(domain_name, user_iface, vlan_id, corp_gw, corp_net) + + +def connect_pe_domains(src_pe_name, src_pe_charm, dest_pe_name, corporation_name, tunnel_key): + domain_name = corporation_name + core_iface_name = OSM_MWC_Demo.get_src_core_iface(src_pe_name, dest_pe_name) + local_ip = OSM_MWC_Demo.get_local_ip(src_pe_name, dest_pe_name) + remote_ip = OSM_MWC_Demo.get_remote_ip(src_pe_name, dest_pe_name) + internal_local_ip = OSM_MWC_Demo.get_internal_local_ip(src_pe_name, dest_pe_name) + internal_remote_ip = OSM_MWC_Demo.get_internal_remote_ip(src_pe_name, dest_pe_name) + + + src_pe_idx = OSM_MWC_Demo.get_pe_vnf_index(src_pe_name) + dest_pe_idx = OSM_MWC_Demo.get_pe_vnf_index(dest_pe_name) + + # Create a 4 digit hash of the corporation name + hash_object = hashlib.md5(corporation_name.encode()) + corp_hash = hash_object.hexdigest()[-4:] + + # Tunnel name is the 4 digit corporation name hash followed by + # src index and dest index. When there are less than 10 PE's + # this creates a 8 character tunnel name which is the limit. + tunnel_name = "".join([corp_hash, "_", str(src_pe_idx), str(dest_pe_idx)]) + + src_pe_charm.connect_domains(domain_name, core_iface_name, local_ip, remote_ip, + internal_local_ip, internal_remote_ip, tunnel_name, + tunnel_key) + + +def main(argv=sys.argv[1:]): + parser = argparse.ArgumentParser() + parser.add_argument("yaml_cfg_file", type=argparse.FileType('r')) + parser.add_argument("--dry-run", action="store_true") + parser.add_argument("--quiet", "-q", dest="verbose", action="store_false") + args = parser.parse_args() + if args.verbose: + ch.setLevel(logging.DEBUG) + + global dry_run + dry_run = args.dry_run + + yaml_str = args.yaml_cfg_file.read() + + juju_cfg = JujuClientConfig.from_yaml_cfg(yaml_str) + juju_client = JujuClient(juju_cfg.host, juju_cfg.port, juju_cfg.user, juju_cfg.secret) + + juju_vnf_config = JujuVNFConfig.from_yaml_cfg(yaml_str) + + rpc_request = AddCorporationRequest.from_yaml_cfg(yaml_str) + pe_param_group_map = rpc_request.get_pe_parameter_group_map() + + pe_name_charm_map = {} + for pe_name, pe_group_cfg in pe_param_group_map.items(): + # The PE name (i.e. PE1) must be in the parameter group name so we can correlate + # to an actual VNF in the descriptor. 
+ pe_vnf_index = OSM_MWC_Demo.get_pe_vnf_index(pe_name) + + # Get the deployed VNFR charm service name + pe_charm_service_name = juju_vnf_config.get_service_name(pe_vnf_index) + + pe_name_charm_map[pe_name] = SixWindPEProxyCharm(juju_client, pe_charm_service_name) + + # At this point we have SixWindPEProxyCharm() instances for each PE and each + # PE param group configuration. + for src_pe_name in pe_param_group_map: + add_pe_corporation( + src_pe_name=src_pe_name, + src_pe_charm=pe_name_charm_map[src_pe_name], + src_pe_group_cfg=PEGroupConfig(pe_param_group_map[src_pe_name]), + corporation_name=rpc_request.corporation_name + ) + + # Create a permutation of all PE's involved in this topology and connect + # them together by creating tunnels with matching keys + for src_pe_name, dest_pe_name in itertools.permutations(pe_name_charm_map, 2): + connect_pe_domains( + src_pe_name=src_pe_name, + src_pe_charm=pe_name_charm_map[src_pe_name], + dest_pe_name=dest_pe_name, + corporation_name=rpc_request.corporation_name, + tunnel_key=rpc_request.tunnel_key, + ) + +if __name__ == "__main__": + try: + main() + except Exception as e: + logger.exception("Caught exception when executing add_corporation ns") + raise \ No newline at end of file diff --git a/modules/core/mano/models/openmano/bin/openmano b/modules/core/mano/models/openmano/bin/openmano new file mode 100755 index 0000000..a453b10 --- /dev/null +++ b/modules/core/mano/models/openmano/bin/openmano @@ -0,0 +1,1069 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# PYTHON_ARGCOMPLETE_OK + +## +# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. +# This file is part of openmano +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact with: nfvlabs@tid.es +## + +''' +openmano client used to interact with openmano-server (openmanod) +''' +__author__="Alfonso Tierno, Gerardo Garcia" +__date__ ="$09-oct-2014 09:09:48$" +__version__="0.4.1-r449" +version_date="Dec 2015" + +import os +import argparse +import requests +import json +import yaml +import logging + +class ArgumentParserError(Exception): pass + +class ThrowingArgumentParser(argparse.ArgumentParser): + def error(self, message): + print "Error: %s" %message + print + self.print_usage() + #self.print_help() + print + print "Type 'openmano -h' for help" + raise ArgumentParserError + + +def config(args): + print "OPENMANO_HOST: %s" %mano_host + print "OPENMANO_PORT: %s" %mano_port + print "OPENMANO_TENANT: %s" %mano_tenant + print "OPENMANO_DATACENTER: %s" %str (mano_datacenter) + + +def _print_verbose(mano_response, verbose_level=0): + content = mano_response.json() + result = 0 if mano_response.status_code==200 else mano_response.status_code + if type(content)!=dict or len(content)!=1: + #print "Non expected format output" + print str(content) + return result + + val=content.values()[0] + if type(val)==str: + print val + return result + elif type(val) == list: + content_list = val + elif type(val)==dict: + content_list = [val] + else: + #print "Non expected dict/list format output" + print str(content) + return result + + #print content_list + if verbose_level==None: + verbose_level=0 + if verbose_level >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + + if mano_response.status_code == 200: + for content in content_list: + myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20)) + if verbose_level >=1: + myoutput += " " + content['created_at'].ljust(20) + if verbose_level >=2: + new_line='\n' + if 'type' in content and content['type']!=None: + myoutput += new_line + " Type: " + content['type'].ljust(29) + new_line='' + if 'description' in content and content['description']!=None: + myoutput += new_line + " Description: " + content['description'].ljust(20) + print myoutput + else: + print content['error']['description'] + return result + +def parser_json_yaml(file_name): + try: + f = file(file_name, "r") + text = f.read() + f.close() + except Exception as e: + return (False, str(e)) + + #Read and parse file + if file_name[-5:]=='.yaml' or file_name[-4:]=='.yml' or (file_name[-5:]!='.json' and '\t' not in text): + try: + config = yaml.load(text) + except yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at line:%s column:%s" % (mark.line+1, mark.column+1) + return (False, "Error loading file '"+file_name+"' yaml format error" + error_pos) + else: #json + try: + config = json.loads(text) + except Exception as e: + return (False, "Error loading file '"+file_name+"' json format error " + str(e) ) + + return True, config + +def _load_file_or_yaml(content): + ''' + 'content' can be or a yaml/json file or a text containing a yaml/json text format + This function autodetect, trying to load and parse the file, + if fails trying to parse the 'content' text + Returns the dictionary once parsed, or print an error and finish the program + ''' + #Check config file exists + if os.path.isfile(content): + r,payload = parser_json_yaml(content) + if not r: + print payload + exit(-1) + elif "{" in content or ":" in content: + try: + payload = yaml.load(content) + except 
yaml.YAMLError as exc: + error_pos = "" + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1) + print "Error loading yaml/json text"+error_pos + exit (-1) + else: + print "'%s' is neither a valid file nor a yaml/json content" % content + exit(-1) + return payload + +def vnf_create(args): + #print "vnf-create",args + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + myvnf = _load_file_or_yaml(args.file) + + if args.name or args.description or args.image_path: + #print args.name + try: + if args.name: + myvnf['vnf']['name'] = args.name + if args.description: + myvnf['vnf']['description'] = args.description + if args.image_path: + index=0 + for image_path_ in args.image_path.split(","): + #print "image-path", image_path_ + myvnf['vnf']['VNFC'][index]['VNFC image']=image_path_ + index=index+1 + except (KeyError, TypeError), e: + if str(e)=='vnf': error_pos= "missing field 'vnf'" + elif str(e)=='name': error_pos= "missing field 'vnf':'name'" + elif str(e)=='description': error_pos= "missing field 'vnf':'description'" + elif str(e)=='VNFC': error_pos= "missing field 'vnf':'VNFC'" + elif str(e)==str(index): error_pos= "field 'vnf':'VNFC' must be an array" + elif str(e)=='VNFC image': error_pos= "missing field 'vnf':'VNFC'['VNFC image']" + else: error_pos="wrong format" + print "Wrong VNF descriptor: " + error_pos + return -1 + payload_req = json.dumps(myvnf) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, mano_tenant) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + + return _print_verbose(mano_response, args.verbose) + +def vnf_list(args): + #print "vnf-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, mano_tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, mano_tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose==None: + args.verbose=0 + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + if not args.name: + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + if len(content['vnfs']) == 0: + print "No VNFs were found." 
+ return 404 #HTTP_Not_Found + for vnf in content['vnfs']: + myoutput = "%s %s" %(vnf['uuid'].ljust(38),vnf['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, vnf['created_at'].ljust(20)) + print myoutput + if args.verbose >=2: + print " Description: %s" %vnf['description'] + print " VNF descriptor file: %s" %vnf['path'] + else: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + vnf = content['vnf'] + print "%s %s %s" %(vnf['uuid'].ljust(38),vnf['name'].ljust(20), vnf['created_at'].ljust(20)) + print " Description: %s" %vnf['description'] + print " VNF descriptor file: %s" %vnf['path'] + print " VMs:" + for vm in vnf['VNFC']: + #print " %s %s %s" %(vm['name'].ljust(20), vm['uuid'].ljust(38), vm['description'].ljust(30)) + print " %s %s" %(vm['name'].ljust(20), vm['description']) + if len(vnf['nets'])>0: + print " Internal nets:" + for net in vnf['nets']: + print " %s %s" %(net['name'].ljust(20), net['description']) + if len(vnf['external-connections'])>0: + print " External interfaces:" + for interface in vnf['external-connections']: + print " %s %s %s %s" %(interface['external_name'].ljust(20), interface['vm_name'].ljust(20), interface['internal_name'].ljust(20), \ + interface['vpci'].ljust(14)) + else: + print content['error']['description'] + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + +def vnf_delete(args): + #print "vnf-delete",args + if not args.force: + r = raw_input("Delete VNF %s (y/N)? " %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def scenario_create(args): + #print "scenario-create",args + headers_req = {'content-type': 'application/yaml'} + myscenario = _load_file_or_yaml(args.file) + + if args.name: + myscenario['name'] = args.name + if args.description: + myscenario['description'] = args.description + payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, mano_tenant) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def scenario_list(args): + #print "scenario-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, mano_tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, mano_tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + if not args.name: + if args.verbose 
>= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + if len(content['scenarios']) == 0: + print "No scenarios were found." + return 404 #HTTP_Not_Found + for scenario in content['scenarios']: + myoutput = "%s %s" %(scenario['uuid'].ljust(38),scenario['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, scenario['created_at'].ljust(20)) + print myoutput + if args.verbose >=2: + print " Description: %s" %scenario['description'] + else: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + scenario = content['scenario'] + myoutput = "%s %s %s" %(scenario['uuid'].ljust(38),scenario['name'].ljust(20), scenario['created_at'].ljust(20)) + print myoutput + print " Description: %s" %scenario['description'] + print " VNFs:" + for vnf in scenario['vnfs']: + print " %s %s %s" %(vnf['name'].ljust(20), vnf['vnf_id'].ljust(38), vnf['description']) + if len(scenario['nets'])>0: + print " Internal nets:" + for net in scenario['nets']: + if net['description'] is None: #if description does not exist, description is "-". Valid for external and internal nets. + net['description'] = '-' + if not net['external']: + print " %s %s %s" %(net['name'].ljust(20), net['uuid'].ljust(38), net['description'].ljust(30)) + print " External nets:" + for net in scenario['nets']: + if net['external']: + print " %s %s %s vim-id:%s" %(net['name'].ljust(20), net['uuid'].ljust(38), net['description'].ljust(30), net['vim_id']) + else: + print content['error']['description'] + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + +def scenario_delete(args): + #print "scenario-delete",args + if not args.force: + r = raw_input("Delete scenario %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def scenario_deploy(args): + #print "scenario-deploy",args + headers_req = {'content-type': 'application/json'} + action = {} + actionCmd="start" + if args.nostart: + actionCmd="reserve" + action[actionCmd] = {} + action[actionCmd]["instance_name"] = args.name + if args.datacenter != None: + action[actionCmd]["datacenter"] = args.datacenter + elif mano_datacenter != None: + action[actionCmd]["datacenter"] = mano_datacenter + + if args.description: + action[actionCmd]["description"] = args.description + payload_req = json.dumps(action, indent=4) + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + + if mano_response.status_code == 200: + myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20)) + if args.verbose >=2: + myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30)) + print myoutput + print "" + print "To check the status, run the following command:" + print "openmano instance-scenario-list " + else: + print content['error']['description'] + return result + +def scenario_verify(args): + #print "scenario-verify",args + headers_req = {'content-type': 'application/json'} + action = {} + action["verify"] = {} + action["verify"]["instance_name"] = "scen-verify-return5" + payload_req = json.dumps(action, indent=4) + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def instance_create(args): + headers_req = {'content-type': 'application/yaml'} + myInstance={"instance": {}, "schema_version": "0.1"} + if args.file: + instance_dict = _load_file_or_yaml(args.file) + if "instance" not in instance_dict: + myInstance = {"instance": instance_dict, "schema_version": "0.1"} + else: + myInstance = instance_dict + if args.name: + myInstance["instance"]['name'] = args.name + if args.description: + 
myInstance["instance"]['description'] = args.description + if args.nostart: + myInstance["instance"]['action'] = "reserve" + if args.datacenter != None: + myInstance["instance"]["datacenter"] = args.datacenter + elif "datacenter" not in myInstance and mano_datacenter != None: + myInstance["instance"]["datacenter"] = mano_datacenter + if args.scenario != None: + myInstance["instance"]["scenario"] = args.scenario + elif "scenario" not in myInstance["instance"]: + print "you must provide an scenario in the file descriptor or with --scenario" + return -1 + if "name" not in myInstance["instance"]: + print "you must provide a name in the file descriptor or with --name" + return 1 + if args.net_map: + if "networks" not in myInstance["instance"]: + myInstance["instance"]["networks"] = {} + for net in args.net_map: + net_comma_list = net.split(",") + for net_comma in net_comma_list: + net_tuple = net_comma.split("=") + if len(net_tuple) != 2: + print "error at net-map. Expected net-scenario=net-datacenter. (%s)?" % net_comma + return + net_scenario = net_comma[0].strip() + net_datacenter = net_comma[1].strip() + if net_scenario not in myInstance["instance"]["networks"]: + myInstance["instance"]["networks"][net_scenario] = {} + myInstance["instance"]["networks"][net_scenario]["source"] = net_datacenter + + payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) + logger.debug("openmano request: %s", payload_req) + URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, mano_tenant) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + + if mano_response.status_code == 200: + myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20)) + if args.verbose >=2: + myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30)) + print myoutput + else: + print content['error']['description'] + return result + + +def instance_scenario_list(args): + #print "instance-scenario-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, mano_tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, mano_tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + if args.verbose==None: + args.verbose=0 + + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + if not args.name: + if args.verbose >= 3: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + if len(content['instances']) == 0: + print "No scenario instances were found." 
+ return result + for instance in content['instances']: + myoutput = "%s %s" %(instance['uuid'].ljust(38),instance['name'].ljust(20)) + if args.verbose >=1: + myoutput = "%s %s" %(myoutput, instance['created_at'].ljust(20)) + print myoutput + if args.verbose >=2: + print "Description: %s" %instance['description'] + else: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + instance = content + print "%s %s %s" %(instance['uuid'].ljust(38),instance['name'].ljust(20),instance['created_at'].ljust(20)) + print "Description: %s" %instance['description'] + print "Template scenario id: %s" %instance['scenario_id'] + print "Template scenario name: %s" %instance['scenario_name'] + print "---------------------------------------" + print "VNF instances: %d" %len(instance['vnfs']) + for vnf in instance['vnfs']: + #print " %s %s Template vnf name: %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['name'].ljust(20), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38)) + print " %s %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38)) + if len(instance['nets'])>0: + print "---------------------------------------" + print "Internal nets:" + for net in instance['nets']: + if not net['external']: + print " %s %s VIM ID: %s" %(net['uuid'].ljust(38), net['status'].ljust(12), net['vim_net_id']) + print "---------------------------------------" + print "External nets:" + for net in instance['nets']: + if net['external']: + print " %s %s VIM ID: %s" %(net['uuid'].ljust(38), net['status'].ljust(12), net['vim_net_id']) + print "---------------------------------------" + print "VM instances:" + for vnf in instance['vnfs']: + for vm in vnf['vms']: + print " %s %s %s %s VIM ID: %s" %(vm['uuid'].ljust(38), vnf['vnf_name'].ljust(20), vm['name'].ljust(20), vm['status'].ljust(12), vm['vim_vm_id']) + else: + print content['error']['description'] + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + +def instance_scenario_status(args): + print "instance-scenario-status" + return 0 + +def instance_scenario_delete(args): + #print "instance-scenario-delete",args + if not args.force: + r = raw_input("Delete scenario instance %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return + URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def instance_scenario_action(args): + #print "instance-scenario-action", args + action={} + action[ args.action ] = args.param + if args.vnf: + action["vnfs"] = args.vnf + if args.vm: + action["vms"] = args.vm + + headers_req = {'content-type': 'application/json'} + payload_req = json.dumps(action, indent=4) + URLrequest = "http://%s:%s/openmano/%s/instances/%s/action" %(mano_host, mano_port, mano_tenant, args.name) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + if args.verbose: + print yaml.safe_dump(content, indent=4, default_flow_style=False) + return result + for uuid,c in content.iteritems(): + print "%s %s %s" %(uuid.ljust(38), c['name'].ljust(20),c['description'].ljust(20)) + else: + print content['error']['description'] + return result + + +def instance_vnf_list(args): + print "instance-vnf-list" + return 0 + +def instance_vnf_status(args): + print "instance-vnf-status" + return 0 + +def tenant_create(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + tenant_dict={"name": args.name} + if args.description!=None: + tenant_dict["description"] = args.description + payload_req = json.dumps( {"tenant": tenant_dict }) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def tenant_list(args): + #print "tenant-list",args + if args.name: + URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, args.name) + else: + URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + if args.name!=None: + args.verbose += 1 + return _print_verbose(mano_response, args.verbose) + +def tenant_delete(args): + #print "tenant-delete",args + if not args.force: + r = raw_input("Delete tenant %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def datacenter_attach(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + + datacenter_dict={} + if args.vim_tenant_id != None: + datacenter_dict['vim_tenant'] = args.vim_tenant_id + if args.vim_tenant_name != None: + datacenter_dict['vim_tenant_name'] = args.vim_tenant_name + if args.user != None: + datacenter_dict['vim_username'] = args.user + if args.password != None: + datacenter_dict['vim_password'] = args.password + payload_req = json.dumps( {"datacenter": datacenter_dict }) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, mano_tenant, args.name) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + result = _print_verbose(mano_response, args.verbose) + #provide addional information if error + if mano_response.status_code != 200: + content = mano_response.json() + if "already in use for 'name'" in content['error']['description'] and \ + "to database vim_tenants table" in content['error']['description']: + print "Try to specify a different name with --vim-tenant-name" + return result + +def datacenter_detach(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, mano_tenant, args.name) + mano_response = requests.delete(URLrequest, headers=headers_req) + logger.debug("openmano response: %s", mano_response.text ) + content = mano_response.json() + #print json.dumps(content, indent=4) + result = 0 if mano_response.status_code==200 else mano_response.status_code + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def datacenter_create(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + datacenter_dict={"name": args.name, "vim_url": args.url} + if args.description!=None: + datacenter_dict["description"] = args.description + if args.type!=None: + datacenter_dict["type"] = args.type + if args.url!=None: + datacenter_dict["vim_url_admin"] = args.url_admin + if args.config!=None: + datacenter_dict["config"] = _load_file_or_yaml(args.config) + payload_req = json.dumps( {"datacenter": datacenter_dict }) + + #print payload_req + + URLrequest = "http://%s:%s/openmano/datacenters" %(mano_host, mano_port) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def datacenter_delete(args): + #print "datacenter-delete",args + if not args.force: + r = raw_input("Delete datacenter %s (y/N)? 
" %(args.name)) + if not (len(r)>0 and r[0].lower()=="y"): + return 0 + URLrequest = "http://%s:%s/openmano/datacenters/%s" %(mano_host, mano_port, args.name) + mano_response = requests.delete(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + result = 0 if mano_response.status_code==200 else mano_response.status_code + content = mano_response.json() + #print json.dumps(content, indent=4) + if mano_response.status_code == 200: + print content['result'] + else: + print content['error']['description'] + return result + +def datacenter_list(args): + #print "datacenter-list",args + tenant='any' if args.all else mano_tenant + if args.name: + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, args.name) + else: + URLrequest = "http://%s:%s/openmano/%s/datacenters" %(mano_host, mano_port, tenant) + mano_response = requests.get(URLrequest) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + if args.name!=None: + args.verbose += 1 + return _print_verbose(mano_response, args.verbose) + + +def datacenter_net_action(args): + #print "datacenter-net-action",args + if args.verbose==None: + args.verbose=0 + if args.action=="net-list": + URLrequest = "http://%s:%s/openmano/datacenters/%s/networks" %(mano_host, mano_port, args.datacenter) + mano_response = requests.get(URLrequest) + if args.datacenter!=None: + args.verbose += 1 + else: + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/action" %(mano_host, mano_port, mano_tenant, args.datacenter) + if not args.force: + if args.action=="net-update": + r = raw_input(" Edit datacenter " + args.datacenter + " (y/N)? ") + elif args.action=="net-delete": + r = raw_input(" Delete datacenter " + args.datacenter + " net " + args.net +" (y/N)? ") + else: + r = raw_input(" Edit datacenter " + args.datacenter + " net " + args.net +" (y/N)? ") + if len(r)>0 and r[0].lower()=="y": + pass + else: + return 0 + if args.action=="net-update": + payload={args.action : None} + else: + payload = {args.action: {'net': args.net} } + if args.action=="net-edit": + payload[args.action].update(_load_file_or_yaml(args.file)) + + payload_req = json.dumps(payload) + logger.debug("openmano request: %s", payload_req) + mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + return _print_verbose(mano_response, args.verbose) + +def element_edit(args): + headers_req = {'Accept': 'application/json', 'content-type': 'application/json'} + URLrequest = "http://%s:%s/openmano/%s/%s" %(mano_host, mano_port, args.element, args.name) + payload=_load_file_or_yaml(args.file) + if args.element[:-1] not in payload: + payload = {args.element[:-1]: payload } + payload_req = json.dumps(payload) + + #print payload_req + if not args.force or (args.name==None and args.filer==None): + r = raw_input(" Edit " + args.element[:-1] + " " + args.name + " (y/N)? 
") + if len(r)>0 and r[0].lower()=="y": + pass + else: + return 0 + logger.debug("openmano request: %s", payload_req) + mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req) + logger.debug("openmano response: %s", mano_response.text ) + if args.verbose==None: + args.verbose=0 + if args.name!=None: + args.verbose += 1 + return _print_verbose(mano_response, args.verbose) + + +global mano_host +global mano_port +global mano_tenant + +if __name__=="__main__": + + mano_tenant = os.getenv('OPENMANO_TENANT',"bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb") + mano_host = os.getenv('OPENMANO_HOST',"localhost") + mano_port = os.getenv('OPENMANO_PORT',"9090") + mano_datacenter = os.getenv('OPENMANO_DATACENTER',None) + + main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)') + main_parser.add_argument('--version', action='version', version='%(prog)s ' + __version__ ) + + subparsers = main_parser.add_subparsers(help='commands') + + config_parser = subparsers.add_parser('config', help="prints configuration values") + config_parser.set_defaults(func=config) + + vnf_create_parser = subparsers.add_parser('vnf-create', help="adds a vnf into the catalogue") + vnf_create_parser.add_argument("file", action="store", help="location of the JSON file describing the VNF") + vnf_create_parser.add_argument("--name", action="store", help="name of the VNF (if it exists in the VNF descriptor, it is overwritten)") + vnf_create_parser.add_argument("--description", action="store", help="description of the VNF (if it exists in the VNF descriptor, it is overwritten)") + vnf_create_parser.add_argument("--image-path", action="store", help="change image path locations (overwritten)") + vnf_create_parser.add_argument('--verbose', '-v', action='count') + vnf_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + vnf_create_parser.set_defaults(func=vnf_create) + + vnf_list_parser = subparsers.add_parser('vnf-list', help="lists information about a vnf") + vnf_list_parser.add_argument("name", nargs='?', help="name of the VNF") + vnf_list_parser.add_argument('--verbose', '-v', action='count') + vnf_list_parser.add_argument('--details', action="store_true", help="prints details of the VNF (internal structure)") + #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true") + vnf_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + vnf_list_parser.set_defaults(func=vnf_list) + + vnf_delete_parser = subparsers.add_parser('vnf-delete', help="deletes a vnf from the catalogue") + vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted") + vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + vnf_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + vnf_delete_parser.set_defaults(func=vnf_delete) + + scenario_create_parser = subparsers.add_parser('scenario-create', help="adds a scenario into the OPENMANO DB") + scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario") + scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)") + scenario_create_parser.add_argument("--description", action="store", help="description of the scenario (if it exists in the YAML 
scenario, it is overwritten)") + scenario_create_parser.add_argument('--verbose', '-v', action='count') + scenario_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_create_parser.set_defaults(func=scenario_create) + + scenario_list_parser = subparsers.add_parser('scenario-list', help="lists information about a scenario") + scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario") + scenario_list_parser.add_argument('--verbose', '-v', action='count') + scenario_list_parser.add_argument('--details', action="store_true", help="prints details of the scenario (internal structure)") + #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true") + scenario_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_list_parser.set_defaults(func=scenario_list) + + scenario_delete_parser = subparsers.add_parser('scenario-delete', help="deletes a scenario from the OPENMANO DB") + scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted") + scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + scenario_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_delete_parser.set_defaults(func=scenario_delete) + + scenario_deploy_parser = subparsers.add_parser('scenario-deploy', help="deploys a scenario") + scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be deployed") + scenario_deploy_parser.add_argument("name", action="store", help="name of the instance") + scenario_deploy_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources") + scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available") + scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance") + scenario_deploy_parser.add_argument('--verbose', '-v', action='count') + scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_deploy_parser.set_defaults(func=scenario_deploy) + + scenario_deploy_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)") + scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified") + scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + scenario_deploy_parser.set_defaults(func=scenario_verify) + + instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', help="deploys a scenario") + instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text") + instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed") + instance_scenario_create_parser.add_argument("--name", action="store", help="name of the instance") + instance_scenario_create_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources") + instance_scenario_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. 
Needed if several datacenters are available")
+    instance_scenario_create_parser.add_argument("--net-map", action="append", type=str, dest="net_map", help="indicates maps between 'scenario-network=datacenter-network'. Can be used several times")
+    instance_scenario_create_parser.add_argument("--description", action="store", help="description of the instance")
+    instance_scenario_create_parser.add_argument('--verbose', '-v', action='count')
+    instance_scenario_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    instance_scenario_create_parser.set_defaults(func=instance_create)
+
+    instance_scenario_list_parser = subparsers.add_parser('instance-scenario-list', help="lists information about a scenario instance")
+    instance_scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario instance")
+    instance_scenario_list_parser.add_argument('--verbose', '-v', action='count')
+    instance_scenario_list_parser.add_argument('--details', action="store_true", help="prints details of the scenario instance (internal structure)")
+    instance_scenario_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    instance_scenario_list_parser.set_defaults(func=instance_scenario_list)
+
+    instance_scenario_delete_parser = subparsers.add_parser('instance-scenario-delete', help="deletes a scenario instance (and deletes all VM and net instances in VIM)")
+    instance_scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario instance to be deleted")
+    instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    instance_scenario_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete)
+
+    instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', help="invokes an action over part or the whole scenario instance")
+    instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+    instance_scenario_action_parser.add_argument("action", action="store", type=str, \
+        choices=["start","pause","resume","shutoff","shutdown","forceOff","rebuild","reboot", "console"],\
+        help="action to send")
+    instance_scenario_action_parser.add_argument("param", nargs='?', help="additional param of the action. e.g. 
console type (novnc, ...), reboot type (TODO)") + instance_scenario_action_parser.add_argument("--vnf", action="append", help="VNF to act on (can use several entries)") + instance_scenario_action_parser.add_argument("--vm", action="append", help="VM to act on (can use several entries)") + instance_scenario_action_parser.add_argument('--verbose', '-v', action='count') + instance_scenario_action_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + instance_scenario_action_parser.set_defaults(func=instance_scenario_action) + + #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance") + #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance") + #instance_scenario_status_parser.set_defaults(func=instance_scenario_status) + + tenant_create_parser = subparsers.add_parser('tenant-create', help="creates a new tenant") + tenant_create_parser.add_argument("name", action="store", help="name for the tenant") + tenant_create_parser.add_argument("--description", action="store", help="description of the tenant") + tenant_create_parser.add_argument('--verbose', '-v', action='count') + tenant_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + tenant_create_parser.set_defaults(func=tenant_create) + + tenant_delete_parser = subparsers.add_parser('tenant-delete', help="deletes a tenant from the catalogue") + tenant_delete_parser.add_argument("name", action="store", help="name or uuid of the tenant to be deleted") + tenant_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking") + tenant_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + tenant_delete_parser.set_defaults(func=tenant_delete) + + tenant_list_parser = subparsers.add_parser('tenant-list', help="lists information about a tenant") + tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant") + tenant_list_parser.add_argument('--verbose', '-v', action='count') + tenant_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + tenant_list_parser.set_defaults(func=tenant_list) + + item_list=('tenant','datacenter') #put tenant before so that help appear in order + for item in item_list: + element_edit_parser = subparsers.add_parser(item+'-edit', help="edits one "+item) + element_edit_parser.add_argument("name", help="name or uuid of the "+item) + element_edit_parser.add_argument("file", help="json/yaml text or file with the changes") + element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + element_edit_parser.add_argument('--verbose', '-v', action='count') + element_edit_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + element_edit_parser.set_defaults(func=element_edit, element=item + 's') + + datacenter_create_parser = subparsers.add_parser('datacenter-create', help="creates a new datacenter") + datacenter_create_parser.add_argument("name", action="store", help="name for the datacenter") + datacenter_create_parser.add_argument("url", action="store", help="url for the datacenter") + datacenter_create_parser.add_argument("--url_admin", action="store", help="url for administration for the datacenter") + datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: 
openstack or openvim (default)")
+    datacenter_create_parser.add_argument("--config", action="store", help="additional configuration in json/yaml format")
+    datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter")
+    datacenter_create_parser.add_argument('--verbose', '-v', action='count')
+    datacenter_create_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    datacenter_create_parser.set_defaults(func=datacenter_create)
+
+    datacenter_delete_parser = subparsers.add_parser('datacenter-delete', help="deletes a datacenter from the catalogue")
+    datacenter_delete_parser.add_argument("name", action="store", help="name or uuid of the datacenter to be deleted")
+    datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    datacenter_delete_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    datacenter_delete_parser.set_defaults(func=datacenter_delete)
+
+    datacenter_list_parser = subparsers.add_parser('datacenter-list', help="lists information about a datacenter")
+    datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter")
+    datacenter_list_parser.add_argument('--verbose', '-v', action='count')
+    datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant")
+    datacenter_list_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    datacenter_list_parser.set_defaults(func=datacenter_list)
+
+    datacenter_attach_parser = subparsers.add_parser('datacenter-attach', help="associates a datacenter with the operating tenant")
+    datacenter_attach_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_attach_parser.add_argument('--vim-tenant-id', action='store', help="specify a datacenter tenant to use. 
A new one is created by default") + datacenter_attach_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.") + datacenter_attach_parser.add_argument("--user", action="store", help="user credentials for the datacenter") + datacenter_attach_parser.add_argument("--password", action="store", help="password credentials for the datacenter") + datacenter_attach_parser.add_argument('--verbose', '-v', action='count') + datacenter_attach_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_attach_parser.set_defaults(func=datacenter_attach) + + datacenter_detach_parser = subparsers.add_parser('datacenter-detach', help="removes the association between a datacenter and the operating tenant") + datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter") + datacenter_detach_parser.add_argument('--verbose', '-v', action='count') + datacenter_detach_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_detach_parser.set_defaults(func=datacenter_detach) + + action_dict={'net-update': 'retrieves external networks from datacenter', + 'net-edit': 'edits an external network', + 'net-delete': 'deletes an external network', + 'net-list': 'lists external networks from a datacenter' + } + for item in action_dict: + datacenter_action_parser = subparsers.add_parser('datacenter-'+item, help=action_dict[item]) + datacenter_action_parser.add_argument("datacenter", help="name or uuid of the datacenter") + if item=='net-edit' or item=='net-delete': + datacenter_action_parser.add_argument("net", help="name or uuid of the datacenter net") + if item=='net-edit': + datacenter_action_parser.add_argument("file", help="json/yaml text or file with the changes") + if item!='net-list': + datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation") + datacenter_action_parser.add_argument('--verbose', '-v', action='count') + datacenter_action_parser.add_argument('--debug', '-d', action='store_true', help="show debug information") + datacenter_action_parser.set_defaults(func=datacenter_net_action, action=item) + + try: + args = main_parser.parse_args() + #logging info + level = logging.CRITICAL + streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s" + if "debug" in args and args.debug: + level = logging.DEBUG + logging.basicConfig(format=streamformat, level= level) + logger = logging.getLogger('mano') + logger.setLevel(level) + result = args.func(args) + if result == None: + result = 0 + #for some reason it fails if call exit inside try instance. Need to call exit at the end !? + except (requests.exceptions.ConnectionError): + print "Connection error: not possible to contact OPENMANO-SERVER (openmanod)" + result = -2 + except (KeyboardInterrupt): + print 'Exiting openmano' + result = -3 + except (SystemExit, ArgumentParserError): + result = -4 + + #print result + exit(result) + diff --git a/modules/core/mano/models/openmano/bin/openmano_cleanup.sh b/modules/core/mano/models/openmano/bin/openmano_cleanup.sh new file mode 100755 index 0000000..c306e68 --- /dev/null +++ b/modules/core/mano/models/openmano/bin/openmano_cleanup.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Run this on openmano VM to clean up all instances, scenarios and vnfs. + +./openmano instance-scenario-list | cut -d " " -f1 | while read line; do +./openmano instance-scenario-delete $line -f +done + +./openmano scenario-list | cut -d " " -f1 | while read line; do +./openmano scenario-delete $line -f +done + +./openmano vnf-list | cut -d " " -f1 | while read line; do +./openmano vnf-delete $line -f +done \ No newline at end of file diff --git a/modules/core/mano/models/openmano/python/CMakeLists.txt b/modules/core/mano/models/openmano/python/CMakeLists.txt new file mode 100644 index 0000000..abbf139 --- /dev/null +++ b/modules/core/mano/models/openmano/python/CMakeLists.txt @@ -0,0 +1,13 @@ +# Creation Date: 2016/1/12 +# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END) + +cmake_minimum_required(VERSION 2.8) + + +rift_python_install_tree( + FILES + rift/openmano/__init__.py + rift/openmano/rift2openmano.py + rift/openmano/openmano_client.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/models/openmano/python/rift/openmano/__init__.py b/modules/core/mano/models/openmano/python/rift/openmano/__init__.py new file mode 100644 index 0000000..00f74ea --- /dev/null +++ b/modules/core/mano/models/openmano/python/rift/openmano/__init__.py @@ -0,0 +1,15 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py b/modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py new file mode 100755 index 0000000..a5ddb37 --- /dev/null +++ b/modules/core/mano/models/openmano/python/rift/openmano/openmano_client.py @@ -0,0 +1,479 @@ +#!/usr/bin/python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
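For orientation, a module docstring with a usage sketch follows; the host, port, tenant uuid, and descriptor file names in it are illustrative placeholders only, while the wrapper classes and their signatures match the definitions further down in this file.
+"""Python wrappers around the openmano HTTP API and command line client.
+
+A minimal usage sketch, assuming illustrative connection values and the
+OpenmanoCliAPI class defined below:
+
+    import logging
+    log = logging.getLogger("openmano")
+    cli = OpenmanoCliAPI(log, "localhost", 9090,
+                         "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")
+    vnf_id, vnf_name = cli.vnf_create(open("my_vnf.yaml").read())
+    ns_id, ns_name = cli.ns_create(open("my_ns.yaml").read(), name="my_ns")
+    cli.ns_instantiate("my_ns", "my_ns_instance")
+"""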
+ + +# +# + +import argparse +import logging +import os +import re +import subprocess +import sys +import tempfile +import requests + + +class OpenmanoCommandFailed(Exception): + pass + + +class OpenmanoUnexpectedOutput(Exception): + pass + + +class VNFExistsError(Exception): + pass + + +class InstanceStatusError(Exception): + pass + + +class OpenmanoHttpAPI(object): + def __init__(self, log, host, port, tenant): + self._log = log + self._host = host + self._port = port + self._tenant = tenant + + self._session = requests.Session() + + def get_instance(self, instance_uuid): + url = "http://{host}:{port}/openmano/{tenant}/instances/{instance}".format( + host=self._host, + port=self._port, + tenant=self._tenant, + instance=instance_uuid, + ) + + resp = self._session.get(url) + try: + resp.raise_for_status() + except requests.exceptions.HTTPError as e: + raise InstanceStatusError(e) + + return resp.json() + + +class OpenmanoCliAPI(object): + """ This class implements the necessary funtionality to interact with """ + + CMD_TIMEOUT = 15 + + def __init__(self, log, host, port, tenant): + self._log = log + self._host = host + self._port = port + self._tenant = tenant + + @staticmethod + def openmano_cmd_path(): + return os.path.join( + os.environ["RIFT_INSTALL"], + "usr/bin/openmano" + ) + + def _openmano_cmd(self, arg_list, expected_lines=None): + cmd_args = list(arg_list) + cmd_args.insert(0, self.openmano_cmd_path()) + + env = { + "OPENMANO_HOST": self._host, + "OPENMANO_PORT": str(self._port), + "OPENMANO_TENANT": self._tenant, + } + + self._log.debug( + "Running openmano command (%s) using env (%s)", + subprocess.list2cmdline(cmd_args), + env, + ) + + proc = subprocess.Popen( + cmd_args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + env=env + ) + try: + stdout, stderr = proc.communicate(timeout=self.CMD_TIMEOUT) + except subprocess.TimeoutExpired: + self._log.error("Openmano command timed out") + proc.terminate() + stdout, stderr = proc.communicate(timeout=self.CMD_TIMEOUT) + + if proc.returncode != 0: + self._log.error( + "Openmano command failed (rc=%s) with stdout: %s", + proc.returncode, stdout + ) + raise OpenmanoCommandFailed(stdout) + + self._log.debug("Openmano command completed with stdout: %s", stdout) + + output_lines = stdout.splitlines() + if expected_lines is not None: + if len(output_lines) != expected_lines: + msg = "Expected %s lines from openmano command. 
Got %s" % (expected_lines, len(output_lines)) + self._log.error(msg) + raise OpenmanoUnexpectedOutput(msg) + + return output_lines + + + def vnf_create(self, vnf_yaml_str): + """ Create a Openmano VNF from a Openmano VNF YAML string """ + + self._log.debug("Creating VNF: %s", vnf_yaml_str) + + with tempfile.NamedTemporaryFile() as vnf_file_hdl: + vnf_file_hdl.write(vnf_yaml_str.encode()) + vnf_file_hdl.flush() + + try: + output_lines = self._openmano_cmd( + ["vnf-create", vnf_file_hdl.name], + expected_lines=1 + ) + except OpenmanoCommandFailed as e: + if "already in use" in str(e): + raise VNFExistsError("VNF was already added") + raise + + vnf_info_line = output_lines[0] + vnf_id, vnf_name = vnf_info_line.split() + + self._log.info("VNF %s Created: %s", vnf_name, vnf_id) + + return vnf_id, vnf_name + + def vnf_delete(self, vnf_uuid): + self._openmano_cmd( + ["vnf-delete", vnf_uuid, "-f"], + ) + + self._log.info("VNF Deleted: %s", vnf_uuid) + + def vnf_list(self): + try: + output_lines = self._openmano_cmd( + ["vnf-list"], + ) + except OpenmanoCommandFailed as e: + self._log.warning("Vnf listing returned an error: %s", str(e)) + return {} + + name_uuid_map = {} + for line in output_lines: + line = line.strip() + uuid, name = line.split() + name_uuid_map[name] = uuid + + return name_uuid_map + + def ns_create(self, ns_yaml_str, name=None): + self._log.info("Creating NS: %s", ns_yaml_str) + + with tempfile.NamedTemporaryFile() as ns_file_hdl: + ns_file_hdl.write(ns_yaml_str.encode()) + ns_file_hdl.flush() + + cmd_args = ["scenario-create", ns_file_hdl.name] + if name is not None: + cmd_args.extend(["--name", name]) + + output_lines = self._openmano_cmd( + cmd_args, + expected_lines=1 + ) + + ns_info_line = output_lines[0] + ns_id, ns_name = ns_info_line.split() + + self._log.info("NS %s Created: %s", ns_name, ns_id) + + return ns_id, ns_name + + def ns_list(self): + self._log.debug("Getting NS list") + + try: + output_lines = self._openmano_cmd( + ["scenario-list"], + ) + + except OpenmanoCommandFailed as e: + self._log.warning("NS listing returned an error: %s", str(e)) + return {} + + name_uuid_map = {} + for line in output_lines: + line = line.strip() + uuid, name = line.split() + name_uuid_map[name] = uuid + + return name_uuid_map + + def ns_delete(self, ns_uuid): + self._log.info("Deleting NS: %s", ns_uuid) + + self._openmano_cmd( + ["scenario-delete", ns_uuid, "-f"], + ) + + self._log.info("NS Deleted: %s", ns_uuid) + + def ns_instance_list(self): + self._log.debug("Getting NS instance list") + + try: + output_lines = self._openmano_cmd( + ["instance-scenario-list"], + ) + + except OpenmanoCommandFailed as e: + self._log.warning("Instance scenario listing returned an error: %s", str(e)) + return {} + + if "No scenario instances were found" in output_lines[0]: + self._log.debug("No openmano instances were found") + return {} + + name_uuid_map = {} + for line in output_lines: + line = line.strip() + uuid, name = line.split() + name_uuid_map[name] = uuid + + return name_uuid_map + + + def ns_instantiate(self, scenario_name, instance_name, datacenter_name=None): + self._log.info( + "Instantiating NS %s using instance name %s", + scenario_name, + instance_name, + ) + + cmd_args = ["scenario-deploy", scenario_name, instance_name] + if datacenter_name is not None: + cmd_args.extend(["--datacenter", datacenter_name]) + + output_lines = self._openmano_cmd( + cmd_args, + expected_lines=4 + ) + + uuid, _ = output_lines[0].split() + + self._log.info("NS Instance Created: %s", uuid) + + return 
uuid
+
+    def ns_terminate(self, ns_instance_name):
+        self._log.info("Terminating NS: %s", ns_instance_name)
+
+        self._openmano_cmd(
+            ["instance-scenario-delete", ns_instance_name, "-f"],
+            )
+
+        self._log.info("NS Instance Deleted: %s", ns_instance_name)
+
+    def datacenter_list(self):
+        lines = self._openmano_cmd(["datacenter-list",])
+
+        # The results returned from openmano are formatted with whitespace and
+        # datacenter names may contain whitespace as well, so we use a regular
+        # expression to parse each line of the results returned from openmano to
+        # extract the uuid and name of a datacenter.
+        hex = '[0-9a-fA-F]'
+        uuid_pattern = '(xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)'.replace('x', hex)
+        name_pattern = '(.+?)'
+        datacenter_regex = re.compile(r'{uuid}\s+\b{name}\s*$'.format(
+            uuid=uuid_pattern,
+            name=name_pattern,
+            ))
+
+        # Parse the results for the datacenter uuids and names
+        datacenters = list()
+        for line in lines:
+            result = datacenter_regex.match(line)
+            if result is not None:
+                uuid, name = result.groups()
+                datacenters.append((uuid, name))
+
+        return datacenters
+
+
+def valid_uuid(uuid_str):
+    uuid_re = re.compile(
+        "^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$".replace('x', '[0-9a-fA-F]')
+        )
+
+    if not uuid_re.match(uuid_str):
+        raise argparse.ArgumentTypeError("Not a valid uuid: %s" % uuid_str)
+
+    return uuid_str
+
+
+def parse_args(argv=sys.argv[1:]):
+    """ Parse the command line arguments
+
+    Arguments:
+        argv - The list of arguments to parse
+
+    Returns:
+        Argparse Namespace instance
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-d', '--host',
+        default='localhost',
+        help="Openmano host/ip",
+        )
+
+    parser.add_argument(
+        '-p', '--port',
+        default='9090',
+        help="Openmano port",
+        )
+
+    parser.add_argument(
+        '-t', '--tenant',
+        required=True,
+        type=valid_uuid,
+        help="Openmano tenant uuid to use",
+        )
+
+    subparsers = parser.add_subparsers(dest='command', help='openmano commands')
+
+    vnf_create_parser = subparsers.add_parser(
+        'vnf-create',
+        help="Adds an openmano vnf to the catalog"
+        )
+    vnf_create_parser.add_argument(
+        "file",
+        help="location of the JSON file describing the VNF",
+        type=argparse.FileType('rb'),
+        )
+
+    vnf_delete_parser = subparsers.add_parser(
+        'vnf-delete',
+        help="Deletes an openmano vnf from the catalog"
+        )
+    vnf_delete_parser.add_argument(
+        "uuid",
+        help="The vnf to delete",
+        type=valid_uuid,
+        )
+
+
+    ns_create_parser = subparsers.add_parser(
+        'scenario-create',
+        help="Adds an openmano ns scenario to the catalog"
+        )
+    ns_create_parser.add_argument(
+        "file",
+        help="location of the JSON file describing the NS",
+        type=argparse.FileType('rb'),
+        )
+
+    ns_delete_parser = subparsers.add_parser(
+        'scenario-delete',
+        help="Deletes an openmano ns from the catalog"
+        )
+    ns_delete_parser.add_argument(
+        "uuid",
+        help="The ns to delete",
+        type=valid_uuid,
+        )
+
+
+    ns_instance_create_parser = subparsers.add_parser(
+        'scenario-deploy',
+        help="Deploys an openmano ns scenario"
+        )
+    ns_instance_create_parser.add_argument(
+        "scenario_name",
+        help="The ns scenario name to deploy",
+        )
+    ns_instance_create_parser.add_argument(
+        "instance_name",
+        help="The ns instance name to deploy",
+        )
+
+
+    ns_instance_delete_parser = subparsers.add_parser(
+        'instance-scenario-delete',
+        help="Deletes an openmano ns scenario instance"
+        )
+    ns_instance_delete_parser.add_argument(
+        "instance_name",
+        help="The ns instance name to delete",
+        )
+
+
+    _ = subparsers.add_parser(
+        'datacenter-list',
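+        # datacenter-list takes no extra arguments; it prints one "<uuid> <name>" pair per datacenter attached to the tenant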
) + + args = parser.parse_args(argv) + + return args + + +def main(): + logging.basicConfig(level=logging.DEBUG) + logger = logging.getLogger("openmano_client.py") + + if "RIFT_INSTALL" not in os.environ: + logger.error("Must be in rift-shell to run.") + sys.exit(1) + + args = parse_args() + openmano_cli = OpenmanoCliAPI(logger, args.host, args.port, args.tenant) + + if args.command == "vnf-create": + openmano_cli.vnf_create(args.file.read()) + + elif args.command == "vnf-delete": + openmano_cli.vnf_delete(args.uuid) + + elif args.command == "scenario-create": + openmano_cli.ns_create(args.file.read()) + + elif args.command == "scenario-delete": + openmano_cli.ns_delete(args.uuid) + + elif args.command == "scenario-deploy": + openmano_cli.ns_instantiate(args.scenario_name, args.instance_name) + + elif args.command == "instance-scenario-delete": + openmano_cli.ns_terminate(args.instance_name) + + elif args.command == "datacenter-list": + for uuid, name in openmano_cli.datacenter_list(): + print("{} {}".format(uuid, name)) + + else: + logger.error("Unknown command: %s", args.command) + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py b/modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py new file mode 100755 index 0000000..6a4d796 --- /dev/null +++ b/modules/core/mano/models/openmano/python/rift/openmano/rift2openmano.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
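To make the conversion flow easier to follow end to end, a module docstring with a hedged usage sketch is added here; the XML file names are placeholders, while the function names and signatures match the definitions later in this file.
+"""Convert RIFT.ware NSD/VNFD descriptors into openmano descriptors.
+
+A minimal sketch of the flow implemented by main() below (file names are
+placeholders):
+
+    vnf_dict = create_vnfd_from_xml_files([open("my_vnfd.xml")])
+    nsd = create_nsd_from_xml_file(open("my_nsd.xml"))
+    openmano_nsd = rift2openmano_nsd(nsd, vnf_dict)
+    write_yaml_to_file(openmano_nsd["name"], "-", openmano_nsd)  # "-" = stdout
+"""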
+ + +# +# + +import argparse +import collections +import logging +import math +import os +import sys +import tempfile +import yaml + +from gi.repository import ( + RwYang, + RwVnfdYang, + RwNsdYang, + ) + +logging.basicConfig(level=logging.WARNING) +logger = logging.getLogger("rift2openmano.py") + + +class VNFNotFoundError(Exception): + pass + + +class RiftNSD(object): + model = RwYang.Model.create_libncx() + model.load_module('nsd') + model.load_module('rw-nsd') + + def __init__(self, descriptor): + self._nsd = descriptor + + def __str__(self): + return str(self._nsd) + + @property + def name(self): + return self._nsd.name + + @property + def id(self): + return self._nsd.id + + @property + def vnfd_ids(self): + return [c.vnfd_id_ref for c in self._nsd.constituent_vnfd] + + @property + def constituent_vnfds(self): + return self._nsd.constituent_vnfd + + @property + def vlds(self): + return self._nsd.vld + + @property + def cps(self): + return self._nsd.connection_point + + @property + def description(self): + return self._nsd.description + + @classmethod + def from_xml_file_hdl(cls, hdl): + hdl.seek(0) + descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd() + descriptor.from_xml_v2(RiftNSD.model, hdl.read()) + return cls(descriptor) + + @classmethod + def from_dict(cls, nsd_dict): + descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(nsd_dict) + return cls(descriptor) + + +class RiftVNFD(object): + model = RwYang.Model.create_libncx() + model.load_module('vnfd') + model.load_module('rw-vnfd') + + def __init__(self, descriptor): + self._vnfd = descriptor + + def __str__(self): + return str(self._vnfd) + + @property + def id(self): + return self._vnfd.id + + @property + def name(self): + return self._vnfd.name + + @property + def description(self): + return self._vnfd.description + + @property + def cps(self): + return self._vnfd.connection_point + + @property + def vdus(self): + return self._vnfd.vdu + + @property + def internal_vlds(self): + return self._vnfd.internal_vld + + @classmethod + def from_xml_file_hdl(cls, hdl): + hdl.seek(0) + descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd() + descriptor.from_xml_v2(RiftVNFD.model, hdl.read()) + return cls(descriptor) + + @classmethod + def from_dict(cls, vnfd_dict): + descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(vnfd_dict) + return cls(descriptor) + + +def is_writable_directory(dir_path): + """ Returns True if dir_path is writable, False otherwise + + Arguments: + dir_path - A directory path + """ + if not os.path.exists(dir_path): + raise ValueError("Directory does not exist: %s", dir_path) + + try: + testfile = tempfile.TemporaryFile(dir=dir_path) + testfile.close() + except OSError: + return False + + return True + + +def create_vnfd_from_xml_files(vnfd_file_hdls): + """ Create a list of RiftVNFD instances from xml file handles + + Arguments: + vnfd_file_hdls - Rift VNFD XML file handles + + Returns: + A list of RiftVNFD instances + """ + vnfd_dict = {} + for vnfd_file_hdl in vnfd_file_hdls: + vnfd = RiftVNFD.from_xml_file_hdl(vnfd_file_hdl) + vnfd_dict[vnfd.id] = vnfd + + return vnfd_dict + + +def create_nsd_from_xml_file(nsd_file_hdl): + """ Create a list of RiftNSD instances from xml file handles + + Arguments: + nsd_file_hdls - Rift NSD XML file handles + + Returns: + A list of RiftNSD instances + """ + nsd = RiftNSD.from_xml_file_hdl(nsd_file_hdl) + return nsd + + +def ddict(): + return collections.defaultdict(dict) + +def convert_vnfd_name(vnfd_name, member_idx): + return vnfd_name + "__" + 
str(member_idx) + + +def rift2openmano_nsd(rift_nsd, rift_vnfds): + for vnfd_id in rift_nsd.vnfd_ids: + if vnfd_id not in rift_vnfds: + raise VNFNotFoundError("VNF id %s not provided" % vnfd_id) + + openmano = {} + openmano["name"] = rift_nsd.name + openmano["description"] = rift_nsd.description + topology = {} + openmano["topology"] = topology + + topology["nodes"] = {} + for vnfd in rift_nsd.constituent_vnfds: + vnfd_id = vnfd.vnfd_id_ref + rift_vnfd = rift_vnfds[vnfd_id] + member_idx = vnfd.member_vnf_index + topology["nodes"][rift_vnfd.name + "__" + str(member_idx)] = { + "type": "VNF", + "VNF model": rift_vnfd.name + } + + for vld in rift_nsd.vlds: + # Openmano has both bridge_net and dataplane_net models for network types + # For now, since we are using openmano in developer mode lets just hardcode + # to bridge_net since it won't matter anyways. + # topology["nodes"][vld.name] = {"type": "network", "model": "bridge_net"} + pass + + topology["connections"] = {} + for vld in rift_nsd.vlds: + + # Create a connections entry for each external VLD + topology["connections"][vld.name] = {} + topology["connections"][vld.name]["nodes"] = [] + + if vld.provider_network.has_field("physical_network"): + # Add the external datacenter network to the topology + # node list if it isn't already added + ext_net_name = vld.provider_network.physical_network + ext_net_name_with_seg = ext_net_name + if vld.provider_network.has_field("segmentation_id"): + ext_net_name_with_seg += ":{}".format(vld.provider_network.segmentation_id) + + if ext_net_name not in topology["nodes"]: + topology["nodes"][ext_net_name] = { + "type": "external_network", + "model": ext_net_name_with_seg, + } + + # Add the external network to the list of connection points + topology["connections"][vld.name]["nodes"].append( + {ext_net_name: "0"} + ) + + + for vnfd_cp in vld.vnfd_connection_point_ref: + + # Get the RIFT VNF for this external VLD connection point + vnfd = rift_vnfds[vnfd_cp.vnfd_id_ref] + + # For each VNF in this connection, use the same interface name + topology["connections"][vld.name]["type"] = "link" + # Vnf ref is the vnf name with the member_vnf_idx appended + member_idx = vnfd_cp.member_vnf_index_ref + vnf_ref = vnfd.name + "__" + str(member_idx) + topology["connections"][vld.name]["nodes"].append( + { + vnf_ref: vnfd_cp.vnfd_connection_point_ref + } + ) + + return openmano + + +def rift2openmano_vnfd(rift_vnfd): + openmano_vnf = {"vnf":{}} + vnf = openmano_vnf["vnf"] + + vnf["name"] = rift_vnfd.name + vnf["description"] = rift_vnfd.description + + vnf["external-connections"] = [] + + def find_vdu_and_ext_if_by_cp_ref(cp_ref_name): + for vdu in rift_vnfd.vdus: + for ext_if in vdu.external_interface: + if ext_if.vnfd_connection_point_ref == cp_ref_name: + return vdu, ext_if + + raise ValueError("External connection point reference %s not found" % cp_ref_name) + + def find_vdu_and_int_if_by_cp_ref(cp_ref_id): + for vdu in rift_vnfd.vdus: + for int_if in vdu.internal_interface: + if int_if.vdu_internal_connection_point_ref == cp_ref_id: + return vdu, int_if + + raise ValueError("Internal connection point reference %s not found" % cp_ref_id) + + def rift2openmano_if_type(rift_type): + if rift_type == "OM_MGMT": + return "mgmt" + elif rift_type == "VIRTIO": + return "bridge" + else: + return "data" + + # Add all external connections + for cp in rift_vnfd.cps: + # Find the VDU and and external interface for this connection point + vdu, ext_if = find_vdu_and_ext_if_by_cp_ref(cp.name) + connection = { + "name": cp.name, + 
"type": rift2openmano_if_type(ext_if.virtual_interface.type_yang), + "VNFC": vdu.name, + "local_iface_name": ext_if.name, + "description": "%s iface on VDU %s" % (ext_if.name, vdu.name), + } + + vnf["external-connections"].append(connection) + + # Add all internal networks + for vld in rift_vnfd.internal_vlds: + connection = { + "name": vld.name, + "description": vld.description, + "type": "data", + "elements": [], + } + + # Add the specific VDU connection points + for int_cp_ref in vld.internal_connection_point_ref: + vdu, int_if = find_vdu_and_int_if_by_cp_ref(int_cp_ref) + connection["elements"].append({ + "VNFC": vdu.name, + "local_iface_name": int_if.name, + }) + if "internal-connections" not in vnf: + vnf["internal-connections"] = [] + + vnf["internal-connections"].append(connection) + + # Add VDU's + vnf["VNFC"] = [] + for vdu in rift_vnfd.vdus: + vnfc = { + "name": vdu.name, + "description": vdu.name, + "VNFC image": vdu.image if os.path.isabs(vdu.image) else "/var/images/{}".format(vdu.image), + "numas": [{ + "memory": max(int(vdu.vm_flavor.memory_mb/1024), 1), + "interfaces":[], + }], + "bridge-ifaces": [], + } + + numa_node_policy = vdu.guest_epa.numa_node_policy + if numa_node_policy.has_field("node"): + numa_node = numa_node_policy.node[0] + + if numa_node.has_field("paired_threads"): + if numa_node.paired_threads.has_field("num_paired_threads"): + vnfc["numas"][0]["paired-threads"] = numa_node.paired_threads.num_paired_threads + if len(numa_node.paired_threads.paired_thread_ids) > 0: + vnfc["numas"][0]["paired-threads-id"] = [] + for pair in numa_node.paired_threads.paired_thread_ids: + vnfc["numas"][0]["paired-threads-id"].append( + [pair.thread_a, pair.thread_b] + ) + + else: + if vdu.vm_flavor.has_field("vcpu_count"): + vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1) + + if vdu.has_field("hypervisor_epa"): + vnfc["hypervisor"] = {} + if vdu.hypervisor_epa.has_field("type"): + if vdu.hypervisor_epa.type_yang == "REQUIRE_KVM": + vnfc["hypervisor"]["type"] = "QEMU-kvm" + + if vdu.hypervisor_epa.has_field("version"): + vnfc["hypervisor"]["version"] = vdu.hypervisor_epa.version + + if vdu.has_field("host_epa"): + vnfc["processor"] = {} + if vdu.host_epa.has_field("om_cpu_model_string"): + vnfc["processor"]["model"] = vdu.host_epa.om_cpu_model_string + if vdu.host_epa.has_field("om_cpu_feature"): + vnfc["processor"]["features"] = [] + for feature in vdu.host_epa.om_cpu_feature: + vnfc["processor"]["features"].append(feature) + + + if vdu.vm_flavor.has_field("storage_gb"): + vnfc["disk"] = vdu.vm_flavor.storage_gb + + vnf["VNFC"].append(vnfc) + + for int_if in list(vdu.internal_interface) + list(vdu.external_interface): + intf = { + "name": int_if.name, + } + if int_if.virtual_interface.has_field("vpci"): + intf["vpci"] = int_if.virtual_interface.vpci + + if int_if.virtual_interface.type_yang in ["VIRTIO", "OM_MGMT"]: + vnfc["bridge-ifaces"].append(intf) + + elif int_if.virtual_interface.type_yang == "SR-IOV": + intf["bandwidth"] = "10 Gbps" + intf["dedicated"] = "yes:sriov" + vnfc["numas"][0]["interfaces"].append(intf) + + elif int_if.virtual_interface.type_yang == "PCI_PASSTHROUGH": + intf["bandwidth"] = "10 Gbps" + intf["dedicated"] = "yes" + if "interfaces" not in vnfc["numas"][0]: + vnfc["numas"][0]["interfaces"] = [] + vnfc["numas"][0]["interfaces"].append(intf) + else: + raise ValueError("Interface type %s not supported" % int_if.virtual_interface) + + if int_if.virtual_interface.has_field("bandwidth"): + if int_if.virtual_interface.bandwidth != 0: + bps = 
int_if.virtual_interface.bandwidth
+
+                    # Calculate the bits per second conversion
+                    for x in [('M', 1000000), ('G', 1000000000)]:
+                        if bps/x[1] >= 1:
+                            intf["bandwidth"] = "{} {}bps".format(math.ceil(bps/x[1]), x[0])
+
+
+    return openmano_vnf
+
+
+def parse_args(argv=sys.argv[1:]):
+    """ Parse the command line arguments
+
+    Arguments:
+        argv - The list of arguments to parse
+
+    Returns:
+        Argparse Namespace instance
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-o', '--outdir',
+        default='-',
+        help="Directory to output converted descriptors. Default is stdout",
+        )
+
+    parser.add_argument(
+        '-n', '--nsd-file-hdl',
+        metavar="nsd_xml_file",
+        type=argparse.FileType('r'),
+        help="Rift NSD Descriptor File",
+        )
+
+    parser.add_argument(
+        '-v', '--vnfd-file-hdls',
+        metavar="vnfd_xml_file",
+        action='append',
+        type=argparse.FileType('r'),
+        help="Rift VNFD Descriptor File",
+        )
+
+    args = parser.parse_args(argv)
+
+    # "-" means write to stdout, so there is no directory to create or check
+    if args.outdir != "-":
+        if not os.path.exists(args.outdir):
+            os.makedirs(args.outdir)
+
+        if not is_writable_directory(args.outdir):
+            logging.error("Directory %s is not writable", args.outdir)
+            sys.exit(1)
+
+    return args
+
+
+def write_yaml_to_file(name, outdir, desc_dict):
+    file_name = "%s.yaml" % name
+    yaml_str = yaml.dump(desc_dict)
+    if outdir == "-":
+        sys.stdout.write(yaml_str)
+        return
+
+    file_path = os.path.join(outdir, file_name)
+    dir_path = os.path.dirname(file_path)
+    if not os.path.exists(dir_path):
+        os.makedirs(dir_path)
+
+    with open(file_path, "w") as hdl:
+        hdl.write(yaml_str)
+
+    logger.info("Wrote descriptor to %s", file_path)
+
+
+def main(argv=sys.argv[1:]):
+    args = parse_args(argv)
+
+    vnf_dict = {}
+    if args.vnfd_file_hdls is not None:
+        vnf_dict = create_vnfd_from_xml_files(args.vnfd_file_hdls)
+
+    if args.nsd_file_hdl is not None:
+        nsd = create_nsd_from_xml_file(args.nsd_file_hdl)
+        openmano_nsd = rift2openmano_nsd(nsd, vnf_dict)
+        write_yaml_to_file(openmano_nsd["name"], args.outdir, openmano_nsd)
+
+    for vnf in vnf_dict.values():
+        openmano_vnf = rift2openmano_vnfd(vnf)
+        write_yaml_to_file(openmano_vnf["vnf"]["name"], args.outdir, openmano_vnf)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/models/openmano/src/CMakeLists.txt b/modules/core/mano/models/openmano/src/CMakeLists.txt
new file mode 100644
index 0000000..486a9df
--- /dev/null
+++ b/modules/core/mano/models/openmano/src/CMakeLists.txt
@@ -0,0 +1,71 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+configure_file(
+  ${CMAKE_CURRENT_SOURCE_DIR}/generate_tidgen_packages.sh.in
+  ${CMAKE_CURRENT_BINARY_DIR}/generate_tidgen_packages.sh
+  ESCAPE_QUOTES @ONLY
+  )
+
+add_custom_command(
+  OUTPUT
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz
+
+
+  COMMAND
+    ${CMAKE_CURRENT_BINARY_DIR}/generate_tidgen_packages.sh
+
+  DEPENDS
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_2sriov.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_4sriov.yaml
+    
${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_2sriov.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_4sriov.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/src/openmano2rift.py + ) + +add_custom_target(tidgen ALL + DEPENDS + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz + mano_yang + ) + +install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz + ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz + + + DESTINATION + usr/rift/mano/examples/tidgen_ns + COMPONENT ${PKG_LONG_NAME} + ) diff --git a/modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in b/modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in new file mode 100755 index 0000000..95c2f38 --- /dev/null +++ b/modules/core/mano/models/openmano/src/generate_tidgen_packages.sh.in @@ -0,0 +1,40 @@ +#! 
/usr/bin/bash
+
+set -e
+
+SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@
+BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@
+PROJECT_TOP_DIR=@PROJECT_TOP_DIR@
+
+# These paths are needed for finding the overrides and .so files
+PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+
+# Remove any old directories
+rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov
+rm -rf ${BINARY_DIR}/tidgenMWC_4sriov
+rm -rf ${BINARY_DIR}/2tidgenMWC_2sriov
+rm -rf ${BINARY_DIR}/tidgenMWC_2sriov
+rm -rf ${BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet
+rm -rf ${BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet
+rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet
+rm -rf ${BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet
+
+
+# Generate the descriptors
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_4sriov.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_4sriov.yaml
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_2sriov.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_2sriov.yaml
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml
+
+
+# Generate the tar files
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_4sriov
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_4sriov
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_2sriov
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_2sriov
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_2sriov_no_ctrlnet
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_2sriov_no_ctrlnet
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_4sriov_no_ctrlnet
+${PROJECT_TOP_DIR}/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_4sriov_no_ctrlnet
diff --git a/modules/core/mano/models/openmano/src/openmano2rift.py b/modules/core/mano/models/openmano/src/openmano2rift.py
new file mode 100755
index 0000000..ba6ef57
--- /dev/null
+++ b/modules/core/mano/models/openmano/src/openmano2rift.py
@@ -0,0 +1,486 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
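
generate_descriptor_pkg.sh itself is not part of this diff, so its exact behavior is an assumption; judging from the OUTPUT files declared in the CMakeLists.txt above, the packaging step amounts to tarring one staged descriptor directory per package. A minimal Python sketch of that step (the base_dir/pkg_name layout and the pkg_name.tar.gz naming are inferred from the build outputs, not confirmed):

    # Hypothetical stand-in for generate_descriptor_pkg.sh: tar the staged
    # directory <base_dir>/<pkg_name>/ into <base_dir>/<pkg_name>.tar.gz.
    import os
    import tarfile

    def generate_descriptor_pkg(base_dir, pkg_name):
        pkg_dir = os.path.join(base_dir, pkg_name)
        tar_path = os.path.join(base_dir, pkg_name + ".tar.gz")
        with tarfile.open(tar_path, "w:gz") as tar:
            # arcname keeps the package name as the top-level directory
            tar.add(pkg_dir, arcname=pkg_name)
        return tar_path

    # e.g. generate_descriptor_pkg("/path/to/build", "tidgenMWC_2sriov")
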
+ + +# +# + + +import argparse +import itertools +import logging +import os +import sys +import tempfile +import uuid +import yaml + +import gi +gi.require_version('RwYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwNsdYang', '1.0') +from gi.repository import ( + RwYang, + RwVnfdYang, + RwNsdYang, + ) + +logging.basicConfig(level=logging.WARNING) +logger = logging.getLogger("openmano2rift.py") + + +class UnknownVNFError(Exception): + pass + + +class DescriptorFileWriter(object): + def __init__(self, module_list, output_dir, output_format): + self._model = RwYang.Model.create_libncx() + for module in module_list: + self._model.load_module(module) + + self._output_dir = output_dir + self._output_format = output_format + + def _write_file(self, file_name, output): + file_path = os.path.join(self._output_dir, file_name) + dir_path = os.path.dirname(file_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(file_path, "w") as hdl: + hdl.write(output) + + logger.info("Wrote descriptor to %s", file_path) + + def _write_json(self, descriptor, subdir): + self._write_file( + '%s.json' % os.path.join(descriptor.name, subdir, descriptor.name), + descriptor.descriptor.to_json(self._model) + ) + + def _write_xml(self, descriptor, subdir): + self._write_file( + '%s.xml' % os.path.join(descriptor.name, subdir, descriptor.name), + descriptor.descriptor.to_xml_v2(self._model, pretty_print=True) + ) + + def _write_yaml(self, descriptor, subdir): + self._write_file( + '%s.yaml' % os.path.join(descriptor.name, subdir, descriptor.name), + yaml.dump(descriptor.descriptor.as_dict()), + ) + + def write_descriptor(self, descriptor, subdir=""): + if self._output_format == 'json': + self._write_json(descriptor, subdir=subdir) + + elif self._output_format == 'xml': + self._write_xml(descriptor, subdir=subdir) + + elif self._output_format == 'yaml': + self._write_yaml(descriptor, subdir=subdir) + + +class RiftManoDescriptor(object): + def __init__(self, openmano=None): + self.openmano = openmano + self.descriptor = None + + +class RiftNS(RiftManoDescriptor): + def __init__(self, openmano=None): + super().__init__(openmano) + self.nsd_catalog = None + self.nsd = None + self.name = None + + def get_vnfd_id(self, vnf_list, vnf_name): + for vnf in vnf_list: + if vnf.name == vnf_name: + return vnf.vnfd.id + + # Didn't find the vnf just return the vnf_name + return vnf_name + + def openmano2rift(self, vnf_list): + self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog() + openmano_nsd = self.openmano.dictionary + self.name = openmano_nsd['name'] + nsd = self.descriptor.nsd.add() + nsd.id = str(uuid.uuid1()) + nsd.name = self.name + nsd.short_name = self.name + nsd.description = openmano_nsd['description'] + + nodes = openmano_nsd['topology']['nodes'] + connections = openmano_nsd['topology']['connections'] + + def create_consituent_vnfds(): + vnf_member_index_dict = {} + + vnfd_idx_gen = itertools.count(1) + for key in nodes: + node = nodes[key] + if node['type'] != 'VNF': + continue + + vnfd_idx = next(vnfd_idx_gen) + constituent_vnfd = nsd.constituent_vnfd.add() + constituent_vnfd.member_vnf_index = vnfd_idx + constituent_vnfd.vnfd_id_ref = self.get_vnfd_id(vnf_list, node['VNF model']) + vnf_member_index_dict[key] = vnfd_idx + + return vnf_member_index_dict + + def create_connections(vnf_member_index_dict): + keys = connections.keys() + for key in keys: + # TODO: Need clarification from TEF + # skip the mgmtnet in OpenMANO descriptor + if key == 'mgmtnet': + continue + conn 
= connections[key] + vld = nsd.vld.add() + vld.from_dict(dict( + id=str(uuid.uuid1()), + name=key, + short_name=key, + type_yang='ELAN', + )) + + nodes = conn['nodes'] + for node, node_keys in [(node, node.keys()) for node in nodes]: + for node_key in node_keys: + topo_node = openmano_nsd['topology']['nodes'][node_key] + if topo_node['type'] == 'VNF': + cpref = vld.vnfd_connection_point_ref.add() + cpref.from_dict(dict( + member_vnf_index_ref=vnf_member_index_dict[node_key], + vnfd_id_ref=self.get_vnfd_id(vnf_list, topo_node['VNF model']), + #vnfd_id_ref=topo_node['VNF model'], + vnfd_connection_point_ref=node[node_key], + )) + if key != 'control-net': + vld.provider_network.physical_network = 'physnet_sriov' + vld.provider_network.overlay_type = 'VLAN' + + vnf_member_index_dict = create_consituent_vnfds() + create_connections(vnf_member_index_dict) + + +class RiftVnfd(RiftManoDescriptor): + def __init__(self, openmano=None): + super().__init__(openmano) + self.vnfd_catalog = None + self.vnfd = None + + def find_external_connection(self, vdu_name, if_name): + """ + Find if the vdu interface has an external connection. + """ + openmano_vnfd = self.openmano.dictionary['vnf'] + if 'external-connections' not in openmano_vnfd: + return None + + ext_conn_list = openmano_vnfd['external-connections'] + for ext_conn in ext_conn_list: + if ((ext_conn['VNFC'] == vdu_name) and + (ext_conn['local_iface_name'] == if_name)): + return ext_conn + + return None + + def openmano2rift(self): + self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog() + vnfd = self.descriptor.vnfd.add() + self.vnfd = vnfd + vnfd.id = str(uuid.uuid1()) + + openmano_vnfd = self.openmano.dictionary['vnf'] + self.name = openmano_vnfd['name'] + vnfd.name = self.name + if "description" in openmano_vnfd: + vnfd.description = openmano_vnfd['description'] + + # Parse and add all the external connection points + if 'external-connections' in openmano_vnfd: + ext_conn_list = openmano_vnfd['external-connections'] + + for ext_conn in ext_conn_list: + # TODO: Fix this + if ext_conn['name'] == 'eth0': + continue + conn_point = vnfd.connection_point.add() + conn_point.name = ext_conn['name'] + conn_point.type_yang = 'VPORT' + + # TODO: Need a concrete example of how openmano descriptor + # uses internal connections. 
+ if 'internal-connections' in openmano_vnfd: + int_conn_list = openmano_vnfd['internal-connections'] + + def add_external_interfaces(vdu, numa): + if 'interfaces' not in numa: + return + + numa_if_list = numa['interfaces'] + for numa_if in numa_if_list: + ext_conn = self.find_external_connection(vdu.name, numa_if['name']) + if not ext_conn: + continue + + ext_iface = vdu.external_interface.add() + ext_iface.name = numa_if['name'] + ext_iface.vnfd_connection_point_ref = ext_conn['name'] + ext_iface.virtual_interface.vpci = numa_if['vpci'] + if numa_if['dedicated'] == 'no': + ext_iface.virtual_interface.type_yang = 'SR_IOV' + else: + ext_iface.virtual_interface.type_yang = 'PCI_PASSTHROUGH' + + vnfc_list = openmano_vnfd['VNFC'] + for vnfc in vnfc_list: + vdu = vnfd.vdu.add() + vdu_dict = dict( + id=str(uuid.uuid1()), + name=vnfc['name'], + image=vnfc['VNFC image'], + vm_flavor={"storage_gb": vnfc["disk"] if "disk" in vnfc else 20}, + ) + if "description" in vnfc: + vdu_dict["description"] = vnfc['description'] + + vdu.from_dict(vdu_dict) + + vnfd.mgmt_interface.vdu_id = vdu.id + + numa_list = vnfc['numas'] + memory = 0 + vcpu_count = 0 + numa_node_cnt = 0 + + for numa in numa_list: + node = vdu.guest_epa.numa_node_policy.node.add() + node.id = numa_node_cnt + # node.memory_mb = int(numa['memory']) * 1024 + numa_node_cnt += 1 + + memory = memory + node.memory_mb + # Need a better explanation of "cores", "paired-threads", "threads" + # in openmano descriptor. Particularly how they map to cpu and + # thread pinning policies + if 'paired-threads' in numa: + vcpu_count = vcpu_count + int(numa['paired-threads']) * 2 + + if 'cores' in numa: + vcpu_count = vcpu_count + int(numa['cores']) + + add_external_interfaces(vdu, numa) + + + # vdu.vm_flavor.memory_mb = memory + vdu.vm_flavor.memory_mb = 12 * 1024 + vdu.vm_flavor.vcpu_count = vcpu_count + vdu.guest_epa.numa_node_policy.node_cnt = numa_node_cnt + vdu.guest_epa.numa_node_policy.mem_policy = 'STRICT' + vdu.guest_epa.mempage_size = 'LARGE' + vdu.guest_epa.cpu_pinning_policy = 'DEDICATED' + vdu.guest_epa.cpu_thread_pinning_policy = 'PREFER' + + # TODO: Enable hypervisor epa + # vdu.hypervisor_epa.version = vnfc['hypervisor']['version'] + # if vnfc['hypervisor']['type'] == 'QEMU-kvm': + # vdu.hypervisor_epa.type_yang = 'REQUIRE_KVM' + # else: + # vdu.hypervisor_epa.type_yang = 'PREFER_KVM' + + # TODO: Enable host epa + # vdu.host_epa.cpu_feature = vnfc['processor']['features'] + + # Parse the bridge interfaces + if 'bridge-ifaces' in vnfc: + bridge_ifaces = vnfc['bridge-ifaces'] + + + for bridge_iface in bridge_ifaces: + # TODO: Fix this + if bridge_iface['name'] == 'eth0': + continue + + ext_conn = self.find_external_connection(vdu.name, + bridge_iface['name']) + if ext_conn: + ext_iface = vdu.external_interface.add() + ext_iface.name = bridge_iface['name'] + ext_iface.vnfd_connection_point_ref = ext_conn['name'] + if 'vpci' in bridge_iface: + ext_iface.virtual_interface.vpci = bridge_iface['vpci'] + ext_iface.virtual_interface.type_yang = 'VIRTIO' + + # set vpci information for the 'default' network + # TODO: This needs to be inferred gtom bridge ifaces, + # need input from TEF + vdu.mgmt_vpci = "0000:00:0a.0" + + +class OpenManoDescriptor(object): + def __init__(self, yaml_file_hdl): + self.dictionary = yaml.load(yaml_file_hdl) + + @property + def type(self): + """ The descriptor type (ns or vnf)""" + if 'vnf' in self.dictionary: + return "vnf" + else: + return "ns" + + def dump(self): + """ Dump the Descriptor out to stdout """ + 
print(yaml.dump(self.dictionary)) + + +def is_writable_directory(dir_path): + """ Returns True if dir_path is writable, False otherwise + + Arguments: + dir_path - A directory path + """ + if not os.path.exists(dir_path): + raise ValueError("Directory does not exist: %s", dir_path) + + try: + testfile = tempfile.TemporaryFile(dir=dir_path) + testfile.close() + except OSError: + return False + + return True + + +def create_vnfs_from_yaml_files(yaml_file_hdls): + """ Create a list of RiftVnfd instances from yaml file handles + + Arguments: + yaml_file_hdls - OpenMano Yaml file handles + + Returns: + A list of RiftVnfd instances + """ + vnf_list = [] + for yaml_file_hdl in yaml_file_hdls: + openmano = OpenManoDescriptor(yaml_file_hdl) + yaml_file_hdl.seek(0) + + if openmano.type != "vnf": + continue + + vnf = RiftVnfd(openmano) + vnf.openmano2rift() + vnf_list.append(vnf) + + return vnf_list + + +def create_ns_from_yaml_files(yaml_file_hdls, vnf_list): + """ Create a list of RiftNS instances from yaml file handles + + Arguments: + yaml_file_hdls - OpenMano Yaml file handles + vnf_list - list of RiftVnfd + + Returns: + A list of RiftNS instances + """ + ns_list = [] + for yaml_file_hdl in yaml_file_hdls: + openmano = OpenManoDescriptor(yaml_file_hdl) + if openmano.type != "ns": + continue + + net_svc = RiftNS(openmano) + net_svc.openmano2rift(vnf_list) + ns_list.append(net_svc) + + return ns_list + + +def parse_args(argv=sys.argv[1:]): + """ Parse the command line arguments + + Arguments: + arv - The list of arguments to parse + + Returns: + Argparse Namespace instance + + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '-o', '--outdir', + default='.', + help="Directory to output converted descriptors", + ) + + parser.add_argument( + '-f', '--format', + choices=['yaml', 'xml', 'json'], + default='xml', + help="Descriptor output format", + ) + + parser.add_argument( + 'yaml_file_hdls', + metavar="yaml_file", + nargs="+", + type=argparse.FileType('r'), + help="OpenMano YAML Descriptor File", + ) + + args = parser.parse_args(argv) + + if not os.path.exists(args.outdir): + os.makedirs(args.outdir) + + if not is_writable_directory(args.outdir): + logging.error("Directory %s is not writable", args.outdir) + sys.exit(1) + + return args + + +def main(argv=sys.argv[1:]): + args = parse_args(argv) + + vnf_list = create_vnfs_from_yaml_files(args.yaml_file_hdls) + ns_list = create_ns_from_yaml_files(args.yaml_file_hdls, vnf_list) + + writer = DescriptorFileWriter( + module_list=['nsd', 'rw-nsd', 'vnfd', 'rw-vnfd'], + output_dir=args.outdir, + output_format=args.format, + ) + + for nw_svc in ns_list: + writer.write_descriptor(nw_svc, subdir="nsd") + + for vnf in vnf_list: + writer.write_descriptor(vnf, subdir="vnfd") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py b/modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py new file mode 100755 index 0000000..12832e8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/mwc16-gen_test.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import dictdiffer +import logging +import os +import sys +import unittest +import xmlrunner +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client + +logger = logging.getLogger() + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) + +def delete_list_dict_keys(source_list, lst_keys): + for l in source_list: + if isinstance(l, dict): + delete_keys_from_dict(l, lst_keys) + elif isinstance(l, list): + delete_list_dict_keys(l, lst_keys) + +def delete_keys_from_dict(source_dict, lst_keys): + for k in lst_keys: + try: + del source_dict[k] + except KeyError: + pass + for v in source_dict.values(): + if isinstance(v, dict): + delete_keys_from_dict(v, lst_keys) + if isinstance(v, list): + delete_list_dict_keys(v, lst_keys) + + +class Rift2OpenmanoTest(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.maxDiff = None + + def load_openmano_vnf(self, openmano_vnf_path): + with open(openmano_vnf_path, 'rb') as hdl: + openmano_vnf = yaml.load(hdl) + + return openmano_vnf + + def load_openmano_ns(self, openmano_ns_path): + with open(openmano_ns_path, 'rb') as hdl: + openmano_ns = yaml.load(hdl) + + return openmano_ns + + def rift_vnf(self, rift_vnf_path): + with open(rift_vnf_path, 'r') as xml_hdl: + rift_vnf = rift2openmano.RiftVNFD.from_xml_file_hdl(xml_hdl) + return rift_vnf + + def rift2openmano_vnf(self, rift_vnf_path): + rift_vnf = self.rift_vnf(rift_vnf_path) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(rift_vnf) + + return openmano_vnfd + + def rift2openmano_ns(self, rift_ns_path, rift_vnf_paths): + rift_vnf_hdls = [open(path, 'r') for path in rift_vnf_paths] + vnf_dict = rift2openmano.create_vnfd_from_xml_files(rift_vnf_hdls) + + with open(rift_ns_path, 'r') as xml_hdl: + rift_ns = rift2openmano.RiftNSD.from_xml_file_hdl(xml_hdl) + + openmano_nsd = rift2openmano.rift2openmano_nsd(rift_ns, vnf_dict) + logger.debug( + "Converted ns: %s", + yaml.safe_dump(openmano_nsd, indent=4, default_flow_style=False)) + + return openmano_nsd + + def generate_vnf_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(source_dict, ["description"]) + delete_keys_from_dict(dest_dict, ["description"]) + + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + def generate_ns_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(dest_dict, ["graph"]) + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + +class Mwc16GenTest(Rift2OpenmanoTest): + OPENMANO_TIDGEN_VNF_PATH = os.path.join( + THIS_DIR, "openmano_descriptors/tidgen4pLarge.yaml" + ) + RIFT_TIDGEN_VNF_PATH = os.path.join( + THIS_DIR, "rift_descriptors/tidgen4pLarge.xml" + ) + + OPENMANO_MWC16_NS_PATH = os.path.join( + THIS_DIR, "openmano_descriptors/mwc16-gen.yaml" + ) + RIFT_MWC16_NS_PATH = os.path.join( + THIS_DIR, "rift_descriptors/mwc16-gen.xml" + ) + + def test_tidgen_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_TIDGEN_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_TIDGEN_VNF_PATH) + + diffs = 
self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_mwc16_gen_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_MWC16_NS_PATH, [Mwc16GenTest.RIFT_TIDGEN_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + logger.setLevel(logging.DEBUG if args.verbose else logging.WARN) + + unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml new file mode 100644 index 0000000..e6f7912 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/6WindTR1.1.2.yaml @@ -0,0 +1,99 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
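
The test above reduces descriptor comparison to plain dictionaries: keys that are allowed to differ (for example free-text descriptions) are stripped from both sides, and dictdiffer then reports whatever remains. The same pattern in isolation, on toy dictionaries rather than real descriptors:

    import dictdiffer

    reference = {"vnf": {"name": "demo", "description": "hand-written"}}
    converted = {"vnf": {"name": "demo", "description": "generated"}}

    def delete_keys(d, keys):
        # Recursively drop keys (such as descriptions) that may
        # legitimately differ between the two descriptors.
        for k in keys:
            d.pop(k, None)
        for v in d.values():
            if isinstance(v, dict):
                delete_keys(v, keys)

    delete_keys(reference, ["description"])
    delete_keys(converted, ["description"])

    # An empty diff list means the two descriptors are equivalent.
    assert list(dictdiffer.diff(converted, reference)) == []
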
+# +--- + vnf: + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: + - - 0 + - 1 + - - 2 + - 3 + - - 4 + - 5 + - - 6 + - 7 + - - 8 + - 9 + - - 10 + - 11 + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: + - "64b" + - "iommu" + - "lps" + - "tlbps" + - "hwsv" + - "dioc" + - "ht" + name: "VM" + name: "6WindTR1.1.2" + external-connections: + - local_iface_name: "eth0" + VNFC: "VM" + type: "bridge" + name: "eth0" + description: "Data" + - local_iface_name: "xe0" + VNFC: "VM" + type: "data" + name: "xe0" + description: "Data" + - local_iface_name: "xe1" + VNFC: "VM" + type: "data" + name: "xe1" + description: "Data" + - local_iface_name: "xe2" + VNFC: "VM" + type: "data" + name: "xe2" + description: "Data" + - local_iface_name: "xe3" + VNFC: "VM" + type: "data" + name: "xe3" + description: "Data" diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/Scenarios PE- Gen.jpg b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/Scenarios PE- Gen.jpg new file mode 100644 index 0000000..82e9a3b Binary files /dev/null and b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/Scenarios PE- Gen.jpg differ diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml new file mode 100644 index 0000000..406ed71 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/TID-MGMTGW.yaml @@ -0,0 +1,61 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
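
In the 6WindTR descriptor above, compute capacity is declared per NUMA node ("paired-threads: 6", "memory: 8" in GB). The openmano2rift.py converter in this diff derives a flavor from those fields by counting each paired thread as two vCPUs and each entry under "cores" as one; a condensed sketch of that arithmetic (note that the checked-in converter currently pins memory_mb to 12 GB and leaves the per-NUMA memory computation commented out):

    def flavor_from_numas(numas):
        # Mirror the openmano2rift.py rules: paired-threads map to two
        # vCPUs each, cores map one-to-one, memory is declared in GB.
        vcpu_count = 0
        memory_mb = 0
        for numa in numas:
            if 'paired-threads' in numa:
                vcpu_count += int(numa['paired-threads']) * 2
            if 'cores' in numa:
                vcpu_count += int(numa['cores'])
            memory_mb += int(numa.get('memory', 0)) * 1024
        return vcpu_count, memory_mb

    # For the descriptor above: one NUMA node, paired-threads: 6, memory: 8
    assert flavor_from_numas([{'paired-threads': 6, 'memory': 8}]) == (12, 8192)
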
+# +--- + vnf: + VNFC: + - + bridge-ifaces: + - vpci: "0000:00:10.0" + bandwidth: "1 Gbps" + name: "mgmt0" + - vpci: "0000:00:11.0" + bandwidth: "1 Gbps" + name: "pub0" + numas: + - interfaces: [] + paired-threads-id: + - - 0 + - 1 + paired-threads: 1 + memory: 4 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/tid/TID-MGMTGW-VM.qcow2" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: + - "64b" + - "iommu" + - "lps" + - "tlbps" + - "hwsv" + - "dioc" + - "ht" + name: "VM" + name: "TID-MGMTGW" + external-connections: + - local_iface_name: "mgmt0" + VNFC: "VM" + type: "mgmt" + name: "mgmt0" + description: "Management interface" + - local_iface_name: "pub0" + VNFC: "VM" + type: "bridge" + name: "pub0" + description: "Interface to the Reference Lab" + diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml new file mode 100644 index 0000000..604513a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-gen.yaml @@ -0,0 +1,54 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: "mwc16-gen" +description: "mwc16 generator/sinc for testing a corporate network" +topology: + nodes: + tidgen4pLarge1: + type: VNF + VNF model: tidgen4pLarge + tidgen4pLarge2: + type: VNF + VNF model: tidgen4pLarge + +#external datacenter networks must be declared here + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mwc16mgmt: + type: external_network + model: mwc16mgmt + connections: + connection 2: + type: link + nodes: + - mwc16data1: null + - tidgen4pLarge1: xe0 + connection 3: + type: link + nodes: + - mwc16data2: null + - tidgen4pLarge2: xe0 + connection 4: + type: link + nodes: + - mwc16mgmt: null + - tidgen4pLarge1: eth0 + - tidgen4pLarge2: eth0 + diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml new file mode 100644 index 0000000..ca53092 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/mwc16-pe.yaml @@ -0,0 +1,189 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
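
The "type" on each external connection above ("mgmt", "bridge", "data") is exactly what the two converter scripts in this diff translate to and from RIFT virtual-interface types. A compact restatement of the mapping as implemented in rift2openmano.py and openmano2rift.py:

    # Forward mapping used by rift2openmano_if_type() in this diff.
    RIFT_TO_OPENMANO = {
        "OM_MGMT": "mgmt",    # management interface
        "VIRTIO": "bridge",   # virtio NICs become bridge interfaces
        # anything else (SR-IOV, PCI passthrough, ...) is reported as "data"
    }

    def rift2openmano_if_type(rift_type):
        return RIFT_TO_OPENMANO.get(rift_type, "data")

    # Reverse direction (openmano2rift.py): NUMA interfaces with
    # dedicated == "no" become SR_IOV, anything else PCI_PASSTHROUGH,
    # and bridge-ifaces become VIRTIO.
    assert rift2openmano_if_type("OM_MGMT") == "mgmt"
    assert rift2openmano_if_type("SR-IOV") == "data"
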
+# +name: "mwc16-pe" +description: "mwc16 3 PEs, plus a gateway VNF for access control " +topology: + nodes: + PE1: + graph: + x: 298 + y: 149 + ifaces: + left: + - - xe2 + - d + - - xe3 + - d + right: + - - xe0 + - d + - - xe1 + - d + bottom: + - - eth0 + - v + type: VNF + VNF model: 6WindTR1.1.2 + PE2: + graph: #all graph sections are optional, for web displaying + x: 745 + y: 148 + ifaces: + left: + - - xe0 + - d + - - xe1 + - d + right: + - - xe2 + - d + - - xe3 + - d + bottom: + - - eth0 + - v + type: VNF + VNF model: 6WindTR1.1.2 + PE3: + graph: + x: 536 + y: 320 + ifaces: + left: + - - xe0 + - d + - - xe2 + - d + right: + - - xe1 + - d + - - xe3 + - d + bottom: + - - eth0 + - v + type: VNF + VNF model: 6WindTR1.1.2 + "TID-MGMTGW": #this is gateway VM + graph: + x: 465 + y: 591 + ifaces: + left: + - - pub0 + - v + right: + - - mgmt0 + - m + type: VNF + VNF model: "TID-MGMTGW" + + #external datacenter networks that this scenario use must be declared here + "macvtap:em2": + graph: + x: 169 + y: 589 + ifaces: + right: + - - "0" + - v + type: external_network + model: "macvtap:em2" + MAN: + graph: + x: 872 + y: 324 + ifaces: + left: + - - "0" + - d + type: external_network + model: MAN + mwc16data1: + graph: + x: 51 + y: 149 + ifaces: + right: + - - "0" + - d + type: external_network + model: mwc16data1 + mwc16data2: + graph: + x: 989 + y: 149 + ifaces: + left: + - - "0" + - d + type: external_network + model: mwc16data2 + mwc16mgmt: + graph: + x: 751 + y: 567 + ifaces: + left: + - - "0" + - v + type: external_network + model: mwc16mgmt + connections: + connection 0: + type: link + nodes: + - "macvtap:em2": null #connect external network "macvtap:em2" (null because it does not have interfaces) + - "TID-MGMTGW": pub0 #connect interface "pub0" from VNF "TID-MGMTGW" + connection 1: + type: link + nodes: + - MAN: null + - PE3: xe3 + connection 2: + type: link + nodes: + - mwc16data1: null + - PE1: xe2 + connection 3: + type: link + nodes: + - mwc16data2: null + - PE2: xe2 + connection 4: + type: link + nodes: + - mwc16mgmt: null + - "TID-MGMTGW": mgmt0 + - PE1: eth0 + - PE2: eth0 + - PE3: eth0 + connection 8: + type: link + nodes: + - PE2: xe1 + - PE3: xe1 + connection 9: + type: link + nodes: + - PE1: xe1 + - PE3: xe0 + connection 10: + type: link + nodes: + - PE1: xe0 + - PE2: xe0 + diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt new file mode 100644 index 0000000..93874ad --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openmano-openvim.txt @@ -0,0 +1,38 @@ + +#1 Create an openmano tenant (MANUAL) +openmano tenant-create SP --description="Tenant for service provider" + +openmano tenant-list + # 5b774582-b5e6-11e5-8b84-5254006be016 SP + +#2 Create openvim datacenter and attach to this tenant (MANUAL) +openmano datacenter-create openvim1 http://localhost:9080/openvim + +openmano datacenter-list --all + # 03edb122-b544-11e5-8b84-5254006be016 OSDC + +#3 Attach the datacenter to the tenant (MANUAL) +export OPENMANO_TENANT=5b774582-b5e6-11e5-8b84-5254006be016 #USE YOUR UUID +openmano datacenter-attach openvim1 --vim-tenant-id 21b586fa-b5e2-11e5-a97e-5254006be016 #USE YOUR UUID "openvim tenant-list" + + +#4 Update/Get the datacenter external networks +openmano datacenter-net-update -f openvim1 + +#5 Create VNFs (AUTO) +openmano vnf-create tidgen4pLarge.yaml +openmano 
vnf-create 6WindTR1.1.2.yaml
+openmano vnf-create TID-MGMTGW.yaml
+
+#6 Create PEs (SP) scenario (monosite) (AUTO)
+openmano scenario-create mwc16-pe.yaml
+
+#7 Deploy PEs (SP) scenario (AUTO)
+openmano scenario-deploy mwc16-pe mwc16-pe
+
+#8 Create gen/sync traffic generators scenario (AUTO)
+openmano scenario-create mwc16-gen.yaml
+
+#9 Deploy gen/sync traffic generators scenario (AUTO)
+openmano scenario-deploy mwc16-gen mwc16-gen
+
diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt
new file mode 100644
index 0000000..32b877f
--- /dev/null
+++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/steps-openvim.txt
@@ -0,0 +1,42 @@
+#1 add compute host to openvim (MANUAL)
+#descriptors must be created with ./script/host-add.sh
+openvim host-add nfv102.yaml
+openvim host-add nfv103.yaml
+openvim host-add nfv104.yaml
+openvim host-add nfv105.yaml
+
+openvim host-list
+    # 60b31d5a-b5e1-11e5-8492-5254006be016 nfv102
+    # c3c1d9be-b5e0-11e5-8492-5254006be016 nfv104
+    # cf0b5d22-b5e0-11e5-8492-5254006be016 nfv103
+    # f6ce6b0c-b5df-11e5-8492-5254006be016 nfv105
+
+#2 create external networks
+openvim net-create openmano/openvim/test/networks/net-example0.yaml
+    # 0bcdd112-b5e2-11e5-a97e-5254006be016 default Created
+openvim net-create openmano/openvim/test/networks/net-example1.yaml
+    # 0f019648-b5e2-11e5-a97e-5254006be016 macvtap:em1 Created
+openvim net-create '{network: {name: "macvtap:em2", type: bridge_man, shared: true, "provider:physical":"macvtap:em2"}}'
+    #USE an appropriate value at provider:physical depending on your environment
+    #in our case it is a physical compute node interface that can be used for access
+    #this field can be omitted, so that openvim will create the net using one of the preprovisioned compute node bridge interfaces
+    #also a specific bridge can be forced, e.g.: "provider:physical": "bridge:virbrMan1"
+
+    # 1f4e7d6c-b5ed-11e5-a97e-5254006be016 macvtap:em2 Created
+openvim net-create '{network: {name: "mwc16data1", type: data, shared: true, "provider:physical":"openflow:port1/5"}}'
+    #USE an appropriate value at provider:physical depending on your environment
+    #in our case this is a valid openflow port at the openflow switch
+    #this field can be omitted, so that openvim will create the net without attaching it to a concrete switch physical port ...
+    #... but it will connect all the VNFs of all scenarios attached to this network
+    #also it can be a fake name if "host only" mode is used for openvim
+    #the reason for forcing a concrete switch port is to make it easier to connect other scenarios deployed with a DIFFERENT VIM
+
+openvim net-create '{network: {name: "mwc16data2", type: data, shared: true, "provider:physical":"openflow:port1/6"}}'
+openvim net-create '{network: {name: "MAN", type: data, shared: true, "provider:physical":"openflow:port1/7"}}'
+
+
+#3 create a tenant
+openvim tenant-create --name admin --description admin
+    # 21b586fa-b5e2-11e5-a97e-5254006be016 admin Created
+
+export OPENVIM_TENANT=21b586fa-b5e2-11e5-a97e-5254006be016 #USE YOUR UUID
diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml
new file mode 100644
index 0000000..111b5dc
--- /dev/null
+++ b/modules/core/mano/models/openmano/test/osm_descriptors/openmano_descriptors/tidgen4pLarge.yaml
@@ -0,0 +1,89 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+---
+vnf:
+    name: tidgen4pLarge
+    description: tidgen 4x10Gbps 28GB 11cores
+    class: TID
+    external-connections:
+    -   name: eth0
+        type: bridge
+        VNFC: tidgen4pLarge-VM
+        local_iface_name: eth0
+        description: Bridge interface, request for dhcp
+    -   name: eth1
+        type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data"
+        VNFC: tidgen4pLarge-VM # Virtual Machine this interface belongs to
+        local_iface_name: eth1 # name inside this Virtual Machine
+        description: Other management interface for general use
+    -   name: xe0
+        type: data
+        VNFC: tidgen4pLarge-VM
+        local_iface_name: xe0
+        description: Data interface 1
+    -   name: xe1
+        type: data
+        VNFC: tidgen4pLarge-VM
+        local_iface_name: xe1
+        description: Data interface 2
+    -   name: xe2
+        type: data
+        VNFC: tidgen4pLarge-VM
+        local_iface_name: xe2
+        description: Data interface 3
+    -   name: xe3
+        type: data
+        VNFC: tidgen4pLarge-VM
+        local_iface_name: xe3
+        description: Data interface 4
+    VNFC: # Virtual machine array
+    -   name: tidgen4pLarge-VM # name of Virtual Machine
+        description: tidgen with 4x10Gbps 28GB
+        VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenLarge.qcow2
+        image metadata: {"use_incremental": "no" } #is already incremental
+        processor: #Optional, leave it
+            model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+            features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        hypervisor: #Optional, leave it
+            type: QEMU-kvm
+            version: "10002|12001|2.6.32-358.el6.x86_64"
+        numas:
+        -   paired-threads: 11 # "cores", "paired-threads", "threads"
+            memory: 28 # GBytes
+            interfaces:
+            -   name: xe0
+                vpci: "0000:00:10.0"
+                dedicated: "yes" # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name: xe1
+                vpci: "0000:00:11.0"
+                dedicated: "yes"
+                bandwidth: 10 Gbps
+            -   name: xe2
+                vpci: "0000:00:12.0"
+                dedicated: "yes"
+                bandwidth: 10 Gbps
+            -   name: xe3
+                vpci:
"0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml new file mode 100644 index 0000000..00f6d20 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/mwc16-gen.xml @@ -0,0 +1,84 @@ + + + + + + 11af9f00-baf8-11e5-99ee-001b21b98a9d + mwc16-gen + mwc16-gen + mwc16 generator/sinc for testing a corporate network + + 11be9258-baf8-11e5-99ee-001b21b98a9d + connection 4 + connection 4 + ELAN + + 1 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + eth0 + + + 2 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + eth0 + + + mwc16mgmt + VLAN + + + + 11beec62-baf8-11e5-99ee-001b21b98a9d + connection 3 + connection 3 + ELAN + + 2 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + xe0 + + + mwc16data2 + VLAN + + + + 11bf0634-baf8-11e5-99ee-001b21b98a9d + connection 2 + connection 2 + ELAN + + 1 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + xe0 + + + mwc16data1 + VLAN + + + + 1 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + + + 2 + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml new file mode 100644 index 0000000..d02d58a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_descriptors/rift_descriptors/tidgen4pLarge.xml @@ -0,0 +1,139 @@ + + + + + + 1194cfd6-baf8-11e5-99ee-001b21b98a9d + tidgen4pLarge + tidgen 4x10Gbps 28GB 11cores + + 11aaab30-baf8-11e5-99ee-001b21b98a9d + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 11aaab30-baf8-11e5-99ee-001b21b98a9d + tidgen4pLarge-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/tid/tidgenLarge.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt new file mode 100644 index 0000000..e89b164 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/checksums.txt @@ -0,0 +1,18 @@ +3767a8dfb15ce1f3ee9633f0e8c0a36f88adc0987b0f87dc8b04263770a89eed gen_pkgs.sh +566bc06c6ec1846d1bc003e59cd4a5e57e2c3d984c58987e5f78a710cb379616 mwc16-gen_test.py +c8e178ab93b4661721b441a3ae41e54f06ca34e498d8accf4acda80d69f97b73 openmano_scenarios/mwc16-gen.yaml +4ca3804ef29123f905702d989c671eaf619540a9534197e6196c07789b3e0718 openmano_scenarios/IMS-allin1-corpA.yaml +dd17551cd01c683014908724796df89af99dab25ef9c2930c1b24625ed78b4d0 openmano_scenarios/mwc16-pe.yaml 
+c20765d8cefb94d550267532677fd9e6aab64078d8d9c154ee0dcba1e2dcf175 openmano_scenarios/IMS-allin1-corpB.yaml +65d77b657ec52ed9e435fc87d12c2751526a37a6393fbe8a015f2fa0b1af310c openmano_vnfs/mwc16-gen2.yaml +ce37404f05e46ac8e24daf325f621402adef1322cfc287c1009f94fb86e1d676 openmano_vnfs/6WindTR1.1.2.yaml +01d994ed8d36ab844098f9dc3597a124bacc646ef596ed9c83faa4757eab30b9 openmano_vnfs/mwc16-gen1.yaml +55a6aae2549fffbe6ddf0cc187b4f38be14ed14f4e06be2fd63a9697124a779d openmano_vnfs/IMS-ALLin1.yaml +b24bfc8e468ca7b0665de98788b751c59416a5608de87ad28bf9f9b3467bfbdd rift_scenarios/IMS-corpB.xml +c065322e40cf7e4413e0ecebd70eaf2512ac80dac0bf31d7e986706801970d7b rift_scenarios/mwc16-gen.xml +a5c57ef25bb366aad3f548217d4b1e2d4bc60591168cf6173ee1853544c05651 rift_scenarios/IMS-corpA.xml +a3565ca6040654b72fb91acf0281f92dfda704c6dad12042d1f7de09e62ee8ed rift_scenarios/mwc16-pe.xml +5f03711d62432fcfe35038e2ed4f4adcacf5ab7b06f13969fac5bc9928cdb2ba rift_vnfs/IMS-ALLIN1.xml +9d9e1dec89b5cea0cd3a4cf69bd606a7f25f4607086f43fe2b3b1b16e7cdeba7 rift_vnfs/mwc16gen2.xml +f8bf47bc904f0b71dc766e27093ca22ddd2d36d28a0d22c48d210c5ddc9119fd rift_vnfs/6WindTR1.1.2.xml +6af440ccd412e95b6e7dd1638e30acffe0143a565fb7f208b052b74788b5dc64 rift_vnfs/mwc16gen1.xml diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh new file mode 100755 index 0000000..762314a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/gen_pkgs.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
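
checksums.txt above pins a SHA-256 digest to every descriptor fixture, so regenerated files can be verified mechanically. The diff ships no verifier; a few lines of Python along these lines (assumed usage, run from the osm_mwc_desctriptors directory) would do it:

    import hashlib

    def verify_checksums(checksum_file):
        # Yield (path, ok) for every "<sha256>  <path>" line.
        with open(checksum_file) as hdl:
            for line in hdl:
                if not line.strip():
                    continue
                digest, path = line.split(None, 1)
                path = path.strip()
                with open(path, "rb") as f:
                    actual = hashlib.sha256(f.read()).hexdigest()
                yield path, actual == digest

    # for path, ok in verify_checksums("checksums.txt"):
    #     print("OK " if ok else "BAD", path)
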
+# + +tmp_dir=$(mktemp -d) +echo "Generating packages in temporary directory: ${tmp_dir}" + +#6WindTR1.1.2 VNF +mkdir -p ${tmp_dir}/6wind_vnf/vnfd +cp -f rift_vnfs/6WindTR1.1.2.xml ${tmp_dir}/6wind_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} 6wind_vnf + + +# mwc16-pe.yaml +mkdir -p ${tmp_dir}/mwc16_pe_ns/nsd +cp -f rift_scenarios/mwc16-pe.xml ${tmp_dir}/mwc16_pe_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_pe_ns + +# mwc16-pe-onevnf.yaml +mkdir -p ${tmp_dir}/mwc16_pe_onevnf_ns/nsd +cp -f rift_scenarios/mwc16-pe-onevnf.xml ${tmp_dir}/mwc16_pe_onevnf_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_pe_onevnf_ns + + +# mwc16-gen1.yaml +mkdir -p ${tmp_dir}/mwc16_gen1_vnf/vnfd +cp -f rift_vnfs/mwc16gen1.xml ${tmp_dir}/mwc16_gen1_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen1_vnf + +# mwc16-gen2.yaml +mkdir -p ${tmp_dir}/mwc16_gen2_vnf/vnfd +cp -f rift_vnfs/mwc16gen2.xml ${tmp_dir}/mwc16_gen2_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen2_vnf + +# mwc16-gen.yaml +mkdir -p ${tmp_dir}/mwc16_gen_ns/nsd +cp -f rift_scenarios/mwc16-gen.xml ${tmp_dir}/mwc16_gen_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen_ns + + +# IMS-ALLin1.yaml +mkdir -p ${tmp_dir}/ims_allin1_vnf/vnfd +cp -f rift_vnfs/IMS-ALLIN1.xml ${tmp_dir}/ims_allin1_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_vnf + +# IMS-allin1-corpa.yaml +mkdir -p ${tmp_dir}/ims_allin1_corpa/nsd +cp -f rift_scenarios/IMS-corpA.xml ${tmp_dir}/ims_allin1_corpa/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_corpa + +# IMS-allin1-corpb.yaml +mkdir -p ${tmp_dir}/ims_allin1_corpb/nsd +cp -f rift_scenarios/IMS-corpB.xml ${tmp_dir}/ims_allin1_corpb/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_corpb \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py new file mode 100755 index 0000000..02149ad --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/mwc16-gen_test.py @@ -0,0 +1,258 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
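
gen_pkgs.sh above repeats the same three steps for every package: create a package/vnfd or package/nsd staging directory, copy in one descriptor XML, and invoke generate_descriptor_pkg.sh. The repetition is easy to drive from a table; in this sketch the (xml, package, kind) triples mirror a few entries of the script, the generate_descriptor_pkg.sh calling convention is taken from it, and rift_root stands in for $RIFT_ROOT:

    import os
    import shutil
    import subprocess
    import tempfile

    # (source descriptor, package name, descriptor kind), as in gen_pkgs.sh
    PACKAGES = [
        ("rift_vnfs/6WindTR1.1.2.xml", "6wind_vnf", "vnfd"),
        ("rift_scenarios/mwc16-pe.xml", "mwc16_pe_ns", "nsd"),
        ("rift_scenarios/mwc16-gen.xml", "mwc16_gen_ns", "nsd"),
    ]

    def gen_pkgs(rift_root):
        tmp_dir = tempfile.mkdtemp()
        for xml, pkg, kind in PACKAGES:
            stage = os.path.join(tmp_dir, pkg, kind)
            os.makedirs(stage)
            shutil.copy(xml, stage)
            subprocess.check_call(
                [os.path.join(rift_root, "bin/generate_descriptor_pkg.sh"),
                 tmp_dir, pkg])
        return tmp_dir
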
+ + +import argparse +import dictdiffer +import logging +import os +import sys +import unittest +import xmlrunner +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client + +logger = logging.getLogger() + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) + +def delete_list_dict_keys(source_list, lst_keys): + for l in source_list: + if isinstance(l, dict): + delete_keys_from_dict(l, lst_keys) + elif isinstance(l, list): + delete_list_dict_keys(l, lst_keys) + +def delete_keys_from_dict(source_dict, lst_keys): + for k in lst_keys: + try: + del source_dict[k] + except KeyError: + pass + for v in source_dict.values(): + if isinstance(v, dict): + delete_keys_from_dict(v, lst_keys) + if isinstance(v, list): + delete_list_dict_keys(v, lst_keys) + + +class Rift2OpenmanoTest(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.maxDiff = None + + def load_openmano_vnf(self, openmano_vnf_path): + with open(openmano_vnf_path, 'rb') as hdl: + openmano_vnf = yaml.load(hdl) + + return openmano_vnf + + def load_openmano_ns(self, openmano_ns_path): + with open(openmano_ns_path, 'rb') as hdl: + openmano_ns = yaml.load(hdl) + + return openmano_ns + + def rift_vnf(self, rift_vnf_path): + with open(rift_vnf_path, 'r') as xml_hdl: + rift_vnf = rift2openmano.RiftVNFD.from_xml_file_hdl(xml_hdl) + return rift_vnf + + def rift2openmano_vnf(self, rift_vnf_path): + rift_vnf = self.rift_vnf(rift_vnf_path) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(rift_vnf) + + logger.debug( + "Converted vnf: %s", + yaml.safe_dump(openmano_vnfd, indent=4, default_flow_style=False)) + + return openmano_vnfd + + def rift2openmano_ns(self, rift_ns_path, rift_vnf_paths): + rift_vnf_hdls = [open(path, 'r') for path in rift_vnf_paths] + vnf_dict = rift2openmano.create_vnfd_from_xml_files(rift_vnf_hdls) + + with open(rift_ns_path, 'r') as xml_hdl: + rift_ns = rift2openmano.RiftNSD.from_xml_file_hdl(xml_hdl) + + openmano_nsd = rift2openmano.rift2openmano_nsd(rift_ns, vnf_dict) + logger.debug( + "Converted ns: %s", + yaml.safe_dump(openmano_nsd, indent=4, default_flow_style=False)) + + return openmano_nsd + + def generate_vnf_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(source_dict, ["description"]) + delete_keys_from_dict(dest_dict, ["description", "image metadata", "class"]) + + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + def generate_ns_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(dest_dict, ["graph"]) + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + +class Mwc16GenTest(Rift2OpenmanoTest): + OPENMANO_6WIND_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/6WindTR1.1.2.yaml" + ) + RIFT_6WIND_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/6WindTR1.1.2.xml" + ) + + OPENMANO_IMS_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/IMS-ALLin1.yaml" + ) + RIFT_IMS_VNF_PATH = os.path.join(THIS_DIR, + "rift_vnfs/IMS-ALLIN1.xml" + ) + + OPENMANO_GEN1_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/mwc16-gen1.yaml" + ) + RIFT_GEN1_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/mwc16gen1.xml" + ) + + OPENMANO_GEN2_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/mwc16-gen2.yaml" + ) + RIFT_GEN2_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/mwc16gen2.xml" + ) + + OPENMANO_MWC16_GEN_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/mwc16-gen.yaml" + ) + RIFT_MWC16_GEN_NS_PATH = os.path.join( + THIS_DIR, 
"rift_scenarios/mwc16-gen.xml" + ) + + OPENMANO_MWC16_PE_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/mwc16-pe.yaml" + ) + RIFT_MWC16_PE_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/mwc16-pe.xml" + ) + + OPENMANO_IMS_CORPA_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/IMS-allin1-corpA.yaml" + ) + RIFT_IMS_CORPA_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/IMS-corpA.xml" + ) + + OPENMANO_IMS_CORPB_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/IMS-allin1-corpB.yaml" + ) + RIFT_IMS_CORPB_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/IMS-corpB.xml" + ) + + def test_6wind_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_6WIND_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_6WIND_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_ims_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_IMS_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_IMS_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_gen1_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_GEN1_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_GEN1_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_gen2_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_GEN2_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_GEN2_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_ims_corpa_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_IMS_CORPA_NS_PATH, + [Mwc16GenTest.RIFT_IMS_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_IMS_CORPA_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_ims_corpb_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_IMS_CORPB_NS_PATH, + [Mwc16GenTest.RIFT_IMS_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_IMS_CORPB_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_mwc16_gen_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_MWC16_GEN_NS_PATH, + [Mwc16GenTest.RIFT_GEN1_VNF_PATH, Mwc16GenTest.RIFT_GEN2_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_GEN_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_mwc16_pe_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_MWC16_PE_NS_PATH, + [Mwc16GenTest.RIFT_6WIND_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_PE_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + +def main(): + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-n', '--no-runner', action='store_true') + args, unittest_args = parser.parse_known_args() + if args.no_runner: + runner = None + + logger.setLevel(logging.DEBUG if args.verbose else logging.WARN) + + unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args) + +if __name__ == '__main__': + main() \ No newline at end 
of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml new file mode 100644 index 0000000..2152313 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpA.yaml @@ -0,0 +1,33 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: IMS-corpA +description: All in one Clearwater IMS for corporation A in MWC16 +topology: + nodes: + IMS-ALLIN1__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1 # VNF name as introduced in OPENMANO DB + net-corpA: + type: external_network # Datacenter net + model: net-corpA + connections: + conn1: # provide a name for this net or connection + type: link + nodes: + - net-corpA: "0" # Datacenter net + - IMS-ALLIN1__1: eth0 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml new file mode 100644 index 0000000..f6d5f7f --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/IMS-allin1-corpB.yaml @@ -0,0 +1,33 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
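+# corpB counterpart of the corpA scenario above: the same single
+# IMS-ALLIN1 VNF, attached to the net-corpB datacenter network instead.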
+# +--- +name: IMS-corpB +description: All in one Clearwater IMS for corporation B in MWC16 +topology: + nodes: + IMS-ALLIN1__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1 # VNF name as introduced in OPENMANO DB + net-corpB: + type: external_network # Datacenter net + model: net-corpB + connections: + conn1: # provide a name for this net or connection + type: link + nodes: + - net-corpB: "0" # Datacenter net + - IMS-ALLIN1__1: eth0 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml new file mode 100644 index 0000000..11755a7 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-gen.yaml @@ -0,0 +1,61 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: "mwc16-gen" +description: "mwc16-gen" +topology: + nodes: + mwc16gen1__1: + type: VNF + VNF model: mwc16gen1 + mwc16gen2__2: + type: VNF + VNF model: mwc16gen2 + "direct_vlan146": + type: external_network + model: "direct_vlan146" + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + mgmt TEF: + type: link + nodes: + - "direct_vlan146": "0" + - mwc16gen1__1: eth0 + - mwc16gen2__2: eth0 + mwc16gen1__1-PE1: + type: link + nodes: + - mwc16data1: "0" + - mwc16gen1__1: xe0 + mwc16gen2__2-PE2: + type: link + nodes: + - mwc16data2: "0" + - mwc16gen2__2: xe0 + management: + type: link + nodes: + - mgmt: "0" + - mwc16gen1__1: eth1 + - mwc16gen2__2: eth1 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml new file mode 100644 index 0000000..ae7a6a1 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_scenarios/mwc16-pe.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
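+# Scenario with three 6WindTR1.1.2 VNFs: entry points on mwc16data1 and
+# mwc16data2 (xe2), an interDC attachment toward OpenStack (xe3), shared
+# mgmt on each eth0, and xe0/xe1 links meshing the three routers together.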
+# +name: "mwc16-pe" +description: "mwc16-pe" +topology: + nodes: + 6WindTR1.1.2__1: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__2: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__3: + type: VNF + VNF model: 6WindTR1.1.2 + interDC: + type: external_network + model: interDC + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + 6WindTR1.1.2__1 enty point: + type: link + nodes: + - mwc16data1: "0" + - 6WindTR1.1.2__1: xe2 + 6WindTR1.1.2__3 to OpenStack: + type: link + nodes: + - interDC: "0" + - 6WindTR1.1.2__3: xe3 + 6WindTR1.1.2__2 entry point: + type: link + nodes: + - mwc16data2: "0" + - 6WindTR1.1.2__2: xe2 + management: + type: link + nodes: + - mgmt: "0" + - 6WindTR1.1.2__1: eth0 + - 6WindTR1.1.2__2: eth0 + - 6WindTR1.1.2__3: eth0 + 6WindTR1.1.2__2-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__2: xe1 + - 6WindTR1.1.2__3: xe1 + 6WindTR1.1.2__1-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__1: xe1 + - 6WindTR1.1.2__3: xe0 + 6WindTR1.1.2__1-6WindTR1.1.2__2: + type: link + nodes: + - 6WindTR1.1.2__1: xe0 + - 6WindTR1.1.2__2: xe0 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml new file mode 100644 index 0000000..a67797d --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/6WindTR1.1.2.yaml @@ -0,0 +1,81 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
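+# Single-VM VNF: one management port (eth0, bridged at 1 Gbps) plus four
+# dedicated (PCI-passthrough) 10 Gbps data ports (xe0-xe3), pinned as six
+# paired hyper-threads with 8 GB of memory on a single NUMA node.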
+# +--- +vnf: + name: "6WindTR1.1.2" + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]] + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + name: "VM" + external-connections: + - local_iface_name: eth0 + VNFC: VM + type: mgmt + name: eth0 + description: management + - local_iface_name: xe0 + VNFC: VM + type: data + name: xe0 + description: Data plane + - local_iface_name: xe1 + VNFC: VM + type: data + name: xe1 + description: Data plane + - local_iface_name: xe2 + VNFC: VM + type: data + name: xe2 + description: Data plane + - local_iface_name: xe3 + VNFC: VM + type: data + name: xe3 + description: Data plane + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml new file mode 100644 index 0000000..6a03ba9 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/IMS-ALLin1.yaml @@ -0,0 +1,39 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: IMS-ALLIN1 + description: IMS-ALLIN1 + external-connections: + - name: eth0 + type: mgmt + VNFC: IMS-ALLIN1-VM + local_iface_name: eth0 + description: General purpose interface + VNFC: + - name: IMS-ALLIN1-VM + description: IMS-ALLIN1-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/datatronics/allin1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: [] + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml new file mode 100644 index 0000000..f874ff8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen1.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: mwc16gen1 + description: tidgen 4x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen1-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen1-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: mwc16gen1-VM # name of Virtual Machine + description: tidgen with 4x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen1.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml new file mode 100644 index 0000000..db1685a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/openmano_vnfs/mwc16-gen2.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
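+# Identical to mwc16-gen1.yaml above except for the VNF/VM names and the
+# mwc16-gen2.qcow2 image path.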
+# +--- +vnf: + name: mwc16gen2 + description: tidgen 4x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen2-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen2-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: mwc16gen2-VM # name of Virtual Machine + description: tidgen with 4x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml new file mode 100644 index 0000000..89a1c4f --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpA.xml @@ -0,0 +1,45 @@ + + + + + + 965dc8ea-c475-11e5-8040-fa163eb18cb8 + IMS-corpA + IMS-corpA + All in one Clearwater IMS for corporation A in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + conn1 + conn1 + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corpA + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml new file mode 100644 index 0000000..c2dadf7 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/IMS-corpB.xml @@ -0,0 +1,45 @@ + + + + + + 123dc8ea-c475-11e5-8040-fa163eb18123 + IMS-corpB + IMS-corpB + All in one Clearwater IMS for corporation B in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + conn1 + conn1 + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corpB + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + + + \ No newline at end of file diff --git 
a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml new file mode 100644 index 0000000..911a971 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-gen.xml @@ -0,0 +1,104 @@ + + + + + + 091e3932-c46c-11e5-8576-fa163eb18cb8 + mwc16-gen + mwc16-gen + mwc16-gen + + 094740d4-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1__1-PE1 + mwc16gen1__1-PE1 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + xe0 + + + mwc16data1 + VLAN + + + + 0947754a-c46c-11e5-8576-fa163eb18cb8 + mwc16gen2__2-PE2 + mwc16gen2__2-PE2 + ELAN + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + xe0 + + + mwc16data2 + VLAN + + + + 0947888c-c46c-11e5-8576-fa163eb18cb8 + mgmt TEF + mgmt TEF + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth0 + + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + eth0 + + + direct_vlan146 + VLAN + + + + 0947bb90-c46c-11e5-8576-fa163eb18cb8 + management + management + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth1 + + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + eth1 + + + mgmt + VLAN + + + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + + + 2 + eecfd632-bef1-11e5-b5b8-0800273ab84b + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml new file mode 100644 index 0000000..a00d4ff --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe-onevnf.xml @@ -0,0 +1,60 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe-onevnf + mwc16-pe-onevnf + mwc16-pe-onevnf + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml new file mode 100644 index 0000000..8e8b3e4 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_scenarios/mwc16-pe.xml @@ -0,0 +1,156 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe + mwc16-pe + mwc16-pe + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 76610cb8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__3 to OpenStack + 6WindTR1.1.2__3 to OpenStack + ELAN + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe3 + + + interDC + VLAN + + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 76611fc8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2 entry point + 6WindTR1.1.2__2 entry point + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data2 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 3 + 
b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + 7660b376-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + + 76604f80-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + 766091de-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml new file mode 100644 index 0000000..fcb6eee --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/6WindTR1.1.2.xml @@ -0,0 +1,151 @@ + + + + + + b7a3d170-c448-11e5-8795-fa163eb18cb8 + 6WindTR1.1.2 + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + + + eth0 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + VM + 0000:00:0a.0 + + 12 + 8192 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 6 + + 0 + 1 + + + 2 + 3 + + + 4 + 5 + + + 6 + 7 + + + 8 + 9 + + + 10 + 11 + + + + + + /mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2 + + eth0 + eth0 + + OM-MGMT + 0000:00:03.0 + 1000000000 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:05.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:06.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:07.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:08.0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml new file mode 100644 index 0000000..0a66d67 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/IMS-ALLIN1.xml @@ -0,0 +1,68 @@ + + + + + + 47914a30-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1 + IMS-ALLIN1 + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1-VM + IMS-ALLIN1-VM + 0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/datatronics/allin1.qcow2 + + eth0 + eth0 + + OM-MGMT + 0000:00:0a.0 + 0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml new file mode 100644 index 0000000..15c3ad2 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen1.xml @@ -0,0 +1,141 @@ + + + + + + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1 + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1-VM + tidgen 
with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen1.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml new file mode 100644 index 0000000..c30312b --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_desctriptors/rift_vnfs/mwc16gen2.xml @@ -0,0 +1,141 @@ + + + + + + eecfd632-bef1-11e5-b5b8-0800273ab84b + mwc16gen2 + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen2-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/20160212_openmano_RO_descriptors.zip b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/20160212_openmano_RO_descriptors.zip new file mode 100644 index 0000000..3f9f1b1 Binary files /dev/null and b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/20160212_openmano_RO_descriptors.zip differ diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/gen_pkgs.sh b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/gen_pkgs.sh new file mode 100755 index 0000000..eebcb00 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/gen_pkgs.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
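+# Note: the cp commands below use paths relative to this directory, and the
+# packaging helper is resolved as ${RIFT_ROOT}/bin/generate_descriptor_pkg.sh,
+# so run this script from this directory with RIFT_ROOT set in the
+# environment.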
+# + +tmp_dir=$(mktemp -d) +echo "Generating packages in temporary directory: ${tmp_dir}" + +#6WindTR1.1.2 VNF +mkdir -p ${tmp_dir}/6wind_vnf/vnfd +cp -f rift_vnfs/6WindTR1.1.2.xml ${tmp_dir}/6wind_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} 6wind_vnf + + +# mwc16-pe.yaml +mkdir -p ${tmp_dir}/mwc16_pe_ns/nsd +cp -f rift_scenarios/mwc16-pe.xml ${tmp_dir}/mwc16_pe_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_pe_ns + +# tidgen_mwc16_vnf.yaml +mkdir -p ${tmp_dir}/tidgen_mwc16_vnf/vnfd +cp -f rift_vnfs/mwc16gen1.xml ${tmp_dir}/tidgen_mwc16_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} tidgen_mwc16_vnf + +# mwc16-gen.yaml +mkdir -p ${tmp_dir}/mwc16_gen_ns/nsd +cp -f rift_scenarios/mwc16-gen.xml ${tmp_dir}/mwc16_gen_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} mwc16_gen_ns + + +# IMS-ALLin1_2p.yaml +mkdir -p ${tmp_dir}/ims_allin1_2p_vnf/vnfd +cp -f rift_vnfs/IMS-ALLIN1.xml ${tmp_dir}/ims_allin1_2p_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_2p_vnf + +# IMS-allin1-corpa.yaml +mkdir -p ${tmp_dir}/ims_allin1_corpa/nsd +cp -f rift_scenarios/IMS-corpA.xml ${tmp_dir}/ims_allin1_corpa/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} ims_allin1_corpa + + +# gw_corpA_PE1.yaml +mkdir -p ${tmp_dir}/gw_corpa_pe1_vnf/vnfd +cp -f rift_vnfs/gw-corpa-pe1.xml ${tmp_dir}/gw_corpa_pe1_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} gw_corpa_pe1_vnf + +# gw_corpA_PE2.yaml +mkdir -p ${tmp_dir}/gw_corpa_pe2_vnf/vnfd +cp -f rift_vnfs/gw-corpa-pe2.xml ${tmp_dir}/gw_corpa_pe2_vnf/vnfd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} gw_corpa_pe2_vnf + +# gw_corpa_ns.yaml +mkdir -p ${tmp_dir}/gw_corpa_ns/nsd +cp -f rift_scenarios/gwcorpA.xml ${tmp_dir}/gw_corpa_ns/nsd +${RIFT_ROOT}/bin/generate_descriptor_pkg.sh ${tmp_dir} gw_corpa_ns \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/mwc16-gen_test.py b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/mwc16-gen_test.py new file mode 100755 index 0000000..d6ec996 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/mwc16-gen_test.py @@ -0,0 +1,314 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
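+# The delete_*_keys helpers below scrub fields recursively (in place) before
+# diffing; for example, delete_keys_from_dict({"b": {"description": "x"}},
+# ["description"]) leaves {"b": {}}.
+#
+# One way to drive this suite (illustrative; the results directory is only an
+# example): the XML runner writes results under the directory named by
+# RIFT_MODULE_TEST, e.g.
+#     RIFT_MODULE_TEST=/tmp/results ./mwc16-gen_test.py -v
+# or pass --no-runner for plain unittest console output.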
+ + +import argparse +import dictdiffer +import logging +import os +import sys +import unittest +import xmlrunner +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client + +logger = logging.getLogger() + +THIS_DIR = os.path.dirname(os.path.realpath(__file__)) + +def delete_list_dict_keys(source_list, lst_keys): + for l in source_list: + if isinstance(l, dict): + delete_keys_from_dict(l, lst_keys) + elif isinstance(l, list): + delete_list_dict_keys(l, lst_keys) + +def delete_keys_from_dict(source_dict, lst_keys): + for k in lst_keys: + try: + del source_dict[k] + except KeyError: + pass + for v in source_dict.values(): + if isinstance(v, dict): + delete_keys_from_dict(v, lst_keys) + if isinstance(v, list): + delete_list_dict_keys(v, lst_keys) + + +class Rift2OpenmanoTest(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.maxDiff = None + + def load_openmano_vnf(self, openmano_vnf_path): + with open(openmano_vnf_path, 'rb') as hdl: + openmano_vnf = yaml.load(hdl) + + return openmano_vnf + + def load_openmano_ns(self, openmano_ns_path): + with open(openmano_ns_path, 'rb') as hdl: + openmano_ns = yaml.load(hdl) + + return openmano_ns + + def rift_vnf(self, rift_vnf_path): + with open(rift_vnf_path, 'r') as xml_hdl: + rift_vnf = rift2openmano.RiftVNFD.from_xml_file_hdl(xml_hdl) + return rift_vnf + + def rift2openmano_vnf(self, rift_vnf_path): + rift_vnf = self.rift_vnf(rift_vnf_path) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(rift_vnf) + + logger.debug( + "Converted vnf: %s", + yaml.safe_dump(openmano_vnfd, indent=4, default_flow_style=False)) + + return openmano_vnfd + + def rift2openmano_ns(self, rift_ns_path, rift_vnf_paths): + rift_vnf_hdls = [open(path, 'r') for path in rift_vnf_paths] + vnf_dict = rift2openmano.create_vnfd_from_xml_files(rift_vnf_hdls) + + with open(rift_ns_path, 'r') as xml_hdl: + rift_ns = rift2openmano.RiftNSD.from_xml_file_hdl(xml_hdl) + + openmano_nsd = rift2openmano.rift2openmano_nsd(rift_ns, vnf_dict) + logger.debug( + "Converted ns: %s", + yaml.safe_dump(openmano_nsd, indent=4, default_flow_style=False)) + + return openmano_nsd + + def generate_vnf_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(source_dict, ["description"]) + delete_keys_from_dict(dest_dict, ["description", "image metadata", "class"]) + + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + def generate_ns_dict_diffs(self, source_dict, dest_dict): + delete_keys_from_dict(dest_dict, ["graph"]) + diff = dictdiffer.diff(source_dict, dest_dict) + return list(diff) + + +class Mwc16GenTest(Rift2OpenmanoTest): + OPENMANO_6WIND_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/6WindTR1.1.2.yaml" + ) + RIFT_6WIND_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/6WindTR1.1.2.xml" + ) + + OPENMANO_CORPA_PE1_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/gw_corpA_PE1.yaml" + ) + RIFT_CORPA_PE1_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/gw-corpa-pe1.xml" + ) + + OPENMANO_CORPA_PE2_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/gw_corpA_PE2.yaml" + ) + RIFT_CORPA_PE2_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/gw-corpa-pe2.xml" + ) + + OPENMANO_IMS_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/IMS-ALLin1.yaml" + ) + RIFT_IMS_VNF_PATH = os.path.join(THIS_DIR, + "rift_vnfs/IMS-ALLIN1.xml" + ) + + OPENMANO_GEN1_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/mwc16-gen1.yaml" + ) + RIFT_GEN1_VNF_PATH = os.path.join( + 
THIS_DIR, "rift_vnfs/mwc16gen1.xml" + ) + + OPENMANO_GEN2_VNF_PATH = os.path.join( + THIS_DIR, "openmano_vnfs/mwc16-gen2.yaml" + ) + RIFT_GEN2_VNF_PATH = os.path.join( + THIS_DIR, "rift_vnfs/mwc16gen2.xml" + ) + + OPENMANO_MWC16_GEN_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/mwc16-gen.yaml" + ) + RIFT_MWC16_GEN_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/mwc16-gen.xml" + ) + + OPENMANO_MWC16_PE_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/mwc16-pe.yaml" + ) + RIFT_MWC16_PE_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/mwc16-pe.xml" + ) + + OPENMANO_IMS_CORPA_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/IMS-allin1-corpA.yaml" + ) + RIFT_IMS_CORPA_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/IMS-corpA.xml" + ) + + OPENMANO_GW_CORPA_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/gwcorpA.yaml" + ) + RIFT_GW_CORPA_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/gwcorpA.xml" + ) + + OPENMANO_IMS_CORPB_NS_PATH = os.path.join( + THIS_DIR, "openmano_scenarios/IMS-allin1-corpB.yaml" + ) + RIFT_IMS_CORPB_NS_PATH = os.path.join( + THIS_DIR, "rift_scenarios/IMS-corpB.xml" + ) + + def test_6wind_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_6WIND_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_6WIND_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_corpa_pe1_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_CORPA_PE1_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_CORPA_PE1_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_corpa_pe2_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_CORPA_PE2_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_CORPA_PE2_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_ims_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_IMS_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_IMS_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_gen1_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_GEN1_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_GEN1_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_gen2_vnf(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_GEN2_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_GEN2_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_corpa_pe2(self): + converted_vnf = self.rift2openmano_vnf(Mwc16GenTest.RIFT_6WIND_VNF_PATH) + dest_vnf = self.load_openmano_vnf(Mwc16GenTest.OPENMANO_6WIND_VNF_PATH) + + diffs = self.generate_vnf_dict_diffs(converted_vnf, dest_vnf) + self.assertEqual([], diffs) + + def test_ims_corpa_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_IMS_CORPA_NS_PATH, + [Mwc16GenTest.RIFT_IMS_VNF_PATH] + ) + + dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_IMS_CORPA_NS_PATH) + + diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns) + self.assertEqual([], diffs) + + def test_gw_corpa_ns(self): + converted_ns = self.rift2openmano_ns( + Mwc16GenTest.RIFT_GW_CORPA_NS_PATH, + [ + 
Mwc16GenTest.RIFT_CORPA_PE1_VNF_PATH,
+                Mwc16GenTest.RIFT_CORPA_PE2_VNF_PATH
+            ]
+        )
+
+        dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_GW_CORPA_NS_PATH)
+
+        diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns)
+        self.assertEqual([], diffs)
+
+    #def test_ims_corpb_ns(self):
+    #    converted_ns = self.rift2openmano_ns(
+    #        Mwc16GenTest.RIFT_IMS_CORPB_NS_PATH,
+    #        [Mwc16GenTest.RIFT_IMS_VNF_PATH]
+    #    )
+
+    #    dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_IMS_CORPB_NS_PATH)
+
+    #    diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns)
+    #    self.assertEqual([], diffs)
+
+    def test_mwc16_gen_ns(self):
+        converted_ns = self.rift2openmano_ns(
+            Mwc16GenTest.RIFT_MWC16_GEN_NS_PATH,
+            [Mwc16GenTest.RIFT_GEN1_VNF_PATH, Mwc16GenTest.RIFT_GEN2_VNF_PATH]
+        )
+
+        dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_GEN_NS_PATH)
+
+        diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns)
+        self.assertEqual([], diffs)
+
+    def test_mwc16_pe_ns(self):
+        converted_ns = self.rift2openmano_ns(
+            Mwc16GenTest.RIFT_MWC16_PE_NS_PATH,
+            [Mwc16GenTest.RIFT_6WIND_VNF_PATH]
+        )
+
+        dest_ns = self.load_openmano_ns(Mwc16GenTest.OPENMANO_MWC16_PE_NS_PATH)
+
+        diffs = self.generate_ns_dict_diffs(converted_ns, dest_ns)
+        self.assertEqual([], diffs)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+
+    # Construct the XML runner only when it will actually be used, so that
+    # RIFT_MODULE_TEST does not need to be set when running with --no-runner.
+    runner = None
+    if not args.no_runner:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    logger.setLevel(logging.DEBUG if args.verbose else logging.WARN)
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args)
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE1.yaml
new file mode 100644
index 0000000..9871af8
--- /dev/null
+++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE1.yaml
@@ -0,0 +1,55 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
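+# Single-VM gateway VNF: bridged eth0 toward Red10 (vlan146), management
+# on eth1, and one dedicated 10 Gbps data port (xe0) toward PE1.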
+# +--- +vnf: + name: gw_corpA_PE1 + description: gw_corpA_PE1 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE1-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: eth1 + type: mgmt + VNFC: gw_corpA_PE1-VM + local_iface_name: eth1 + description: Interface to management network + - name: xe0 + type: data + VNFC: gw_corpA_PE1-VM + local_iface_name: xe0 + description: Interface to PE1 + VNFC: + - name: gw_corpA_PE1-VM + description: gw_corpA_PE1-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE2.yaml new file mode 100644 index 0000000..48f630e --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gw_corpA_PE2.yaml @@ -0,0 +1,55 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: gw_corpA_PE2 + description: gw_corpA_PE2 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE2-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: eth1 + type: mgmt + VNFC: gw_corpA_PE2-VM + local_iface_name: eth1 + description: Interface to management network + - name: xe0 + type: data + VNFC: gw_corpA_PE2-VM + local_iface_name: xe0 + description: Interface to PE2 + VNFC: + - name: gw_corpA_PE2-VM + description: gw_corpA_PE2-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE2.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gwcorpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gwcorpA.yaml new file mode 100644 index 0000000..cfdf3d3 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/new_gwcorpa/gwcorpA.yaml @@ -0,0 +1,72 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: gw_corpA +topology: + nodes: + gw_corpA_PE1: + type: VNF + VNF model: gw_corpA_PE1 + gw_corpA_PE2: + type: VNF + VNF model: gw_corpA_PE2 + mwc1: + type: external_network + model: mwc1 + mwc2: + type: external_network + model: mwc2 + mgmt: + type: external_network + model: mgmt + mwc16data1_vlan: + type: external_network + model: "mwc16data1:101" + mwc16data2_vlan: + type: external_network + model: "mwc16data2:102" + connections: + connection 0: + type: link + nodes: + - mgmt: null + - gw_corpA_PE1: eth1 + connection 1: + type: link + nodes: + - mgmt: null + - gw_corpA_PE2: eth1 + connection 2: + type: link + nodes: + - mwc1: null + - gw_corpA_PE1: eth0 + connection 3: + type: link + nodes: + - mwc2: null + - gw_corpA_PE2: eth0 + connection 4: + type: link + nodes: + - mwc16data1_vlan: null + - gw_corpA_PE1: xe0 + connection 5: + type: link + nodes: + - mwc16data2_vlan: null + - gw_corpA_PE2: xe0 +description: Gateways to access as corpA to PE1 and PE2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpA.yaml new file mode 100644 index 0000000..bbe11e1 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpA.yaml @@ -0,0 +1,41 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
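+# Two-port corpA scenario: the IMS-ALLIN1_2p VNF attaches eth0 to net-corp
+# (VLAN 108) for data and eth1 to net-mgmtOS for management.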
+# +--- +name: IMS-corpA +description: All in one Clearwater IMS for corporation A in MWC16 +topology: + nodes: + IMS-ALLIN1_2p__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1_2p # VNF name as introduced in OPENMANO DB + net-corp: + type: external_network # Datacenter net + model: net-corp:108 + net-mgmtOS: + type: external_network # Datacenter net + model: net-mgmtOS + connections: + data: # provide a name for this net or connection + type: link + nodes: + - net-corp: "0" # Datacenter net + - IMS-ALLIN1_2p__1: eth0 # Node and its interface + management: # provide a name for this net or connection + type: link + nodes: + - net-mgmtOS: "0" # Datacenter net + - IMS-ALLIN1_2p__1: eth1 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpB.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpB.yaml new file mode 100644 index 0000000..f6d5f7f --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/IMS-allin1-corpB.yaml @@ -0,0 +1,33 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: IMS-corpB +description: All in one Clearwater IMS for corporation B in MWC16 +topology: + nodes: + IMS-ALLIN1__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1 # VNF name as introduced in OPENMANO DB + net-corpB: + type: external_network # Datacenter net + model: net-corpB + connections: + conn1: # provide a name for this net or connection + type: link + nodes: + - net-corpB: "0" # Datacenter net + - IMS-ALLIN1__1: eth0 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/ORIG_IMS-allin1-corpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/ORIG_IMS-allin1-corpA.yaml new file mode 100644 index 0000000..2152313 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/ORIG_IMS-allin1-corpA.yaml @@ -0,0 +1,33 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
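+# Original single-port corpA scenario (one IMS-ALLIN1 VNF on net-corpA),
+# kept alongside the two-port IMS-allin1-corpA.yaml above.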
+# +--- +name: IMS-corpA +description: All in one Clearwater IMS for corporation A in MWC16 +topology: + nodes: + IMS-ALLIN1__1: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1 # VNF name as introduced in OPENMANO DB + net-corpA: + type: external_network # Datacenter net + model: net-corpA + connections: + conn1: # provide a name for this net or connection + type: link + nodes: + - net-corpA: "0" # Datacenter net + - IMS-ALLIN1__1: eth0 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/gwcorpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/gwcorpA.yaml new file mode 100644 index 0000000..be7c45b --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/gwcorpA.yaml @@ -0,0 +1,71 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: gw_corpA +topology: + nodes: + gw_corpA_PE1__1: + type: VNF + VNF model: gw_corpA_PE1 + gw_corpA_PE2__2: + type: VNF + VNF model: gw_corpA_PE2 + mwc1: + type: external_network + model: mwc1 + mwc2: + type: external_network + model: mwc2 + mgmt: + type: external_network + model: mgmt + mwc16data1: + type: external_network + model: "mwc16data1:101" + mwc16data2: + type: external_network + model: "mwc16data2:102" + connections: + connection 0: + type: link + nodes: + - mgmt: "0" + - gw_corpA_PE1__1: eth1 + connection 1: + type: link + nodes: + - mgmt: "0" + - gw_corpA_PE2__2: eth1 + connection 2: + type: link + nodes: + - mwc1: "0" + - gw_corpA_PE1__1: eth0 + connection 3: + type: link + nodes: + - mwc2: "0" + - gw_corpA_PE2__2: eth0 + connection 4: + type: link + nodes: + - mwc16data1: "0" + - gw_corpA_PE1__1: xe0 + connection 5: + type: link + nodes: + - mwc16data2: "0" + - gw_corpA_PE2__2: xe0 +description: Gateways to access as corpA to PE1 and PE2 diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-gen.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-gen.yaml new file mode 100644 index 0000000..65e0546 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-gen.yaml @@ -0,0 +1,56 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
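+# Traffic-generator scenario: a single mwc16gen VNF with bridged access
+# (mwc), a management attachment (mgmt), and two VLAN-tagged data networks
+# (mwc16data1:3000 on xe0, mwc16data2:3000 on xe2).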
+# +name: mwc16_traffic_generator +description: Traffic generator connected to the demo environment +topology: + nodes: + mwc16gen__1: + type: VNF + VNF model: mwc16gen + mwc: + type: external_network + model: mwc + mgmt: + type: external_network + model: mgmt + mwc16data1: + type: external_network + model: "mwc16data1:3000" + mwc16data2: + type: external_network + model: "mwc16data2:3000" + connections: + connection 0: + type: link + nodes: + - mwc: "0" + - mwc16gen__1: eth0 + connection 1: + type: link + nodes: + - mgmt: "0" + - mwc16gen__1: eth1 + connection 2: + type: link + nodes: + - mwc16data1: "0" + - mwc16gen__1: xe0 + connection 3: + type: link + nodes: + - mwc16data2: "0" + - mwc16gen__1: xe2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-pe.yaml new file mode 100644 index 0000000..ae7a6a1 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_scenarios/mwc16-pe.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: "mwc16-pe" +description: "mwc16-pe" +topology: + nodes: + 6WindTR1.1.2__1: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__2: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__3: + type: VNF + VNF model: 6WindTR1.1.2 + interDC: + type: external_network + model: interDC + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + 6WindTR1.1.2__1 enty point: + type: link + nodes: + - mwc16data1: "0" + - 6WindTR1.1.2__1: xe2 + 6WindTR1.1.2__3 to OpenStack: + type: link + nodes: + - interDC: "0" + - 6WindTR1.1.2__3: xe3 + 6WindTR1.1.2__2 entry point: + type: link + nodes: + - mwc16data2: "0" + - 6WindTR1.1.2__2: xe2 + management: + type: link + nodes: + - mgmt: "0" + - 6WindTR1.1.2__1: eth0 + - 6WindTR1.1.2__2: eth0 + - 6WindTR1.1.2__3: eth0 + 6WindTR1.1.2__2-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__2: xe1 + - 6WindTR1.1.2__3: xe1 + 6WindTR1.1.2__1-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__1: xe1 + - 6WindTR1.1.2__3: xe0 + 6WindTR1.1.2__1-6WindTR1.1.2__2: + type: link + nodes: + - 6WindTR1.1.2__1: xe0 + - 6WindTR1.1.2__2: xe0 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/6WindTR1.1.2.yaml new file mode 100644 index 0000000..a67797d --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/6WindTR1.1.2.yaml @@ -0,0 +1,81 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: "6WindTR1.1.2" + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]] + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + name: "VM" + external-connections: + - local_iface_name: eth0 + VNFC: VM + type: mgmt + name: eth0 + description: management + - local_iface_name: xe0 + VNFC: VM + type: data + name: xe0 + description: Data plane + - local_iface_name: xe1 + VNFC: VM + type: data + name: xe1 + description: Data plane + - local_iface_name: xe2 + VNFC: VM + type: data + name: xe2 + description: Data plane + - local_iface_name: xe3 + VNFC: VM + type: data + name: xe3 + description: Data plane + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/IMS-ALLin1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/IMS-ALLin1.yaml new file mode 100644 index 0000000..da5d92b --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/IMS-ALLin1.yaml @@ -0,0 +1,46 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
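
The 6WindTR1.1.2 VNFC above pins six paired threads (twelve hardware threads, enumerated in paired-threads-id) on a single NUMA node with 8 GB of memory and four dedicated, i.e. PCI-passthrough, 10G ports. A sketch that totals those requirements straight from the descriptor, assuming PyYAML:

import yaml  # PyYAML, assumed available

with open("6WindTR1.1.2.yaml") as f:      # illustrative path
    vnfc = yaml.safe_load(f)["vnf"]["VNFC"][0]

hw_threads = sum(2 * numa.get("paired-threads", 0) for numa in vnfc["numas"])
memory_gb  = sum(numa.get("memory", 0) for numa in vnfc["numas"])
passthrough = [i["name"] for numa in vnfc["numas"]
               for i in (numa.get("interfaces") or [])
               if i.get("dedicated") == "yes"]

print(hw_threads)    # 12, matching the six pairs in paired-threads-id
print(memory_gb)     # 8
print(passthrough)   # ['xe0', 'xe1', 'xe2', 'xe3']
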
+# +--- +vnf: + name: IMS-ALLIN1_2p + description: IMS-ALLIN1_2p + external-connections: + - name: eth0 + type: bridge + VNFC: IMS-ALLIN1_2p-VM + local_iface_name: eth0 + description: General purpose interface + - name: eth1 + type: mgmt + VNFC: IMS-ALLIN1_2p-VM + local_iface_name: eth1 + description: Management interface + VNFC: + - name: IMS-ALLIN1_2p-VM + description: IMS-ALLIN1_2p-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/allin1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: [] + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE1.yaml new file mode 100644 index 0000000..9871af8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE1.yaml @@ -0,0 +1,55 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: gw_corpA_PE1 + description: gw_corpA_PE1 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE1-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: eth1 + type: mgmt + VNFC: gw_corpA_PE1-VM + local_iface_name: eth1 + description: Interface to management network + - name: xe0 + type: data + VNFC: gw_corpA_PE1-VM + local_iface_name: xe0 + description: Interface to PE1 + VNFC: + - name: gw_corpA_PE1-VM + description: gw_corpA_PE1-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE2.yaml new file mode 100644 index 0000000..48f630e --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/gw_corpA_PE2.yaml @@ -0,0 +1,55 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
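
In these VNF descriptors every entry under external-connections must point at a declared VNFC and at an interface that VNFC actually defines: bridge-ifaces for bridge and mgmt ports, NUMA interfaces for data ports. A sketch of that cross-check, assuming PyYAML and an illustrative path; it prints nothing when the descriptor is consistent:

import yaml  # PyYAML, assumed available

def check_vnf(path):
    with open(path) as f:
        vnf = yaml.safe_load(f)["vnf"]
    vnfcs = {v["name"]: v for v in vnf["VNFC"]}
    for ext in vnf.get("external-connections", []):
        vnfc = vnfcs.get(ext["VNFC"])
        if vnfc is None:
            print("%s: %s references unknown VNFC %r" % (path, ext["name"], ext["VNFC"]))
            continue
        ifaces = {i["name"] for i in vnfc.get("bridge-ifaces", [])}
        for numa in vnfc.get("numas", []):
            ifaces |= {i["name"] for i in (numa.get("interfaces") or [])}
        if ext["local_iface_name"] not in ifaces:
            print("%s: port %s maps to missing local iface %r"
                  % (path, ext["name"], ext["local_iface_name"]))

check_vnf("gw_corpA_PE1.yaml")  # illustrative path
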
+# +--- +vnf: + name: gw_corpA_PE2 + description: gw_corpA_PE2 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE2-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: eth1 + type: mgmt + VNFC: gw_corpA_PE2-VM + local_iface_name: eth1 + description: Interface to management network + - name: xe0 + type: data + VNFC: gw_corpA_PE2-VM + local_iface_name: xe0 + description: Interface to PE2 + VNFC: + - name: gw_corpA_PE2-VM + description: gw_corpA_PE2-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE2.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen1.yaml new file mode 100644 index 0000000..d818d15 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen1.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
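
gw_corpA_PE2 above mirrors gw_corpA_PE1, differing only in names, image path, and the PE it fronts. A quick way to confirm that for any such descriptor pair is a unified diff (paths illustrative):

import difflib

with open("gw_corpA_PE1.yaml") as f:   # illustrative paths
    pe1 = f.read().splitlines()
with open("gw_corpA_PE2.yaml") as f:
    pe2 = f.read().splitlines()

print("\n".join(difflib.unified_diff(pe1, pe2, "gw_corpA_PE1", "gw_corpA_PE2", lineterm="")))
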
+# +--- +vnf: + name: mwc16gen + description: tidgen 2x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen1-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen1-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe0 + description: Data interface 0 + - name: xe1 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe1 + description: Data interface 1 + - name: xe2 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe2 + description: Data interface 2 + - name: xe3 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe3 + description: Data interface 3 + VNFC: # Virtual machine array + - name: mwc16gen1-VM # name of Virtual Machine + description: tidgen with 2x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/tidgen_mwc16.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen2.yaml new file mode 100644 index 0000000..db1685a --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/openmano_vnfs/mwc16-gen2.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
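
The tidgen descriptor above sizes one NUMA node at 11 paired threads and 28 GB; the matching RIFT VNFD later in this patch (rift_vnfs/mwc16gen1.xml) states the same footprint as 28672 MB. A one-line consistency check of the two figures:

yaml_memory_gb = 28        # from the "numas" block above
vnfd_memory_mb = 28672     # from rift_vnfs/mwc16gen1.xml below
assert yaml_memory_gb * 1024 == vnfd_memory_mb, "memory mismatch between YAML and VNFD XML"
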
+# +--- +vnf: + name: mwc16gen2 + description: tidgen 4x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen2-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen2-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: mwc16gen2-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: mwc16gen2-VM # name of Virtual Machine + description: tidgen with 4x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpA.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpA.xml new file mode 100644 index 0000000..c20ea64 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpA.xml @@ -0,0 +1,149 @@ + + + + 965dc8ea-c475-11e5-8040-fa163eb18cb8 + IMS-corpA + IMS-corpA + All in one Clearwater IMS for corporation A in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + data + data + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corp + 108 + VLAN + + + + 9670b946-c475-11e5-8040-fa163eb18cb9 + management + management + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth1 + + + net-mgmtOS + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + juju + + clearwater-aio-proxy + + + 1 + 0 + + + config + + home_domain + string + true + ims.com + + + password + string + cw-aio + true + + + + create-update-user + + number + string + true + + + password + string + true + + + + delete-user + + number + string + true + + + + 1 + config + + proxied_ip + <rw_mgmt_ip> + + + + + + Update Domain + + 1 + 37838e08-d04c-11e5-8e5b-001b21b98a9d + cwims_vnfd + + 1 + config + + + + + Add User + + 1 + 37838e08-d04c-11e5-8e5b-001b21b98a9d + cwims_vnfd + + 1 + create-update-user + + + + + Delete User + + 1 + 37838e08-d04c-11e5-8e5b-001b21b98a9d + cwims_vnfd + + 1 + delete-user + + + + + 
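
The IMS-corpA descriptor above seeds the clearwater-aio-proxy juju charm with proxied_ip set to the placeholder <rw_mgmt_ip>, which the orchestrator presumably fills in with the VNF's management address at deploy time. A toy version of that substitution step (the address used is made up):

def resolve_params(params, mgmt_ip):
    # params: a list of {"name": ..., "value": ...} dicts, as in the
    # descriptor's initial-config section.
    return [{"name": p["name"],
             "value": p["value"].replace("<rw_mgmt_ip>", mgmt_ip)}
            for p in params]

params = [{"name": "proxied_ip", "value": "<rw_mgmt_ip>"}]
print(resolve_params(params, "10.66.202.15"))  # illustrative address
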
+ \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml new file mode 100644 index 0000000..c2dadf7 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/IMS-corpB.xml @@ -0,0 +1,45 @@ + + + + + + 123dc8ea-c475-11e5-8040-fa163eb18123 + IMS-corpB + IMS-corpB + All in one Clearwater IMS for corporation B in MWC16 + + 9670b946-c475-11e5-8040-fa163eb18cb8 + conn1 + conn1 + ELAN + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + eth0 + + + net-corpB + VLAN + + + + 1 + 47914a30-c474-11e5-990a-fa163eb18cb8 + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml new file mode 100644 index 0000000..9a2dabd --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/add_corpA_input.yaml @@ -0,0 +1,53 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +config_agent: {host: 1.1.1.1, name: agent, port: 9090, secret: secret, user: user} +init_config: {} +rpc_ip: + name: Add Corporation + nsr_id_ref: 359d76ab-6618-4894-93f7-b37b2ecbc711 + parameter: + - {name: Corporation Name, value: CorpA} + - {name: Tunnel Key, value: '10'} + parameter_group: + - name: PE1 + parameter: + - {name: Vlan ID, value: 101} + - {name: Interface Name, value: eth3} + - {name: Corp. Network, value: 10.0.1.0/24} + - {name: Corp. Gateway, value: 10.0.1.1} + - {name: Local Network, value: 10.255.255.0/24} + - {name: Local Network Area, value: '0'} + - name: PE2 + parameter: + - {name: Vlan ID, value: 102} + - {name: Interface Name, value: eth3} + - {name: Corp. Network, value: 10.0.2.0/24} + - {name: Corp. Gateway, value: 10.0.2.1} + - {name: Local Network, value: 10.255.255.0/24} + - {name: Local Network Area, value: '0'} + - name: PE3 + parameter: + - {name: Vlan ID, value: 108} + - {name: Interface Name, value: eth4} + - {name: Corp. Network, value: 10.0.4.0/24} + - {name: Corp. 
Gateway, value: 10.0.4.1} + - {name: Local Network, value: 10.255.255.0/24} + - {name: Local Network Area, value: '0'} +unit_names: {159d76ab-6618-4894-93f7-b37b2ecbc711: pe1, 259d76ab-6618-4894-93f7-b37b2ecbc711: pe2, + 359d76ab-6618-4894-93f7-b37b2ecbc711: pe3} +vnfr_index_map: {159d76ab-6618-4894-93f7-b37b2ecbc711: 1, 259d76ab-6618-4894-93f7-b37b2ecbc711: 2, + 359d76ab-6618-4894-93f7-b37b2ecbc711: 3} + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml new file mode 100644 index 0000000..d447420 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/gwcorpA.xml @@ -0,0 +1,123 @@ + + + + 091e3932-c46c-11e5-8576-fa163eb18cb8 + gw_corpA + gw_corpA + Gateways to access as corpA to PE1 and PE2 + + 194740d4-c46c-11e5-8576-fa163eb18cb8 + connection 0 + connection 0 + ELAN + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + eth1 + + + mgmt + VLAN + + + + 0947754a-c46c-11e5-8576-fa163eb18cb8 + connection 1 + connection 1 + ELAN + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + eth1 + + + mgmt + VLAN + + + + 294740d4-c46c-11e5-8576-fa163eb18cb8 + connection 2 + connection 2 + ELAN + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + eth0 + + + mwc1 + VLAN + + + + 4947754a-c46c-11e5-8576-fa163eb18cb8 + connection 3 + connection 3 + ELAN + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + eth0 + + + mwc2 + VLAN + + + + 5947888c-c46c-11e5-8576-fa163eb18cb8 + connection 4 + connection 4 + ELAN + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + xe0 + + + mwc16data1 + VLAN + 101 + + + + 7947bb90-c46c-11e5-8576-fa163eb18cb8 + connection 5 + connection 5 + ELAN + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + xe0 + + + mwc16data2 + VLAN + 102 + + + + 1 + 11115d5e-c474-11e5-990a-fa163eb18cb8 + + + 2 + 22215d5e-c474-11e5-990a-fa163eb18cb8 + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml new file mode 100644 index 0000000..b5fc178 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-gen.xml @@ -0,0 +1,91 @@ + + + + + + 091e3932-c46c-11e5-8576-fa163eb18cb7 + mwc16_traffic_generator + mwc16_traffic_generator + Traffic generator connected to the demo environment + + 094740d4-c46c-11e5-8576-fa163eb18cb8 + connection 0 + connection 0 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth0 + + + mwc + VLAN + + + + 0947754a-c46c-11e5-8576-fa163eb18cb8 + connection 1 + connection 1 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + eth1 + + + mgmt + VLAN + + + + 0947888c-c46c-11e5-8576-fa163eb18cb8 + connection 2 + connection 2 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + xe0 + + + mwc16data1 + VLAN + 3000 + + + + 0947bb90-c46c-11e5-8576-fa163eb18cb8 + connection 3 + connection 3 + ELAN + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + xe2 + + + mwc16data2 + VLAN + 3000 + + + + 1 + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml new file mode 100644 index 0000000..a00d4ff --- /dev/null +++ 
b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe-onevnf.xml @@ -0,0 +1,60 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe-onevnf + mwc16-pe-onevnf + mwc16-pe-onevnf + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml new file mode 100644 index 0000000..0bc3f31 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/mwc16-pe.xml @@ -0,0 +1,566 @@ + + + + + + 764c375c-c44e-11e5-b325-fa163eb18cb8 + mwc16-pe + mwc16-pe + mwc16-pe + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + juju + + vpe-router + + + 0 + 0 + + + 1 + config + + vpe-router + <rw_mgmt_ip> + + + user + root + + + pass + 6windos + + + hostname + pe2 + + + + 2 + configure-interface + + iface-name + eth1 + + + cidr + 10.10.10.10/30 + + + + 3 + configure-interface + + iface-name + eth2 + + + cidr + 10.10.10.6/30 + + + + 4 + configure-interface + + iface-name + eth3 + + + + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + juju + + vpe-router + + + 0 + 0 + + + 1 + config + + vpe-router + <rw_mgmt_ip> + + + user + root + + + pass + 6windos + + + hostname + pe3 + + + + 2 + configure-interface + + iface-name + eth1 + + + cidr + 10.10.10.2/30 + + + + 3 + configure-interface + + iface-name + eth2 + + + cidr + 10.10.10.5/30 + + + + 4 + configure-interface + + iface-name + eth3 + + + + 5 + configure-interface + + iface-name + eth4 + + + + + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + + juju + + vpe-router + + + 0 + 0 + + + 1 + config + + vpe-router + <rw_mgmt_ip> + + + user + root + + + pass + 6windos + + + hostname + pe1 + + + + 2 + configure-interface + + iface-name + eth1 + + + cidr + 10.10.10.9/30 + + + + 3 + configure-interface + + iface-name + eth2 + + + cidr + 10.10.10.1/30 + + + + 4 + configure-interface + + iface-name + eth3 + + + + + + 76610cb8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__3 to OpenStack + 6WindTR1.1.2__3 to OpenStack + ELAN + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe3 + + + interDC + VLAN + + + + 7660f714-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1 enty point + 6WindTR1.1.2__1 enty point + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data1 + VLAN + + + + 76611fc8-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2 entry point + 6WindTR1.1.2__2 entry point + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe2 + + + mwc16data2 + VLAN + + + + 7660d040-c44e-11e5-b325-fa163eb18cb8 + management + management + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + eth0 + + + mgmt + VLAN + + + + 7660b376-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + 6WindTR1.1.2__2-6WindTR1.1.2__3 + ELAN + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + + 76604f80-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + 6WindTR1.1.2__1-6WindTR1.1.2__3 + ELAN + + 1 + 
b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe1 + + + 3 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + 766091de-c44e-11e5-b325-fa163eb18cb8 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + 6WindTR1.1.2__1-6WindTR1.1.2__2 + ELAN + + 1 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + 2 + b7a3d170-c448-11e5-8795-fa163eb18cb8 + xe0 + + + + Add SP Test Corporation + + Corporation Name + string + true + SP + + + Tunnel Key + integer + true + 10 + true + + + PE1 + + Vlan ID + integer + true + 3000 + true + + + Interface Name + string + true + eth3 + true + + + Corp. Network + string + true + 10.0.1.0/24 + true + + + Corp. Gateway + string + true + 10.0.1.1 + true + + false + + + PE2 + + Vlan ID + integer + true + 3000 + true + + + Interface Name + string + true + eth3 + true + + + Corp. Network + string + true + 10.0.2.0/24 + true + + + Corp. Gateway + string + true + 10.0.2.1 + true + + false + + + PE3 + + Vlan ID + integer + true + 3000 + true + + + Interface Name + string + true + eth3 + true + + + Corp. Network + string + true + 10.0.3.0/24 + true + + + Corp. Gateway + string + true + 10.0.3.1 + true + + false + + /home/rift/.install/usr/bin/add_corporation.py + + + Add Corporation + + Corporation Name + string + true + CorpA + + + Tunnel Key + integer + true + 1 + true + + + PE1 + + Vlan ID + integer + true + 101 + true + + + Interface Name + string + true + eth3 + + + Corp. Network + string + true + 10.0.1.0/24 + + + Corp. Gateway + string + true + 10.0.1.1 + + false + + + PE2 + + Vlan ID + integer + true + 102 + true + + + Interface Name + string + true + eth3 + + + Corp. Network + string + true + 10.0.2.0/24 + + + Corp. Gateway + string + true + 10.0.2.1 + + false + + + PE3 + + Vlan ID + integer + true + 108 + true + + + Interface Name + string + true + eth4 + + + Corp. Network + string + true + 10.0.4.0/24 + + + Corp. Gateway + string + true + 10.0.4.1 + + false + + /home/rift/.install/usr/bin/add_corporation.py + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py new file mode 100755 index 0000000..63426cd --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_scenarios/pe_config.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
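
The pe_config.py script introduced by the following hunk builds the mwc16-pe NSD programmatically through RIFT's generated YANG bindings (NsdYang, RwYang) and prints the result as XML and JSON. Where those bindings are not installed, the shape of one parameter group it emits can be mocked with plain dicts; this is a sketch of the payload only, not of the bindings' API:

def pe_parameter_group(name, vlan_id, iface, corp_net, corp_gw):
    # Mirrors the parameter_group entries built in pe_config.py below.
    return {
        "name": name,
        "mandatory": False,
        "parameter": [
            {"name": "Vlan ID", "data_type": "integer",
             "mandatory": True, "default_value": str(vlan_id)},
            {"name": "Interface Name", "data_type": "string",
             "mandatory": True, "default_value": iface},
            {"name": "Corp. Network", "data_type": "string",
             "mandatory": True, "default_value": corp_net},
            {"name": "Corp. Gateway", "data_type": "string",
             "mandatory": True, "default_value": corp_gw},
        ],
    }

print(pe_parameter_group("PE1", 101, "eth3", "10.0.1.0/24", "10.0.1.1"))
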
+ +import os +import json +from gi.repository import NsdYang, RwYang + +def add_pe_vnf(nsd, vnf_index, intf_ip_pairs): + const_vnfd = nsd.constituent_vnfd.add() + const_vnfd.vnfd_id_ref = "b7a3d170-c448-11e5-8795-fa163eb18cb8" + const_vnfd.member_vnf_index = vnf_index + + vnf_config = const_vnfd.vnf_configuration + vnf_config.input_params.config_priority = 0 + vnf_config.input_params.config_delay = 0 + + # Select "script" configuration + vnf_config.config_type = 'juju' + vnf_config.juju.charm = 'vpe-router' + + # Set the initital-config + init_config = NsdYang.InitialConfigPrimitive.from_dict({ + "seq": 1, + "name": "config", + "parameter": [ + {"name": "vpe-router", "value": ""}, + {"name": "user", "value": "root"}, + {"name": "pass", "value": "6windos"} + ] + }) + vnf_config.initial_config_primitive.append(init_config) + + for seq, (intf, cidr) in enumerate(intf_ip_pairs, start=2): + params = [{"name": "iface-name", "value": intf}] + if cidr is not None: + params.append( + {"name": "cidr", "value": cidr} + ) + + vnf_config.initial_config_primitive.add().from_dict({ + "seq": seq, + "name": "configure-interface", + "parameter": params + }) + + +nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() +add_pe_vnf(nsd, 1, + [ + ("eth1", "10.10.10.9/30"), + ("eth2", "10.10.10.1/30"), + ("eth3", None), + ] +) + +add_pe_vnf(nsd, 2, + [ + ("eth1", "10.10.10.10/30"), + ("eth2", "10.10.10.6/30"), + ("eth3", None), + ] +) + +add_pe_vnf(nsd, 3, + [ + ("eth1", "10.10.10.2/30"), + ("eth2", "10.10.10.5/30"), + ("eth3", None), + ("eth4", None), + ] +) + +ns_cfg_prim = nsd.config_primitive.add() +ns_cfg_prim.name = "Add SP Test Corporation" +ns_cfg_prim.user_defined_script = "/home/rift/.install/usr/bin/add_corporation.py" + +ns_cfg_prim.parameter.add().from_dict({ + "name": "Corporation Name", + "data_type": "string", + "mandatory": True, + }) + +ns_cfg_prim.parameter.add().from_dict({ + "name": 'Tunnel Key', + "data_type": "integer", + "mandatory": True, + "default_value": "10", + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE1", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "3000", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE2", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "3000", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.0/24", + }, + { + "name": 'Corp. 
Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE3", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "3000", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.3.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.3.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim = nsd.config_primitive.add() +ns_cfg_prim.name = "Add Corporation" +ns_cfg_prim.user_defined_script = "/home/rift/.install/usr/bin/add_corporation.py" + +ns_cfg_prim.parameter.add().from_dict({ + "name": "Corporation Name", + "data_type": "string", + "mandatory": True, + }) + +ns_cfg_prim.parameter.add().from_dict({ + "name": 'Tunnel Key', + "data_type": "integer", + "mandatory": True, + "default_value": "1", + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE1", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "101", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.1.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE2", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "102", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth3", + }, + { + "name": 'Corp. Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.2.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +ns_cfg_prim.parameter_group.add().from_dict({ + "name": "PE3", + "mandatory": False, + "parameter": [ + { + "name": 'Vlan ID', + "data_type": "integer", + "mandatory": True, + "default_value": "108", + }, + { + "name": 'Interface Name', + "data_type": "string", + "mandatory": True, + "default_value": "eth4", + }, + { + "name": 'Corp. 
Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.4.0/24", + }, + { + "name": 'Corp. Gateway', + "data_type": "string", + "mandatory": True, + "default_value": "10.0.4.1", + }, + { + "name": 'Local Network', + "data_type": "string", + "mandatory": True, + "default_value": "10.255.255.0/24", + }, + { + "name": 'Local Network Area', + "data_type": "string", + "mandatory": True, + "default_value": "0", + }, + ] + }) + +model = RwYang.Model.create_libncx() +model.load_module("nsd") +print(nsd.to_xml_v2(model, pretty_print=True)) + +print("\n\n") +print(json.dumps(nsd.as_dict(), indent=4, separators=(',', ': '))) \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml new file mode 100644 index 0000000..fcb6eee --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/6WindTR1.1.2.xml @@ -0,0 +1,151 @@ + + + + + + b7a3d170-c448-11e5-8795-fa163eb18cb8 + 6WindTR1.1.2 + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + + + eth0 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + b7bbc9b0-c448-11e5-8795-fa163eb18cb8 + VM + 0000:00:0a.0 + + 12 + 8192 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 6 + + 0 + 1 + + + 2 + 3 + + + 4 + 5 + + + 6 + 7 + + + 8 + 9 + + + 10 + 11 + + + + + + /mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2 + + eth0 + eth0 + + OM-MGMT + 0000:00:03.0 + 1000000000 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:05.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:06.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:07.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:08.0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml new file mode 100644 index 0000000..cb0d1ff --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/IMS-ALLIN1.xml @@ -0,0 +1,81 @@ + + + + + + 47914a30-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1_2p + IMS-ALLIN1_2p + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + IMS-ALLIN1_2p-VM + IMS-ALLIN1_2p-VM + 0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/allin1.qcow2 + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 0 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml new file mode 100644 index 0000000..5f69514 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe1.xml @@ -0,0 +1,94 @@ + + + + + + 11115d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE1 + gw_corpA_PE1 + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE1-VM + gw_corpA_PE1-VM + 
0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE1.qcow2 + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 0 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + 10000000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml new file mode 100644 index 0000000..2c00246 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/gw-corpa-pe2.xml @@ -0,0 +1,94 @@ + + + + + + 22215d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE2 + gw_corpA_PE2 + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + 47915d5e-c474-11e5-990a-fa163eb18cb8 + gw_corpA_PE2-VM + gw_corpA_PE2-VM + 0000:00:0a.0 + + 2 + 4096 + 10 + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 1 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE2.qcow2 + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 0 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 0 + + + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + 10000000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml new file mode 100644 index 0000000..0de39b4 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen1.xml @@ -0,0 +1,141 @@ + + + + + + 08d9ffe2-c46c-11e5-8576-fa163eb18cb8 + mwc16gen + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen1-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/tidgen_mwc16.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml new file mode 100644 index 0000000..c30312b --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/rift_vnfs/mwc16gen2.xml @@ -0,0 +1,141 @@ + + + + + + eecfd632-bef1-11e5-b5b8-0800273ab84b + mwc16gen2 + tidgen 4x10Gbps 28GB 11cores + + 09163412-c46c-11e5-8576-fa163eb18cb8 + + + eth0 + VPORT + + + eth1 + VPORT + + + xe0 + VPORT + + + xe1 + VPORT + + + xe2 + VPORT + + + xe3 + VPORT + + + 09163412-c46c-11e5-8576-fa163eb18cb8 + mwc16gen2-VM + tidgen with 4x10Gbps 28GB + 0000:00:0a.0 + + 28672 + + + REQUIRE_KVM + 10002|12001|2.6.32-358.el6.x86_64 + + + Intel(R) 
Xeon(R) CPU E5-4620 0 @ 2.20GHz + 64b + iommu + lps + tlbps + hwsv + dioc + ht + + + LARGE + DEDICATED + PREFER + + 1 + STRICT + + 0 + + 11 + + + + + /mnt/powervault/virtualization/vnfs/demos/mwc2016/mwc16-gen2.qcow2 + + xe0 + xe0 + + PCI-PASSTHROUGH + 0000:00:10.0 + + + + xe1 + xe1 + + PCI-PASSTHROUGH + 0000:00:11.0 + + + + xe2 + xe2 + + PCI-PASSTHROUGH + 0000:00:12.0 + + + + xe3 + xe3 + + PCI-PASSTHROUGH + 0000:00:13.0 + + + + eth0 + eth0 + + VIRTIO + 0000:00:0a.0 + 1000000 + + + + eth1 + eth1 + + OM-MGMT + 0000:00:0b.0 + 1000000 + + + + + + \ No newline at end of file diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic new file mode 100644 index 0000000..87cdfd5 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/IMS-allin1-corpA.yaml.generic @@ -0,0 +1,24 @@ +--- +name: IMS-corpA +description: All in one Clearwater IMS for corporation A in MWC16 +topology: + nodes: + ims-corpA: # vnf/net name in the scenario + type: VNF # VNF, network, external_network (if it is a datacenter network) + VNF model: IMS-ALLIN1_2p # VNF name as introduced in OPENMANO DB + net-corpA: + type: external_network # Datacenter net + model: net-corp:${VLAN_CORPA_PE3} + net-mgmtOS: + type: external_network # Datacenter net + model: net-mgmtOS + connections: + data: # provide a name for this net or connection + nodes: + - net-corpA: null # Datacenter net + - ims-corpA: eth0 # Node and its interface + management: # provide a name for this net or connection + nodes: + - net-mgmtOS: null # Datacenter net + - ims-corpA: eth1 # Node and its interface + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml new file mode 100644 index 0000000..4736acf --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml @@ -0,0 +1,59 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
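
The *.yaml.generic files above parameterise VLAN tags with ${VLAN_CORPA_PE3}-style variables, so rendering a concrete scenario is a plain shell-style substitution. A sketch using only the standard library (the path and the VLAN value, 108 as in add_corpA_input.yaml, are illustrative):

from string import Template

with open("IMS-allin1-corpA.yaml.generic") as f:   # illustrative path
    # substitute() raises KeyError if a template variable is missing;
    # VLAN_CORPA_PE3 is the single variable this template uses.
    rendered = Template(f.read()).substitute(VLAN_CORPA_PE3="108")

with open("IMS-allin1-corpA.yaml", "w") as f:
    f.write(rendered)
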
+# +name: gw_corpA +topology: + nodes: + gw_corpA_PE1: + type: VNF + VNF model: gw_corpA_PE1 + gw_corpA_PE2: + type: VNF + VNF model: gw_corpA_PE2 + mwc1: + type: external_network + model: mwc1 + mwc2: + type: external_network + model: mwc2 + mwc16data1_vlan: + type: external_network + model: "mwc16data1:101" + mwc16data2_vlan: + type: external_network + model: "mwc16data2:102" + connections: + connection 0: + type: link + nodes: + - mwc1: null + - gw_corpA_PE1: eth0 + connection 1: + type: link + nodes: + - mwc2: null + - gw_corpA_PE2: eth0 + connection 2: + type: link + nodes: + - mwc16data1_vlan: null + - gw_corpA_PE1: xe0 + connection 3: + type: link + nodes: + - mwc16data2_vlan: null + - gw_corpA_PE2: xe0 +description: Gateways to access as corpA to PE1 and PE2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic new file mode 100644 index 0000000..fb953fc --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/gwcorpA.yaml.generic @@ -0,0 +1,92 @@ +name: gw_corpA +topology: + nodes: + gw_corpA_PE1: + graph: + x: 370 + y: 149 + ifaces: + left: + - - eth0 + - v + right: + - - xe0 + - v + type: VNF + VNF model: gw_corpA_PE1 + gw_corpA_PE2: + graph: + x: 370 + y: 308 + ifaces: + left: + - - eth0 + - v + right: + - - xe0 + - v + type: VNF + VNF model: gw_corpA_PE2 + mwc1: + graph: + x: 81 + y: 149 + ifaces: + right: + - - "0" + - v + type: external_network + model: mwc1 + mwc2: + graph: + x: 81 + y: 308 + ifaces: + right: + - - "0" + - v + type: external_network + model: mwc2 + mwc16data1_vlan: + graph: + x: 690 + y: 161 + ifaces: + left: + - - "0" + - d + type: external_network + model: "mwc16data1:${VLAN_CORPA_PE1}" + mwc16data2_vlan: + graph: + x: 688 + y: 327 + ifaces: + left: + - - "0" + - d + type: external_network + model: "mwc16data2:${VLAN_CORPA_PE2}" + connections: + connection 0: + type: link + nodes: + - mwc1: null + - gw_corpA_PE1: eth0 + connection 1: + type: link + nodes: + - mwc2: null + - gw_corpA_PE2: eth0 + connection 2: + type: link + nodes: + - mwc16data1_vlan: null + - gw_corpA_PE1: xe0 + connection 3: + type: link + nodes: + - mwc16data2_vlan: null + - gw_corpA_PE2: xe0 +description: Gateways to access as corpA to PE1 and PE2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml new file mode 100644 index 0000000..ae7a6a1 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/mwc16-pe.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
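
The gwcorpA.yaml.generic variant above also embeds per-node "graph" blocks (canvas x/y coordinates and interface placement) that appear to matter only to a topology editor, not to deployment. A sketch that strips them before handing the scenario on, assuming PyYAML:

import yaml  # PyYAML, assumed available

with open("gwcorpA.yaml.generic") as f:   # illustrative path
    doc = yaml.safe_load(f)
for node in doc["topology"]["nodes"].values():
    node.pop("graph", None)
print(yaml.safe_dump(doc, default_flow_style=False))
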
+# +name: "mwc16-pe" +description: "mwc16-pe" +topology: + nodes: + 6WindTR1.1.2__1: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__2: + type: VNF + VNF model: 6WindTR1.1.2 + 6WindTR1.1.2__3: + type: VNF + VNF model: 6WindTR1.1.2 + interDC: + type: external_network + model: interDC + mwc16data1: + type: external_network + model: mwc16data1 + mwc16data2: + type: external_network + model: mwc16data2 + mgmt: + type: external_network + model: mgmt + connections: + 6WindTR1.1.2__1 enty point: + type: link + nodes: + - mwc16data1: "0" + - 6WindTR1.1.2__1: xe2 + 6WindTR1.1.2__3 to OpenStack: + type: link + nodes: + - interDC: "0" + - 6WindTR1.1.2__3: xe3 + 6WindTR1.1.2__2 entry point: + type: link + nodes: + - mwc16data2: "0" + - 6WindTR1.1.2__2: xe2 + management: + type: link + nodes: + - mgmt: "0" + - 6WindTR1.1.2__1: eth0 + - 6WindTR1.1.2__2: eth0 + - 6WindTR1.1.2__3: eth0 + 6WindTR1.1.2__2-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__2: xe1 + - 6WindTR1.1.2__3: xe1 + 6WindTR1.1.2__1-6WindTR1.1.2__3: + type: link + nodes: + - 6WindTR1.1.2__1: xe1 + - 6WindTR1.1.2__3: xe0 + 6WindTR1.1.2__1-6WindTR1.1.2__2: + type: link + nodes: + - 6WindTR1.1.2__1: xe0 + - 6WindTR1.1.2__2: xe0 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml new file mode 100644 index 0000000..d1b2b45 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/scenarios/tidgen.yaml @@ -0,0 +1,56 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +name: mwc16_traffic_generator +description: Traffic generator connected to the demo environment +topology: + nodes: + mwc16gen__1: + type: VNF + VNF model: mwc16gen + mwc: + type: external_network + model: mwc + mgmt: + type: external_network + model: mgmt + mwc16data1: + type: external_network + model: "mwc16data1:3000" + mwc16data2: + type: external_network + model: "mwc16data2:3000" + connections: + connection 0: + type: link + nodes: + - mwc: "0" + - mwc16gen__1: eth0 + connection 1: + type: link + nodes: + - mgmt: "0" + - mwc16gen__1: eth1 + connection 2: + type: link + nodes: + - "mwc16data1:3000": "0" + - mwc16gen__1: xe0 + connection 3: + type: link + nodes: + - "mwc16data2:3000": "0" + - mwc16gen__1: xe2 + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml new file mode 100644 index 0000000..a67797d --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/6WindTR1.1.2.yaml @@ -0,0 +1,81 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: "6WindTR1.1.2" + VNFC: + - bridge-ifaces: + - vpci: "0000:00:03.0" + bandwidth: "1 Gbps" + name: "eth0" + numas: + - interfaces: + - vpci: "0000:00:05.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + - vpci: "0000:00:06.0" + bandwidth: "10 Gbps" + name: "xe1" + dedicated: "yes" + - vpci: "0000:00:07.0" + bandwidth: "10 Gbps" + name: "xe2" + dedicated: "yes" + - vpci: "0000:00:08.0" + bandwidth: "10 Gbps" + name: "xe3" + dedicated: "yes" + paired-threads-id: [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]] + paired-threads: 6 + memory: 8 + hypervisor: + version: "10002|12001|2.6.32-358.el6.x86_64" + type: "QEMU-kvm" + VNFC image: "/mnt/powervault/virtualization/vnfs/6wind/6wind-turbo-router-1.1.2.img.qcow2" + image metadata: + use_incremental: "no" + processor: + model: "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + name: "VM" + external-connections: + - local_iface_name: eth0 + VNFC: VM + type: mgmt + name: eth0 + description: management + - local_iface_name: xe0 + VNFC: VM + type: data + name: xe0 + description: Data plane + - local_iface_name: xe1 + VNFC: VM + type: data + name: xe1 + description: Data plane + - local_iface_name: xe2 + VNFC: VM + type: data + name: xe2 + description: Data plane + - local_iface_name: xe3 + VNFC: VM + type: data + name: xe3 + description: Data plane + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml new file mode 100644 index 0000000..ee58e17 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/IMS-ALLin1_2p.yaml @@ -0,0 +1,46 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
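
Each interface in the VNFC above is pinned to a distinct guest PCI address (vpci); duplicate addresses would make the resulting VM definition invalid. A quick uniqueness check over a descriptor, assuming PyYAML and an illustrative path:

import yaml  # PyYAML, assumed available
from collections import Counter

with open("6WindTR1.1.2.yaml") as f:   # illustrative path
    vnf = yaml.safe_load(f)["vnf"]

vpcis = []
for vnfc in vnf["VNFC"]:
    vpcis += [i["vpci"] for i in vnfc.get("bridge-ifaces", []) if "vpci" in i]
    for numa in vnfc.get("numas", []):
        vpcis += [i["vpci"] for i in (numa.get("interfaces") or []) if "vpci" in i]

dups = [v for v, count in Counter(vpcis).items() if count > 1]
print("duplicate vpci:", dups or "none")
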
+# +--- +vnf: + name: IMS-ALLIN1_2p + description: IMS-ALLIN1_2p + external-connections: + - name: eth0 + type: bridge + VNFC: IMS-ALLIN1_2p-VM + local_iface_name: eth0 + description: Virtio data interface + - name: eth1 + type: mgmt + VNFC: IMS-ALLIN1_2p-VM + local_iface_name: eth1 + description: Management interface + VNFC: + - name: IMS-ALLIN1_2p-VM + description: IMS-ALLIN1_2p-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/allin1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + - name: eth1 + vpci: "0000:00:0b.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: [] + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml new file mode 100644 index 0000000..c05e54c --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE1.yaml @@ -0,0 +1,48 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: gw_corpA_PE1 + description: gw_corpA_PE1 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE1-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: xe0 + type: data + VNFC: gw_corpA_PE1-VM + local_iface_name: xe0 + description: Interface to PE1 + VNFC: + - name: gw_corpA_PE1-VM + description: gw_corpA_PE1-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE1.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml new file mode 100644 index 0000000..ca64c6e --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/gw_corpA_PE2.yaml @@ -0,0 +1,48 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
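
This directory's gw_corpA_PE1/PE2 descriptors expose only the eth0 bridge and xe0 data ports, while the openmano_vnfs/ copies earlier in the patch also declare an eth1 mgmt port. A sketch that compares the external port sets of two descriptor copies, assuming PyYAML and illustrative paths:

import yaml  # PyYAML, assumed available

def ports(path):
    with open(path) as f:
        vnf = yaml.safe_load(f)["vnf"]
    return {(c["name"], c["type"]) for c in vnf["external-connections"]}

a = ports("openmano_vnfs/gw_corpA_PE1.yaml")   # illustrative paths
b = ports("vnfs/gw_corpA_PE1.yaml")
print("only in openmano_vnfs:", a - b)   # {('eth1', 'mgmt')}
print("only in vnfs:", b - a)            # set()
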
+# +--- +vnf: + name: gw_corpA_PE2 + description: gw_corpA_PE2 + external-connections: + - name: eth0 + type: bridge + VNFC: gw_corpA_PE2-VM + local_iface_name: eth0 + description: Interface to Red10 (vlan146) + - name: xe0 + type: data + VNFC: gw_corpA_PE2-VM + local_iface_name: xe0 + description: Interface to PE2 + VNFC: + - name: gw_corpA_PE2-VM + description: gw_corpA_PE2-VM + #Copy the image to a compute path and edit this path + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/gw_corpA_PE2.qcow2 + disk: 10 + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + numas: + - paired-threads: 1 + memory: 4 # GBytes + interfaces: + - vpci: "0000:00:10.0" + bandwidth: "10 Gbps" + name: "xe0" + dedicated: "yes" + diff --git a/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml new file mode 100644 index 0000000..d818d15 --- /dev/null +++ b/modules/core/mano/models/openmano/test/osm_mwc_generic_descriptors/vnfs/tidgen_mwc16.yaml @@ -0,0 +1,89 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +vnf: + name: mwc16gen + description: tidgen 2x10Gbps 28GB 11cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: mwc16gen1-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: mwc16gen1-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe0 + description: Data interface 0 + - name: xe1 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe1 + description: Data interface 1 + - name: xe2 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe2 + description: Data interface 2 + - name: xe3 + type: data + VNFC: mwc16gen1-VM + local_iface_name: xe3 + description: Data interface 3 + VNFC: # Virtual machine array + - name: mwc16gen1-VM # name of Virtual Machine + description: tidgen with 2x10Gbps 28GB + VNFC image: /mnt/powervault/virtualization/vnfs/demos/mwc2016/tidgen_mwc16.qcow2 + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 11 # "cores", "paired-threads", "threads" + memory: 28 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "yes" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "yes" + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: 
"yes" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml new file mode 100644 index 0000000..f9487bb --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov.yaml @@ -0,0 +1,49 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_2sriov +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_2sriov #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_2sriov + default: #Name of external network + type: external_network + model: default + connections: + mgmtnet: + nodes: + - tidgen1: eth0 + - tidgen2: eth0 + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + control-net: + nodes: + - default: null + - tidgen1: eth1 + - tidgen2: eth1 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..501e307 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml @@ -0,0 +1,44 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_2sriov_no_ctrlnet +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_2sriov_no_ctrlnet #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_2sriov_no_ctrlnet + default: #Name of external network + type: external_network + model: default + connections: + mgmtnet: + nodes: + - tidgen1: eth0 + - tidgen2: eth0 + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml new file mode 100644 index 0000000..35cb543 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov.yaml @@ -0,0 +1,57 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_4sriov +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_4sriov #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_4sriov + default: #Name of external network + type: external_network + model: default + connections: + mgmtnet: + nodes: + - tidgen1: eth0 + - tidgen2: eth0 + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + datanet2: + nodes: + - tidgen1: xe2 + - tidgen2: xe2 + datanet3: + nodes: + - tidgen1: xe3 + - tidgen2: xe3 + control-net: + nodes: + - default: null + - tidgen1: eth1 + - tidgen2: eth1 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..de2b2b8 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml @@ -0,0 +1,48 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +--- +name: 2tidgenMWC_4sriov_no_ctrlnet +description: scenario with 2 tidgenMWC VNFs +topology: + nodes: + tidgen1: #VNF name + type: VNF + VNF model: tidgenMWC_4sriov_no_ctrlnet #VNF type + tidgen2: + type: VNF + VNF model: tidgenMWC_4sriov_no_ctrlnet + default: #Name of external network + type: external_network + model: default + connections: + datanet0: + nodes: + - tidgen1: xe0 + - tidgen2: xe0 + datanet1: + nodes: + - tidgen1: xe1 + - tidgen2: xe1 + datanet2: + nodes: + - tidgen1: xe2 + - tidgen2: xe2 + datanet3: + nodes: + - tidgen1: xe3 + - tidgen2: xe3 + + diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml new file mode 100644 index 0000000..a1fe8ab --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov.yaml @@ -0,0 +1,73 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
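+#
+# NOTE: the scenario files above (tidgen_ns_2sriov*.yaml) refer to this
+# descriptor by name: a scenario node's "VNF model" must match the "name"
+# declared under "vnf" below, and each interface used in a scenario
+# "connections" entry must exist among this descriptor's
+# external-connections. Pairing, for illustration (names taken from
+# these files):
+#
+#   topology:
+#     nodes:
+#       tidgen1:
+#         type: VNF
+#         VNF model: tidgenMWC_2sriov    # matches vnf.name below
+#     connections:
+#       datanet0:
+#         nodes:
+#           - tidgen1: xe0               # xe0 is declared below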
+# +--- +vnf: + name: tidgenMWC_2sriov + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: tidgenMWC-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..9a9c9e3 --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml @@ -0,0 +1,65 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
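+#
+# NOTE: this "_no_ctrlnet" variant matches tidgen_vnf_2sriov.yaml except
+# that the eth1 management connection (and its bridge-iface) is omitted,
+# pairing with the *_no_ctrlnet scenarios that define no control-net.
+# The piece that is dropped, copied from the sibling descriptor for
+# reference:
+#
+#   - name: eth1
+#     type: mgmt                 # autoconnects to the management net
+#     VNFC: tidgenMWC-VM
+#     local_iface_name: eth1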
+# +--- +vnf: + name: tidgenMWC_2sriov_no_ctrlnet + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" + bandwidth: 1 Mbps diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml new file mode 100644 index 0000000..7d007cd --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov.yaml @@ -0,0 +1,91 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
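+#
+# NOTE: the "processor" and "hypervisor" blocks below are optional and,
+# judging from the inline comments, act as placement constraints:
+# presumably only hosts whose CPU model/features and hypervisor version
+# match are candidates for this VM. Fragment as used throughout these
+# descriptors:
+#
+#   processor:
+#     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+#     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+#   hypervisor:
+#     type: QEMU-kvm
+#     version: "10002|12001|2.6.32-358.el6.x86_64"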
+# +--- +vnf: + name: tidgenMWC_4sriov + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: eth1 + type: mgmt # "mgmt"(autoconnect to management net), "bridge", "data" + VNFC: tidgenMWC-VM # Virtual Machine this interface belongs to + local_iface_name: eth1 # name inside this Virtual Machine + description: Other management interface for general use + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only + - name: eth1 + vpci: "0000:00:0b.0" # Optional + bandwidth: 1 Mbps # Optional, informative only diff --git a/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml new file mode 100644 index 0000000..54c92fd --- /dev/null +++ b/modules/core/mano/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml @@ -0,0 +1,83 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
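+#
+# NOTE: "vpci" gives the PCI address an interface should appear at inside
+# the guest, so each address should be unique within the VM; across the
+# tidgen descriptors the bridge interfaces sit at 0000:00:0a.0/0b.0 and
+# the data interfaces at 0000:00:10.0 through 0000:00:13.0. Where marked
+# "Optional", the vpci and bridge-iface bandwidth are informative only.
+# E.g.:
+#
+#   bridge-ifaces:
+#     - name: eth0
+#       vpci: "0000:00:0a.0"       # Optional
+#       bandwidth: 1 Mbps          # Optional, informative only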
+# +--- +vnf: + name: tidgenMWC_4sriov_no_ctrlnet + description: tidgen for MWC2016; 12G 10 cores + class: TID + external-connections: + - name: eth0 + type: bridge + VNFC: tidgenMWC-VM + local_iface_name: eth0 + description: Bridge interface, request for dhcp + - name: xe0 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe0 + description: Data interface 1 + - name: xe1 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe1 + description: Data interface 2 + - name: xe2 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe2 + description: Data interface 3 + - name: xe3 + type: data + VNFC: tidgenMWC-VM + local_iface_name: xe3 + description: Data interface 4 + VNFC: # Virtual machine array + - name: tidgenMWC-VM # name of Virtual Machine + disk: 10 + description: tidgen for MWC 12G 10 cores + # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2 + VNFC image: tidgenMWC + image metadata: {"use_incremental": "no" } #is already incremental + processor: #Optional, leave it + model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz + features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"] + hypervisor: #Optional, leave it + type: QEMU-kvm + version: "10002|12001|2.6.32-358.el6.x86_64" + numas: + - paired-threads: 5 # "cores", "paired-threads", "threads" + memory: 12 # GBytes + interfaces: + - name: xe0 + vpci: "0000:00:10.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe1 + vpci: "0000:00:11.0" + dedicated: "no" + bandwidth: 10 Gbps + - name: xe2 + vpci: "0000:00:12.0" + dedicated: "no" # "yes"(passthrough), "no"(sriov) + bandwidth: 10 Gbps + - name: xe3 + vpci: "0000:00:13.0" + dedicated: "no" + bandwidth: 10 Gbps + bridge-ifaces: + - name: eth0 + vpci: "0000:00:0a.0" # Optional + bandwidth: 1 Mbps # Optional, informative only diff --git a/modules/core/mano/models/plugins/CMakeLists.txt b/modules/core/mano/models/plugins/CMakeLists.txt new file mode 100644 index 0000000..e26729f --- /dev/null +++ b/modules/core/mano/models/plugins/CMakeLists.txt @@ -0,0 +1,13 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 2014/12/11 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + yang + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/models/plugins/yang/CMakeLists.txt b/modules/core/mano/models/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..145a5e4 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/CMakeLists.txt @@ -0,0 +1,47 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/15/2014 +# + +# NOTE: These files will be used by the new MANO subsystem +set(source_yang_files + vnfd.yang vnfr.yang rw-vnfd.yang rw-vnfr.yang + vld.yang vlr.yang rw-vld.yang rw-vlr.yang + nsd.yang nsr.yang rw-nsd.yang rw-nsr.yang + pnfd.yang + vnffgd.yang + ietf-network.yang + ietf-network-topology.yang + ietf-l2-topology.yang + rw-topology.yang + ) + +rift_add_yang_target( + TARGET mano_yang + YANG_FILES ${source_yang_files} + GIR_PATHS ${CMAKE_CURRENT_BINARY_DIR} + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwmanifest_yang_gen + rwschema_yang_gen + rwcloud_yang_gen + ) + +rift_gen_yang_tree(mano-pyang-trees + OUTFILE_PREFIX mano.yang + YANG_FILES + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vnfd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vld.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/nsd.yang + 
${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-vnfd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-vld.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-nsd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/pnfd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vnffgd.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-network.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-network-topology.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-l2-topology.yang + ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-topology.yang + ) diff --git a/modules/core/mano/models/plugins/yang/Makefile b/modules/core/mano/models/plugins/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang b/modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang new file mode 100644 index 0000000..d09e914 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-l2-topology.tailf.yang @@ -0,0 +1,40 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-l2-topology-annotation +{ + namespace "urn:ietf:params:xml:ns:yang:ietf-l2-topology"; + prefix "ietf-l2-topology-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + import ietf-l2-topology { + prefix lt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:server-provided" { + tailf:callpoint base_show; + } + +} diff --git a/modules/core/mano/models/plugins/yang/ietf-l2-topology.yang b/modules/core/mano/models/plugins/yang/ietf-l2-topology.yang new file mode 100755 index 0000000..9f572cb --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-l2-topology.yang @@ -0,0 +1,578 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-l2-topology { + yang-version 1; + namespace "urn:ietf:params:xml:ns:yang:ietf-l2-topology"; + prefix "l2t"; + + import ietf-network { + prefix "nw"; + } + + import ietf-network-topology { + prefix "nt"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + organization "TBD"; + contact "I-D Editor: jie.dong@huawei.com"; + + description + "This module defines a basic model for + the layer-2 topology of a network"; + + revision "2015-06-23" { + description "Initial revision"; + reference "draft-ietf-i2rs-l2-network-topology-01"; + } + + /* + * Typedefs + */ + + typedef vlan { + type uint16 { + range "0..4095"; + } + description "VLAN ID"; + } + + typedef trill-nickname { + type uint16; + description "TRILL Nickname"; + } + + typedef flag-type { + type identityref { + base 
"flag-identity"; + } + description "Base type for flags"; + } + + typedef l2-network-event-type { + type enumeration { + enum "add" { + value 0; + description "An L2 node or link or termination-point + has been added"; + } + enum "remove" { + value 1; + description "An L2 node or link or termination-point + has been removed"; + } + enum "update" { + value 2; + description "An L2 node or link or termination-point + has been updated"; + } + } + description "l2 network event type for notifications"; + } // l2-topology-event-type + + + /* + * Features + */ + + feature VLAN { + description + "Indicates that the system supports the + vlan functions"; + } + + feature QinQ { + description + "Indicates that the system supports the + qinq functions"; + } + + feature PBB { + description + "Indicates that the device supports the + provider-backbone-bridging functions"; + } + + feature VPLS { + description + "Indicates that the device supports the + VPLS functions"; + reference "RFC 4761, RFC 4762"; + } + + feature TRILL { + description + "Indicates that the device supports the + TRILL functions"; + reference "RFC 6325"; + } + + feature VXLAN { + description + "Indicates that the device supports the + VXLAN functions"; + reference "RFC 7348"; + } + + /* + * Identities + */ + identity flag-identity { + description "Base type for flags"; + } + + identity encapsulation-type { + description + "Base identity from which specific encapsulation + types are derived."; + } + + identity eth-encapsulation-type { + base encapsulation-type; + description + "Base identity from which specific ethernet + encapsulation types are derived."; + + } + + identity ethernet { + base eth-encapsulation-type; + description + "native ethernet encapsulation"; + } + + identity vlan { + base eth-encapsulation-type; + description + "vlan encapsulation"; + } + + identity qinq { + base eth-encapsulation-type; + description + "qinq encapsulation"; + } + + identity pbb { + base eth-encapsulation-type; + description + "pbb encapsulation"; + } + + identity trill { + base eth-encapsulation-type; + description + "trill encapsulation"; + } + + identity vpls { + base eth-encapsulation-type; + description + "vpls encapsulation"; + } + + identity vxlan { + base eth-encapsulation-type; + description + "vxlan encapsulation"; + } + + identity frame-relay { + base encapsulation-type; + description + "Frame Relay encapsulation"; + } + + identity ppp { + base encapsulation-type; + description + "PPP encapsulation"; + } + + identity hdlc { + base encapsulation-type; + description + "HDLC encapsulation"; + } + + identity atm { + base encapsulation-type; + description + "Base identity from which specific ATM + encapsulation types are derived."; + + } + + identity pwe3 { + base encapsulation-type; + description + "Base identity from which specific pw + encapsulation types are derived."; + } + + + /* + * Groupings + */ + + + grouping l2-network-type { + description "Identify the topology type to be L2."; + container l2-network { + presence "indicates L2 Network"; + description + "The presence of the container node indicates + L2 Topology"; + } + } + + grouping l2-network-attributes { + description "L2 Topology scope attributes"; + container l2-network-attributes { + description "Containing L2 network attributes"; + leaf name { + type string; + description "Name of the L2 network"; + } + + leaf-list flag { + type flag-type; + description "L2 network flags"; + } + } + } + + grouping l2-node-attributes { + description "L2 node attributes"; + container 
l2-node-attributes { + description "Containing L2 node attributes"; + leaf name { + type string; + description "Node name"; + } + leaf description { + type string; + description "Node description"; + } + leaf-list management-address { + type inet:ip-address; + description "System management address"; + } + leaf management-vid { + if-feature VLAN; + type vlan; + description "System management VID"; + } + leaf-list nick-name { + if-feature TRILL; + type trill-nickname; + description "Nickname of the RBridge"; + } + leaf-list flag { + type flag-type; + description "Node operational flags"; + } + } + } // grouping l2-node-attributes + + + grouping l2-link-attributes { + description "L2 link attributes"; + container l2-link-attributes { + description "Containing L2 link attributes"; + leaf name { + type string; + description "Link name"; + } + leaf-list flag { + type flag-type; + description "Link flags"; + } + leaf rate { + type decimal64 { + fraction-digits 2; + } + description "Link rate"; + + } + leaf delay { + type uint32; + description "Link delay in microseconds"; + } + leaf-list srlg { + type uint32; + description + "List of Shared Risk Link Groups + this link belongs to."; + } + } + } // grouping l2-link-attributes + + grouping l2-termination-point-attributes { + description "L2 termination point attributes"; + container l2-termination-point-attributes { + description "Containing L2 TP attributes"; + leaf description { + type string; + description "Port description"; + } + + leaf maximum-frame-size { + type uint32; + description "Maximum frame size"; + } + + choice l2-termination-point-type { + description + "Indicates termination-point type + specific attributes"; + case ethernet { + leaf mac-address { + type yang:mac-address; + description "Interface MAC address"; + } + + leaf eth-encapsulation { + type identityref { + base eth-encapsulation-type; + } + description + "Encapsulation type of this + ternimation point."; + } + + leaf port-vlan-id { + if-feature VLAN; + type vlan; + description "Port VLAN ID"; + } + + list vlan-id-name { + if-feature VLAN; + key "vlan-id"; + description "Interface configured VLANs"; + leaf vlan-id { + type vlan; + description "VLAN ID"; + } + leaf vlan-name { + type string; + description "VLAN Name"; + } + } + } //case ethernet + + case legacy { + leaf encapsulation { + type identityref { + base encapsulation-type; + } + description + "Encapsulation type of this termination point."; + } + } //case legacy + + } //choice termination-point-type + + leaf tp-state { + type enumeration { + enum in-use { + value 0; + description + "the termination point is in forwarding state"; + } + enum blocking { + value 1; + description + "the termination point is in blocking state"; + } + enum down { + value 2; + description + "the termination point is in down state"; + } + enum others { + value 3; + description + "the termination point is in other state"; + } + } + config false; + description "State of the termination point"; + } + } + } // grouping l2-termination-point-attributes + +/*** grouping of network/node/link/tp leaf-refs ***/ + + grouping network-ref { + description + "Grouping for an absolute reference to a network topology + instance."; + leaf network-ref { + type leafref { + path "/nw:network/nw:network-id"; + } + description + "An absolute reference to a network topology instance."; + } + } + + grouping link-ref { + description + "Grouping for an absolute reference to a link instance."; + uses network-ref; + leaf link-ref { + type leafref { + path 
"/nw:network" + +"[nw:network-id = current()/../network-ref]" + +"/nt:link/nt:link-id"; + } + description + "An absolute reference to a link instance."; + } + } + + grouping node-ref { + description + "Grouping for an absolute reference to a node instance."; + uses network-ref; + leaf node-ref { + type leafref { + path "/nw:network" + +"[nw:network-id = current()/../network-ref]" + +"/nw:node/nw:node-id"; + } + description + "An absolute reference to a node instance."; + } + } + + grouping tp-ref { + description + "Grouping for an absolute reference to a termination point."; + uses node-ref; + leaf tp-ref { + type leafref { + path "/nw:network" + +"[nw:network-id = current()/../network-ref]" + +"/nw:node[nw:node-id = current()/../node-ref]" + +"/nt:termination-point/nt:tp-id"; + } + description + "Grouping for an absolute reference to a TP."; + } + } + + + /* + * Data nodes + */ + augment "/nw:network/nw:network-types" { + description + "Introduce new network type for L2 topology"; + uses l2-network-type; + } + + augment "/nw:network" { + /* RIFT-Change: when not to be used yet + when "nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description + "Configuration parameters for the L2 network + as a whole"; + uses l2-network-attributes; + } + + augment "/nw:network/nw:node" { + /* RIFT-Change: when not to be used yet + when "../nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description + "Configuration parameters for L2 at the node + level"; + uses l2-node-attributes; + } + + augment "/nw:network/nt:link" { + /* RIFT-Change: when not to be used yet + when "/nw:network/nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description "Augment L2 topology link information"; + uses l2-link-attributes; + } + + augment "/nw:network/nw:node/nt:termination-point" { + /* RIFT-Change: when not to be used yet + when "/nw:network/nw:network-types/l2-network" { + description + "Augmentation parameters apply only for networks + with L2 topology"; + } + */ + description + "Augment L2 topology termination point configuration"; + uses l2-termination-point-attributes; + } + + /* + * Notifications + */ + + notification l2-node-event { + description "Notification event for L2 node"; + leaf event-type { + type l2-network-event-type; + description "Event type"; + } + uses node-ref; + uses l2-network-type; + uses l2-node-attributes; + } + + notification l2-link-event { + description "Notification event for L2 link"; + leaf event-type { + type l2-network-event-type; + description "Event type"; + } + uses link-ref; + uses l2-network-type; + uses l2-link-attributes; + } + + notification l2-termination-point-event { + description "Notification event for L2 termination point"; + leaf event-type { + type l2-network-event-type; + description "Event type"; + } + uses tp-ref; + uses l2-network-type; + uses l2-termination-point-attributes; + } + +} // module l2-topology diff --git a/modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang b/modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang new file mode 100644 index 0000000..26868e5 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network-topology.tailf.yang @@ -0,0 +1,34 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network-topology-annotation +{ + namespace 
"urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix "ietf-network-topology-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + tailf:annotate "/nd:network/nd:server-provided" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/ietf-network-topology.yang b/modules/core/mano/models/plugins/yang/ietf-network-topology.yang new file mode 100755 index 0000000..e8f7c79 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network-topology.yang @@ -0,0 +1,257 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network-topology { + yang-version 1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix lnk; + + import ietf-inet-types { + prefix inet; + } + import ietf-network { + prefix nd; + } + + organization "TBD"; + contact + "WILL-BE-DEFINED-LATER"; + description + "This module defines a common base model for network topology, + augmenting the base network model with links to connect nodes, + as well as termination points to terminate links on nodes."; + + revision 2015-06-08 { + description + "Initial revision."; + reference "draft-ietf-i2rs-yang-network-topo-01"; + } + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier, for example to indicate the type + of link and/or the type of topology that the link is a part + of."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same TP in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier, for example to indicate the type + of TP and/or the type of node and topology that the TP is a + part of."; + } + + grouping link-ref { + description + "References a link in a specific network."; + leaf link-ref { + type leafref { + path "/nd:network[nd:network-id=current()/../"+ + "nd:network-ref]/link/link-id"; + } + description + "A type for an absolute reference a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nd:network-ref; + } + + grouping tp-ref { + description + "References a termination point in a specific node."; + leaf tp-ref { + type leafref { + path "/nd:network[nd:network-id=current()/../"+ + "nd:network-ref]/nd:node[nd:node-id=current()/../"+ + "nd:node-ref]/termination-point/tp-id"; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. 
+       In such a case, a relative path should be used instead.)";
+    }
+    uses nd:node-ref;
+  }
+
+  augment "/nd:network" {
+    description
+      "Add links to the network model.";
+    list link {
+      key "link-id";
+
+      description
+        "A Network Link connects a Local (Source) node and
+         a Remote (Destination) node via a set of the
+         nodes' termination points.
+         As it is possible to have several links between the same
+         source and destination nodes, and as a link could
+         potentially be re-homed between termination points, to
+         ensure that we would always know to distinguish between
+         links, every link is identified by a dedicated link
+         identifier.
+         Note that a link models a point-to-point link, not a
+         multipoint link.
+         Layering dependencies on links in underlay topologies are
+         not represented as the layering information of nodes and of
+         termination points is sufficient.";
+      container source {
+        description
+          "This container holds the logical source of a particular
+           link.";
+        leaf source-node {
+          type leafref {
+            // RIFT change:
+            path "../../../../nd:network/nd:node/nd:node-id";
+          }
+          mandatory true;
+          description
+            "Source node identifier, must be in the same topology.";
+        }
+        leaf source-tp {
+          type leafref {
+            // RIFT change:
+            path "../../../../nd:network/nd:node[nd:node-id=current()/../"+
+                 "source-node]/termination-point/tp-id";
+          }
+          description
+            "Termination point within source node that terminates
+             the link.";
+        }
+      }
+      container destination {
+        description
+          "This container holds the logical destination of a
+           particular link.";
+        leaf dest-node {
+          type leafref {
+            // RIFT change
+            path "../../../../nd:network/nd:node/nd:node-id";
+          }
+          mandatory true;
+          description
+            "Destination node identifier, must be in the same
+             network.";
+        }
+        leaf dest-tp {
+          type leafref {
+            // RIFT change:
+            path "../../../../nd:network/nd:node[nd:node-id=current()/../"+
+                 "dest-node]/termination-point/tp-id";
+          }
+          description
+            "Termination point within destination node that
+             terminates the link.";
+        }
+      }
+      leaf link-id {
+        type link-id;
+        description
+          "The identifier of a link in the topology.
+           A link is specific to a topology to which it belongs.";
+      }
+      list supporting-link {
+        key "network-ref link-ref";
+        description
+          "Identifies the link, or links, that this link
+           is dependent on.";
+        leaf network-ref {
+          type leafref {
+            // RIFT change:
+            path "../../../../nd:network/nd:supporting-network/nd:network-ref";
+          }
+          description
+            "This leaf identifies in which underlay topology
+             the supporting link is present.";
+        }
+        leaf link-ref {
+          type leafref {
+            path "/nd:network[nd:network-id=current()/.."+
+                 "/network-ref]/link/link-id";
+          }
+          description
+            "This leaf identifies a link which is a part
+             of this link's underlay. Reference loops, in which
+             a link identifies itself as its underlay, either
+             directly or transitively, are not allowed.";
+        }
+      }
+    }
+  }
+  augment "/nd:network/nd:node" {
+    description
+      "Augment termination points which terminate links.
+       Termination points can ultimately be mapped to interfaces.";
+    list termination-point {
+      key "tp-id";
+      description
+        "A termination point can terminate a link.
+ Depending on the type of topology, a termination point + could, for example, refer to a port or an interface."; + leaf tp-id { + type tp-id; + description + "Termination point identifier."; + } + list supporting-termination-point { + key "network-ref node-ref tp-ref"; + description + "The leaf list identifies any termination points that + the termination point is dependent on, or maps onto. + Those termination points will themselves be contained + in a supporting node. + This dependency information can be inferred from + the dependencies between links. For this reason, + this item is not separately configurable. Hence no + corresponding constraint needs to be articulated. + The corresponding information is simply provided by the + implementing system."; + leaf network-ref { + type leafref { + // RIFT change: + path "/nd:network/nd:node/nd:supporting-node/nd:network-ref"; + } + description + "This leaf identifies in which topology the + supporting termination point is present."; + } + leaf node-ref { + type leafref { + // RIFT change: + path "/nd:network/nd:node/nd:supporting-node/nd:node-ref"; + } + description + "This leaf identifies in which node the supporting + termination point is present."; + } + leaf tp-ref { + type leafref { + path "/nd:network[nd:network-id=current()/../"+ + "network-ref]/nd:node[nd:node-id=current()/../"+ + "node-ref]/termination-point/tp-id"; + } + description + "Reference to the underlay node, must be in a + different topology"; + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/ietf-network.tailf.yang b/modules/core/mano/models/plugins/yang/ietf-network.tailf.yang new file mode 100644 index 0000000..705842a --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network.tailf.yang @@ -0,0 +1,31 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network-annotation +{ + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix "ietf-network-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:server-provided" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/ietf-network.yang b/modules/core/mano/models/plugins/yang/ietf-network.yang new file mode 100755 index 0000000..a059e94 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/ietf-network.yang @@ -0,0 +1,157 @@ + +/* + * NO RW COPYRIGHT + * + */ + +module ietf-network { + yang-version 1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix nd; + + import ietf-inet-types { + prefix inet; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + organization "TBD"; + contact + "WILL-BE-DEFINED-LATER"; + description + "This module defines a common base model for a collection + of nodes in a network. 
Node definitions are further used
+     in network topologies and inventories.";
+
+  revision 2015-06-08 {
+    description
+      "Initial revision.";
+    reference "draft-ietf-i2rs-yang-network-topo-01";
+  }
+
+  typedef node-id {
+    type inet:uri;
+    description
+      "Identifier for a node.";
+  }
+
+  typedef network-id {
+    type inet:uri;
+    description
+      "Identifier for a network.";
+  }
+
+  grouping network-ref {
+    description
+      "Contains the information necessary to reference a network,
+       for example an underlay network.";
+    leaf network-ref {
+      type leafref {
+        path "/network/network-id";
+      }
+      description
+        "Used to reference a network, for example an underlay
+         network.";
+    }
+  }
+
+  grouping node-ref {
+    description
+      "Contains the information necessary to reference a node.";
+    leaf node-ref {
+      type leafref {
+        path "/network[network-id=current()/../network-ref]"+
+             "/node/node-id";
+      }
+      description
+        "Used to reference a node.
+         Nodes are identified relative to the network they are
+         contained in.";
+    }
+    uses network-ref;
+  }
+
+  list network {
+    config false;
+    key "network-id";
+    description
+      "Describes a network.
+       A network typically contains an inventory of nodes,
+       topological information (augmented through
+       network-topology model), as well as layering
+       information.";
+    container network-types {
+      description
+        "Serves as an augmentation target.
+         The network type is indicated through corresponding
+         presence containers augmented into this container.";
+    }
+    leaf network-id {
+      type network-id;
+      description
+        "Identifies a network.";
+    }
+    leaf server-provided {
+      type boolean;
+      config false;
+      description
+        "Indicates whether the information concerning this
+         particular network is populated by the server
+         (server-provided true, the general case for network
+         information discovered from the server),
+         or whether it is configured by a client
+         (server-provided false, possible e.g. for
+         service overlays managed through a controller).";
+    }
+    list supporting-network {
+      key "network-ref";
+      description
+        "An underlay network, used to represent layered network
+         topologies.";
+
+      leaf network-ref {
+        type leafref {
+          path "/network/network-id";
+        }
+        description
+          "References the underlay network.";
+      }
+    }
+    list node {
+      key "node-id";
+      description
+        "The inventory of nodes of this network.";
+      leaf node-id {
+        type node-id;
+        description
+          "Identifies a node uniquely within the containing
+           network.";
+      }
+      list supporting-node {
+        key "network-ref node-ref";
+        description
+          "Represents another node, in an underlay network, that
+           this node is supported by. Used to represent layering
+           structure.";
+        leaf network-ref {
+          type leafref {
+            path "../../../supporting-network/network-ref";
+          }
+          description
+            "References the underlay network that the
+             underlay node is part of.";
+        }
+        leaf node-ref {
+          type leafref {
+            path "/network/node/node-id";
+          }
+          description
+            "References the underlay node itself.";
+        }
+      }
+    }
+  }
+}
+
diff --git a/modules/core/mano/models/plugins/yang/nsd.tailf.yang b/modules/core/mano/models/plugins/yang/nsd.tailf.yang
new file mode 100644
index 0000000..80711c3
--- /dev/null
+++ b/modules/core/mano/models/plugins/yang/nsd.tailf.yang
@@ -0,0 +1,25 @@
+
+/*
+ *
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ *
+ */
+
+module nsd-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/nsd-annotation";
+  prefix "nsd-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import nsd {
+    prefix nsd;
+  }
+}
diff --git a/modules/core/mano/models/plugins/yang/nsd.yang b/modules/core/mano/models/plugins/yang/nsd.yang
new file mode 100755
index 0000000..328ac0c
--- /dev/null
+++ b/modules/core/mano/models/plugins/yang/nsd.yang
@@ -0,0 +1,871 @@
+
+/*
+ *
+ * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+ *
+ *
+ */
+
+module nsd
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:nsd";
+  prefix "nsd";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2014-10-27 {
+    description
+      "Initial revision. This YANG file defines
+       the Network Service Descriptor (NSD)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  typedef parameter-data-type {
+    type enumeration {
+      enum string;
+      enum integer;
+      enum boolean;
+    }
+  }
+
+  grouping primitive-parameter {
+    leaf name {
+      description
+        "Name of the parameter.";
+      type string;
+    }
+
+    leaf data-type {
+      description
+        "Data type associated with the name.";
+      type parameter-data-type;
+    }
+
+    leaf mandatory {
+      description "Is this field mandatory";
+      type boolean;
+      default false;
+    }
+
+    leaf default-value {
+      description "The default value for this field";
+      type string;
+    }
+
+    leaf parameter-pool {
+      description "NSD Parameter pool name to use for this parameter";
+      type string;
+    }
+
+    leaf read-only {
+      description
+        "The value should be greyed out by the UI.
+         Only applies to parameters with default values.";
+      type boolean;
+    }
+
+    leaf hidden {
+      description
+        "The value should be hidden by the UI.
+         Only applies to parameters with default values.";
+      type boolean;
+    }
+  }
+
+  grouping vnf-configuration {
+    container vnf-configuration {
+      description
+        "Information regarding the VNF configuration
+         is captured here. Note that if the NS contains
+         multiple instances of the same VNF, each instance
+         of the VNF may have different configuration";
+
+      leaf config-type {
+        description
+          "Must use this configuration type and fill in only pertaining
+           config-method below";
+        type enumeration {
+          enum none;
+          enum netconf;
+          enum rest;
+          enum script;
+          enum juju;
+        }
+      }
+
+      choice config-method {
+        description
+          "Defines the configuration method for the VNF.";
+        case netconf {
+          description
+            "Use NETCONF for configuring the VNF.";
+          container netconf {
+            leaf target {
+              description
+                "Netconf configuration target";
+              type enumeration {
+                enum running;
+                enum candidate;
+              }
+            }
+
+            leaf protocol {
+              description
+                "Protocol to use for netconf (e.g. ssh)";
+              type enumeration {
+                enum None;
+                enum ssh;
+              }
+            }
+
+            leaf port {
+              description
+                "Port for the netconf server.";
+              type inet:port-number;
+            }
+          }
+        }
+
+        case rest {
+          description
+            "Use REST for configuring the VNF.";
+          container rest {
+            leaf port {
+              description
+                "Port for the REST server.";
+              type inet:port-number;
+            }
+          }
+        }
+
+        case script {
+          description
+            "Use custom script for configuring the VNF.
+             This script is executed in the context of the
+             Orchestrator.";
+          container script {
+            leaf script-type {
+              description
+                "Script type - currently supported: bash, expect";
+              type enumeration {
+                enum bash;
+                enum expect;
+              }
+            }
+          }
+        }
+
+        case juju {
+          description
+            "Configure the VNF through Juju.";
+          container juju {
+            leaf charm {
+              description "Juju charm to use with the VNF.";
+              type string;
+            }
+          }
+        }
+      }
+
+      container config-access {
+        leaf mgmt-ip-address {
+          description
+            "IP address to be used to configure this VNF,
+             optional if it is possible to resolve dynamically.";
+          type inet:ip-address;
+        }
+
+        leaf username {
+          description
+            "Username for configuration.";
+          type string;
+        }
+
+        leaf password {
+          description
+            "Password for configuration access authentication.";
+          type string;
+        }
+      }
+
+      container input-params {
+        description
+          "Miscellaneous input parameters to be considered
+           while processing the NSD to apply configuration";
+
+        leaf config-priority {
+          description
+            "Configuration priority - order of configuration
+             to be applied to each VNF in this NS,
+             low number gets precedence over high number";
+          type uint64;
+        }
+
+        leaf config-delay {
+          description
+            "Wait (seconds) before applying the configuration to VNF";
+          type uint64;
+        }
+      }
+
+      list config-primitive {
+        rwpb:msg-new ConfigPrimitive;
+        description
+          "List of configuration primitives supported by the
+           configuration agent for this VNF.";
+        key "name";
+
+        leaf name {
+          description
+            "Name of the configuration primitive.";
+          type string;
+        }
+
+        list parameter {
+          description
+            "List of parameters to the configuration primitive.";
+          key "name";
+          uses primitive-parameter;
+        }
+      }
+
+      list initial-config-primitive {
+        rwpb:msg-new InitialConfigPrimitive;
+        description
+          "Initial set of configuration primitives.";
+        key "seq";
+        leaf seq {
+          description
+            "Sequence number for the configuration primitive.";
+          type uint64;
+        }
+
+        leaf name {
+          description
+            "Name of the configuration primitive.";
+          type string;
+        }
+
+        list parameter {
+          key "name";
+          leaf name {
+            type string;
+          }
+
+          leaf value {
+            type string;
+          }
+        }
+      }
+
+      leaf config-template {
+        description
+          "Configuration template for each VNF";
+        type string;
+      }
+    }
+  } // END - grouping vnf-configuration
+
+
+  container nsd-catalog {
+
+    list nsd {
+      key "id";
+
+      leaf id {
+        description "Identifier for the NSD.";
+        type yang:uuid;
+      }
+
+      leaf name {
+        description "NSD name.";
+        mandatory true;
+        type string;
+      }
+
+      leaf short-name {
+        description "NSD short name.";
+        type string;
+      }
+
+
+      leaf vendor {
+        description "Vendor of the NSD.";
+        type string;
+      }
+
+      leaf logo {
+        description
+          "Vendor logo for the Network Service";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the NSD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the NSD";
+        type string;
+      }
+
+      list connection-point {
+        description
+          "List of external connection points.
+           Each NS has one or more external connection
+           points. As the name implies, external
+           connection points are used for connecting
+           the NS to other NS or to external networks.
+           Each NS exposes these connection points to
+           the orchestrator. The orchestrator can
+           construct network service chains by
+           connecting the connection points between
+           different NS.";
+
+        key "name";
+        leaf name {
+          description
+            "Name of the NS connection point.";
+          type string;
+        }
+
+        leaf type {
+          description
+            "Type of the connection point.";
+          type manotypes:connection-point-type;
+        }
+      }
+
+      leaf-list vld-ref {
+        type leafref {
+          path "/vld:vld-catalog/vld:vld/vld:id";
+        }
+      }
+
+      /* Still having issues modelling this,
+         see the comments under vnfd-connection-point-ref
+       */
+      list vld {
+        description
+          "List of Virtual Link Descriptors.";
+
+        key "id";
+
+        leaf id {
+          description
+            "Identifier for the VLD.";
+          type yang:uuid;
+        }
+
+        leaf name {
+          description
+            "Virtual Link Descriptor (VLD) name.";
+          type string;
+        }
+
+        leaf short-name {
+          description
+            "Short name for VLD for UI";
+          type string;
+        }
+
+        leaf vendor {
+          description "Provider of the VLD.";
+          type string;
+        }
+
+        leaf description {
+          description "Description of the VLD.";
+          type string;
+        }
+
+        leaf version {
+          description "Version of the VLD";
+          type string;
+        }
+
+        leaf type {
+          type manotypes:virtual-link-type;
+        }
+
+        leaf root-bandwidth {
+          description
+            "For ELAN this is the aggregate bandwidth.";
+          type uint64;
+        }
+
+        leaf leaf-bandwidth {
+          description
+            "For ELAN this is the bandwidth of branches.";
+          type uint64;
+        }
+
+        list vnfd-connection-point-ref {
+          description
+            "A list of references to connection points.";
+          key "member-vnf-index-ref";
+
+          leaf member-vnf-index-ref {
+            description "Reference to member-vnf within constituent-vnfds";
+            type leafref {
+              path "../../../nsd:constituent-vnfd/nsd:member-vnf-index";
+            }
+          }
+
+          leaf vnfd-id-ref {
+            description
+              "A reference to a vnfd. This is a
+               leafref to path:
+                   ../../../nsd:constituent-vnfd
+                   + [nsd:id = current()/../nsd:id-ref]
+                   + /nsd:vnfd-id-ref
+               NOTE: An issue with confd is preventing the
+               use of xpath. Seems to be an issue with leafref
+               to leafref, whose target is in a different module.
+               Once that is resolved this will be switched to use
+               leafref";
+            type yang:uuid;
+          }
+
+          leaf vnfd-connection-point-ref {
+            description
+              "A reference to a connection point name
+               in a vnfd. This is a leafref to path:
+                   /vnfd:vnfd-catalog/vnfd:vnfd
+                   + [vnfd:id = current()/../nsd:vnfd-id-ref]
+                   + /vnfd:connection-point/vnfd:name
+               NOTE: An issue with confd is preventing the
+               use of xpath. Seems to be an issue with leafref
+               to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use + leafref"; + type string; + } + } + + // replicate for pnfd container here + uses manotypes:provider-network; + } + + list constituent-vnfd { + description + "List of VNFDs that are part of this + network service."; + + key "member-vnf-index"; + + leaf member-vnf-index { + description + "Identifier/index for the VNFD. This separate id + is required to ensure that multiple VNFs can be + part of single NS"; + type uint64; + } + + leaf vnfd-id-ref { + description + "Identifier for the VNFD."; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + + // Provide this VNF configuration parameters + uses vnf-configuration; + } + + list vnf-dependency { + description + "List of VNF dependencies."; + key vnf-source-ref; + leaf vnf-source-ref { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + leaf vnf-depends-on-ref { + description + "Reference to VNF that sorce VNF depends."; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + } + + list vnffgd { + description + "List of VNF Forwarding Graph Descriptors (VNFFGD)."; + + key "id"; + + leaf id { + description + "Identifier for the VNFFGD."; + type yang:uuid; + } + + leaf name { + description + "VNFFGD name."; + type string; + } + + leaf short-name { + description + "Short name for VNFFGD for UI"; + type string; + } + + leaf vendor { + description "Provider of the VNFFGD."; + type string; + } + + leaf description { + description "Description of the VNFFGD."; + type string; + } + + leaf version { + description "Version of the VNFFGD"; + type string; + } + + list rsp { + description + "List of Rendered Service Paths (RSP)."; + + key "id"; + + leaf id { + description + "Identifier for the RSP."; + type yang:uuid; + } + + leaf name { + description + "RSP name."; + type string; + } + + list vnfd-connection-point-ref { + description + "A list of references to connection points."; + key "member-vnf-index-ref"; + + leaf member-vnf-index-ref { + description "Reference to member-vnf within constituent-vnfds"; + type leafref { + path "../../../../nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf order { + type uint8; + description + "A number that denotes the order of a VNF in a chain"; + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type yang:uuid; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name + in a vnfd. This is a leafref to path: + /vnfd:vnfd-catalog/vnfd:vnfd + + [vnfd:id = current()/../nsd:vnfd-id-ref] + + /vnfd:connection-point/vnfd:name + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. 
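
/* Example (hypothetical, values invented): two constituent VNFDs where
   the second depends on the first, expressed with the constituent-vnfd
   and vnf-dependency lists above.

     <constituent-vnfd>
       <member-vnf-index>1</member-vnf-index>
       <vnfd-id-ref>aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa</vnfd-id-ref>
     </constituent-vnfd>
     <constituent-vnfd>
       <member-vnf-index>2</member-vnf-index>
       <vnfd-id-ref>bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb</vnfd-id-ref>
     </constituent-vnfd>
     <vnf-dependency>
       <vnf-source-ref>bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb</vnf-source-ref>
       <vnf-depends-on-ref>aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa</vnf-depends-on-ref>
     </vnf-dependency>
*/
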
+ Once that is resovled this will switched to use + leafref"; + type string; + } + } + } //rsp + + list classifier { + description + "List of classifier rules."; + + key "id"; + + leaf id { + description + "Identifier for the classifier rule."; + type yang:uuid; + } + + leaf name { + description + "Name of the classifier."; + type string; + } + + leaf rsp-id-ref { + description + "A reference to the RSP."; + type leafref { + path "../../nsd:rsp/nsd:id"; + } + } + + + leaf member-vnf-index-ref { + description "Reference to member-vnf within constituent-vnfds"; + type leafref { + path "../../../nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type yang:uuid; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name + in a vnfd. This is a leafref to path: + /vnfd:vnfd-catalog/vnfd:vnfd + + [vnfd:id = current()/../nsd:vnfd-id-ref] + + /vnfd:connection-point/vnfd:name + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + type string; + } + + list match-attributes { + description + "List of match attributes."; + + key "id"; + + leaf id { + description + "Identifier for the classifier match attribute rule."; + type yang:uuid; + } + + leaf ip-proto { + description + "IP Protocol."; + type uint8; + } + + leaf source-ip-address { + description + "Source IP address."; + type inet:ip-address; + } + + leaf destination-ip-address { + description + "Destination IP address."; + type inet:ip-address; + } + + leaf source-port { + description + "Source port number."; + type inet:port-number; + } + + leaf destination-port { + description + "Destination port number."; + type inet:port-number; + } + //TODO: Add more match criteria + } //match-attributes + } // classifier + } // vnffgd + + uses manotypes:monitoring-param; + uses manotypes:input-parameter-xpath; + + list parameter-pool { + description + "Pool of parameter values which must be + pulled from during configuration"; + key "name"; + + leaf name { + description + "Name of the configuration value pool"; + type string; + } + + container range { + description + "Create a range of values to populate the pool with"; + + leaf start-value { + description + "Generated pool values start at this value"; + type uint32; + mandatory true; + } + + leaf end-value { + description + "Generated pool values stop at this value"; + type uint32; + mandatory true; + } + } + } + + list config-primitive { + description + "Network service level configuration primitives."; + + key "name"; + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + list parameter { + description + "List of parameters to the configuration primitive."; + + key "name"; + uses primitive-parameter; + } + + list parameter-group { + description + "Grouping of parameters which are logically grouped in UI"; + key "name"; + + leaf name { + description + "Name of the parameter group"; + type string; + } + + list parameter { + description + "List of parameters to the configuration 
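
/* Example (hypothetical, values invented): a classifier that steers
   TCP (ip-proto 6) port-80 traffic from member VNF 1 into an RSP,
   using the classifier and match-attributes lists above.

     <classifier>
       <id>cccccccc-cccc-cccc-cccc-cccccccccccc</id>
       <name>web-traffic</name>
       <rsp-id-ref>dddddddd-dddd-dddd-dddd-dddddddddddd</rsp-id-ref>
       <member-vnf-index-ref>1</member-vnf-index-ref>
       <match-attributes>
         <id>eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee</id>
         <ip-proto>6</ip-proto>
         <destination-port>80</destination-port>
       </match-attributes>
     </classifier>
*/
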
primitive."; + key "name"; + uses primitive-parameter; + } + + leaf mandatory { + description "Is this parameter group mandatory"; + type boolean; + default true; + } + } + + list vnf-primitive-group { + description + "List of configuration primitives grouped by VNF."; + + key "member-vnf-index-ref"; + leaf member-vnf-index-ref { + description + "Reference to member-vnf within constituent-vnfds"; + type uint64; + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. + Once that is resovled this will switched to use + leafref"; + + type string; + } + + leaf vnfd-name { + description + "Name of the VNFD"; + type string; + } + + list primitive { + key "index"; + + leaf index { + description "Index of this primitive"; + type uint32; + } + + leaf name { + description "Name of the primitive in the VNF primitive "; + type string; + } + } + } + + leaf user-defined-script { + description + "A user defined script."; + type string; + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/nsr.cli.xml b/modules/core/mano/models/plugins/yang/nsr.cli.xml new file mode 100755 index 0000000..61ea6f0 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsr.cli.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/modules/core/mano/models/plugins/yang/nsr.tailf.yang b/modules/core/mano/models/plugins/yang/nsr.tailf.yang new file mode 100644 index 0000000..8fca452 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsr.tailf.yang @@ -0,0 +1,35 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module nsr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/nsr-annotation"; + prefix "nsr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import nsr { + prefix nsr; + } + + tailf:annotate "/nsr:ns-instance-opdata" { + tailf:callpoint base_show; + } + tailf:annotate "/nsr:exec-ns-config-primitive" { + tailf:actionpoint rw_action; + } + tailf:annotate "/nsr:get-ns-config-primitive-values" { + tailf:actionpoint rw_action; + } +} diff --git a/modules/core/mano/models/plugins/yang/nsr.yang b/modules/core/mano/models/plugins/yang/nsr.yang new file mode 100755 index 0000000..6d7816e --- /dev/null +++ b/modules/core/mano/models/plugins/yang/nsr.yang @@ -0,0 +1,859 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module nsr +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:nsr"; + prefix "nsr"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import vlr { + prefix "vlr"; + } + + import vld { + prefix "vld"; + } + + import nsd { + prefix "nsd"; + } + + import vnfr { + prefix "vnfr"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + import rw-sdn { + prefix "rwsdn"; + } + + revision 2015-09-10 { + description + "Initial revision. 
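
/* Example (hypothetical, values invented): an NS-level config-primitive
   with one optional parameter group and a user-defined script, per the
   config-primitive list above.

     <config-primitive>
       <name>configure-dns</name>
       <parameter-group>
         <name>tuning</name>
         <mandatory>false</mandatory>
       </parameter-group>
       <user-defined-script>configure_dns.py</user-defined-script>
     </config-primitive>
*/
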
This YANG file defines + the Network Service Record (NSR)"; + reference + "Derived from earlier versions of base YANG files"; + } + + typedef config-states { + type enumeration { + enum init; + enum configuring; + enum configured; + enum failed; + } + } + + container ns-instance-config { + + list nsr { + key "id"; + unique "name"; + + leaf id { + description "Identifier for the NSR."; + type yang:uuid; + } + + leaf name { + description "NSR name."; + type string; + } + + leaf short-name { + description "NSR short name."; + type string; + } + + leaf description { + description "NSR description."; + type string; + } + + leaf nsd-ref { + description "Reference to NSD"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:id"; + } + } + + leaf admin-status { + description + "This is the administrative status of the NS instance"; + + type enumeration { + enum ENABLED; + enum DISABLED; + } + } + + uses manotypes:input-parameter; + } + } + + + grouping vnffgr { + + list vnffgr { + key "id"; + + leaf id { + description "Identifier for the VNFFGR."; + type yang:uuid; + } + + leaf nsd-id { + description + "Network sevice descriptor ID reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:id"; + } + } + + + leaf vnffgd-id-ref { + description "VNFFG descriptor id reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd" + + "[nsd:id = current()/../nsr:nsd-id]" + + "/nsd:vnffgd/nsd:id"; + } + } + + leaf vnffgd-name-ref { + description "VNFFG descriptor name reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd" + + "[nsd:id = current()/../nsr:nsd-id]" + + "/nsd:vnffgd[nsd:id = current()/../vnffgd-id-ref]" + + "/nsd:name"; + } + } + + leaf sdn-account { + description + "The SDN account to use when requesting resources for + this vnffgr"; + type leafref { + path "/rwsdn:sdn-account/rwsdn:name"; + } + } + + leaf operational-status { + description + "The operational status of the VNFFGR instance + init : The VNFFGR has just started. + running : The VNFFGR is in running state. + terminate : The VNFFGR is being terminated. + terminated : The VNFFGR is in the terminated state. + failed : The VNFFGR instantiation failed + "; + + type enumeration { + rwpb:enum-type "VnffgrOperationalStatus"; + enum init; + enum running; + enum terminate; + enum terminated; + enum failed; + } + } + + list rsp { + key "id"; + + leaf id { + description + "Identifier for the RSP."; + type yang:uuid; + } + + leaf name { + description + "Name for the RSP"; + type string; + } + + leaf vnffgd-rsp-id-ref { + description + "Identifier for the VNFFG Descriptor RSP reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:vnffgd" + + "[nsd:id = current()/../../nsr:vnffgd-id-ref]" + + "/nsd:rsp/nsd:id"; + } + } + + leaf vnffgd-rsp-name-ref { + description + "Name for the VNFFG Descriptor RSP reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:vnffgd" + + "[nsd:id = current()/../../nsr:vnffgd-id-ref]" + + "/nsd:rsp[nsd:id=current()/../vnffgd-rsp-id-ref]" + + "/nsd:name"; + } + } + + leaf path-id { + description + "Unique Identifier for the service path"; + type uint32; + } + + list vnfr-connection-point-ref { + key "hop-number"; + leaf hop-number { + description + "Monotonically increasing number to show service path hop + order"; + type uint8; + } + leaf service-function-type { + description + "Type of Service Function. + NOTE: This needs to map with Service Function Type in ODL to + support VNFFG. Service Function Type is manadatory param in ODL + SFC. 
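
/* Example (hypothetical, values invented): a minimal ns-instance-config
   entry instantiating the NSD referenced by nsd-ref.

     <ns-instance-config xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
       <nsr>
         <id>12345678-9abc-def0-1234-56789abcdef0</id>
         <name>example-ns-1</name>
         <nsd-ref>11111111-2222-3333-4444-000000000001</nsd-ref>
         <admin-status>ENABLED</admin-status>
       </nsr>
     </ns-instance-config>
*/
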
This is temporarily set to string for ease of use"; + type string; + } + + leaf member-vnf-index-ref { + type uint64; + } + leaf vnfd-id-ref { + description + "Reference to VNF Descriptor Id"; + type string; + } + leaf vnfr-id-ref { + description + "A reference to a vnfr id"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + leaf vnfr-name-ref { + description + "A reference to a vnfr name"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name"; + } + } + leaf vnfr-connection-point-ref { + description + "A reference to a vnfr connection point."; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr" + + "[vnfr:id = current()/../nsr:vnfr-id-ref]" + + "/vnfr:connection-point/vnfr:name"; + } + } + leaf service-index { + description + "Location within the service path"; + type uint8; + } + container connection-point-params { + leaf mgmt-address { + type inet:ip-address; + } + leaf name { + type string; + } + leaf port-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf vm-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + + container service-function-forwarder { + leaf name { + description + "Service Function Forwarder name"; + type string; + } + leaf ip-address { + description + "Data Plane IP Address of the SFF"; + type inet:ip-address; + } + leaf port { + description + "Data Plane Port of the SFF"; + type inet:port-number; + } + } + } + } + } + } + + container ns-instance-opdata { + config false; + + list nsr { + key "ns-instance-config-ref"; + + leaf ns-instance-config-ref { + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + leaf name-ref { + description "Network service name reference"; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:name"; + } + } + + leaf nsd-name-ref { + description "Network service descriptor name reference"; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:name"; + } + } + + + leaf create-time { + description + "Creation timestamp of this Network Service. + The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + + type uint32; + } + + list connection-point { + description + "List for external connection points. + Each NS has one or more external connection points. + As the name implies that external connection points + are used for connecting the NS to other NS or to + external networks. Each NS exposes these connection + points to the orchestrator. 
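
/* Example (hypothetical, values invented): the connection-point-params
   and service-function-forwarder containers above, filled in for one
   hop of a rendered service path.

     <connection-point-params>
       <mgmt-address>192.0.2.30</mgmt-address>
       <name>vnf1-cp0</name>
       <port-id>port-0001</port-id>
       <vm-id>vm-0001</vm-id>
       <address>203.0.113.5</address>
       <port>5000</port>
     </connection-point-params>
     <service-function-forwarder>
       <name>sff-1</name>
       <ip-address>192.0.2.20</ip-address>
       <port>6633</port>
     </service-function-forwarder>
*/
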
The orchestrator can + construct network service chains by connecting the + connection points between different NS."; + + key "name"; + leaf name { + description + "Name of the NS connection point."; + type string; + } + + leaf type { + description + "Type of the connection point."; + type manotypes:connection-point-type; + } + } + + list vlr { + key "vlr-ref"; + leaf vlr-ref { + description + "Reference to a VLR record in the VLR catalog"; + type leafref { + path "/vlr:vlr-catalog/vlr:vlr/vlr:id"; + } + } + + + list vnfr-connection-point-ref { + description + "A list of references to connection points."; + key "vnfr-id"; + + leaf vnfr-id { + description "A reference to a vnfr"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + + leaf connection-point { + description + "A reference to a connection point name in a vnfr"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr" + + "[vnfr:id = current()/../nsr:vnfr-id]" + + "/vnfr:connection-point/vnfr:name"; + } + } + } + } + + leaf-list constituent-vnfr-ref { + description + "List of VNFRs that are part of this + network service."; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + + uses vnffgr; + + leaf operational-status { + description + "The operational status of the NS instance + init : The network service has just started. + vl-init-phase : The VLs in the NS are being instantiated. + vnf-init-phase : The VNFs in the NS are being instantiated. + running : The NS is in running state. + terminate : The NS is being terminated. + vnf-terminate-phase : The NS is terminating the VNFs in the NS. + vl-terminate-phase : The NS is terminating the VLs in the NS. + terminated : The NS is in the terminated state. + failed : The NS instantiation failed. + "; + + type enumeration { + enum init; + enum vl-init-phase; + enum vnf-init-phase; + enum running; + enum terminate; + enum vnf-terminate-phase; + enum vl-terminate-phase; + enum terminated; + enum failed; + } + } + + leaf config-status { + description + "The configuration status of the NS instance + configuring: At least one of the VNFs in this instance is in configuring state + configured: All the VNFs in this NS instance are configured or config-not-needed state + "; + type config-states; + } + + uses manotypes:monitoring-param; + + list vnf-monitoring-param { + description + "List of VNF monitoring params."; + + key "vnfr-id-ref"; + + leaf vnfr-id-ref { + description + "Reference to vnfr-id"; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id"; + } + } + + uses manotypes:monitoring-param; + } + + list config-agent-job { + key "job-id"; + + leaf job-id { + description "config agent job Identifier for the NS."; + type uint64; + } + + leaf job-name { + description "Config agent job name"; + type string; + } + + leaf job-status { + description + "Job status to be set based on each VNF primitive execution, + pending - if at least one VNF is in pending state + and remaining VNFs are in success state. + Success - if all VNF executions are in success state + failure - if one of the VNF executions is failure"; + type enumeration { + enum pending; + enum success; + enum failure; + } + } + + list vnfr { + key "id"; + leaf id { + description "Identifier for the VNFR."; + type yang:uuid; + } + leaf vnf-job-status { + description + "Job status to be set based on each VNF primitive execution, + pending - if at least one primitive is in pending state + and remaining primitives are in success state. 
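
/* Example (hypothetical, values invented): operational state as it
   might be reported under ns-instance-opdata for a running, fully
   configured network service.

     <ns-instance-opdata xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
       <nsr>
         <ns-instance-config-ref>12345678-9abc-def0-1234-56789abcdef0</ns-instance-config-ref>
         <operational-status>running</operational-status>
         <config-status>configured</config-status>
       </nsr>
     </ns-instance-opdata>
*/
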
+ Success - if all primitive executions are in success state + failure - if one of the primitive executions is failure"; + type enumeration { + enum pending; + enum success; + enum failure; + } + } + list primitive { + key "name"; + leaf name { + description "the name of the primitive"; + type string; + } + leaf execution-id { + description "Execution id of the primitive"; + type string; + } + leaf execution-status { + description "status of the Execution"; + type enumeration { + enum pending; + enum success; + enum failure; + } + } + leaf execution-error-details { + description "Error details if execution-status is failure"; + type string; + } + } + } + } + } + } + + + rpc get-ns-config-primitive-values { + description "Executes a VNF configuration primitive"; + input { + leaf nsr_id_ref { + description "Reference to NSR ID ref"; + mandatory true; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + leaf name { + description "Name of the NS config primitive group"; + mandatory true; + type string; + } + } + + output { + list ns-parameter { + description "Automatically generated parameter"; + key "name"; + + leaf name { + description "Parameter name which should be pulled from a parameter pool"; + type string; + } + leaf value { + description "Automatically generated value"; + type string; + } + } + + list ns-parameter-group { + description "Automatically generated parameters in parameter group"; + key "name"; + leaf name { + description "Parameter group name"; + type string; + } + list parameter { + description "Automatically generated group parameter"; + key "name"; + + leaf name { + description "Parameter name which should be pulled from a parameter pool"; + type string; + } + leaf value { + description "Automatically generated value"; + type string; + } + } + } + + list vnf-primitive-group { + description + "List of configuration primitives grouped by VNF."; + + key "member-vnf-index-ref"; + leaf member-vnf-index-ref { + description + "Reference to member-vnf within constituent-vnfds"; + type uint64; + } + + leaf vnfd-id-ref { + description + "A reference to a vnfd. This is a + leafref to path: + ../../../../nsd:constituent-vnfd + + [nsd:id = current()/../nsd:id-ref] + + /nsd:vnfd-id-ref + NOTE: An issue with confd is preventing the + use of xpath. Seems to be an issue with leafref + to leafref, whose target is in a different module. 
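
/* Example (hypothetical, values invented): invoking the
   get-ns-config-primitive-values rpc above; the <rpc> element is the
   standard NETCONF base-1.0 envelope.

     <rpc message-id="101" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
       <get-ns-config-primitive-values xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
         <nsr_id_ref>12345678-9abc-def0-1234-56789abcdef0</nsr_id_ref>
         <name>configure-dns</name>
       </get-ns-config-primitive-values>
     </rpc>
*/
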
+ Once that is resovled this will switched to use + leafref"; + + type string; + } + + list primitive { + key "index"; + leaf index { + description "Index of this primitive"; + type uint32; + } + + leaf name { + description "Name of the primitive associated with a value pool"; + type string; + } + + list parameter { + description "Automatically generated parameter"; + key "name"; + + leaf name { + description "Parameter name which should be pulled from a parameter pool"; + type string; + } + leaf value { + description "Automatically generated value"; + type string; + } + } + } + } + } + } + + + + rpc exec-ns-config-primitive { + description "Executes a NS configuration primitive or script"; + + input { + leaf name { + description "Name of the primitive"; + type string; + } + + leaf nsr_id_ref { + description "Reference to NSR ID ref"; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + list parameter { + description + "List of NS Primitive parameters"; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf value { + description + "Value associated with the name."; + type string; + } + } + + list parameter-group { + description + "List of NS Primitive parameter groups"; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + list parameter { + description + "List of NS parameter group parameters"; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf value { + description + "Value associated with the name."; + type string; + } + } + } + + list vnf-list { + description + "List of VNFs whose primitives are being set."; + key "member_vnf_index_ref"; + + leaf member_vnf_index_ref { + description "Member VNF index"; + type uint64; + } + + leaf vnfr-id-ref { + description + "A reference to a vnfr. This is a + leafref to path"; + type yang:uuid; + } + + list vnf-primitive { + description + "List of configuration primitives supported by the + configuration agent for this VNF."; + key "index"; + + leaf index { + description + "index of the configuration primitive."; + type uint32; + } + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + list parameter { + description + "List of parameters to the configuration primitive."; + key "name"; + leaf name { + description + "Name of the parameter."; + type string; + } + + leaf value { + description + "Value associated with the name."; + type string; + } + } + } + } + leaf user-defined-script { + description + "A user defined script."; + type string; + } + } + output { + leaf job-id { + description "Job identifier for this RPC"; + type uint64; + } + + leaf name { + description "Name of the config"; + type string; + } + + leaf nsr_id_ref { + description "Reference to NSR ID ref"; + type leafref { + path "/nsr:ns-instance-config/nsr:nsr/nsr:id"; + } + } + + list vnf-out-list { + description + "List of VNFs whose primitives were set."; + key "member_vnf_index_ref"; + + leaf member_vnf_index_ref { + description "Member VNF index"; + type uint64; + } + leaf vnfr-id-ref { + description + "A reference to a vnfr. 
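
/* Example (hypothetical, values invented): invoking
   exec-ns-config-primitive with one NS-level parameter, per the rpc
   input defined above; the <rpc> element is the standard NETCONF
   base-1.0 envelope.

     <rpc message-id="102" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
       <exec-ns-config-primitive xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
         <name>configure-dns</name>
         <nsr_id_ref>12345678-9abc-def0-1234-56789abcdef0</nsr_id_ref>
         <parameter>
           <name>dns-server</name>
           <value>198.51.100.53</value>
         </parameter>
       </exec-ns-config-primitive>
     </rpc>
*/
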
This is a + leafref to path"; + type yang:uuid; + } + + list vnf-out-primitive { + description + "List of configuration primitives supported by the + configuration agent for this VNF."; + key "index"; + + leaf index { + description + "index of the configuration primitive."; + type uint32; + } + + leaf name { + description + "Name of the configuration primitive."; + type string; + } + + leaf execution-id { + description "Execution id of this primitive"; + type string; + } + + leaf execution-status { + description "Status of the execution of this primitive"; + type string; + } + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/odl-network-topology.yang b/modules/core/mano/models/plugins/yang/odl-network-topology.yang new file mode 100644 index 0000000..ed15585 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/odl-network-topology.yang @@ -0,0 +1,347 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module odl-network-topology { + yang-version 1; + namespace "urn:TBD:params:xml:ns:yang:network-topology"; + // replace with IANA namespace when assigned + prefix "nt"; + + import ietf-inet-types { prefix "inet"; } + + organization "TBD"; + + contact "WILL-BE-DEFINED-LATER"; + + description + "This module defines a model for the topology of a network. + Key design decisions are as follows: + A topology consists of a set of nodes and links. + Links are point-to-point and unidirectional. + Bidirectional connections need to be represented through + two separate links. + Multipoint connections, broadcast domains etc can be represented + through a hierarchy of nodes, then connecting nodes at + upper layers of the hierarchy."; + + revision 2013-10-21 { + description + "Initial revision."; + } + + typedef topology-id { + type inet:uri; + description + "An identifier for a topology."; + } + + typedef node-id { + type inet:uri; + description + "An identifier for a node in a topology. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same node in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in separate + datastores. An implementation MAY choose to capture semantics + in the identifier, for example to indicate the type of node + and/or the type of topology that the node is a part of."; + } + + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in separate + datastores. An implementation MAY choose to capture semantics + in the identifier, for example to indicate the type of link + and/or the type of topology that the link is a part of."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. + The identifier may be opaque. + The identifier SHOULD be chosen such that the same TP in a + real network topology will always be identified through the + same identifier, even if the model is instantiated in separate + datastores. 
An implementation MAY choose to capture semantics + in the identifier, for example to indicate the type of TP + and/or the type of node and topology that the TP is a part of."; + } + + typedef tp-ref { + type leafref { + path "/network-topology/topology/node/termination-point/tp-id"; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + typedef topology-ref { + type leafref { + path "/network-topology/topology/topology-id"; + } + description + "A type for an absolute reference a topology instance."; + } + + typedef node-ref { + type leafref { + path "/network-topology/topology/node/node-id"; + } + description + + "A type for an absolute reference to a node instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + + typedef link-ref { + type leafref { + path "/network-topology/topology/link/link-id"; + } + description + "A type for an absolute reference a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + + grouping tp-attributes { + description + "The data objects needed to define a termination point. + (This only includes a single leaf at this point, used + to identify the termination point.) + Provided in a grouping so that in addition to the datastore, + the data can also be included in notifications."; + leaf tp-id { + type tp-id; + } + leaf-list tp-ref { + type tp-ref; + config false; + description + "The leaf list identifies any termination points that the + termination point is dependent on, or maps onto. + Those termination points will themselves be contained + in a supporting node. + This dependency information can be inferred from + the dependencies between links. For this reason, + this item is not separately configurable. Hence no + corresponding constraint needs to be articulated. + The corresponding information is simply provided by the + implementing system."; + } + } + + grouping node-attributes { + description + "The data objects needed to define a node. + The objects are provided in a grouping so that in addition to + the datastore, the data can also be included in notifications + as needed."; + + leaf node-id { + type node-id; + description + "The identifier of a node in the topology. + A node is specific to a topology to which it belongs."; + } + list supporting-node { + description + "This list defines vertical layering information for nodes. + It allows to capture for any given node, which node (or nodes) + in the corresponding underlay topology it maps onto. + A node can map to zero, one, or more nodes below it; + accordingly there can be zero, one, or more elements in the list. + If there are specific layering requirements, for example + specific to a particular type of topology that only allows + for certain layering relationships, the choice + below can be augmented with additional cases. + A list has been chosen rather than a leaf-list in order + to provide room for augmentations, e.g. 
for + statistics or priorization information associated with + supporting nodes."; + // This is not what was published in the initial draft, + // added topology-ref leaf and added it to the key + key "topology-ref node-ref"; + leaf topology-ref { + type topology-ref; + } + leaf node-ref { + type node-ref; + } + } + } + + grouping link-attributes { + // This is a grouping, not defined inline with the link definition itself, + // so it can be included in a notification, if needed + leaf link-id { + type link-id; + description + "The identifier of a link in the topology. + A link is specific to a topology to which it belongs."; + } + container source { + leaf source-node { + mandatory true; + type node-ref; + description + "Source node identifier, must be in same topology."; + } + leaf source-tp { + type tp-ref; + description + "Termination point within source node that terminates the link."; + + } + } + container destination { + leaf dest-node { + mandatory true; + type node-ref; + description + "Destination node identifier, must be in same topology."; + } + leaf dest-tp { + type tp-ref; + description + "Termination point within destination node that terminates the link."; + } + } + list supporting-link { + key "link-ref"; + leaf link-ref { + type link-ref; + } + } + } + + + container network-topology { + list topology { + description " + This is the model of an abstract topology. + A topology contains nodes and links. + Each topology MUST be identified by + unique topology-id for reason that a network could contain many + topologies. + "; + key "topology-id"; + leaf topology-id { + type topology-id; + description " + It is presumed that a datastore will contain many topologies. To + distinguish between topologies it is vital to have UNIQUE + topology identifiers. + "; + } + leaf server-provided { + type boolean; + config false; + description " + Indicates whether the topology is configurable by clients, + or whether it is provided by the server. This leaf is + + populated by the server implementing the model. + It is set to false for topologies that are created by a client; + it is set to true otherwise. If it is set to true, any + attempt to edit the topology MUST be rejected. + "; + } + container topology-types { + description + "This container is used to identify the type, or types + (as a topology can support several types simultaneously), + of the topology. + Topology types are the subject of several integrity constraints + that an implementing server can validate in order to + maintain integrity of the datastore. + Topology types are indicated through separate data nodes; + the set of topology types is expected to increase over time. + To add support for a new topology, an augmenting module + needs to augment this container with a new empty optional + container to indicate the new topology type. + The use of a container allows to indicate a subcategorization + of topology types. + The container SHALL NOT be augmented with any data nodes + that serve a purpose other than identifying a particular + topology type. + "; + } + list underlay-topology { + key "topology-ref"; + leaf topology-ref { + type topology-ref; + } + // a list, not a leaf-list, to allow for potential augmentation + // with properties specific to the underlay topology, + // such as statistics, preferences, or cost. 
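
/* Example (hypothetical, values invented): a minimal topology instance
   using the network-topology container above and the node and link
   lists that follow -- one topology with two nodes and a single
   unidirectional link between them.

     <network-topology xmlns="urn:TBD:params:xml:ns:yang:network-topology">
       <topology>
         <topology-id>example:topo-1</topology-id>
         <node>
           <node-id>example:node-1</node-id>
           <termination-point><tp-id>example:node-1:tp-0</tp-id></termination-point>
         </node>
         <node>
           <node-id>example:node-2</node-id>
           <termination-point><tp-id>example:node-2:tp-0</tp-id></termination-point>
         </node>
         <link>
           <link-id>example:link-1</link-id>
           <source><source-node>example:node-1</source-node></source>
           <destination><dest-node>example:node-2</dest-node></destination>
         </link>
       </topology>
     </network-topology>
*/
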
+ description + "Identifies the topology, or topologies, that this topology + is dependent on."; + } + + list node { + description "The list of network nodes defined for the topology."; + key "node-id"; + uses node-attributes; + must "boolean(../underlay-topology[*]/node[./supporting-nodes/node-ref])"; + // This constraint is meant to ensure that a referenced node is in fact + // a node in an underlay topology. + list termination-point { + description + + "A termination point can terminate a link. + Depending on the type of topology, a termination point could, + for example, refer to a port or an interface."; + key "tp-id"; + uses tp-attributes; + } + } + + list link { + description " + A Network Link connects a by Local (Source) node and + a Remote (Destination) Network Nodes via a set of the + nodes' termination points. + As it is possible to have several links between the same + source and destination nodes, and as a link could potentially + be re-homed between termination points, to ensure that we + would always know to distinguish between links, every link + is identified by a dedicated link identifier. + Note that a link models a point-to-point link, not a multipoint + link. + Layering dependencies on links in underlay topologies are + not represented as the layering information of nodes and of + termination points is sufficient. + "; + key "link-id"; + uses link-attributes; + must "boolean(../underlay-topology/link[./supporting-link])"; + // Constraint: any supporting link must be part of an underlay topology + must "boolean(../node[./source/source-node])"; + // Constraint: A link must have as source a node of the same topology + must "boolean(../node[./destination/dest-node])"; + // Constraint: A link must have as source a destination of the same topology + must "boolean(../node/termination-point[./source/source-tp])"; + // Constraint: The source termination point must be contained in the source node + must "boolean(../node/termination-point[./destination/dest-tp])"; + // Constraint: The destination termination point must be contained + // in the destination node + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/pnfd.yang b/modules/core/mano/models/plugins/yang/pnfd.yang new file mode 100755 index 0000000..077af1e --- /dev/null +++ b/modules/core/mano/models/plugins/yang/pnfd.yang @@ -0,0 +1,92 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module pnfd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:pnfd"; + prefix "pnfd"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file defines + the Physical Network Function Descriptor (PNFD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container pnfd-catalog { + + list pnfd { + key "id"; + + leaf id { + description "Identifier for the PNFD."; + type yang:uuid; + } + + leaf name { + description "PNFD name."; + type string; + } + + leaf short-name { + description "PNFD short name."; + type string; + } + + leaf vendor { + description "Vendor of the PNFD."; + type string; + } + + leaf description { + description "Description of the PNFD."; + type string; + } + + leaf version { + description "Version of the PNFD"; + type string; + } + + list connection-point { + description + "List for external connection points. 
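
/* Example (hypothetical, values invented): a minimal pnfd entry; the
   connection-point list (keyed by id, with a cp-type) continues just
   below and is omitted here.

     <pnfd-catalog xmlns="urn:ietf:params:xml:ns:yang:nfvo:pnfd">
       <pnfd>
         <id>99999999-8888-7777-6666-555555555555</id>
         <name>example-pnf</name>
         <vendor>ExampleCo</vendor>
         <version>1.0</version>
       </pnfd>
     </pnfd-catalog>
*/
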
Each PNF has one or more external + connection points."; + key "id"; + leaf id { + description + "Identifier for the external connection points"; + type uint64; + } + + leaf cp-type { + description + "Type of the connection point."; + type manotypes:connection-point-type; + } + } + } + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang b/modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang new file mode 100644 index 0000000..d4a7c1e --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsd-annotation"; + prefix "rw-nsd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-nsd { + prefix rw-nsd; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-nsd.yang b/modules/core/mano/models/plugins/yang/rw-nsd.yang new file mode 100755 index 0000000..b53a627 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsd.yang @@ -0,0 +1,45 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsd +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsd"; + prefix "rw-nsd"; + + import nsd { + prefix "nsd"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file augments + the base MANO NSD"; + reference + "Derived from earlier versions of base YANG files"; + } + + augment /nsd:nsd-catalog/nsd:nsd { + uses manotypes:control-param; + uses manotypes:action-param; + leaf meta { + description + "Any meta-data needed by the UI"; + type string; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang b/modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang new file mode 100644 index 0000000..c3bb827 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsr.tailf.yang @@ -0,0 +1,38 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsr-annotation"; + prefix "rw-nsr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + + import rw-nsr { + prefix rw-nsr; + } + + import nsr { + prefix nsr; + } + + tailf:annotate "/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" { + tailf:callpoint base_show; + } + + tailf:annotate "/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-nsr.yang b/modules/core/mano/models/plugins/yang/rw-nsr.yang new file mode 100755 index 0000000..4231e80 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-nsr.yang @@ -0,0 +1,287 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-nsr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsr"; + prefix "rw-nsr"; + + import mano-types { + prefix "manotypes"; + } + + import nsr { + prefix "nsr"; + } + + import nsd { + prefix "nsd"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-09-10 { + description + "Initial revision. 
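
/* Example (hypothetical, values invented): the rw-nsd augment above
   adds UI metadata to a base nsd entry; serialized, the augmented leaf
   is namespace-qualified:

     <nsd>
       <id>11111111-2222-3333-4444-000000000001</id>
       <meta xmlns="http://riftio.com/ns/riftware-1.0/rw-nsd">ui-layout-hints</meta>
     </nsd>
*/
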
This YANG file augments
+      the base MANO NSR";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  grouping operational-events {
+    list operational-events {
+      key "id";
+      description
+        "Recent operational events for this network service.
+        Though the model does not impose any restriction on the number of events,
+        the operational events will be limited to the most recent 10";
+
+      leaf id {
+        description "The id of the instance";
+        type uint64;
+      }
+
+      leaf timestamp {
+        description
+          "The timestamp of this event expressed as seconds since
+          unix epoch - 1970-01-01T00:00:00Z";
+        type uint32;
+      }
+      leaf event {
+        description "Short description of the event";
+        type string;
+      }
+      leaf description {
+        description
+          "The description of this event";
+        type string;
+      }
+    }
+  }
+
+  grouping nsd-ref-count {
+    list nsd-ref-count {
+      key "nsd-id-ref";
+      description "This table maintains the number of NSRs used by each NSD";
+
+      leaf nsd-id-ref {
+        description "Reference to NSD";
+        type leafref {
+          path "/nsd:nsd-catalog/nsd:nsd/nsd:id";
+        }
+      }
+      leaf instance-ref-count {
+        description
+          "Reference count for the number of NSRs referring to this NSD.
+          Every NS record instantiated using this descriptor takes
+          a reference on the NSD and releases the reference when the
+          network service is terminated. This descriptor cannot be
+          deleted while this counter is non-zero";
+        type uint64;
+      }
+    }
+  }
+
+  augment /nsr:ns-instance-config/nsr:nsr {
+    leaf cloud-account {
+      description
+        "The configured cloud account in which the NSR is instantiated.
+        All VDUs, Virtual Links, and provider networks will be requested
+        using the cloud account's associated CAL instance";
+      type leafref {
+        path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+      }
+    }
+
+    leaf om-datacenter {
+      description
+        "Openmano datacenter name to use when instantiating
+        the network service. This is only used when openmano
+        is selected as the cloud account. This should be superseded
+        by multiple cloud accounts when that becomes available.";
+      type string;
+    }
+  }
+
+  augment /nsr:ns-instance-opdata/nsr:nsr {
+    uses manotypes:action-param;
+    uses manotypes:control-param;
+
+    leaf cloud-account {
+      description
+        "The configured cloud account in which the NSR is instantiated.
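
/* Example (hypothetical, values invented): one nsd-ref-count entry as
   it would appear once this grouping is used under ns-instance-opdata
   (see the augment further below) -- two NSRs currently reference the
   NSD.

     <nsd-ref-count xmlns="http://riftio.com/ns/riftware-1.0/rw-nsr">
       <nsd-id-ref>11111111-2222-3333-4444-000000000001</nsd-id-ref>
       <instance-ref-count>2</instance-ref-count>
     </nsd-ref-count>
*/
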
+ All VDU's, Virtual Links, and provider networks will be requested + using the cloud-account's associated CAL instance"; + type leafref { + path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name"; + } + } + + container nfvi-metrics { + container vm { + leaf label { + description + "Label to show in UI"; + type string; + default "VM"; + } + + leaf active-vm { + description + "The number of active VMs."; + type uint64; + } + + leaf inactive-vm { + description + "The number of inactive VMs."; + type uint64; + } + } + + uses manotypes:nfvi-metrics; + } + + container epa-param { + container ovs-acceleration { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "OVS ACCELERATION"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + } + + container ovs-offload { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "OVS OFFLOAD"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + + } + + container ddio { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "DDIO"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + + } + + container cat { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "CAT"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + } + + container cmt { + leaf label { + description + "Label to show in UI for the param"; + type string; + default "CMT"; + } + + leaf vm { + description + "Number of VMs with the EPA attribute"; + type uint64; + } + + leaf unit { + description + "Unit label to show in the UI"; + type string; + default "vms"; + } + + } + } + uses operational-events; + } + + augment /nsr:ns-instance-opdata { + uses nsd-ref-count; + } + + augment /nsr:ns-instance-config { + leaf nfvi-polling-period { + description + "Defines the period (secons) that the NFVI metrics are polled at"; + type uint64; + default 4; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-topology.tailf.yang b/modules/core/mano/models/plugins/yang/rw-topology.tailf.yang new file mode 100644 index 0000000..de33abe --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-topology.tailf.yang @@ -0,0 +1,34 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-topology-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-topology"; + prefix "rw-topology-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + +} diff --git a/modules/core/mano/models/plugins/yang/rw-topology.yang b/modules/core/mano/models/plugins/yang/rw-topology.yang new file mode 100755 index 0000000..e2276ae --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-topology.yang @@ -0,0 +1,114 @@ + +/* + * + * (c) Copyright 
RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-topology { + namespace "http://riftio.com/ns/riftware-1.0/rw-topology"; + prefix rw-topology; + + import ietf-inet-types {prefix inet;} + import ietf-network {prefix nw;} + import ietf-network-topology {prefix nt;} + import ietf-l2-topology {prefix l2t;} + + revision "2015-10-20" { + description "Initial revision of IP level addressing for L2 host topology"; + } + + grouping ip-address-group { + description "IP addresses if present for L2 termination points"; + container ip-attributes { + description "L2 termination points containing IP addresses"; + list ip-addresses { + key ip; + leaf ip { + type inet:ip-address; + description "IPv4 or IPv6 address"; + } + } + } + } // grouping ip-address-group + + + grouping rw-network-attributes { + description "RW Topology scope attributes"; + container rw-network-attributes { + description "Containing RW network attributes"; + leaf name { + type string; + description "Name of the RW Topology network"; + } + leaf sdn-account-name { + type string; + description "Name of the SDN account from which topology is got"; + } + } + } + + grouping rw-node-attributes { + description "RW node attributes"; + container rw-node-attributes { + description "Containing RW node attributes"; + leaf name { + type string; + description "Node name"; + } + leaf ovs-bridge-name { + type string; + description "Name of OVS bridge"; + } + } + } + + grouping rw-link-attributes { + description "RW link attributes"; + container rw-link-attributes { + description "Containing RW link attributes"; + leaf name { + type string; + description "Link name"; + } + } + } + + grouping rw-termination-point-attributes { + description "RW termination point attributes"; + container rw-termination-point-attributes { + description "Containing RW TP attributes"; + leaf description { + type string; + description "Port description"; + } + uses ip-address-group; + } + } + + augment "/nw:network" { + description + "Configuration parameters for the RW network + as a whole"; + uses rw-network-attributes; + } + + augment "/nw:network/nw:node" { + description + "Configuration parameters for RW at the node + level"; + uses rw-node-attributes; + } + + augment "/nw:network/nt:link" { + description "Augment RW topology link information"; + uses rw-link-attributes; + } + + augment "/nw:network/nw:node/nt:termination-point" { + description + "Augment RW topology termination point configuration"; + uses rw-termination-point-attributes; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vld.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vld.tailf.yang new file mode 100644 index 0000000..30840fd --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vld.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vld-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vld-annotation"; + prefix "rw-vld-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vld { + prefix rw-vld; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vld.yang b/modules/core/mano/models/plugins/yang/rw-vld.yang new file mode 100755 index 0000000..4525a6f --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vld.yang @@ -0,0 +1,27 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vld +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vld"; + prefix "rw-vld"; + + 
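
/* Example (hypothetical, values invented): the rw-topology module above
   augments ietf-network; serialized, the augmentation appears
   namespace-qualified under the base <network> element (the
   ietf-network namespace itself is not part of this hunk).

     <network>
       <rw-network-attributes xmlns="http://riftio.com/ns/riftware-1.0/rw-topology">
         <name>lab-net</name>
         <sdn-account-name>odl-account-1</sdn-account-name>
       </rw-network-attributes>
     </network>
*/
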
import vld { + prefix "vld"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file augments + the base MANO VLD"; + reference + "Derived from earlier versions of base YANG files"; + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang new file mode 100644 index 0000000..bbd4238 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vlr.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vlr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vlr-annotation"; + prefix "rw-vlr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vlr { + prefix rw-vlr; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vlr.yang b/modules/core/mano/models/plugins/yang/rw-vlr.yang new file mode 100755 index 0000000..4d5c125 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vlr.yang @@ -0,0 +1,55 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vlr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vlr"; + prefix "rw-vlr"; + + import mano-types { + prefix "manotypes"; + } + + import vlr { + prefix "vlr"; + } + + import rw-cloud { + prefix "rwcloud"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-09-30 { + description + "Initial revision. This YANG file augments + the base MANO VNFD"; + reference + "Derived from earlier versions of base YANG files"; + } + + augment /vlr:vlr-catalog/vlr:vlr { + leaf cloud-account { + description + "The cloud account to use when requesting resources for + this vlr"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + leaf network_pool { + description "The network pool the resource was allocated from."; + type string; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang new file mode 100644 index 0000000..58049ee --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd-annotation"; + prefix "rw-vnfd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vnfd { + prefix rw-vnfd; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vnfd.yang b/modules/core/mano/models/plugins/yang/rw-vnfd.yang new file mode 100755 index 0000000..f5c0947 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfd.yang @@ -0,0 +1,105 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfd +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd"; + prefix "rw-vnfd"; + + import vnfd { + prefix "vnfd"; + } + + import rwvcs-types { + prefix "rwvcstypes"; + } + + import rw-pb-ext { prefix "rwpb"; } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. 
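
/* Example (hypothetical, values invented): the rw-vlr augment above
   attaches a cloud account and network pool to a base vlr record (the
   base vlr list itself is defined in vlr.yang, not in this hunk).

     <vlr>
       <cloud-account xmlns="http://riftio.com/ns/riftware-1.0/rw-vlr">openstack-site-a</cloud-account>
       <network_pool xmlns="http://riftio.com/ns/riftware-1.0/rw-vlr">default-pool</network_pool>
     </vlr>
*/
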
This YANG file augments + the base MANO VNFD"; + reference + "Derived from earlier versions of base YANG files"; + } + + augment /vnfd:vnfd-catalog/vnfd:vnfd { + uses manotypes:control-param; + uses manotypes:action-param; + leaf meta { + description + "Any meta-data needed by the UI"; + type string; + } + list component { + description + "This section defines the RIFT.ware + virtual components"; + key "component-name"; + rwpb:msg-new VcsComponent; + rwpb:application-request-point; + + leaf component-name { + description ""; + type string; + } + + leaf component-type { + description ""; + type rwvcstypes:component_type; + mandatory true; + } + + choice component { + case rwvcs-rwcollection { + uses rwvcstypes:rwvcs-rwcollection; + } + case rwvcs-rwvm { + uses rwvcstypes:rwvcs-rwvm; + } + case rwvcs-rwproc { + uses rwvcstypes:rwvcs-rwproc; + } + case native-proc { + uses rwvcstypes:native-proc; + } + case rwvcs-rwtasklet { + uses rwvcstypes:rwvcs-rwtasklet; + } + } + } // list component + } + + augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu { + leaf vcs-component-ref { + description + "This defines the software components using the + RIFT.ware Virtual Component System (VCS). This + also allows specifying a state machine during + the VM startup. + NOTE: This is an significant addition to MANO, + since MANO doesn't clearly specify a method to + identify various software components in a VM. + Also using a state machine is not something that + is well described in MANO."; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/rw-vnfd:component/rw-vnfd:component-name"; + } + } + } +} +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang b/modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang new file mode 100644 index 0000000..a3d2756 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfr.tailf.yang @@ -0,0 +1,37 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfr-annotation"; + prefix "rw-vnfr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-vnfr { + prefix rw-vnfr; + } + + import vnfr { + prefix vnfr; + } + + tailf:annotate "/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" { + tailf:callpoint base_show; + } + + tailf:annotate "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/rw-vnfr.yang b/modules/core/mano/models/plugins/yang/rw-vnfr.yang new file mode 100755 index 0000000..df67ce5 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/rw-vnfr.yang @@ -0,0 +1,261 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vnfr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfr"; + prefix "rw-vnfr"; + + import mano-types { + prefix "manotypes"; + } + + import rw-pb-ext { prefix "rwpb"; } + + import vnfr { + prefix "vnfr"; + } + + import vnfd { + prefix "vnfd"; + } + + import rw-cloud { + prefix "rwcloud"; + } + + import rwvcs-types { + prefix "rwvcstypes"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-09-10 { + description + "Initial revision. 
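
/* Example (hypothetical, values invented): linking a VDU to a VCS
   component via the vcs-component-ref augment above; a component named
   "lead-vm" would have to be declared in the rw-vnfd:component list of
   the same VNFD.

     <vdu>
       <vcs-component-ref xmlns="http://riftio.com/ns/riftware-1.0/rw-vnfd">lead-vm</vcs-component-ref>
     </vdu>
*/
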
This YANG file augments + the base MANO VNFD"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping vnfr-operational-events { + list operational-events { + key "id"; + description + "Recent operational events for VNFR + Though the model does not impose any restrictions on the numbe of events, + the max operational events will be limited to the most recent 10"; + + leaf id { + description "The id of the instance"; + type uint64; + } + + leaf timestamp { + description + "The timestamp of this event expressed as seconds since + unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + leaf event { + description "The event"; + type enumeration { + rwpb:enum-type "VnfrOperationalEvent"; + enum instantiate-rcvd; + enum vl-inited; + enum vnf-inited; + enum running; + enum terminate-rcvd; + enum vnf-terminated; + enum vl-terminated; + enum terminated; + } + } + leaf description { + description + "The description of this event"; + type string; + } + } + } + + grouping vdur-operational-events { + list operational-events { + key "id"; + description + "Recent operational events for VDUR + Though the model does not impose any restrictions on the numbe of events, + the max operational events will be limited to the most recent 10"; + + leaf id { + description "The id of the instance"; + type uint64; + } + + leaf timestamp { + description + "The timestamp of this event expressed as seconds since + unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + leaf event { + description "The event"; + type enumeration { + rwpb:enum-type "VdurOperationalEvent"; + enum instantiate-rcvd; + enum vm-allocation-requested; + enum running; + enum terminate-rcvd; + enum vm-terminate-requested; + enum terminated; + } + } + leaf description { + description + "The description of this event"; + type string; + } + } + } + + augment /vnfr:vnfr-catalog/vnfr:vnfr { + uses manotypes:action-param; + uses manotypes:control-param; + + leaf cloud-account { + description + "The cloud account to use when requesting resources for + this vnf"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + container nfvi-metrics { + container vm { + leaf label { + description + "Label to show in UI"; + type string; + default "VM"; + } + + leaf active-vm { + description + "The number of active VMs."; + type uint64; + } + + leaf inactive-vm { + description + "The number of inactive VMs."; + type uint64; + } + } + + uses manotypes:nfvi-metrics; + } + + list component { + description + "This section defines the RIFT.ware + virtual components"; + key "component-name"; + rwpb:msg-new VcsComponentOp; + rwpb:application-request-point; + + leaf component-name { + description ""; + type string; + } + + leaf component-type { + description ""; + type rwvcstypes:component_type; + mandatory true; + } + + choice component { + case rwvcs-rwcollection { + uses rwvcstypes:rwvcs-rwcollection; + } + case rwvcs-rwvm { + uses rwvcstypes:rwvcs-rwvm; + } + case rwvcs-rwproc { + uses rwvcstypes:rwvcs-rwproc; + } + case native-proc { + uses rwvcstypes:native-proc; + } + case rwvcs-rwtasklet { + uses rwvcstypes:rwvcs-rwtasklet; + } + } + } // list component + uses vnfr-operational-events; + } + + augment /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur { + leaf vm-pool { + description + "The pool from which this vm was allocated from"; + type string; + } + + container nfvi-metrics { + uses manotypes:nfvi-metrics; + } + + leaf vcs-component-ref { + description + "This defines the software components using the + RIFT.ware 
Virtual Component System (VCS). This + also allows specifying a state machine during + the VM startup. + NOTE: This is an significant addition to MANO, + since MANO doesn't clearly specify a method to + identify various software components in a VM. + Also using a state machine is not something that + is well described in MANO."; + type leafref { + path "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:component/rw-vnfr:component-name"; + } + } + uses vdur-operational-events; + } + grouping vnfd-ref-count { + list vnfd-ref-count { + key "vnfd-id-ref"; + description "This table maintains the number of VNFRs used by each VNFD"; + + leaf vnfd-id-ref { + description "Reference to VNFD"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + leaf instance-ref-count { + description + "Reference count for the number of VNFRs refering this VNFD. + Every VNF Record instantiated using this descriptor takes + a reference on the VNFD and releases the reference when the + virtual network service is terminated. This desciptor cannot + be deleted when this counter is non zero"; + type uint64; + } + } + } + augment /vnfr:vnfr-catalog { + uses vnfd-ref-count; + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/vld.tailf.yang b/modules/core/mano/models/plugins/yang/vld.tailf.yang new file mode 100644 index 0000000..4d772e8 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vld.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vld-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vld-annotation"; + prefix "vld-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vld { + prefix vld; + } +} diff --git a/modules/core/mano/models/plugins/yang/vld.yang b/modules/core/mano/models/plugins/yang/vld.yang new file mode 100755 index 0000000..9608dfa --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vld.yang @@ -0,0 +1,129 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vld +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vld"; + prefix "vld"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import vnfd { + prefix "vnfd"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file defines + the Virtual Link Descriptor (VLD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container vld-catalog { + + list vld { + key "id"; + + leaf id { + description "Identifier for the VLD."; + type yang:uuid; + } + + leaf name { + description "Virtual Link Descriptor (VLD) name."; + type string; + } + + leaf short-name { + description "Short name for VLD for UI"; + type string; + } + + leaf vendor { + description "Provider of the VLD."; + type string; + } + + leaf description { + description "Description of the VLD."; + type string; + } + + leaf version { + description "Version of the VLD"; + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + list vnfd-connection-point-ref { + description + "A list of references to connection points."; + key "vnfd-ref member-vnf-index-ref"; + + leaf vnfd-ref { + description "A reference to a vnfd"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + + leaf member-vnf-index-ref { + description + "A reference to the consituent-vnfd id in nsd. + Should have been a leafref to: + '/nsd:nsd-catalog:/nsd:nsd/constituent-vnfd/member-vnf-index-ref'. + Instead using direct leaf to avoid circular reference."; + type uint64; + } + + leaf vnfd-connection-point-ref { + description + "A reference to a connection point name in a vnfd"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd" + + "[vnfd:id = current()/../vld:vnfd-ref]" + + "/vnfd:connection-point/vnfd:name"; + } + } + } + + // replicate for pnfd container here + uses manotypes:provider-network; + } + } +} diff --git a/modules/core/mano/models/plugins/yang/vlr.cli.xml b/modules/core/mano/models/plugins/yang/vlr.cli.xml new file mode 100755 index 0000000..e2e54fa --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vlr.cli.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/modules/core/mano/models/plugins/yang/vlr.tailf.yang b/modules/core/mano/models/plugins/yang/vlr.tailf.yang new file mode 100644 index 0000000..b12a2d7 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vlr.tailf.yang @@ -0,0 +1,28 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vlr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vlr-annotation"; + prefix "vlr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vlr { + prefix vlr; + } + tailf:annotate "/vlr:vlr-catalog" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/vlr.yang b/modules/core/mano/models/plugins/yang/vlr.yang new file mode 100755 index 0000000..89a3299 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vlr.yang @@ -0,0 +1,159 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vlr +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vlr"; + prefix "vlr"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + import vld { + prefix "vld"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file defines + the Virtual Link Record (VLR)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container vlr-catalog { + config false; + + list vlr { + key "id"; + unique "name"; + + leaf id { + description "Identifier for the VLR."; + type yang:uuid; + } + + leaf name { + description "VLR name."; + type string; + } + + leaf res-id { + description "Identifier for resmgr id mapping"; + type yang:uuid; + } + + leaf short-name { + description "Short name for VLR for UI"; + type string; + } + + leaf vendor { + description "Provider of the VLR."; + type string; + } + + leaf description { + description "Description of the VLR."; + type string; + } + + leaf version { + description "Version of the VLR"; + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + leaf create-time { + description + "Creation timestamp of this Virtual Link. + The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + + type uint32; + } + + leaf vld-ref { + description "Reference to VLD"; + type leafref { + path "/vld:vld-catalog/vld:vld/vld:id"; + } + } + + leaf network-id { + description + "Identifier for the allocated network resource."; + type string; + } + + // replicate for pnfd container here + + uses manotypes:provider-network; + + leaf status { + description + "Status of the virtual link record."; + type enumeration { + enum LINK_UP; + enum DEGRADED; + enum LINK_DOWN; + } + } + leaf operational-status { + description + "The operational status of the Virtual Link + init : The VL is in init stat. + vl-alloc-pending : The VL alloc is pending in VIM + running : The VL is up and running in VM + vl-terminate-pending : The VL is being terminated in VIM. + terminated : The VL is terminated in the VM. + failed : The VL instantiation failed in VIM. + "; + + type enumeration { + rwpb:enum-type "VlOperationalStatus"; + enum init; + enum vl-alloc-pending; + enum running; + enum vl-terminate-pending; + enum terminated; + enum failed; + } + } + } + } +} + diff --git a/modules/core/mano/models/plugins/yang/vnfd.tailf.yang b/modules/core/mano/models/plugins/yang/vnfd.tailf.yang new file mode 100644 index 0000000..760d78c --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfd.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfd-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vnfd-annotation"; + prefix "vnfd-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vnfd { + prefix vnfd; + } +} diff --git a/modules/core/mano/models/plugins/yang/vnfd.yang b/modules/core/mano/models/plugins/yang/vnfd.yang new file mode 100755 index 0000000..fe627ff --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfd.yang @@ -0,0 +1,461 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfd"; + prefix "vnfd"; + + import mano-types { + prefix "manotypes"; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import ietf-inet-types { + prefix "inet"; + } + + revision 2015-09-10 { + description + "Initial revision. 
This YANG file defines + the Virtual Network Function (VNF)"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping virtual-interface { + container virtual-interface { + description + "Container for the virtual interface properties"; + + leaf type { + description + "Specifies the type of virtual interface + between VM and host. + VIRTIO : Use the traditional VIRTIO interface. + PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface. + SR-IOV : Use SR-IOV interface. + OM-MGMT : Used to specify openmano mgmt external-connection type"; + + type enumeration { + enum OM-MGMT; + enum PCI-PASSTHROUGH; + enum SR-IOV; + enum VIRTIO; + } + default "VIRTIO"; + } + + leaf vpci { + description + "Specifies the virtual PCI address. Expressed in + the following format dddd:dd:dd.d. For example + 0000:00:12.0. This information can be used to + pass as metadata during the VM creation."; + type string; + } + + leaf bandwidth { + description + "Aggregate bandwidth of the NIC."; + type uint64; + } + } + } + + container vnfd-catalog { + + description + "Virtual Network Function Descriptor (VNFD)."; + + list vnfd { + key "id"; + + leaf id { + description "Identifier for the VNFD."; + type yang:uuid; + } + + leaf name { + description "VNFD name."; + mandatory true; + type string; + } + + leaf short-name { + description "VNFD short name."; + type string; + } + + leaf vendor { + description "Vendor of the VNFD."; + type string; + } + + leaf logo { + description + "Vendor logo for the Virtual Network Function"; + type string; + } + + leaf description { + description "Description of the VNFD."; + type string; + } + + leaf version { + description "Version of the VNFD"; + type string; + } + + container mgmt-interface { + description + "Interface over which the VNF is managed."; + + choice endpoint-type { + description + "Indicates the type of management endpoint."; + + case ip { + description + "Specifies the static IP address for managing the VNF."; + leaf ip-address { + type inet:ip-address; + } + } + + case vdu-id { + description + "Use the default management interface on this VDU."; + leaf vdu-id { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:id"; + } + } + } + + case cp { + description + "Use the ip address associated with this connection point."; + leaf cp { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:connection-point/vnfd:name"; + } + } + } + } + + leaf port { + description + "Port for the management interface."; + type inet:port-number; + } + + choice interface-type { + // TODO: This needs to be moved to NSD. + case netconf { + leaf netconf { + type empty; + } + } + } + + container dashboard-params { + description "Parameters for the VNF dashboard"; + + leaf path { + description "The HTTP path for the dashboard"; + type string; + } + + leaf https { + description "Pick HTTPS instead of HTTP , Default is false"; + type boolean; + } + + leaf port { + description "The HTTP port for the dashboard"; + type inet:port-number; + } + } + } + + list internal-vld { + key "id"; + description + "List of Internal Virtual Link Descriptors (VLD). + The internal VLD describes the basic topology of + the connectivity (e.g. 
E-LAN, E-Line, E-Tree) + between internal VNF components of the system."; + + leaf id { + description "Identifier for the VLD"; + type yang:uuid; + } + + leaf name { + description "Name of the internal VLD"; + type string; + } + + leaf short-name { + description "Short name of the internal VLD"; + type string; + } + + leaf description { + type string; + } + + leaf type { + type manotypes:virtual-link-type; + } + + leaf root-bandwidth { + description + "For ELAN this is the aggregate bandwidth."; + type uint64; + } + + leaf leaf-bandwidth { + description + "For ELAN this is the bandwidth of branches."; + type uint64; + } + + leaf-list internal-connection-point-ref { + type leafref { + path "../../vdu/internal-connection-point/id"; + } + } + + uses manotypes:provider-network; + } + + list connection-point { + key "name"; + description + "List for external connection points. Each VNF has one + or more external connection points. As the name + implies that external connection points are used for + connecting the VNF to other VNFs or to external networks. + Each VNF exposes these connection points to the + orchestrator. The orchestrator can construct network + services by connecting the connection points between + different VNFs. The NFVO will use VLDs and VNFFGs at + the network service level to construct network services."; + + leaf name { + description "Name of the connection point"; + type string; + } + + leaf short-name { + description "Short name of the connection point"; + type string; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + } + + list vdu { + description "List of Virtual Deployment Units"; + key "id"; + + leaf id { + description "Unique id for the VDU"; + type yang:uuid; + } + + leaf name { + description "Unique name for the VDU"; + type string; + } + + leaf description { + description "Description of the VDU."; + type string; + } + + leaf count { + description "Number of instances of VDU"; + type uint64; + } + + leaf mgmt-vpci { + description + "Specifies the virtual PCI address. Expressed in + the following format dddd:dd:dd.d. For example + 0000:00:12.0. This information can be used to + pass as metadata during the VM creation."; + type string; + } + + + uses manotypes:vm-flavor; + uses manotypes:guest-epa; + uses manotypes:vswitch-epa; + uses manotypes:hypervisor-epa; + uses manotypes:host-epa; + + leaf image { + description + "Image name for the software image. + If the image name is found within the VNF packaage it will + be uploaded to all cloud accounts during onboarding process. + Otherwise, the image must be added to the cloud account with + the same name as entered here. + "; + mandatory true; + type string; + } + + leaf image-checksum { + description + "Image md5sum for the software image. + The md5sum, if provided, along with the image name uniquely + identifies an image uploaded to the CAL. + "; + type string; + } + + leaf cloud-init { + description "Content of cloud-init script"; + type string; + } + + list internal-connection-point { + key "id"; + description + "List for internal connection points. Each VNFC + has zero or more internal connection points. + Internal connection points are used for connecting + the VNF components internal to the VNF. 
If a VNF + has only one VNFC, it may not have any internal + connection points."; + + leaf id { + description "Identifier for the internal connection points"; + type yang:uuid; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + + leaf internal-vld-ref { + type leafref { + path "../../../internal-vld/id"; + } + } + } + + list internal-interface { + description + "List of internal interfaces for the VNF"; + key name; + + leaf name { + description + "Name of internal interface. Note that this + name has only local significance to the VDU."; + type string; + } + + leaf vdu-internal-connection-point-ref { + type leafref { + path "../../internal-connection-point/id"; + } + } + uses virtual-interface; + } + + list external-interface { + description + "List of external interfaces for the VNF. + The external interfaces enable sending + traffic to and from VNF."; + key name; + + leaf name { + description + "Name of the external interface. Note that + this name has only local significance."; + type string; + } + + leaf vnfd-connection-point-ref { + description + "Name of the external connection point."; + type leafref { + path "../../../connection-point/name"; + } + } + uses virtual-interface; + } + } + + list vdu-dependency { + description + "List of VDU dependencies."; + + key vdu-source-ref; + leaf vdu-source-ref { + type leafref { + path "../../vdu/id"; + } + } + + leaf vdu-depends-on-ref { + description + "Reference to the VDU that + source VDU depends."; + type leafref { + path "../../vdu/id"; + } + } + } + + leaf service-function-chain { + description "Type of node in Service Function Chaining Architecture"; + + type enumeration { + enum UNAWARE; + enum CLASSIFIER; + enum SF; + enum SFF; + } + default "UNAWARE"; + } + + leaf service-function-type { + description + "Type of Service Function. + NOTE: This needs to map with Service Function Type in ODL to + support VNFFG. Service Function Type is manadatory param in ODL + SFC. This is temporarily set to string for ease of use"; + type string; + } + + uses manotypes:monitoring-param; + } + } +} + +// vim: sw=2 diff --git a/modules/core/mano/models/plugins/yang/vnffgd.yang b/modules/core/mano/models/plugins/yang/vnffgd.yang new file mode 100755 index 0000000..b297569 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnffgd.yang @@ -0,0 +1,71 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnffgd +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vnffgd"; + prefix "vnffgd"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2014-10-27 { + description + "Initial revision. 
This YANG file defines + the VNF Forwarding Graph Descriptor (VNFFGD)"; + reference + "Derived from earlier versions of base YANG files"; + } + + container vnffgd-catalog { + + list vnffgd { + key "id"; + + leaf name { + description "VNF Forwarding Graph Descriptor name."; + type string; + } + + leaf id { + description "Identifier for the VNFFGD."; + type yang:uuid; + } + + leaf provider { + description "Provider of the VNFFGD."; + type string; + } + + leaf description { + description "Description of the VNFFGD."; + type string; + } + + leaf version { + description "Version of the VNFFGD"; + type string; + } + + //TODO: Add more content here + } + } +} diff --git a/modules/core/mano/models/plugins/yang/vnfr.cli.xml b/modules/core/mano/models/plugins/yang/vnfr.cli.xml new file mode 100755 index 0000000..4f0a109 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfr.cli.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/modules/core/mano/models/plugins/yang/vnfr.tailf.yang b/modules/core/mano/models/plugins/yang/vnfr.tailf.yang new file mode 100644 index 0000000..a1f83aa --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfr.tailf.yang @@ -0,0 +1,29 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/vnfr-annotation"; + prefix "vnfr-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import vnfr { + prefix vnfr; + } + + tailf:annotate "/vnfr:vnfr-catalog" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/models/plugins/yang/vnfr.yang b/modules/core/mano/models/plugins/yang/vnfr.yang new file mode 100755 index 0000000..b38ba21 --- /dev/null +++ b/modules/core/mano/models/plugins/yang/vnfr.yang @@ -0,0 +1,459 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module vnfr +{ + namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfr"; + prefix "vnfr"; + + import mano-types { + prefix "manotypes"; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import vnfd { + prefix "vnfd"; + } + + import nsd { + prefix "nsd"; + } + + import vlr { + prefix "vlr"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import ietf-inet-types { + prefix "inet"; + } + + revision 2015-09-10 { + description + "Initial revision. This YANG file defines + the Virtual Network Function Record (VNFR)"; + reference + "Derived from earlier versions of base YANG files"; + } + + grouping virtual-interface { + container virtual-interface { + description + "Container for the virtual interface properties"; + + leaf type { + description + "Specifies the type of virtual interface + between VM and host. + VIRTIO : Use the traditional VIRTIO interface. + PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface. + SR-IOV : Use SR-IOV interface."; + type enumeration { + enum VIRTIO; + enum PCI-PASSTHROUGH; + enum SR-IOV; + } + } + + leaf bandwidth { + description + "Aggregate bandwidth of the NIC."; + type uint64; + } + + leaf ovs-offload { + description + "Defines if the NIC supports OVS offload. + MANDATORY : OVS offload support in the NIC is mandatory. + PREFERRED : OVS offload support in the NIC is preferred."; + type enumeration { + enum MANDATORY; + enum PREFERRED; + } + } + + leaf vendor-id { + description + "Specifies the vendor specific id for + the device. 
This is used when a NIC from + specific HW vendor is required."; + type string; + } + + leaf datapath-library { + description + "Specifies the name and version of the datapath + library the NIC is expected to support."; + type string; + } + + leaf provider-network-name { + description + "Name of the provider network to which this + NIC is attached."; + type string; + } + } + } + + container vnfr-catalog { + config false; + list vnfr { + description + "Virtual Network Function Record (VNFR)."; + key "id"; + unique "name"; + + leaf id { + description "Identifier for the VNFR."; + type yang:uuid; + } + + leaf member-vnf-index-ref { + description "Reference to member VNF index in Network service."; + type leafref { + path "/nsd:nsd-catalog/nsd:nsd/nsd:constituent-vnfd/nsd:member-vnf-index"; + } + } + + leaf dashboard-url { + description "Dashboard URL"; + type inet:uri; + } + + leaf name { + description "VNFR name."; + type string; + } + + leaf short-name { + description "VNFR short name."; + type string; + } + + leaf vendor { + description "Vendor of the VNFR."; + type string; + } + + leaf description { + description "Description of the VNFR."; + type string; + } + + leaf version { + description "Version of the VNFR"; + type string; + } + + leaf create-time { + description + "Creation timestamp of this Virtual Network + Function. The timestamp is expressed as + seconds since unix epoch - 1970-01-01T00:00:00Z"; + + type uint32; + } + + leaf vnfd-ref { + description "Reference to VNFD"; + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id"; + } + } + + // Use parameters provided here to configure this VNF + uses nsd:vnf-configuration; + + // Mainly used by Mon-params & dashboard url + container mgmt-interface { + leaf ip-address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + + list internal-vlr { + key "vlr-ref"; + + leaf vlr-ref { + description "Reference to a VLR record in the VLR catalog"; + type leafref { + path "/vlr:vlr-catalog/vlr:vlr/vlr:id"; + } + } + + leaf-list internal-connection-point-ref { + type leafref { + path "../../vdur/internal-connection-point/id"; + } + } + } + + list connection-point { + key "name"; + description + "List for external connection points. Each VNF has one + or more external connection points. As the name + implies that external connection points are used for + connecting the VNF to other VNFs or to external networks. + Each VNF exposes these connection points to the + orchestrator. The orchestrator can construct network + services by connecting the connection points between + different VNFs. 
The NFVO will use VLDs and VNFFGs at + the network service level to construct network services."; + + leaf name { + description "Name of the connection point"; + type string; + } + + leaf short-name { + description "Short name of the connection point"; + type string; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + + leaf vlr-ref { + description + "Reference to the VLR associated with this connection point"; + type leafref { + path "/vlr:vlr-catalog/vlr:vlr/vlr:id"; + } + } + + leaf ip-address { + description + "IP address assigned to the external connection point"; + type inet:ip-address; + } + leaf connection-point-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + } + + list vdur { + description "List of Virtual Deployment Units"; + key "id"; + unique "name"; + + leaf id { + description "Unique id for the VDU"; + type yang:uuid; + } + + leaf name { + description "name of the instantiated VDUR"; + type string; + } + + leaf vdu-id-ref { + type leafref { + path "/vnfd:vnfd-catalog/vnfd:vnfd" + + "[vnfd:id = current()/../../vnfr:vnfd-ref]" + + "/vnfd:vdu/vnfd:id"; + } + } + + leaf vim-id { + description "Allocated VM resource id"; + type string; + } + + leaf flavor-id { + description "VIM assigned flavor id"; + type string; + } + + leaf image-id { + description "VIM assigned image id"; + type string; + } + + leaf management-ip { + description "Management IP address"; + type inet:ip-address; + } + + leaf vm-management-ip { + description "VM Private Management IP address"; + type inet:ip-address; + } + + uses manotypes:vm-flavor; + uses manotypes:guest-epa; + uses manotypes:vswitch-epa; + uses manotypes:hypervisor-epa; + uses manotypes:host-epa; + + list internal-connection-point { + key "id"; + description + "List for internal connection points. Each VNFC + has zero or more internal connection points. + Internal connection points are used for connecting + the VNF components internal to the VNF. If a VNF + has only one VNFC, it may not have any internal + connection points."; + + leaf id { + description "Identifier for the internal connection points"; + type yang:uuid; + } + + leaf type { + description "Type of the connection point."; + type manotypes:connection-point-type; + } + + leaf ip-address { + description + "IP address assigned to the external connection point"; + type inet:ip-address; + } + } + + list internal-interface { + description + "List of internal interfaces for the VNF"; + key name; + + leaf name { + description + "Name of internal interface. Note that this + name has only local significance to the VDU."; + type string; + } + + leaf vdur-internal-connection-point-ref { + type leafref { + path "../../internal-connection-point/id"; + } + } + uses virtual-interface; + } + + list external-interface { + description + "List of external interfaces for the VNF. + The external interfaces enable sending + traffic to and from VNF."; + key name; + + leaf name { + description + "Name of the external interface. Note that + this name has only local significance."; + type string; + } + + leaf vnfd-connection-point-ref { + description + "Name of the external connection point."; + type leafref { + path "../../../connection-point/name"; + } + } + uses virtual-interface; + } + leaf operational-status { + description + "The operational status of the VDU + init : The VDU has just started. + vm-init-phase : The VDUs in the VNF is being created in VIM. 
+ vm-alloc-pending : The VM alloc is pending in VIM + running : The VDU is active in VM + terminate : The VDU is being terminated + vm-terminate-phase : The VDU in the VNF is being terminated in VIM. + terminated : The VDU is in the terminated state. + failed : The VDU instantiation failed. + "; + + type enumeration { + rwpb:enum-type "VduOperationalStatus"; + enum init; + enum vm-init-phase; + enum vm-alloc-pending; + enum running; + enum terminate; + enum vl-terminate-phase; + enum terminated; + enum failed; + } + } + } + + uses manotypes:monitoring-param; + + leaf operational-status { + description + "The operational status of the VNFR instance + init : The VNF has just started. + vl-init-phase : The internal VLs in the VNF are being instantiated. + vm-init-phase : The VMs for VDUs in the VNF are being instantiated. + running : The VNF is in running state. + terminate : The VNF is being terminated. + vm-terminate-phase : The VMs in the VNF are being terminated. + vl-terminate-phase : The internal VLs in the VNF are being terminated. + terminated : The VNF is in the terminated state. + failed : The VNF instantiation failed + "; + + type enumeration { + rwpb:enum-type "VnfrOperationalStatus"; + enum init; + enum vl-init-phase; + enum vm-init-phase; + enum running; + enum terminate; + enum vm-terminate-phase; + enum vl-terminate-phase; + enum terminated; + enum failed; + } + } + leaf config-status { + description + "The configuration status of the NS instance + configuring: At least one of the VNFs in this instance is in configuring state + configured: All the VNFs in this NS instance are configured or config-not-needed state + "; + + type enumeration { + enum configuring { + value 1; + } + enum configured { + value 2; + } + enum failed { + value 3; + } + enum config-not-needed { + value 4; + } + } + } + } + } +} + diff --git a/modules/core/mano/rwcm/CMakeLists.txt b/modules/core/mano/rwcm/CMakeLists.txt new file mode 100644 index 0000000..2fe5d3c --- /dev/null +++ b/modules/core/mano/rwcm/CMakeLists.txt @@ -0,0 +1,23 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwcm) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + test + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwcm/plugins/CMakeLists.txt b/modules/core/mano/rwcm/plugins/CMakeLists.txt new file mode 100644 index 0000000..a5203ce --- /dev/null +++ b/modules/core/mano/rwcm/plugins/CMakeLists.txt @@ -0,0 +1,18 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/29/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + yang + rwconman + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml b/modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml new file mode 100644 index 0000000..8f2d75b --- /dev/null +++ b/modules/core/mano/rwcm/plugins/cli/cli_rwcm.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt b/modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt new file mode 100644 index 0000000..b72dee7 --- /dev/null +++ 
b/modules/core/mano/rwcm/plugins/rwconman/CMakeLists.txt @@ -0,0 +1,41 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwconmantasklet) +set(CONMAN_INSTALL "etc/conman") + +## +# Install translation script in demos +## +install( + FILES + rift/tasklets/${TASKLET_NAME}/xlate_cfg.py + rift/tasklets/${TASKLET_NAME}/xlate_tags.yml + rift/tasklets/${TASKLET_NAME}/juju_if.py + DESTINATION ${CONMAN_INSTALL} + COMPONENT ${PKG_LONG_NAME}) + + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/rwconman_config.py + rift/tasklets/${TASKLET_NAME}/rwconman_events.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py new file mode 100644 index 0000000..143ffc8 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwconmantasklet import ConfigManagerTasklet diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py new file mode 100755 index 0000000..f6a2ced --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/juju_if.py @@ -0,0 +1,659 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
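+# Illustrative invocation only (the flags are defined by the argparse
+# block in __main__ below; the host, credentials, and YAML path are
+# placeholders, not tested values):
+#
+#   ./juju_if.py --server 10.0.202.1 --user user-admin --password nfvjuju \
+#                --port 17070 --deploy-timeout 600 --debug service.yaml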
+ + +# +# + +# Part of the code taken from +# https://github.com/chuckbutler/juju_action_api_class/blob/master/juju_actions.py + +""" +This script is used to control a bootstrapped Juju environment +This has been tested only with local environemnt and local charms +Provide a yaml file with the details to this script to execute +Sample yaml file to deploy a service + +ims-a: + deploy: + store: local + directory: /usr/rift/charms/trusty/clearwater-aio-proxy + series: trusty + to: "lxc:0" + + #destroy: true + + # Data under config passed as such during deployment + config: + proxied_ip: 10.0.202.39 + home_domain: "ims.riftio.local" + base_number: "1234567000" + number_count: 1000 + + units: + - unit: + #id: 0 + # Wait for each command to complete + wait: true + # Bail on failure + bail: true + #destroy: true + actions: + - create-user: { number: "1234567001", password: "secret"} + - create-user: { number: "1234567002", password: "secret"} + + - unit: + wait: true + destroy: true + bail: true + actions: + - create-user: { number: "1234567010", password: "secret"} + - create-user: { number: "1234567011", password: "secret"} + +Sample yaml file to destroy a service +clearwater-aio-proxy: + destroy: true + + units: + - unit: + actions: + - delete-user: { number: "1234567001" } + - delete-user: { number: "1234567002" } + - unit: + actions: + - delete-user: { number: "1234567010" } + - delete-user: { number: "1234567011" } +""" + +import logging +import argparse +import yaml +import jujuclient +import sys +import time +import ssl +import os + +ssl_ok = False +ssh_cmd = None +scp_cmd = None + + +class Action(object): + def __init__(self, data): + # I am undecided if we need this + # model_id = "" + self.uuid = data['action']['tag'] + self.data = data # straight from juju api + self.juju_status = data['status'] + + @classmethod + def from_data(cls, data): + o = cls(data=data) + return o + + +def get_service_units(status): + results = {} + services = status.get('Services', {}) + for svc_name, svc_data in services.items(): + units = svc_data['Units'] or {} + sub_to = svc_data['SubordinateTo'] + if not units and sub_to: + for sub in sub_to: + for unit_name, unit_data in \ + (services[sub].get('Units') or {}).items(): + for sub_name, sub_data in \ + (unit_data['Subordinates'] or {}).items(): + if sub_name.startswith(svc_name): + units[sub_name] = sub_data + results[svc_name] = units + return results + + +class ApiEnvironment(jujuclient.Environment): + def actions_available(self, service=None): + args = { + "Type": 'Action', + "Request": 'ServicesCharmActions', + "Params": { + "Entities": [] + } + } + + services = self.status().get('Services', {}) + service_names = [service] if service else services + for name in service_names: + args['Params']['Entities'].append( + { + "Tag": 'service-' + name + } + ) + + return self._rpc(args) + + def actions_list_all(self, service=None): + args = { + "Type": 'Action', + "Request": 'ListAll', + "Params": { + "Entities": [] + } + } + + service_units = get_service_units(self.status()) + service_names = [service] if service else service_units.keys() + units = [] + + for name in service_names: + units += service_units[name].keys() + + for unit in set(units): + args['Params']['Entities'].append( + { + "Tag": "unit-%s" % unit.replace('/', '-'), + } + ) + + return self._rpc(args) + + def actions_enqueue(self, action, receivers, params=None): + args = { + "Type": "Action", + "Request": "Enqueue", + "Params": { + "Actions": [] + } + } + + for receiver in receivers: + 
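+            # Each receiver is a Juju entity tag; execute_on_units() below
+            # builds these as "unit-<service>-<index>" before enqueueing.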
args['Params']['Actions'].append({ + "Receiver": receiver, + "Name": action, + "Parameters": params or {}, + }) + + return self._rpc(args) + + def actions_cancel(self, uuid): + return self._rpc({ + 'Type': 'Action', + 'Request': 'Cancel', + "Params": { + "Entities": [{'Tag': 'action-' + uuid}] + } + }) + + +class API(object): + def __init__ (self, args, logger): + logger.debug("Args: %s" % args) + + self.file = args.file + stream = open(self.file) + self.yaml = yaml.load(stream) + stream.close() + + self.ca_cert = args.ca_cert + if args.ca_cert is None: + try: + self.ca_cert = self.yaml['ca-cert'] + except KeyError: + logger.warning("Did not get the CA certificate to use") + + endpoint = 'wss://%s:%d' % (args.server.split()[0], int(args.port)) + logger.info("Juju API endpoint %s" % endpoint) + self.env = ApiEnvironment(endpoint, ca_cert=self.ca_cert) + self.env.login(args.password, user=args.user) + #self.actions=jujuclient.Actions(self.env) + self.logger = logger + if args.file: + logger.debug("File %s" % args.file) + + self.server = args.server + self.user = args.user + self.port = args.port + self.deploy_timeout = args.deploy_timeout + self.password = args.password + self.cur_units = 0 + self.req_units = 0 + self.charm = None + + def get_status(self): + return self.env.status() + + def get_annotations(self, services): + ''' + Return dict of (servicename: annotations) for each servicename + in `services`. + ''' + if not services: + return None + + d = {} + for s in services: + d[s] = self.env.get_annotation(s, 'service')['Annotations'] + return d + + def get_actions(self, service=None): + return self.env.actions_list_all(service) + + def get_action_status(self, action_tag): + ''' + responds with the action status, which is one of three values: + + - completed + - pending + - failed + + @param action_tag - the action UUID return from the enqueue method + eg: action-3428e20d-fcd7-4911-803b-9b857a2e5ec9 + ''' + receiver = self.get_actions() + for receiver in receiver['actions']: + if 'actions' in receiver.keys(): + for action_record in receiver['actions']: + if 'action' in action_record.keys(): + if action_record['action']['tag'] == action_tag: + return action_record['status'] + + def cancel_action(self, uuid): + return self.env.actions_cancel(uuid) + + def get_service_units(self): + return get_service_units(self.env.status()) + + def get_action_specs(self): + results = self.env.actions_available() + return _parse_action_specs(results) + + def enqueue_action(self, action, receivers, params): + result = self.env.actions_enqueue(action, receivers, params) + return Action.from_data(result['results'][0]) + + def apply_config(self, service, details): + if self.cur_units == 0: + # Nothing to do + return + if 'config' in details: + self.logger.debug("Config for %s updated to: %s" % (service, details['config'])) + self.env.set_config(service, details['config']) + else: + self.logger.debug("No config section found for %s" % service) + + def deploy_service(self, service, details): + if self.cur_units == 0: + # No units of the service running + if details['deploy'] is not None: + deploy = details['deploy'] + self.logger.debug("Config used for deployment: %s" % details['config']) + if self.req_units > 0: + # Deploy the service + series = 'trusty' + try: + series = deploy['series'] + except KeyError: + self.logger.debug("Using default series %s" % series) + + store_type = 'online' + try: + store_type = deploy['store'] + except KeyError: + self.logger.debug("Using default store type %s" % store_type) + + 
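+                    # The optional 'to' key in the service YAML (e.g. "lxc:0"
+                    # in the sample at the top of this file) is passed through
+                    # to Juju as the machine placement (machine_spec) below.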
deploy_to = None + try: + deploy_to = deploy['to'] + except KeyError: + self.logger.debug("No deploy machine specified") + + config = None + if 'config' in details: + config = details['config'] + self.logger.debug("Config for %s is %s" % (service, config)) + else: + self.logger.debug("No config section found") + + if store_type == 'local': + try: + directory = deploy['directory'] + prefix='' + try: + prefix=os.environ.get('RIFT_INSTALL') + except KeyError: + self.logger.info("RIFT_INSTALL not set in environemnt") + directory = "%s/%s" % (prefix, deploy['directory']) + if ssl_ok: + self.logger.debug("Local charm settings: dir=%s, series=%s" % (directory, series)) + result = self.env.add_local_charm_dir(directory, series) + url = result['CharmURL'] + else: + os.system('%s mkdir -p /home/ubuntu/charms/trusty' % (ssh_cmd)) + os.system('%s %s ubuntu@%s:/home/ubuntu/charms/trusty' % (scp_cmd, directory, self.server)) + + except: + self.logger.critical('Error deploying local charm %s: %s' % (service, sys.exc_info()[0])) + raise + else: + try: + self.logger.debug("Deploying from online") + url = deploy['url'] + except KeyError: + self.logger.critical("Charm url not specified") + raise + + try: + if ssl_ok: + self.logger.debug("Deploying using: service=%s, url=%s, num_units=%d, to=%s, config=%s" %(service, url, self.req_units, deploy_to, details['config'])) + self.env.deploy(service, url, num_units=self.req_units, config=config, machine_spec=deploy_to) + else: + os.system('%s juju deploy --repository=/home/ubuntu/charms --to %s local:trusty/%s %s' % (ssh_cmd, deploy_to, os.path.basename(directory), service)) + # Apply the config + self.apply_config(service, details) + except: + self.logger.critical('Error deploying %s: %s' % (service, sys.exc_info()[0])) + raise + + elif self.cur_units < self.req_units: + try: + self.env.add_units(service, (self.req_units - self.cur_units)) + except: + self.logger.critical('Error adding units for %s: %s' % (self.name, sys.exc_info()[0])) + raise + + # Wait for the deployed units to start + try: + self.logger.debug("Waiting for units to come up") + self.env.wait_for_units(timeout=self.deploy_timeout) + except: + self.logger.critical('Error starting all units for %s: %s' % (service, sys.exc_info()[0])) + raise + + def execute_on_units(self, service, details): + units = None + try: + units = details['units'] + except KeyError: + self.logger.info("No units for service %s defined" % service) + return + self.logger.debug("Service units def: %s" % units) + + try: + services = get_service_units(self.env.status()) + depl_units = services[service] + except KeyError: + self.logger.error("Unable to get units %s" % services) + raise + except: + self.logger.critical("Error on getting service details for service %s" % service) + raise + + # Slightly complicated logic to support define actions for + # specific units. 
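+        # Unit definitions carrying an explicit 'id' are matched to the
+        # deployed unit with that index; definitions without an 'id' are
+        # handed out in order to whatever deployed units remain.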
+ # Get all the unit definitions specified + units_ids = [] + units_no_ids = [] + for unit_conf in units: + try: + conf_id = unit_conf['id'] + self.logger.debug("Unit conf id %d" % conf_id) + units_ids[conf_id] = unit_conf['unit'] + except KeyError: + units_no_ids.append(unit_conf['unit']) + continue + + # Go through each unit deployed and apply the actions to the unit + # if the id is specified, else the first unit available + no_id_idx = 0 + for unit, status in depl_units.items(): + self.logger.debug("Execute on unit %s with %s" % (unit, status)) + idx = int(unit[unit.index('/')+1:]) + self.logger.debug("Unit index is %d" % idx) + try: + unit_conf = units_ids[idx] + self.logger.debug("Found unit config %s" % unit_conf) + except IndexError: + unit_conf = units_no_ids[no_id_idx] + self.logger.debug("Applying on unit %s" % unit_conf) + no_id_idx += 1 + + bail = False + try: + bail = unit_conf['bail'] + except KeyError: + pass + wait = False + try: + wait = unit_conf['wait'] + except KeyError: + pass + self.logger.debug("Bail is %s, Wait is %s" % (bail, wait)) + + unit_name = "unit-%s-%d" % (service, idx) + for entry in unit_conf['actions']: + for action, params in entry.items(): + self.logger.debug("Sending action: %s, %s, %s" % (action, unit_name, params)) + #result = self.actions.enqueue_units([unit], action, params) + try: + result = self.enqueue_action(action, [unit_name], params) + act_status = self.get_action_status(result.uuid) + except Exception as e: + self.logger.critical("Error applying the action %s on %s with params %s" % (action, unit, params)) + raise e + + self.logger.debug("Action %s status is %s on %s" % (action, act_status, unit)) + while wait and ((act_status == 'pending') or (act_status == 'running')): + time.sleep(1) + act_status = self.get_action_status(result.uuid) + self.logger.debug("Action %s status is %s on %s" % (action, act_status, unit)) + if bail and (act_status == 'failed'): + self.logger.critical("Error applying action %s on %s with %s" % (action, unit, params)) + raise RuntimeError("Error applying action %s on %s with %s" % (action, unit, params)) + + def remove_units(self, service, details): + if self.cur_units == 0: + # Nothing to do + return + try: + units = details['units'] + except KeyError: + self.logger.debug("remove_units: No units specified") + return + + for unit in units: + self.logger.debug("Check destroy units for %s, %s" %(service, unit)) + try: + if unit['destroy'] == False: + continue + except KeyError: + continue + try: + idx = unit['id'] + except KeyError: + self.logger.error("Need to specify unit id to destroy") + continue + + unit = '%s/%d' % (service, idx) + self.logger.debug("Destroy unit %s" % unit) + try: + status = self.env.status()['Services'][service]['Units'][unit] + except KeyError: + status = None + self.logger.debug("Status of unit %s" % status) + if status is None: + continue + unit_name = "unit-%s-%d" %(service, idx) + self.logger.debug("Destroying unit %s" % unit_name) + self.env.remove_units([unit_name]) + + def execute (self): + for service, details in self.yaml.items(): + self.cur_units = 0 + self.req_units = 0 + self.charm = service + try: + self.charm = details['charm'] + except KeyError: + pass + + self.logger.debug("Service: %s - %s" % (service, details)) + services = self.env.status()['Services'] + self.logger.debug("Services : %s" % services) + cur_units = 0 + try: + cur_units = len(services[service]['Units']) + except KeyError: + pass + req_units = 0 + try: + req_units = len(details['units']) + except KeyError: + 
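+                # No 'units' list was given for this service in the YAML: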
# Deploy atleast one unit + req_units = 1 + + self.logger.debug("Units requested: %d, deployed: %d" % (req_units, cur_units)) + self.cur_units = cur_units + self.req_units = req_units + destroy = False + try: + destroy = details['destroy'] + except KeyError: + pass + if destroy: + if cur_units == 0: + # Nothing to do + return + # Execute any commands for units before destroy as this could have + # side effects for something like proxy charms + self.execute_on_units(service, details) + self.logger.debug("Destroying service %s" % service) + self.env.destroy_service(service) + return + # Apply config on already running units first + self.apply_config(service, details) + self.deploy_service(service, details) + self.execute_on_units(service, details) + # Removing units after execute to run any cleanup actions + self.remove_units(service, details) + +def _parse_action_specs(api_results): + results = {} + + r = api_results['results'] + for service in r: + servicetag = service['servicetag'] + service_name = servicetag[8:] # remove 'service-' prefix + specs = {} + if service['actions']['ActionSpecs']: + for spec_name, spec_def in \ + service['actions']['ActionSpecs'].items(): + specs[spec_name] = ActionSpec(spec_name, spec_def) + results[service_name] = specs + return results + + +def _parse_action_properties(action_properties_dict): + results = {} + + d = action_properties_dict + for prop_name, prop_def in d.items(): + results[prop_name] = ActionProperty(prop_name, prop_def) + return results + + +class Dict(dict): + def __getattr__(self, name): + return self[name] + + +class ActionSpec(Dict): + def __init__(self, name, data_dict): + params = data_dict['Params'] + super(ActionSpec, self).__init__( + name=name, + title=params['title'], + description=params['description'], + properties=_parse_action_properties(params['properties']) + ) + + +class ActionProperty(Dict): + types = { + 'string': str, + 'integer': int, + 'boolean': bool, + 'number': float, + } + type_checks = { + str: 'string', + int: 'integer', + bool: 'boolean', + float: 'number', + } + + def __init__(self, name, data_dict): + super(ActionProperty, self).__init__( + name=name, + description=data_dict.get('description', ''), + default=data_dict.get('default', ''), + type=data_dict.get( + 'type', self._infer_type(data_dict.get('default'))), + ) + + def _infer_type(self, default): + if default is None: + return 'string' + for _type in self.type_checks: + if isinstance(default, _type): + return self.type_checks[_type] + return 'string' + + def to_python(self, value): + f = self.types.get(self.type) + return f(value) if f else value + + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Perform actions on Juju') + parser.add_argument("-s", "--server", required=True, help="Juju API server") + parser.add_argument("-u", "--user", default='user-admin', help="User, default user-admin") + parser.add_argument("-p", "--password", default='nfvjuju', help="Password for the user") + parser.add_argument("-P", "--port", default="17070", help="Port number, default 17070") + parser.add_argument("-c", "--ca-cert", default=None, help="CA certificate for the server"); + parser.add_argument("-T", "--deploy-timeout", default=600, help="Timeout when bringing up units, default 600") + parser.add_argument("--debug", action="store_true") + parser.add_argument("file", help="File with commands, config parameters and actions") + args = parser.parse_args() + + if args.debug: + logging.basicConfig(level=logging.DEBUG) + else: + 
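+        # Without --debug, keep the client quiet and report errors only.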
logging.basicConfig(level=logging.ERROR) + logger = logging.getLogger("juju-client") + + # Workaround for certificate failure, insecure. Does not work with ssl module in Python 3.3 + if sys.version_info >= (3,4): + ssl._create_default_https_context = ssl._create_unverified_context + ssl_ok=True + else: + ssh_cmd = 'ssh -i %s %s@%s' % ('~/.ssh/id_grunt', 'ubuntu', args.server) + scp_cmd = 'scp -r -i %s %s@%s' % ('~/.ssh/id_grunt', 'ubuntu', args.server) + + ssl_ok=False + api = API(args, logger) + api.execute() \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py new file mode 100644 index 0000000..c3f7ee7 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py @@ -0,0 +1,953 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import sys +import subprocess +import yaml +import os + +from gi.repository import ( + RwDts as rwdts, + RwConmanYang as conmanY, + ProtobufC, +) + +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + +def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index): + return "{}.{}.{}".format(nsr_name, vnfr_short_name, member_vnf_index) + +class RaiseException(Exception): + pass + +def log_this_vnf(vnf_cfg): + log_vnf = "" + used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address'] + for item in used_item_list: + if item in vnf_cfg: + if item == 'mgmt_ip_address': + log_vnf += "({})".format(vnf_cfg[item]) + else: + log_vnf += "{}/".format(vnf_cfg[item]) + return log_vnf + +class ConfigManagerConfig(object): + def __init__(self, dts, log, loop, parent): + self._dts = dts + self._log = log + self._loop = loop + self._parent = parent + self._nsr_dict = {} + self.pending_cfg = {} + self.terminate_cfg = {} + self.pending_tasks = [] # Used for NSR id retry (mainly exercised in the restart case) + self._config_xpath = "C,/cm-config" + self._opdata_xpath = "D,/rw-conman:cm-state" + + self.cm_config = conmanY.SoConfig() + # RO specific configuration + self.ro_config = {} + for key in self.cm_config.ro_endpoint.fields: + self.ro_config[key] = None + + # Initialize cm-state + self.cm_state = {} + self.cm_state['cm_nsr'] = [] + self.cm_state['states'] = "Initialized" + + def add_to_pending_tasks(self, task): + try: + self.pending_tasks.append(task) + if len(self.pending_tasks) == 1: + self._loop.create_task(self.ConfigManagerConfig_pending_loop()) + # TBD - change to info level + self._log.debug("Started pending_loop!") + except Exception as e: + self._log.error("Failed adding to pending tasks as (%s)", str(e)) + + def del_from_pending_tasks(self, task): + try: + self.pending_tasks.remove(task) + except Exception as e: + self._log.error("Failed removing from pending tasks as (%s)", str(e)) + + @asyncio.coroutine + def 
ConfigManagerConfig_pending_loop(self): + loop_sleep = 2 + while True: + """ + This pending task queue is ordred by events, + must finish previous task successfully to be able to go on to the next task + """ + if self.pending_tasks: + task = self.pending_tasks.pop() + done = False + if 'nsrid' in task: + nsrid = task['nsrid'] + self._log.debug("Will execute pending task for NSR id(%s)", nsrid) + try: + # Try to configure this NSR + task['retries'] -= 1 + done = yield from self.config_NSR(nsrid) + except Exception as e: + self._log.error("Failed(%s) configuring NSR(%s), retries remained:%d!", + str(e), nsrid, task['retries']) + pass + if done: + self._log.debug("Finished pending task NSR id(%s):", nsrid) + else: + self._log.error("Failed configuring NSR(%s), retries remained:%d!", + nsrid, task['retries']) + + # Failed, re-insert (append at the end) this failed task to be retried later + # If any retries remained. + if task['retries']: + self.pending_tasks.append(task) + else: + self._log.debug("Stopped pending_loop!") + break + yield from asyncio.sleep(loop_sleep, loop=self._loop) + + @asyncio.coroutine + def register(self): + self.register_cm_config() + yield from self.register_cm_state_opdata() + + def register_cm_config(self): + def on_apply(dts, acg, xact, action, scratch): + """Apply the Service Orchestration configuration""" + if xact.id is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + self._log.debug("cm-config (xact: %s) (action: %s)", + xact, action) + + if xact.id in self.terminate_cfg: + msg = self.terminate_cfg.pop(xact.id, None) + if msg is not None: + msg_dict = msg.as_dict() + for delnsr in msg_dict['nsr']: + nsr_id = delnsr.get('id', None) + asyncio.ensure_future(self.terminate_NSR(nsr_id, xact), loop=self._loop) + return + + if xact.id not in self.pending_cfg: + self._log.debug("Could not find transaction data for transaction-id") + return + + # Pop saved config (from on_prepare) + msg = self.pending_cfg.pop(xact.id, None) + self._log.debug("Apply cm-config: %s", msg) + self.cm_state['states'] += ", cm-config" + + # Process entire configuration + ro_cfg = self.ro_config + + msg_dict = msg.as_dict() + self._log.debug("msg_dict is %s: %s", type(msg_dict), msg_dict) + ''' Process Resource Orchestrator endpoint config ''' + if 'ro_endpoint' in msg_dict: + self._log.debug("ro-endpoint = %s", msg_dict['ro_endpoint']) + for key, value in msg_dict['ro_endpoint'].items(): + ro_cfg[key] = value + self._log.debug("ro-config: key=%s, value=%s", key, ro_cfg[key]) + + # If all RO credentials are configured, initiate connection + + ro_complete = True + for key, value in ro_cfg.items(): + if value is None: + ro_complete = False + self._log.warning("key %s is not set", key) + # Get the ncclient handle (OR interface) + orif = self._parent._event._orif + # Get netconf connection + if ro_complete is True and orif._manager is None: + self._log.info("Connecting to RO = %s!", ro_cfg['ro_ip_address']) + asyncio.wait(asyncio.ensure_future(orif.connect(), loop=self._loop)) + #asyncio.ensure_future(orif.connect(), loop=self._loop) + self._log.info("Connected to RO = %s!", ro_cfg['ro_ip_address']) + self.cm_state['states'] += ", RO connected" + else: + self._log.warning("Already connected to RO, ignored!") + + if 'nsr' in msg_dict: + for addnsr in msg_dict['nsr']: + ''' Process Initiate NSR ''' + 
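+                    # Configuration is deliberately not started inline here:
+                    # each NSR id is queued via add_to_pending_tasks() with a
+                    # retry budget of 10, and ConfigManagerConfig_pending_loop()
+                    # keeps calling config_NSR() until it succeeds or the
+                    # retries are exhausted.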
nsr_id = addnsr.get('id', None) + if nsr_id != None: + #asyncio.ensure_future(self.config_NSR(nsr_id), loop=self._loop) + # Add this to pending task + self.add_to_pending_tasks({'nsrid' : nsr_id, 'retries' : 10}) + + return + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + action = xact_info.handle.get_query_action() + + self._log.debug("Received cm-config: xact.id=%s, msg=%s, action=%s", xact.id, msg, action) + # print("<1<<< self.pending_cfg:", self.pending_cfg) + # fref = ProtobufC.FieldReference.alloc() + # pb_msg = msg.to_pbcm() + # fref.goto_whole_message(pb_msg) + # print(">>>>>> fref.is_field_deleted():", fref.is_field_deleted()) + + msg_dict = msg.as_dict() + pending_q = self.pending_cfg + if action == rwdts.QueryAction.DELETE: + pending_q = self.terminate_cfg + if 'nsr' in msg_dict: + # Do this only if NSR is deleted + # fref = ProtobufC.FieldReference.alloc() + # pb_msg = msg.to_pbcm() + # fref.goto_whole_message(pb_msg) + # print(">>>>>> fref.is_field_deleted():", fref.is_field_deleted()) + # # Got DELETE action in prepare callback + # if fref.is_field_deleted(): + + # NS is(are) terminated + for delnsr in msg_dict['nsr']: + nsr_id = delnsr.get('id', None) + # print('>>>>>>> Will delete pending NSR id={}'.format(nsr_id)) + if nsr_id is not None: + # print(">>>>>>> self.pending_cfg:", self.pending_cfg) + # Find this NSR id if it is scheduled to be added. + for i,pending in self.pending_cfg.items(): + p_dict = pending.as_dict() + if 'nsr' in p_dict: + for p_nsr in p_dict['nsr']: + p_nsr_id = p_nsr.get('id', None) + if p_nsr_id == nsr_id: + # Found it, remove + self.pending_cfg.pop(i, None) + pending_q = None + + # Enqueue the msg in proper queue + if pending_q is not None: + pending_q[xact.id] = msg + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug("Registering for ro-config using xpath: %s", + self._config_xpath) + + acg_handler = rift.tasklets.AppConfGroup.Handler(on_apply = on_apply) + + with self._dts.appconf_group_create(handler=acg_handler) as acg: + try: + self._pool_reg = acg.register(xpath=self._config_xpath, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + self._log.info("Successfully registered (%s)", self._config_xpath) + except Exception as e: + self._log.error("Failed to register as (%s)", e) + + + @asyncio.coroutine + def register_cm_state_opdata(self): + + def state_to_string(state): + state_dict = { + conmanY.RecordState.INIT : "init", + conmanY.RecordState.RECEIVED : "received", + conmanY.RecordState.CFG_PROCESS : "cfg_process", + conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed", + conmanY.RecordState.CFG_SCHED : "cfg_sched", + conmanY.RecordState.CFG_DELAY : "cfg_delay", + conmanY.RecordState.CONNECTING : "connecting", + conmanY.RecordState.FAILED_CONNECTION : "failed_connection", + conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected", + conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected", + conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected", + conmanY.RecordState.CFG_SEND : "cfg_send", + conmanY.RecordState.CFG_FAILED : "cfg_failed", + conmanY.RecordState.READY_NO_CFG : "ready_no_cfg", + conmanY.RecordState.READY : "ready", + } + return state_dict[state] + + def prepare_show_output(): + self.show_output = conmanY.CmOpdata() + self.show_output.states = self.cm_states + nsr_dict = self._nsr_dict + + for nsr_obj in nsr_dict.values(): + cm_nsr = self.show_output.cm_nsr.add() + # Fill in this NSR from nsr object + cm_nsr.id = 
nsr_obj._nsr_id + cm_nsr.state = state_to_string(nsr_obj.state) + if nsr_obj.state == conmanY.RecordState.CFG_PROCESS_FAILED: + continue + cm_nsr.name = nsr_obj.nsr_name + + # Fill in each VNFR from this nsr object + vnfr_list = nsr_obj._vnfr_list + for vnfr in vnfr_list: + vnf_cfg = vnfr['vnf_cfg'] + + # Create & fill vnfr + cm_vnfr = cm_nsr.cm_vnfr.add() + cm_vnfr.id = vnfr['id'] + cm_vnfr.name = vnfr['name'] + cm_vnfr.state = state_to_string(vnf_cfg['cm_state']) + + # Fill in VNF management interface + cm_vnfr.mgmt_interface.ip_address = vnf_cfg['mgmt_ip_address'] + cm_vnfr.mgmt_interface.cfg_type = vnf_cfg['config_method'] + cm_vnfr.mgmt_interface.port = vnf_cfg['port'] + + # Fill in VNF configuration details + cm_vnfr.cfg_location = vnf_cfg['cfg_file'] + + # Fill in each connection-point for this VNF + if 'connection_point' in vnfr: + cp_list = vnfr['connection_point'] + for cp_item_dict in cp_list: + cm_cp = cm_vnfr.connection_point.add() + cm_cp.name = cp_item_dict['name'] + cm_cp.ip_address = cp_item_dict['ip_address'] + + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + + self._log.debug("Received cm-state: msg=%s, action=%s", msg, action) + + if action == rwdts.QueryAction.READ: + show_output = conmanY.CmOpdata() + show_output.from_dict(self.cm_state) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + xpath=self._opdata_xpath, + msg=show_output) + else: + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.info("Registering for cm-opdata xpath: %s", + self._opdata_xpath) + + try: + handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + yield from self._dts.register(xpath=self._opdata_xpath, + handler=handler, + flags=rwdts.Flag.PUBLISHER) + self._log.info("Successfully registered for opdata(%s)", self._opdata_xpath) + except Exception as e: + self._log.error("Failed to register for opdata as (%s)", e) + + @asyncio.coroutine + def process_nsd_vnf_configuration(self, nsr_obj, vnfr): + + def get_cfg_file_extension(method, configuration_options): + ext_dict = { + "netconf" : "xml", + "script" : { + "bash" : "sh", + "expect" : "exp", + }, + "juju" : "yml" + } + + if method == "netconf": + return ext_dict[method] + elif method == "script": + return ext_dict[method][configuration_options['script_type']] + elif method == "juju": + return ext_dict[method] + else: + return "cfg" + + ## This is how the YAML file should look like, This routine will be called for each VNF, so keep appending the file. + ## priority order is determined by the number, hence no need to generate the file in that order. A dictionary will be + ## used that will take care of the order by number. 
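+        ## Entries are applied in ascending numeric order (1 first, then 2, ...);
+        ## see the range(1, num_vnfs + 1) loop in config_NSR() below.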
+ ''' + 1 : <== This is priority + name : trafsink_vnfd + member_vnf_index : 2 + configuration_delay : 120 + configuration_type : netconf + configuration_options : + username : admin + password : admin + port : 2022 + target : running + 2 : + name : trafgen_vnfd + member_vnf_index : 1 + configuration_delay : 0 + configuration_type : netconf + configuration_options : + username : admin + password : admin + port : 2022 + target : running + ''' + + # Save some parameters needed as short cuts in flat structure (Also generated) + vnf_cfg = vnfr['vnf_cfg'] + # Prepare unique name for this VNF + vnf_cfg['vnf_unique_name'] = get_vnf_unique_name(vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref']) + + nsr_obj.this_nsr_dir = os.path.join(self._parent.cfg_dir, vnf_cfg['nsr_name'], self._nsr['name_ref']) + if not os.path.exists(nsr_obj.this_nsr_dir): + os.makedirs(nsr_obj.this_nsr_dir) + nsr_obj.cfg_path_prefix = '{}/{}_{}'.format(nsr_obj.this_nsr_dir, vnfr['short_name'], vnfr['member_vnf_index_ref']) + nsr_vnfr = '{}/{}_{}'.format(vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref']) + + # Get vnf_configuration from vnfr + vnf_config = vnfr['vnf_configuration'] + + self._log.debug("vnf_configuration = %s", vnf_config) + #print("### TBR ### vnf_configuration = ", vnf_config) + + # Create priority dictionary + cfg_priority_order = 0 + if ('input_params' in vnf_config and + 'config_priority' in vnf_config['input_params']): + cfg_priority_order = vnf_config['input_params']['config_priority'] + + # All conditions must be met in order to process configuration + if (cfg_priority_order != 0 and + vnf_config['config_type'] is not None and + vnf_config['config_type'] != 'none' and + 'config_template' in vnf_config): + + # Create all sub dictionaries first + config_priority = { + 'name' : vnfr['short_name'], + 'member_vnf_index' : vnfr['member_vnf_index_ref'], + } + + if 'config_delay' in vnf_config['input_params']: + config_priority['configuration_delay'] = vnf_config['input_params']['config_delay'] + vnf_cfg['config_delay'] = config_priority['configuration_delay'] + + configuration_options = {} + method = vnf_config['config_type'] + config_priority['configuration_type'] = method + vnf_cfg['config_method'] = method + + cfg_opt_list = ['port', 'target', 'script_type', 'ip_address', 'user', 'secret'] + for cfg_opt in cfg_opt_list: + if cfg_opt in vnf_config[method]: + configuration_options[cfg_opt] = vnf_config[method][cfg_opt] + vnf_cfg[cfg_opt] = configuration_options[cfg_opt] + + cfg_opt_list = ['mgmt_ip_address', 'username', 'password'] + for cfg_opt in cfg_opt_list: + if cfg_opt in vnf_config['config_access']: + configuration_options[cfg_opt] = vnf_config['config_access'][cfg_opt] + vnf_cfg[cfg_opt] = configuration_options[cfg_opt] + + # TBD - see if we can neatly include the config in "input_params" file, no need though + #config_priority['config_template'] = vnf_config['config_template'] + # Create config file + vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(nsr_obj.cfg_path_prefix, config_priority['configuration_type']) + vnf_cfg['cfg_file'] = '{}.{}'.format(nsr_obj.cfg_path_prefix, get_cfg_file_extension(method, configuration_options)) + vnf_cfg['xlate_script'] = os.path.join(self._parent.cfg_dir, 'xlate_cfg.py') + vnf_cfg['juju_script'] = os.path.join(self._parent.cfg_dir, 'juju_if.py') + + try: + # Now write this template into file + with open(vnf_cfg['cfg_template'], "w") as cf: + cf.write(vnf_config['config_template']) + except Exception as e: + 
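+            # A failed template write is fatal for this VNF's configuration:
+            # log the error and re-raise so config_NSR() marks the whole NSR
+            # as CFG_PROCESS_FAILED.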
self._log.error("Processing NSD, failed to generate configuration template : %s (Error : %s)", + vnf_config['config_template'], str(e)) + raise + + self._log.debug("VNF endpoint so far: %s", vnf_cfg) + + # Populate filled up dictionary + config_priority['configuration_options'] = configuration_options + nsr_obj.nsr_cfg_input_params_dict[cfg_priority_order] = config_priority + nsr_obj.num_vnfs_to_cfg += 1 + nsr_obj._vnfr_dict[vnf_cfg['vnf_unique_name']] = vnfr + nsr_obj._vnfr_dict[vnfr['id']] = vnfr + + self._log.debug("input_params = %s", nsr_obj.nsr_cfg_input_params_dict) + else: + self._log.info("NS/VNF %s is not to be configured by Configuration Manager!", nsr_vnfr) + yield from nsr_obj.update_vnf_cm_state(vnfr, conmanY.RecordState.READY_NO_CFG) + + # Update the cm-state + nsr_obj.populate_vm_state_from_vnf_cfg() + + @asyncio.coroutine + def config_NSR(self, id): + nsr_dict = self._nsr_dict + self._log.info("Initiate NSR fetch, id = %s", id) + + try: + if id not in nsr_dict: + nsr_obj = ConfigManagerNSR(self._log, self._loop, self, id) + nsr_dict[id] = nsr_obj + else: + self._log.info("NSR(%s) is already initialized!", id) + nsr_obj = nsr_dict[id] + except Exception as e: + self._log.error("Failed creating NSR object for (%s) as (%s)", id, str(e)) + raise + + # Populate this object with netconfd API from RO + + # Get the ncclient handle (OR interface) + orif = self._parent._event._orif + + if orif is None: + self._log.error("OR interface not initialized") + try: + # Fetch NSR + nsr = yield from orif.get_nsr(id) + self._log.debug("nsr = (%s/%s)", type(nsr), nsr) + if ('operational_status' in nsr and nsr['operational_status'] == "running"): + self._nsr = nsr + yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED) + else: + self._log.info("NSR(%s) is not ready yet!", nsr['nsd_name_ref']) + return False + + try: + # Parse NSR + if nsr is not None: + nsr_obj.set_nsr_name(nsr['nsd_name_ref']) + nsr_dir = os.path.join(self._parent.cfg_dir, nsr_obj.nsr_name) + self._log.info("Checking NS config directory: %s", nsr_dir) + if not os.path.isdir(nsr_dir): + os.makedirs(nsr_dir) + # self._log.critical("NS %s is not to be configured by Service Orchestrator!", nsr_obj.nsr_name) + # yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.READY_NO_CFG) + # return + + for vnfr_id in nsr['constituent_vnfr_ref']: + self._log.debug("Fetching VNFR (%s)", vnfr_id) + vnfr = yield from orif.get_vnfr(vnfr_id) + self._log.debug("vnfr = (%s/ %s)", type(vnfr), vnfr) + #print("### TBR ### vnfr = ", vnfr) + nsr_obj.add_vnfr(vnfr) + yield from self.process_nsd_vnf_configuration(nsr_obj, vnfr) + except Exception as e: + self._log.error("Failed processing NSR (%s) as (%s)", nsr_obj.nsr_name, str(e)) + yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED) + raise + + # Generate config_input_params.yaml (For debug reference) + nsr_cfg_input_file = os.path.join(nsr_obj.this_nsr_dir, "configuration_input_params.yml") + with open(nsr_cfg_input_file, "w") as yf: + yf.write(yaml.dump(nsr_obj.nsr_cfg_input_params_dict, default_flow_style=False)) + + self._log.debug("Starting to configure each VNF") + + ## Check if this NS has input parametrs + self._log.info("Checking NS configuration order: %s", nsr_cfg_input_file) + + if os.path.exists(nsr_cfg_input_file): + # Apply configuration is specified order + try: + # Fetch number of VNFs + num_vnfs = nsr_obj.num_vnfs_to_cfg + + # Go in loop to configure by specified order + self._log.info("Using Dynamic configuration input parametrs for NS: 
%s", nsr_obj.nsr_name) + + # cfg_delay = nsr_obj.nsr_cfg_input_params_dict['configuration_delay'] + # if cfg_delay: + # self._log.info("Applying configuration delay for NS (%s) ; %d seconds", + # nsr_obj.nsr_name, cfg_delay) + # yield from asyncio.sleep(cfg_delay, loop=self._loop) + + for i in range(1,num_vnfs+1): + if i not in nsr_obj.nsr_cfg_input_params_dict: + self._log.warning("NS (%s) - Ordered configuration is missing order-number: %d", nsr_obj.nsr_name, i) + else: + vnf_input_params_dict = nsr_obj.nsr_cfg_input_params_dict[i] + + # Make up vnf_unique_name with vnfd name and member index + #vnfr_name = "{}.{}".format(nsr_obj.nsr_name, vnf_input_params_dict['name']) + vnf_unique_name = get_vnf_unique_name( + nsr_obj.nsr_name, + vnf_input_params_dict['name'], + str(vnf_input_params_dict['member_vnf_index']), + ) + self._log.info("NS (%s) : VNF (%s) - Processing configuration input params", + nsr_obj.nsr_name, vnf_unique_name) + + # Find vnfr for this vnf_unique_name + if vnf_unique_name not in nsr_obj._vnfr_dict: + self._log.error("NS (%s) - Can not find VNF to be configured: %s", nsr_obj.nsr_name, vnf_unique_name) + else: + # Save this unique VNF's config input parameters + nsr_obj.vnf_input_params_dict[vnf_unique_name] = vnf_input_params_dict + nsr_obj.ConfigVNF(nsr_obj._vnfr_dict[vnf_unique_name]) + + # Now add the entire NS to the pending config list. + self._parent.pending_cfg.append(nsr_obj) + + except Exception as e: + self._log.error("Failed processing input parameters for NS (%s) as %s", nsr_obj.nsr_name, str(e)) + raise + else: + self._log.error("No configuration input parameters for NSR (%s)", nsr_obj.nsr_name) + + except Exception as e: + #print("##>> config_NSR Failed as:", str(e)) + self._log.error("Failed to configure NS (%s) as (%s)", nsr_obj.nsr_name, str(e)) + yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED) + raise + + return True + + @asyncio.coroutine + def terminate_NSR(self, id, xact): + nsr_dict = self._nsr_dict + if id not in nsr_dict: + self._log.error("NSR(%s) does not exist!", id) + return + else: + # Remove this object from global list + nsr_obj = nsr_dict.pop(id, None) + + # Remove this NSR if we have it on pending task list + for task in self.pending_tasks: + if task['nsrid'] == id: + self.del_from_pending_tasks(task) + #print("#### NSR({}/{}) is removed from pending task list".format(nsr_obj.nsr_name, id)) + + # Remove this NS cm-state from global status list + self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr) + + # Also remove any scheduled configuration event + for nsr_obj_p in self._parent.pending_cfg: + if nsr_obj_p == nsr_obj: + assert id == nsr_obj_p._nsr_id + #self._parent.pending_cfg.remove(nsr_obj_p) + # Mark this as being deleted so we do not try to configure it if we are in cfg_delay (will wake up and continue to process otherwise) + nsr_obj_p.being_deleted = True + #print("#### Removed scheduled configuration for NSR({})".format(nsr_obj.nsr_name)) + self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name) + + #print("#### NSR({}/{}) is deleted".format(nsr_obj.nsr_name, id)) + self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id) + + +class ConfigManagerNSR(object): + def __init__(self, log, loop, parent, id): + self._log = log + self._loop = loop + self._rwcal = None + self._vnfr_dict = {} + self._cp_dict = {} + self._nsr_id = id + self._parent = parent + self._log.info("Instantiated NSR entry for id=%s", id) + self.nsr_cfg_input_params_dict = {} + self.vnf_input_params_dict = {} + 
self.num_vnfs_to_cfg = 0 + self._vnfr_list = [] + self.vnf_cfg_list = [] + self.this_nsr_dir = None + self.being_deleted = False + + # Initialize cm-state for this NS + self.cm_nsr = {} + self.cm_nsr['cm_vnfr'] = [] + self.cm_nsr['id'] = id + self.cm_nsr['state'] = self.state_to_string(conmanY.RecordState.INIT) + + self.set_nsr_name('Not Set') + + # Add this NSR cm-state object to global cm-state + parent.cm_state['cm_nsr'].append(self.cm_nsr) + + def set_nsr_name(self, name): + self.nsr_name = name + self.cm_nsr['name'] = name + + def xlate_conf(self, vnfr, vnf_cfg): + + # If configuration type is not already set, try to read from input params + if vnf_cfg['interface_type'] is None: + # Prepare unique name for this VNF + vnf_unique_name = get_vnf_unique_name( + vnf_cfg['nsr_name'], + vnfr['short_name'], + vnfr['member_vnf_index_ref'], + ) + + # Find this particular (unique) VNF's config input params + if (vnf_unique_name in self.vnf_input_params_dict): + vnf_cfg_input_params_dict = self.vnf_input_params_dict[vnf_unique_name] + vnf_cfg['interface_type'] = vnf_cfg_input_params_dict['configuration_type'] + if 'configuration_options' in vnf_cfg_input_params_dict: + cfg_opts = vnf_cfg_input_params_dict['configuration_options'] + for key, value in cfg_opts.items(): + vnf_cfg[key] = value + + cfg_path_prefix = '{}/{}/{}_{}'.format( + self._parent._parent.cfg_dir, + vnf_cfg['nsr_name'], + vnfr['short_name'], + vnfr['member_vnf_index_ref'], + ) + + vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(cfg_path_prefix, vnf_cfg['interface_type']) + vnf_cfg['cfg_file'] = '{}.cfg'.format(cfg_path_prefix) + vnf_cfg['xlate_script'] = self._parent._parent.cfg_dir + '/xlate_cfg.py' + + self._log.debug("VNF endpoint so far: %s", vnf_cfg) + + self._log.info("Checking cfg_template %s", vnf_cfg['cfg_template']) + if os.path.exists(vnf_cfg['cfg_template']): + return True + return False + + def ConfigVNF(self, vnfr): + + vnf_cfg = vnfr['vnf_cfg'] + vnf_cm_state = self.find_or_create_vnfr_cm_state(vnf_cfg) + + if (vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY_NO_CFG) + or + vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY)): + self._log.warning("NS/VNF (%s/%s) is already configured! 
Skipped.", self.nsr_name, vnfr['name']) + return + + #UPdate VNF state + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS) + + # Now translate the configuration for iP addresses + try: + # Add cp_dict members (TAGS) for this VNF + self._cp_dict['rw_mgmt_ip'] = vnf_cfg['mgmt_ip_address'] + self._cp_dict['rw_username'] = vnf_cfg['username'] + self._cp_dict['rw_password'] = vnf_cfg['password'] + + script_cmd = 'python3 {} -i {} -o {} -x "{}"'.format(vnf_cfg['xlate_script'], vnf_cfg['cfg_template'], vnf_cfg['cfg_file'], repr(self._cp_dict)) + self._log.debug("xlate script command (%s)", script_cmd) + #xlate_msg = subprocess.check_output(script_cmd).decode('utf-8') + xlate_msg = subprocess.check_output(script_cmd, shell=True).decode('utf-8') + self._log.info("xlate script output (%s)", xlate_msg) + except Exception as e: + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED) + self._log.error("Failed to execute translation script for VNF: %s with (%s)", log_this_vnf(vnf_cfg), str(e)) + return + + self._log.info("Applying config to VNF: %s = %s!", log_this_vnf(vnf_cfg), vnf_cfg) + try: + self.vnf_cfg_list.append(vnf_cfg) + self._log.debug("Scheduled configuration!") + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_SCHED) + except Exception as e: + self._log.error("Failed apply_vnf_config to VNF: %s as (%s)", log_this_vnf(vnf_cfg), str(e)) + vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED) + raise + + def add(self, nsr): + self._log.info("Adding NS Record for id=%s", id) + self._nsr = nsr + + def sample_cm_state(self): + return ( + { + 'cm_nsr': [ + { + 'cm_vnfr': [ + { + 'cfg_location': 'location1', + 'cfg_type': 'script', + 'connection_point': [ + {'ip_address': '1.1.1.1', 'name': 'vnf1cp1'}, + {'ip_address': '1.1.1.2', 'name': 'vnf1cp2'} + ], + 'id': 'vnfrid1', + 'mgmt_interface': {'ip_address': '7.1.1.1', + 'port': 1001}, + 'name': 'vnfrname1', + 'state': 'init' + }, + { + 'cfg_location': 'location2', + 'cfg_type': 'netconf', + 'connection_point': [{'ip_address': '2.1.1.1', 'name': 'vnf2cp1'}, + {'ip_address': '2.1.1.2', 'name': 'vnf2cp2'}], + 'id': 'vnfrid2', + 'mgmt_interface': {'ip_address': '7.1.1.2', + 'port': 1001}, + 'name': 'vnfrname2', + 'state': 'init'} + ], + 'id': 'nsrid1', + 'name': 'nsrname1', + 'state': 'init'} + ], + 'states': 'Initialized, ' + }) + + def populate_vm_state_from_vnf_cfg(self): + # Fill in each VNFR from this nsr object + vnfr_list = self._vnfr_list + for vnfr in vnfr_list: + vnf_cfg = vnfr['vnf_cfg'] + vnf_cm_state = self.find_vnfr_cm_state(vnfr['id']) + + if vnf_cm_state: + # Fill in VNF management interface + vnf_cm_state['mgmt_interface']['ip_address'] = vnf_cfg['mgmt_ip_address'] + vnf_cm_state['mgmt_interface']['port'] = vnf_cfg['port'] + + # Fill in VNF configuration details + vnf_cm_state['cfg_type'] = vnf_cfg['config_method'] + vnf_cm_state['cfg_location'] = vnf_cfg['cfg_file'] + + # Fill in each connection-point for this VNF + cp_list = vnfr['connection_point'] + for cp_item_dict in cp_list: + vnf_cm_state['connection_point'].append( + { + 'name' : cp_item_dict['name'], + 'ip_address' : cp_item_dict['ip_address'], + } + ) + + def state_to_string(self, state): + state_dict = { + conmanY.RecordState.INIT : "init", + conmanY.RecordState.RECEIVED : "received", + conmanY.RecordState.CFG_PROCESS : "cfg_process", + conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed", + conmanY.RecordState.CFG_SCHED : "cfg_sched", + 
conmanY.RecordState.CFG_DELAY : "cfg_delay", + conmanY.RecordState.CONNECTING : "connecting", + conmanY.RecordState.FAILED_CONNECTION : "failed_connection", + conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected", + conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected", + conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected", + conmanY.RecordState.CFG_SEND : "cfg_send", + conmanY.RecordState.CFG_FAILED : "cfg_failed", + conmanY.RecordState.READY_NO_CFG : "ready_no_cfg", + conmanY.RecordState.READY : "ready", + } + return state_dict[state] + + def find_vnfr_cm_state(self, id): + if self.cm_nsr['cm_vnfr']: + for vnf_cm_state in self.cm_nsr['cm_vnfr']: + if vnf_cm_state['id'] == id: + return vnf_cm_state + return None + + def find_or_create_vnfr_cm_state(self, vnf_cfg): + vnfr = vnf_cfg['vnfr'] + vnf_cm_state = self.find_vnfr_cm_state(vnfr['id']) + + if vnf_cm_state is None: + # Not found, Create and Initialize this VNF cm-state + vnf_cm_state = { + 'id' : vnfr['id'], + 'name' : vnfr['short_name'], + 'state' : self.state_to_string(conmanY.RecordState.RECEIVED), + 'mgmt_interface' : + { + 'ip_address' : vnf_cfg['mgmt_ip_address'], + 'port' : vnf_cfg['port'], + }, + 'cfg_type' : vnf_cfg['config_method'], + 'cfg_location' : vnf_cfg['cfg_file'], + 'connection_point' : [], + } + self.cm_nsr['cm_vnfr'].append(vnf_cm_state) + + return vnf_cm_state + + @asyncio.coroutine + def update_vnf_cm_state(self, vnfr, state): + if vnfr: + vnf_cm_state = self.find_vnfr_cm_state(vnfr['id']) + if vnf_cm_state: + vnf_cm_state['state'] = self.state_to_string(state) + else: + self._log.error("No opdata found for NS/VNF:%s/%s!", self.nsr_name, vnfr['name']) + else: + self._log.error("No VNFR supplied for state update (NS=%s)!", self.nsr_name) + + @asyncio.coroutine + def update_ns_cm_state(self, state): + self.cm_nsr['state'] = self.state_to_string(state) + + def add_vnfr(self, vnfr): + + if vnfr['id'] not in self._vnfr_dict: + self._log.info("NSR(%s) : Adding VNF Record for name=%s, id=%s", self._nsr_id, vnfr['name'], vnfr['id']) + # Add this vnfr to the list for show, or single traversal + self._vnfr_list.append(vnfr) + else: + self._log.warning("NSR(%s) : VNF Record for name=%s, id=%s already exists, overwriting", self._nsr_id, vnfr['name'], vnfr['id']) + + # Make vnfr available by id as well as by name + unique_name = get_vnf_unique_name(self.nsr_name, vnfr['short_name'], vnfr['member_vnf_index_ref']) + self._vnfr_dict[unique_name] = vnfr + self._vnfr_dict[vnfr['id']] = vnfr + + # Create vnf_cfg dictionary with default values + vnf_cfg = { + 'nsr_obj' : self, + 'vnfr' : vnfr, + 'nsr_name' : self.nsr_name, + 'nsr_id' : self._nsr_id, + 'vnfr_name' : vnfr['short_name'], + 'member_vnf_index' : vnfr['member_vnf_index_ref'], + 'port' : 0, + 'username' : 'admin', + 'password' : 'admin', + 'config_method' : 'None', + 'protocol' : 'None', + 'mgmt_ip_address' : '0.0.0.0', + 'cfg_file' : 'None', + 'script_type' : 'bash', + } + + vnfr['vnf_cfg'] = vnf_cfg + self.find_or_create_vnfr_cm_state(vnf_cfg) + + + ''' + Build the connection-points list for this VNF (self._cp_dict) + ''' + # Populate global CP list self._cp_dict from VNFR + if 'connection_point' not in vnfr: + return + + cp_list = vnfr['connection_point'] + + self._cp_dict[vnfr['member_vnf_index_ref']] = {} + for cp_item_dict in cp_list: + # Populate global dictionary + self._cp_dict[cp_item_dict['name']] = cp_item_dict['ip_address'] + + # Populate unique member specific dictionary + 
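+            # The resulting dictionary mirrors what xlate_cfg.py receives via
+            # its -x argument, e.g. (illustrative values only):
+            #   {'rw_mgmt_ip': '1.1.1.1', 'test/cp1': '11.0.0.3',
+            #    1: {'test/cp1': '11.0.0.1'}, 2: {'test/cp1': '11.0.0.2'}}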
self._cp_dict[vnfr['member_vnf_index_ref']][cp_item_dict['name']] = cp_item_dict['ip_address'] + + return \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py new file mode 100644 index 0000000..11f67bc --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py @@ -0,0 +1,481 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import ncclient +import ncclient.asyncio_manager +import tornado.httpclient as tornadoh +import asyncio.subprocess +import asyncio +import time +import sys +import os, stat + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') + +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwConmanYang as conmanY, + RwNsrYang as nsrY, + RwVnfrYang as vnfrY, +) + +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + +def log_this_vnf(vnf_cfg): + log_vnf = "" + used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address'] + for item in used_item_list: + if item in vnf_cfg: + if item == 'mgmt_ip_address': + log_vnf += "({})".format(vnf_cfg[item]) + else: + log_vnf += "{}/".format(vnf_cfg[item]) + return log_vnf + +class ConfigManagerROifConnectionError(Exception): + pass +class ScriptError(Exception): + pass + +class ConfigManagerROif(object): + + def __init__(self, log, loop, parent): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + + try: + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(nsrY.get_schema()) + self._model.load_schema_ypbc(vnfrY.get_schema()) + except Exception as e: + self._log.error("Error generating models %s", str(e)) + + self.ro_config = self._parent._config.ro_config + + @property + def manager(self): + if self._manager is None: + raise + + return self._manager + + @asyncio.coroutine + def connect(self, timeout_secs=60): + ro_cfg = self.ro_config + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + + try: + self._log.info("Attemping Resource Orchestrator netconf connection.") + + self._manager = yield from ncclient.asyncio_manager.asyncio_connect( + loop=self._loop, + host=ro_cfg['ro_ip_address'], + port=ro_cfg['ro_port'], + username=ro_cfg['ro_username'], + password=ro_cfg['ro_password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + self._log.info("Connected to Resource Orchestrator netconf") + return + + except ncclient.transport.errors.SSHError as e: + self._log.error("Netconf connection to Resource Orchestrator ip %s failed: %s", + ro_cfg['ro_ip_address'], str(e)) + + yield from asyncio.sleep(2, loop=self._loop) + + self._manager = None + raise 
ConfigManagerROifConnectionError( + "Failed to connect to Resource Orchestrator within %s seconds" % timeout_secs + ) + + @asyncio.coroutine + def get_nsr(self, id): + self._log.debug("get_nsr() locals: %s", locals()) + xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']".format(id) + #xpath = "/ns-instance-opdata/nsr" + self._log.debug("Attempting to get NSR using xpath: %s", xpath) + response = yield from self._manager.get( + filter=('xpath', xpath), + ) + response_xml = response.data_xml.decode() + + self._log.debug("Received NSR(%s) response: %s", id, str(response_xml)) + + try: + nsr = nsrY.YangData_Nsr_NsInstanceOpdata_Nsr() + nsr.from_xml_v2(self._model, response_xml) + except Exception as e: + self._log.error("Failed to load nsr from xml e=%s", str(e)) + return + + self._log.debug("Deserialized NSR response: %s", nsr) + + return nsr.as_dict() + + @asyncio.coroutine + def get_vnfr(self, id): + xpath = "/vnfr-catalog/vnfr[id='{}']".format(id) + self._log.info("Attempting to get VNFR using xpath: %s", xpath) + response = yield from self._manager.get( + filter=('xpath', xpath), + ) + response_xml = response.data_xml.decode() + + self._log.debug("Received VNFR(%s) response: %s", id, str(response_xml)) + + vnfr = vnfrY.YangData_Vnfr_VnfrCatalog_Vnfr() + vnfr.from_xml_v2(self._model, response_xml) + + self._log.debug("Deserialized VNFR response: %s", vnfr) + + return vnfr.as_dict() + +class ConfigManagerEvents(object): + def __init__(self, dts, log, loop, parent): + self._dts = dts + self._log = log + self._loop = loop + self._parent = parent + self._nsr_xpath = "/cm-state/cm-nsr" + + def register(self): + try: + self._orif = ConfigManagerROif(self._log, self._loop, self._parent) + self.register_cm_rpc() + except Exception as e: + self._log.debug("Failed to register (%s)", e) + + + def register_cm_rpc(self): + + try: + self._rpc_hdl = self._dts.register( + xpath=self._nsr_xpath, + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=self.prepare_update_nsr), + flags=rwdts.Flag.PUBLISHER) + except Exception as e: + self._log.debug("Failed to register xpath(%s) as (%s)", self._nsr_xpath, e) + + @asyncio.coroutine + def prepare_update_nsr(self, xact_info, action, ks_path, msg): + """ Prepare callback for the RPC """ + self._log("Received prepare_update_nsr with action=%s, msg=%s", action, msg) + + # Fetch VNFR for each VNFR id in NSR + + @asyncio.coroutine + def update_vnf_state(self, vnf_cfg, state): + nsr_obj = vnf_cfg['nsr_obj'] + yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state) + + @asyncio.coroutine + def apply_vnf_config(self, vnf_cfg): + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_DELAY) + yield from asyncio.sleep(vnf_cfg['config_delay'], loop=self._loop) + # See if we are still alive! 
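+        # terminate_NSR() flips being_deleted while we may be sleeping out
+        # config_delay above, so re-check before touching the VNF.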
+ if vnf_cfg['nsr_obj'].being_deleted: + # Don't do anything, just return + return True + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_SEND) + try: + if vnf_cfg['config_method'] == 'netconf': + self._log.info("Creating ncc handle for VNF cfg = %s!", vnf_cfg) + self.ncc = ConfigManagerVNFnetconf(self._log, self._loop, self, vnf_cfg) + if vnf_cfg['protocol'] == 'ssh': + yield from self.ncc.connect_ssh() + else: + yield from self.ncc.connect() + yield from self.ncc.apply_edit_cfg() + elif vnf_cfg['config_method'] == 'rest': + if self.rcc is None: + self._log.info("Creating rcc handle for VNF cfg = %s!", vnf_cfg) + self.rcc = ConfigManagerVNFrestconf(self._log, self._loop, self, vnf_cfg) + self.ncc.apply_edit_cfg() + elif vnf_cfg['config_method'] == 'script': + self._log.info("Executing script for VNF cfg = %s!", vnf_cfg) + scriptc = ConfigManagerVNFscriptconf(self._log, self._loop, self, vnf_cfg) + yield from scriptc.apply_edit_cfg() + elif vnf_cfg['config_method'] == 'juju': + self._log.info("Executing juju config for VNF cfg = %s!", vnf_cfg) + jujuc = ConfigManagerVNFjujuconf(self._log, self._loop, self._parent, vnf_cfg) + yield from jujuc.apply_edit_cfg() + else: + self._log.error("Unknown configuration method(%s) received for %s", + vnf_cfg['config_method'], vnf_cfg['vnf_unique_name']) + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED) + return True + + #Update VNF state + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.READY) + self._log.info("Successfully applied configuration to VNF: %s", + log_this_vnf(vnf_cfg)) + except Exception as e: + self._log.error("Applying configuration(%s) file(%s) to VNF: %s failed as: %s", + vnf_cfg['config_method'], + vnf_cfg['cfg_file'], + log_this_vnf(vnf_cfg), + str(e)) + #raise + return False + + return True + +class ConfigManagerVNFscriptconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + #@asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply scriptconf to VNF: %s", log_this_vnf(vnf_cfg)) + try: + st = os.stat(vnf_cfg['cfg_file']) + os.chmod(vnf_cfg['cfg_file'], st.st_mode | stat.S_IEXEC) + #script_msg = subprocess.check_output(vnf_cfg['cfg_file'], shell=True).decode('utf-8') + + proc = yield from asyncio.create_subprocess_exec( + vnf_cfg['script_type'], vnf_cfg['cfg_file'], + stdout=asyncio.subprocess.PIPE) + script_msg = yield from proc.stdout.read() + rc = yield from proc.wait() + + self._log.debug("Debug config script output (%s)", script_msg) + if rc != 0: + raise ScriptError( + "script config returned error code : %s" % rc + ) + + except Exception as e: + self._log.error("Error (%s) while executing script config for VNF: %s", + str(e), log_this_vnf(vnf_cfg)) + raise + +class ConfigManagerVNFrestconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + def fetch_handle(self, response): + if response.error: + self._log.error("Failed to send HTTP config request - %s", response.error) + else: + self._log.debug("Sent HTTP config request - %s", response.body) + + @asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply restconf to VNF: %s", log_this_vnf(vnf_cfg)) + try: + http_c = tornadoh.AsyncHTTPClient() + # TBD + # Read the config 
entity from file? + # Convert connectoin-point? + http_c.fetch("http://", self.fetch_handle) + except Exception as e: + self._log.error("Error (%s) while applying HTTP config", str(e)) + +class ConfigManagerVNFnetconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(conmanY.get_schema()) + + @asyncio.coroutine + def connect(self, timeout_secs=120): + vnf_cfg = self._vnf_cfg + start_time = time.time() + self._log.debug("connecting netconf .... %s", vnf_cfg) + while (time.time() - start_time) < timeout_secs: + + try: + self._log.info("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg)) + + self._manager = yield from ncclient.asyncio_manager.asyncio_connect( + loop=self._loop, + host=vnf_cfg['mgmt_ip_address'], + port=vnf_cfg['port'], + username=vnf_cfg['username'], + password=vnf_cfg['password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + + self._log.info("Netconf connected to VNF: %s", log_this_vnf(vnf_cfg)) + return + + except ncclient.transport.errors.SSHError as e: + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION) + self._log.error("Netconf connection to VNF: %s, failed: %s", + log_this_vnf(vnf_cfg), str(e)) + + yield from asyncio.sleep(2, loop=self._loop) + + raise ConfigManagerROifConnectionError( + "Failed to connect to VNF: %s within %s seconds" % + (log_this_vnf(vnf_cfg), timeout_secs) + ) + + @asyncio.coroutine + def connect_ssh(self, timeout_secs=120): + vnf_cfg = self._vnf_cfg + start_time = time.time() + + if (self._manager != None and self._manager.connected == True): + self._log.debug("Disconnecting previous session") + self._manager.close_session + + self._log.debug("connecting netconf via SSH .... 
%s", vnf_cfg) + while (time.time() - start_time) < timeout_secs: + + try: + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.CONNECTING) + self._log.debug("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg)) + + self._manager = ncclient.asyncio_manager.manager.connect_ssh( + host=vnf_cfg['mgmt_ip_address'], + port=vnf_cfg['port'], + username=vnf_cfg['username'], + password=vnf_cfg['password'], + allow_agent=False, + look_for_keys=False, + hostkey_verify=False, + ) + + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.NETCONF_SSH_CONNECTED) + self._log.debug("netconf over SSH connected to VNF: %s", log_this_vnf(vnf_cfg)) + return + + except ncclient.transport.errors.SSHError as e: + yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION) + self._log.error("Netconf connection to VNF: %s, failed: %s", + log_this_vnf(vnf_cfg), str(e)) + + yield from asyncio.sleep(2, loop=self._loop) + + raise ConfigManagerROifConnectionError( + "Failed to connect to VNF: %s within %s seconds" % + (log_this_vnf(vnf_cfg), timeout_secs) + ) + + @asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply netconf to VNF: %s", log_this_vnf(vnf_cfg)) + + if self._manager is None: + self._log.error("Netconf is not connected to VNF: %s, aborting!", log_this_vnf(vnf_cfg)) + return + + # Get config file contents + try: + with open(vnf_cfg['cfg_file']) as f: + configuration = f.read() + except Exception as e: + self._log.error("Reading contents of the configuration file(%s) failed: %s", vnf_cfg['cfg_file'], str(e)) + return + + try: + self._log.debug("apply_edit_cfg to VNF: %s", log_this_vnf(vnf_cfg)) + xml = '{}'.format(configuration) + response = yield from self._manager.edit_config(xml, target='running') + if hasattr(response, 'xml'): + response_xml = response.xml + else: + response_xml = response.data_xml.decode() + + self._log.debug("apply_edit_cfg response: %s", response_xml) + if '' in response_xml: + raise ConfigManagerROifConnectionError("apply_edit_cfg response has rpc-error : %s", + response_xml) + + self._log.debug("apply_edit_cfg Successfully applied configuration {%s}", xml) + except: + raise + +class ConfigManagerVNFjujuconf(object): + + def __init__(self, log, loop, parent, vnf_cfg): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + self._vnf_cfg = vnf_cfg + + #@asyncio.coroutine + def apply_edit_cfg(self): + vnf_cfg = self._vnf_cfg + self._log.debug("Attempting to apply juju conf to VNF: %s", log_this_vnf(vnf_cfg)) + try: + args = ['python3', + vnf_cfg['juju_script'], + '--server', vnf_cfg['mgmt_ip_address'], + '--user', vnf_cfg['user'], + '--password', vnf_cfg['secret'], + '--port', str(vnf_cfg['port']), + vnf_cfg['cfg_file']] + self._log.error("juju script command (%s)", args) + + proc = yield from asyncio.create_subprocess_exec( + *args, + stdout=asyncio.subprocess.PIPE) + juju_msg = yield from proc.stdout.read() + rc = yield from proc.wait() + + if rc != 0: + raise ScriptError( + "Juju config returned error code : %s" % rc + ) + + self._log.debug("Juju config output (%s)", juju_msg) + except Exception as e: + self._log.error("Error (%s) while executing juju config", str(e)) + raise \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg 
new file mode 100644 index 0000000..4510ffa --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg @@ -0,0 +1,27 @@ +# This template has all supported TAGs. +# This template can be used as input to the xlate_cfg.py script as follows: + +# python3 ./xlate_cfg.py -i ./rwconman_test_config_template.cfg -o ./rwconman_test_config.cfg -x "{1: {'test/cp1': '11.0.0.1'}, 2: {'test/cp1': '11.0.0.2'}, 'rw_mgmt_ip': '1.1.1.1', 'rw_username': 'admin', 'test/cp1': '11.0.0.3', 'rw_password': 'admin'}" + + +# This is error +#0. + +# Following are simple TAGs +1. This is Management IP: +2. This is Username: +3. This is Password: +4. This is globally unique connection point: + +# Following are colon separated complex TAGs +5. This is connection point for a given VNF with unique member index: +6. This is converting connection point IP address into network address: +7. This is converting connection point IP address into boadcast address: + +# Following generated tuple with original connectino point name (Global only) +8. This is not used anywhere: + +# Following test all of the above in single line +9. All at once: START| | | | | | | | |END + + diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py new file mode 100755 index 0000000..14bbf71 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py @@ -0,0 +1,180 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +''' +This file - ConfigManagerTasklet() +| ++--|--> ConfigurationManager() + | + +--> rwconman_config.py - ConfigManagerConfig() + | | + | +--> ConfigManagerNSR() + | + +--> rwconman_events.py - ConfigManagerEvents() + | + +--> ConfigManagerROif() + +''' + +import asyncio +import logging +import os + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwConmanYang', '1.0') + + +from gi.repository import ( + RwDts as rwdts, + RwConmanYang as conmanY, +) + +import rift.tasklets + +from . import rwconman_config as Config +from . 
import rwconman_events as Event + +class ConfigurationManager(object): + def __init__(self, log, loop, dts): + self._log = log + self._loop = loop + self._dts = dts + self.cfg_sleep = True + self.cfg_dir = os.path.join(os.environ["RIFT_INSTALL"], "etc/conman") + self._config = Config.ConfigManagerConfig(self._dts, self._log, self._loop, self) + self._event = Event.ConfigManagerEvents(self._dts, self._log, self._loop, self) + self.pending_cfg = [] + + @asyncio.coroutine + def update_vnf_state(self, vnf_cfg, state): + nsr_obj = vnf_cfg['nsr_obj'] + yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state) + + @asyncio.coroutine + def update_ns_state(self, nsr_obj, state): + yield from nsr_obj.update_ns_cm_state(state) + + @asyncio.coroutine + def register(self): + yield from self._config.register() + self._event.register() + + @asyncio.coroutine + def configuration_handler(): + while True: + #self._log.debug("Pending Configuration = %s", self.pending_cfg) + if self.pending_cfg: + # pending_cfg is nsr_obj list + nsr_obj = self.pending_cfg[0] + if nsr_obj.being_deleted is False: + vnf_cfg_list = nsr_obj.vnf_cfg_list + while True: + if vnf_cfg_list: + vnf_cfg = vnf_cfg_list[0] + self._log.info("Applying Pending Configuration for NS/VNF = %s/%s", nsr_obj.nsr_name, vnf_cfg) + try: + done = yield from self._event.apply_vnf_config(vnf_cfg) + if done: + vnf_cfg_list.remove(vnf_cfg) + else: + # Do not update nsr state, since config failed for at least one VNF + nsr_obj = None + break + except Exception as e: + yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED) + self._log.info("Failed(%s) to Apply Pending Configuration for VNF = %s, will retry", e, vnf_cfg) + # Do not update nsr state, since config failed for at least one VNF + nsr_obj = None + # Do not attempt the next VNF config, there might be dependancies (hence config order) + break + else: + # Done iterating thru each VNF in this NS + break + + if nsr_obj is not None: + yield from self.update_ns_state(nsr_obj, conmanY.RecordState.READY) + # Now delete this NS from pending + self.pending_cfg.pop(0) + + yield from asyncio.sleep(1, loop=self._loop) + asyncio.ensure_future(configuration_handler(), loop=self._loop) + +class ConfigManagerTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(ConfigManagerTasklet, self).__init__(*args, **kwargs) + self._dts = None + self._con_man = None + + def start(self): + super(ConfigManagerTasklet, self).start() + self.log.setLevel(logging.DEBUG) + self.log.info("Starting ConfigManagerTasklet") + + self.log.debug("Registering with dts") + + self._dts = rift.tasklets.DTS(self.tasklet_info, + conmanY.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def init(self): + self._log.info("Initializing the Service Orchestrator tasklet") + self._con_man = ConfigurationManager(self.log, + self.loop, + self._dts) + yield from self._con_man.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + 
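+            # States with no handler (e.g. REGN_COMPLETE) only advance the
+            # DTS state machine via the switch table above.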
} + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py new file mode 100644 index 0000000..f83a3f8 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +''' +This script will go through the input conffiguration template and convert all the matching "regular expression" and "strings" +specified in xlate_cp_list & xlate_str_list with matching IP addresses passed in as dictionary to this script. + +-i Configuration template +-o Output final configuration complete with IP addresses +-x Xlate(Translate dictionary in string format +-t TAGS to be translated + +''' + +import sys +import getopt +import ast +import re +import yaml +import netaddr + +from inspect import getsourcefile +import os.path + +xlate_dict = None + +def xlate_cp_list(line, cp_list): + for cp_string in cp_list: + match = re.search(cp_string, line) + if match is not None: + # resolve IP address using Connection Point dictionary + resolved_ip = xlate_dict[match.group(1)] + if resolved_ip is None: + print("No matching CP found: ", match.group(1)) + exit(2) + else: + line = line[:match.start()] + resolved_ip + line[match.end():] + return line + +def xlate_colon_list(line, colon_list): + for ucp_string in colon_list: + #print("Searching :", ucp_string) + match = re.search(ucp_string, line) + if match is not None: + #print("match :", match.group()) + # resolve IP address using Connection Point dictionary for specified member (unique) index + ucp_str_list = match.group(1).split(':') + #print("matched = {}, split list = {}".format(match.group(1), ucp_str_list)) + if len(ucp_str_list) != 2: + print("Invalid TAG in the configuration: ", match.group(1)) + exit(2) + + # Unique Connection Point translation to IP + if ucp_string.startswith(' + +# Literal string translations +xlate_str_list : + - + - + - + +# This list contains 2 tags separated by colon (:) +xlate_colon_list : + # Fetch CP from the member_index dictionary (I.e. 
CP of a particular VNF) + - + # Generate network address from CP address and mask (mask is expected to be a hard coded number in config) + - + # Generate broadcast address from CP address and mask (mask is expected to be a hard coded number in config) + - + +# This translates connection point name and generates tuple with name:resolved IP +xlate_cp_to_tuple_list : + - + diff --git a/modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py b/modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py new file mode 100755 index 0000000..99f19e0 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/rwconman/rwconmantasklet.py @@ -0,0 +1,29 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwconmantasklet +class Tasklet(rift.tasklets.rwconmantasklet.ConfigManagerTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/yang/CMakeLists.txt b/modules/core/mano/rwcm/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..9e814b7 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/yang/CMakeLists.txt @@ -0,0 +1,30 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +## +# Yang targets +## + +rift_add_yang_target( + TARGET rw_conman_yang + YANG_FILES rw-conman.yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + mano_yang_gen + DEPENDS + mano_yang +) + +## +# Install the XML file +## +install( + FILES ../cli/cli_rwcm.xml + DESTINATION usr/data/manifest + COMPONENT ${PKG_LONG_NAME} +) + diff --git a/modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang b/modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang new file mode 100644 index 0000000..ab34dbd --- /dev/null +++ b/modules/core/mano/rwcm/plugins/yang/rw-conman.tailf.yang @@ -0,0 +1,22 @@ +module rw-conman-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-conman-annotation"; + prefix "rw-conman-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import rw-conman { + prefix conman; + } + + tailf:annotate "/conman:cm-state" { + tailf:callpoint base_show; + } + +} \ No newline at end of file diff --git a/modules/core/mano/rwcm/plugins/yang/rw-conman.yang b/modules/core/mano/rwcm/plugins/yang/rw-conman.yang new file mode 100755 index 0000000..a7a6fc2 --- /dev/null +++ b/modules/core/mano/rwcm/plugins/yang/rw-conman.yang @@ -0,0 +1,236 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-conman.yang + * @author Manish Patel + * @date 2015/10/27 + * @brief Service Orchestrator configuration yang + */ + +module rw-conman +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-conman"; + prefix "rw-conman"; + + import rw-pb-ext { + prefix 
"rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import nsr { + prefix "nsr"; + } + + import vnfr { + prefix "vnfr"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-base { + prefix "manobase"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-10-27 { + description + "Initial revision."; + } + + // typedef ro-endpoint-method { + // type enumeration { + // enum netconf; + // enum restconf; + // } + // } + + grouping ro-endpoint { + // leaf ro-endpoint-method { + // description "interface between CM & RO, defaults to netconf"; + // type ro-endpoint-method; + // default netconf; + // } + leaf ro-ip-address { + type inet:ip-address; + description "IP Address"; + default "127.0.0.1"; + } + leaf ro-port { + type inet:port-number; + description "Port Number"; + default 2022; + } + leaf ro-username { + description "RO endpoint username"; + type string; + default "admin"; + } + leaf ro-password { + description "RO endpoint password"; + type string; + default "admin"; + } + } + + grouping vnf-cfg-items { + leaf configuration-file { + description "Location of the confguration file on CM system"; + type string; + } + leaf translator-script { + description "Script that translates the templates in the configuration-file using VNFR information + Currently, we only use IP address translations. + configuration will use connection point name instead of IP addresses."; + type string; + } + } + + container cm-config { + description "Service Orchestrator specific configuration"; + rwpb:msg-new "SoConfig"; + rwcli:new-mode "cm-config"; + + container ro-endpoint { + description "Resource Orchestrator endpoint ip address"; + rwpb:msg-new "RoEndpoint"; + uses ro-endpoint; + } + + //uses vnf-cfg-items; + + list nsr { + key "id"; + leaf id { + description "Indicates NSR bringup complete, now initiate configuration of the NSR"; + type yang:uuid; + } + } + }// cm-config + + // =================== SHOW ================== + typedef record-state { + type enumeration { + enum init; + enum received; + enum cfg-delay; + enum cfg-process; + enum cfg-process-failed; + enum cfg-sched; + enum connecting; + enum failed-connection; + enum netconf-connected; + enum netconf-ssh-connected; + enum restconf-connected; + enum cfg-send; + enum cfg-failed; + enum ready-no-cfg; + enum ready; + } + } + + // TBD: Do we need this typedef, currently not used anywhere + typedef cfg-type { + type enumeration { + enum none; + enum scriptconf; + enum netconf; + enum restconf; + enum jujuconf; + } + } + + + // This is also used by RO (Resource Orchestrator) to indicate NSR is ready + // It will only fill in IDs + container cm-state { + rwpb:msg-new "CmOpdata"; + config false; + description "CM NS & VNF states"; + + leaf states { + description "CM various states"; + type string; + } + + list cm-nsr { + description "List of NS Records"; + key "id"; + leaf id { + type yang:uuid; + } + leaf name { + description "NSR name."; + type string; + } + leaf state { + description "State of NSR"; + type record-state; + } + + list cm-vnfr { + description "List of VNF Records within NS Record"; + key "id"; + leaf id { + type yang:uuid; + } + leaf name { + description "VNFR name."; + type string; + } + leaf state { + description "Last known state of this VNFR"; + type record-state; + } + container mgmt-interface { + leaf ip-address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + leaf 
cfg-type { + type string; + } + leaf cfg-location { + type inet:uri; + } + list connection-point { + key "name"; + leaf name { + description "Connection Point name"; + type string; + } + leaf ip-address { + description "IP address assigned to this connection point"; + type inet:ip-address; + } + } + } // list VNFR + } // list NSR + } // cm-state + +} // rw-conman diff --git a/modules/core/mano/rwcm/test/CMakeLists.txt b/modules/core/mano/rwcm/test/CMakeLists.txt new file mode 100644 index 0000000..baebe67 --- /dev/null +++ b/modules/core/mano/rwcm/test/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Manish Patel +# Creation Date: 10/28/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(CONMAN_INSTALL "demos/conman") + +install( + FILES + start_cm_system.py + README.start_cm + DESTINATION ${CONMAN_INSTALL} + COMPONENT ${PKG_LONG_NAME}) + +# set(NS_NAME ping_pong_nsd) +# install( +# FILES +# ${NS_NAME}/configuration_input_params.yml +# ${NS_NAME}/ping_vnfd_1_scriptconf_template.cfg +# ${NS_NAME}/pong_vnfd_11_scriptconf_template.cfg +# DESTINATION ${CONMAN_INSTALL}/${NS_NAME} +# COMPONENT ${PKG_LONG_NAME}) + diff --git a/modules/core/mano/rwcm/test/README.start_cm b/modules/core/mano/rwcm/test/README.start_cm new file mode 100644 index 0000000..7a8098b --- /dev/null +++ b/modules/core/mano/rwcm/test/README.start_cm @@ -0,0 +1,4 @@ +# Following example command line to launch the system in collapse mode. +# Please tailor for expanded mode or any other requirements + +./start_cm_system.py -m ethsim -c --skip-prepare-vm diff --git a/modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml b/modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml new file mode 100644 index 0000000..a211660 --- /dev/null +++ b/modules/core/mano/rwcm/test/cwims_juju_nsd/configuration_input_params.yml @@ -0,0 +1,35 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is input parameters file for Network Service configuration. 
+# This file is formatted as below: + +# configuration_delay : 120 # Number of seconds to wait before applying configuration after NS is up +# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator +# 1 : # Configuration Priority, order in which each VNF will be configured +# name : vnfd_name # Name of the VNF +# member_vnf_index : 11 # member index of the VNF that makes it unique (in case of multiple instances of same VNF) +# configuration_type : scriptconf # Type of configuration (Currently supported values : scriptconf, netconf) +# +# Repeat VNF block for as many VNFs + +configuration_delay : 30 +number_of_vnfs_to_be_configured : 1 +1 : + name : cwims_vnfd + member_vnf_index : 1 + configuration_type : jujuconf + diff --git a/modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg b/modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg new file mode 100644 index 0000000..d32efe3 --- /dev/null +++ b/modules/core/mano/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg @@ -0,0 +1,23 @@ +ims-a: + deploy: + store: local + directory: /usr/rift/charms/cw-aio-proxy/trusty/ + series: trusty + to: "lxc:0" + + # Data under config passed as such during deployment + config: + proxied_ip: + home_domain: "ims.riftio.local" + base_number: "1234567000" + number_count: 1000 + + units: + - unit: + # Wait for each command to complete + wait: true + # Bail on failure + bail: true + actions: + - create-user: { number: "1234567001", password: "secret"} + - create-user: { number: "1234567002", password: "secret"} diff --git a/modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml b/modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml new file mode 100644 index 0000000..d5e48f2 --- /dev/null +++ b/modules/core/mano/rwcm/test/ping_pong_nsd/configuration_input_params.yml @@ -0,0 +1,38 @@ +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is input parameters file for Network Service configuration. 
+# This file is formatted as below: + +# configuration_delay : 120 # Number of seconds to wait before applying configuration after NS is up +# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator +# 1 : # Configuration Priority, order in which each VNF will be configured +# name : vnfd_name # Name of the VNF +# member_vnf_index : 11 # member index of the VNF that makes it unique (in case of multiple instances of same VNF) +# configuration_type : scriptconf # Type of configuration (Currently supported values : scriptconf, netconf) +# +# Repeat VNF block for as many VNFs + +configuration_delay : 30 +number_of_vnfs_to_be_configured : 2 +1 : + name : pong_vnfd + member_vnf_index : 2 + configuration_type : scriptconf +2 : + name : ping_vnfd + member_vnf_index : 1 + configuration_type : scriptconf diff --git a/modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg b/modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg new file mode 100755 index 0000000..ffa2518 --- /dev/null +++ b/modules/core/mano/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg @@ -0,0 +1,54 @@ +#!/usr/bin/bash + +# Rest API config +ping_mgmt_ip='' +ping_mgmt_port=18888 + +# VNF specific configuration +pong_server_ip='' +ping_rate=5 +server_port=5555 + +# Make rest API calls to configure VNF +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set server info for ping!" + exit $rc +fi + +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"rate\":$ping_rate}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set ping rate!" + exit $rc +fi + +output=$(curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"enable\":true}" \ + http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state) +if [[ $output == *"Internal Server Error"* ]] +then + echo $output + exit 3 +else + echo $output +fi + + +exit 0 diff --git a/modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg b/modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg new file mode 100755 index 0000000..4f67c9d --- /dev/null +++ b/modules/core/mano/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg @@ -0,0 +1,42 @@ +#!/usr/bin/bash + +# Rest API configuration +pong_mgmt_ip='' +pong_mgmt_port=18889 + +# Test +# username= +# password= + +# VNF specific configuration +pong_server_ip='' +server_port=5555 + +# Make Rest API calls to configure VNF +curl -D /dev/stdout \ + -H "Accept: application/vnd.yang.data+xml" \ + -H "Content-Type: application/vnd.yang.data+json" \ + -X POST \ + -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \ + http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server +rc=$? +if [ $rc -ne 0 ] +then + echo "Failed to set server(own) info for pong!" 
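+    # Propagate curl's non-zero exit status so a failed REST call aborts this configuration template.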
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to enable pong service!"
+    exit $rc
+fi
+
+exit 0
diff --git a/modules/core/mano/rwcm/test/rwso_test.py b/modules/core/mano/rwcm/test/rwso_test.py
new file mode 100755
index 0000000..7c1af7b
--- /dev/null
+++ b/modules/core/mano/rwcm/test/rwso_test.py
@@ -0,0 +1,353 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+
+import xmlrunner
+
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwConmanYang as conmanY
+import gi.repository.RwLaunchpadYang as launchpadyang
+# The pool/reservation helpers below reference rmgryang, which this file
+# never imported; RwResourceMgrYang is the assumed GI module name here,
+# following the naming pattern of the other schema imports above.
+import gi.repository.RwResourceMgrYang as rmgryang
+
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class RWSOTestCase(unittest.TestCase):
+    """
+    DTS GI interface unittests
+
+    Note: Each test uses a list of asyncio.Events for staging through the
+    test. These are required here because we are bringing up each coroutine
+    ("tasklet") at the same time and are not implementing any re-try
+    mechanisms. For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries. Such
+    event lists should not be used in production software.
+    """
+    rwmain = None
+    tinfo = None
+    schema = None
+    id_cnt = 0
+
+    @classmethod
+    def setUpClass(cls):
+        msgbroker_dir = os.environ.get('MESSAGE_BROKER_DIR')
+        router_dir = os.environ.get('ROUTER_DIR')
+        cm_dir = os.environ.get('SO_DIR')
+
+        manifest = rwmanifest.Manifest()
+        manifest.init_phase.settings.rwdtsrouter.single_dtsrouter.enable = True
+
+        cls.rwmain = rwmain.Gi.new(manifest)
+        cls.tinfo = cls.rwmain.get_tasklet_info()
+
+        # Run router in mainq. Eliminates some ill-diagnosed bootstrap races.
+        os.environ['RWDTS_ROUTER_MAINQ']='1'
+        cls.rwmain.add_tasklet(msgbroker_dir, 'rwmsgbroker-c')
+        cls.rwmain.add_tasklet(router_dir, 'rwdtsrouter-c')
+        cls.rwmain.add_tasklet(cm_dir, 'rwconmantasklet')
+
+        cls.log = rift.tasklets.logger_from_tasklet_info(cls.tinfo)
+        cls.log.setLevel(logging.DEBUG)
+
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        fmt = logging.Formatter(
+            '%(asctime)-23s %(levelname)-5s (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler.setFormatter(fmt)
+        cls.log.addHandler(stderr_handler)
+        cls.schema = conmanY.get_schema()
+
+    def setUp(self):
+        def scheduler_tick(self, *args):
+            self.call_soon(self.stop)
+            self.run_forever()
+
+        self.loop = asyncio.new_event_loop()
+        self.loop.scheduler_tick = types.MethodType(scheduler_tick, self.loop)
+        self.loop.set_debug(True)
+        os.environ["PYTHONASYNCIODEBUG"] = "1"
+        asyncio_logger = logging.getLogger("asyncio")
+        asyncio_logger.setLevel(logging.DEBUG)
+
+        self.asyncio_timer = None
+        self.stop_timer = None
+        self.id_cnt += 1
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        yield from asyncio.sleep(1, loop=self.loop)
+
+    def run_until(self, test_done, timeout=30):
+        """
+        Attach the current asyncio event loop to rwsched and then run the
+        scheduler until the test_done function returns True or timeout seconds
+        pass.
+
+        @param test_done - function which should return True once the test is
+                complete and the scheduler no longer needs to run.
+        @param timeout - maximum number of seconds to run the test.
+        """
+        def shutdown(*args):
+            if args:
+                self.log.debug('Shutting down loop due to timeout')
+
+            if self.asyncio_timer is not None:
+                self.tinfo.rwsched_tasklet.CFRunLoopTimerRelease(self.asyncio_timer)
+                self.asyncio_timer = None
+
+            if self.stop_timer is not None:
+                self.tinfo.rwsched_tasklet.CFRunLoopTimerRelease(self.stop_timer)
+                self.stop_timer = None
+
+            self.tinfo.rwsched_instance.CFRunLoopStop()
+
+        def tick(*args):
+            self.loop.call_later(0.1, self.loop.stop)
+            self.loop.run_forever()
+            if test_done():
+                shutdown()
+
+        self.asyncio_timer = self.tinfo.rwsched_tasklet.CFRunLoopTimer(
+            cf.CFAbsoluteTimeGetCurrent(),
+            0.1,
+            tick,
+            None)
+
+        self.stop_timer = self.tinfo.rwsched_tasklet.CFRunLoopTimer(
+            cf.CFAbsoluteTimeGetCurrent() + timeout,
+            0,
+            shutdown,
+            None)
+
+        self.tinfo.rwsched_tasklet.CFRunLoopAddTimer(
+            self.tinfo.rwsched_tasklet.CFRunLoopGetCurrent(),
+            self.stop_timer,
+            self.tinfo.rwsched_instance.CFRunLoopGetMainMode())
+
+        self.tinfo.rwsched_tasklet.CFRunLoopAddTimer(
+            self.tinfo.rwsched_tasklet.CFRunLoopGetCurrent(),
+            self.asyncio_timer,
+            self.tinfo.rwsched_instance.CFRunLoopGetMainMode())
+
+        self.tinfo.rwsched_instance.CFRunLoopRun()
+
+        self.assertTrue(test_done())
+
+    def new_tinfo(self, name):
+        """
+        Create a new tasklet info instance with a unique instance_id per test.
+        It is up to each test to use unique names if more than one tasklet info
+        instance is needed. 
+ + @param name - name of the "tasklet" + @return - new tasklet info instance + """ + ret = self.rwmain.new_tasklet_info(name, RWSOTestCase.id_cnt) + + log = rift.tasklets.logger_from_tasklet_info(ret) + log.setLevel(logging.DEBUG) + + stderr_handler = logging.StreamHandler(stream=sys.stderr) + fmt = logging.Formatter( + '%(asctime)-23s %(levelname)-5s (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s') + stderr_handler.setFormatter(fmt) + log.addHandler(stderr_handler) + + return ret + + def get_cloud_account_msg(self): + cloud_account = launchpadyang.CloudAccount() + cloud_account.name = "cloudy" + cloud_account.account_type = "mock" + cloud_account.mock.username = "rainy" + return cloud_account + + def get_compute_pool_msg(self, name, pool_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "compute" + if pool_type == "static": + # Need to query CAL for resource + pass + else: + pool.max_size = 10 + return pool_config + + def get_network_pool_msg(self, name, pool_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "network" + if pool_type == "static": + # Need to query CAL for resource + pass + else: + pool.max_size = 4 + return pool_config + + + def get_network_reserve_msg(self, xpath): + event_id = str(uuid.uuid4()) + msg = rmgryang.VirtualLinkEventData() + msg.event_id = event_id + msg.request_info.name = "mynet" + msg.request_info.subnet = "1.1.1.0/24" + return msg, xpath.format(event_id) + + def get_compute_reserve_msg(self,xpath): + event_id = str(uuid.uuid4()) + msg = rmgryang.VDUEventData() + msg.event_id = event_id + msg.request_info.name = "mynet" + msg.request_info.image_id = "This is a image_id" + msg.request_info.vm_flavor.vcpu_count = 4 + msg.request_info.vm_flavor.memory_mb = 8192*2 + msg.request_info.vm_flavor.storage_gb = 40 + c1 = msg.request_info.connection_points.add() + c1.name = "myport1" + c1.virtual_link_id = "This is a network_id" + return msg, xpath.format(event_id) + + def test_create_resource_pools(self): + self.log.debug("STARTING - test_create_resource_pools") + tinfo = self.new_tinfo('poolconfig') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools" + pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records" + account_xpath = "C,/rw-launchpad:cloud-account" + compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']" + network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']" + + @asyncio.coroutine + def configure_cloud_account(): + msg = self.get_cloud_account_msg() + self.log.info("Configuring cloud-account: %s",msg) + yield from dts.query_create(account_xpath, + rwdts.Flag.ADVISE, + msg) + yield from asyncio.sleep(3, loop=self.loop) + + @asyncio.coroutine + def configure_compute_resource_pools(): + msg = self.get_compute_pool_msg("virtual-compute", "dynamic") + self.log.info("Configuring compute-resource-pool: %s",msg) + yield from dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + yield from asyncio.sleep(3, loop=self.loop) + + + @asyncio.coroutine + def configure_network_resource_pools(): + msg = self.get_network_pool_msg("virtual-network", "dynamic") + self.log.info("Configuring network-resource-pool: %s",msg) + yield from dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + yield from asyncio.sleep(3, loop=self.loop) + + + 
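+        # Read the resource-pool operational records back from DTS and log
+        # each record's contents; the count/name assertions are left commented
+        # out below.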
@asyncio.coroutine + def verify_resource_pools(): + self.log.debug("Verifying test_create_resource_pools results") + res_iter = yield from dts.query_read(pool_records_xpath,) + for result in res_iter: + response = yield from result + records = response.result.records + #self.assertEqual(len(records), 2) + #names = [i.name for i in records] + #self.assertTrue('virtual-compute' in names) + #self.assertTrue('virtual-network' in names) + for record in records: + self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Busy Resources: %d", + record.name, + record.resource_type, + record.pool_status, + record.max_size, + record.busy_resources) + @asyncio.coroutine + def reserve_network_resources(): + msg,xpath = self.get_network_reserve_msg(network_xpath) + self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, rwdts.Flag.TRACE, msg) + yield from asyncio.sleep(3, loop=self.loop) + yield from dts.query_delete(xpath, rwdts.Flag.TRACE) + + @asyncio.coroutine + def reserve_compute_resources(): + msg,xpath = self.get_compute_reserve_msg(compute_xpath) + self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, rwdts.Flag.TRACE, msg) + yield from asyncio.sleep(3, loop=self.loop) + yield from dts.query_delete(xpath, rwdts.Flag.TRACE) + + @asyncio.coroutine + def run_test(): + yield from self.wait_tasklets() + yield from configure_cloud_account() + yield from configure_compute_resource_pools() + yield from configure_network_resource_pools() + yield from verify_resource_pools() + yield from reserve_network_resources() + yield from reserve_compute_resources() + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_create_resource_pools") + + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + launchpad_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build/rwlaunchpad') + + if 'MESSAGE_BROKER_DIR' not in os.environ: + os.environ['MESSAGE_BROKER_DIR'] = os.path.join(build_dir, 'rwmsg/plugins/rwmsgbroker-c') + + if 'ROUTER_DIR' not in os.environ: + os.environ['ROUTER_DIR'] = os.path.join(build_dir, 'rwdts/plugins/rwdtsrouter-c') + + if 'SO_DIR' not in os.environ: + os.environ['SO_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwconmantasklet') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwcm/test/start_cm_system.py b/modules/core/mano/rwcm/test/start_cm_system.py new file mode 100755 index 0000000..7ec6e95 --- /dev/null +++ b/modules/core/mano/rwcm/test/start_cm_system.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import logging
+import os
+import sys
+
+import rift.vcs
+import rift.vcs.demo
+import rift.vcs.vms
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents the SO (Service Orchestrator) tasklet.
+    """
+
+    def __init__(self, name='rwcmtasklet', uid=None):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name - the name of the tasklet
+            uid  - a unique identifier
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid)
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+
+# Construct the system. This system consists of 1 cluster in 1
+# colony. The master cluster houses the CLI and management VMs.
+sysinfo = rift.vcs.SystemInfo(
+    colonies=[
+        rift.vcs.Colony(
+            clusters=[
+                rift.vcs.Cluster(
+                    name='master',
+                    virtual_machines=[
+                        rift.vcs.VirtualMachine(
+                            name='vm-so',
+                            ip='127.0.0.1',
+                            tasklets=[
+                                rift.vcs.uAgentTasklet(),
+                            ],
+                            procs=[
+                                rift.vcs.Confd(),
+                                rift.vcs.CliTasklet(manifest_file="cli_rwcm.xml"),
+                                rift.vcs.DtsRouterTasklet(),
+                                rift.vcs.MsgBrokerTasklet(),
+                                rift.vcs.RestconfTasklet(),
+                                ConfigManagerTasklet()
+                            ],
+                        ),
+                    ]
+                )
+            ]
+        )
+    ]
+)
+
+
+# Define the generic portmap.
+port_map = {}
+
+
+# Define a mapping from the placeholder logical names to the real
+# port names for each of the different modes supported by this demo.
+port_names = {
+    'ethsim': {
+    },
+    'pci': {
+    }
+}
+
+
+# Define the connectivity between logical port names.
+port_groups = {}
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+
+    args = parser.parse_args(argv)
+
+    # load demo info and create a Demo object
+    demo = rift.vcs.demo.Demo(sysinfo=sysinfo,
+                              port_map=port_map,
+                              port_names=port_names,
+                              port_groups=port_groups)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
\ No newline at end of file
diff --git a/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml
new file mode 100644
index 0000000..a0791b5
--- /dev/null
+++ b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml
@@ -0,0 +1,38 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is input parameters file for Network Service configuration. +# This file is formatted as below: + +# configuration_delay : 120 # Number of seconds to wait before applying configuration after NS is up +# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator +# 1 : # Configuration Priority, order in which each VNF will be configured +# name : vnfd_name # Name of the VNF +# member_vnf_index : 11 # member index of the VNF that makes it unique (in case of multiple instances of same VNF) +# configuration_type : scriptconf # Type of configuration (Currently supported values : scriptconf, netconf) +# +# Repeat VNF block for as many VNFs + +configuration_delay : 120 +number_of_vnfs_to_be_configured : 2 +1 : + name : trafsink_vnfd + member_vnf_index : 3 + configuration_type : netconf +2 : + name : trafgen_vnfd + member_vnf_index : 1 + configuration_type : netconf diff --git a/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg new file mode 100644 index 0000000..02dfc85 --- /dev/null +++ b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg @@ -0,0 +1,79 @@ + + + trafgen + 0 + + trafgen-lb + + N1TenGi-1 + + trafgen_vnfd/cp0 + + + + + trafgen_vnfd/cp0 + + + rw_trafgen + rw_trafgen + + 2 + + + direct + + + + + + + + + + + + + + + + + + 1 + + + + + + 1 + + + 10000 + 10000 + 10128 + 1 + + + 5678 + 5678 + 5678 + 1 + + + 512 + 512 + 512 + 1 + + + + + + + + + syslog + + 514 + + diff --git a/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg new file mode 100644 index 0000000..6402201 --- /dev/null +++ b/modules/core/mano/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg @@ -0,0 +1,42 @@ + + + trafsink + 0 + + lb-trafsink + + N3TenGigi-1 + + trafsink_vnfd/cp0 + + + + + trafsink_vnfd/cp0 + + + rw_trafgen + rw_trafgen + + 2 + + + direct + + + + + + + + + + + + + + syslog + + 514 + + diff --git a/modules/core/mano/rwlaunchpad/CMakeLists.txt b/modules/core/mano/rwlaunchpad/CMakeLists.txt new file mode 100644 index 0000000..59695f7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwlaunchpad) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + ra + test + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt new file mode 100644 index 0000000..71a1d91 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua 
Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + yang + rwiwp + rwlaunchpadtasklet + rwmonitor + rwnsm + rwvnfm + rwvns + rwresmgr + vala + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt new file mode 100644 index 0000000..7aa112a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/CMakeLists.txt @@ -0,0 +1,26 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwiwptasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py new file mode 100644 index 0000000..cf75f79 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
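+# Re-export the tasklet class so the plugin shim can import it as
+# rift.tasklets.rwiwptasklet.IwpTasklet (see the RIFT-6485 workaround shim below).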
+
+from .rwiwptasklet import IwpTasklet
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py
new file mode 100755
index 0000000..dc9e0c4
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rift/tasklets/rwiwptasklet/rwiwptasklet.py
@@ -0,0 +1,621 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import asyncio
+import logging
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwIwpYang,
+    RwLaunchpadYang,
+    RwcalYang as rwcal,
+)
+
+import rw_peas
+import rift.tasklets
+
+
+class MissionControlConnectionError(Exception):
+    pass
+
+
+class MissionControlNotConnected(Exception):
+    pass
+
+
+class OutofResourcesError(Exception):
+    pass
+
+
+class PluginLoadingError(Exception):
+    pass
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unfortunately, it is currently difficult to figure out what has exactly
+    # changed in this xact without Pbdelta support (RIFT-4916)
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ResourcePool(object):
+    def __init__(self, log, loop, dts, pool_name, resource_ids):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self._pool_name = pool_name
+        self._resource_ids = resource_ids
+
+        self._reserved_resource_ids = []
+
+        self._dts_reg = None
+
+    @property
+    def pool_xpath(self):
+        raise NotImplementedError()
+
+    @property
+    def id_field(self):
+        raise NotImplementedError()
+
+    def pool_resource_xpath(self, resource_id):
+        raise NotImplementedError()
+
+    @asyncio.coroutine
+    def reserve_resource(self):
+        self._log.debug("Attempting to reserve a resource")
+
+        for id in self._resource_ids:
+            self._log.debug("Iterated resource id: %s", id)
+            if id not in self._reserved_resource_ids:
+                self._log.debug("Reserving resource id %s from pool %s",
+                                id, self._pool_name)
+                self._reserved_resource_ids.append(id)
+                return id
+
+        self._log.warning("Did not find an unreserved 
resource in pool %s", self._pool_name) + return None + + +class VMResourcePool(ResourcePool): + @property + def pool_xpath(self): + return "C,/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:vm-pool[rw-iwp:name='{}']/rw-iwp:resources".format( + self._pool_name, + ) + + @property + def id_field(self): + return "vm_id" + + def pool_resource_xpath(self, resource_id): + return self.pool_xpath + "[rw-iwp:vm-id='{}']".format( + resource_id, + ) + + +class NetworkResourcePool(ResourcePool): + @property + def pool_xpath(self): + return "C,/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:network-pool[rw-iwp:name='{}']/rw-iwp:resources".format( + self._pool_name, + ) + + @property + def id_field(self): + return "network_id" + + def pool_resource_xpath(self, resource_id): + return self.pool_xpath + "[rw-iwp:network-id='{}']".format( + resource_id, + ) + + +class ResourceManager(object): + def __init__(self, log, loop, dts): + self._log = log + self._loop = loop + self._dts = dts + + self._resource_mgr_cfg = None + + self._vm_resource_pools = {} + self._network_resource_pools = {} + + self._periodic_sync_task = None + + @asyncio.coroutine + def _update_vm_pools(self, vm_pools): + self._log.debug("Updating vm pools: %s", vm_pools) + for pool in vm_pools: + if pool.name not in self._vm_resource_pools: + self._log.debug("Adding vm resource pool %s", pool.name) + self._vm_resource_pools[pool.name] = VMResourcePool( + self._log, + self._loop, + self._dts, + pool.name, + [r.vm_id for r in pool.resources], + ) + + @asyncio.coroutine + def _update_network_pools(self, network_pools): + self._log.debug("Updating network pools: %s", network_pools) + for pool in network_pools: + if pool.name not in self._network_resource_pools: + self._log.debug("Adding network resource pool %s", pool.name) + self._network_resource_pools[pool.name] = NetworkResourcePool( + self._log, + self._loop, + self._dts, + pool.name, + [r.network_id for r in pool.resources], + ) + + @asyncio.coroutine + def reserve_vm(self): + self._log.debug("Attempting to reserve a VM resource.") + for name, pool in self._vm_resource_pools.items(): + resource_id = yield from pool.reserve_resource() + if resource_id is None: + continue + + return RwIwpYang.VMResponse( + vm_id=resource_id, + vm_pool=name, + ) + + raise OutofResourcesError("Could not find an available network resource") + + @asyncio.coroutine + def reserve_network(self): + self._log.debug("Attempting to reserve a Network resource.") + for name, pool in self._network_resource_pools.items(): + resource_id = yield from pool.reserve_resource() + if resource_id is None: + continue + + return RwIwpYang.NetworkResponse( + network_id=resource_id, + network_pool=name, + ) + + raise OutofResourcesError("Could not find an available network resource") + + def apply_config(self, resource_mgr_cfg): + self._log.debug("Applying resource manager config: %s", + resource_mgr_cfg) + + self._resource_mgr_cfg = resource_mgr_cfg + + asyncio.ensure_future( + self._update_network_pools(self._resource_mgr_cfg.pools.network_pool), + loop=self._loop, + ) + + asyncio.ensure_future( + self._update_vm_pools(self._resource_mgr_cfg.pools.vm_pool), + loop=self._loop, + ) + + +class ResourceRequestHandler(object): + NETWORK_REQUEST_XPATH = "D,/rw-iwp:resource-mgr/network-request/requests" + VM_REQUEST_XPATH = "D,/rw-iwp:resource-mgr/vm-request/requests" + + def __init__(self, dts, loop, log, resource_manager, cloud_account): + self._dts = dts + self._loop = loop + self._log = log + self._resource_manager = resource_manager + 
self._cloud_account = cloud_account + + self._network_reg = None + self._vm_reg = None + + self._network_reg_event = asyncio.Event(loop=self._loop) + self._vm_reg_event = asyncio.Event(loop=self._loop) + + @asyncio.coroutine + def wait_ready(self, timeout=5): + self._log.debug("Waiting for all request registrations to become ready.") + yield from asyncio.wait( + [self._network_reg_event.wait(), self._vm_reg_event.wait()], + timeout=timeout, loop=self._loop, + ) + + def register(self): + def on_network_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got network request commit (xact_info: %s)", xact_info) + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_request_ready(registration, status): + self._log.debug("Got request ready event (registration: %s) (status: %s)", + registration, status) + + if registration == self._network_reg: + self._network_reg_event.set() + elif registration == self._vm_reg: + self._vm_reg_event.set() + else: + self._log.error("Unknown registration ready event: %s", registration) + + @asyncio.coroutine + def on_network_request_prepare(xact_info, action, ks_path, request_msg): + self._log.debug( + "Got network request on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg + ) + + xpath = ks_path.to_xpath(RwIwpYang.get_schema()) + "/network-response" + + network_item = yield from self._resource_manager.reserve_network() + + network_response = RwIwpYang.NetworkResponse( + network_id=network_item.network_id, + network_pool=network_item.network_pool + ) + + self._log.debug("Responding with NetworkResponse at xpath %s: %s", + xpath, network_response) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, network_response) + + def on_vm_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vm request commit (xact_info: %s)", xact_info) + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_vm_request_prepare(xact_info, action, ks_path, request_msg): + def get_vm_ip_address(vm_id): + rc, vm_info_item = self._cloud_account.cal.get_vm( + self._cloud_account.account, + vm_id + ) + + return vm_info_item.management_ip + + self._log.debug( + "Got vm request on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg + ) + + xpath = ks_path.to_xpath(RwIwpYang.get_schema()) + "/vm-response" + + vm_item = yield from self._resource_manager.reserve_vm() + + vm_ip = get_vm_ip_address(vm_item.vm_id) + + vm_response = RwIwpYang.VMResponse( + vm_id=vm_item.vm_id, + vm_pool=vm_item.vm_pool, + vm_ip=vm_ip, + ) + + self._log.debug("Responding with VMResponse at xpath %s: %s", + xpath, vm_response) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, vm_response) + + with self._dts.group_create() as group: + self._log.debug("Registering for Network Resource Request using xpath: %s", + ResourceRequestHandler.NETWORK_REQUEST_XPATH, + ) + + self._network_reg = group.register( + xpath=ResourceRequestHandler.NETWORK_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_ready=on_request_ready, + on_commit=on_network_request_commit, + on_prepare=on_network_request_prepare, + ), + flags=rwdts.Flag.PUBLISHER, + ) + + self._log.debug("Registering for VM Resource Request using xpath: %s", + ResourceRequestHandler.VM_REQUEST_XPATH, + ) + self._vm_reg = group.register( + xpath=ResourceRequestHandler.VM_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_ready=on_request_ready, + 
+                on_commit=on_vm_request_commit,
+                on_prepare=on_vm_request_prepare,
+            ),
+            flags=rwdts.Flag.PUBLISHER,
+        )
+
+
+class ResourceMgrDtsConfigHandler(object):
+    XPATH = "C,/rw-iwp:resource-mgr"
+
+    def __init__(self, dts, log, resource_manager):
+        self._dts = dts
+        self._log = log
+
+        self._resource_manager = resource_manager
+        self._res_mgr_cfg = RwIwpYang.ResourceManagerConfig()
+
+    def register(self):
+        def on_apply(dts, acg, xact, action, _):
+            """Apply the resource manager configuration"""
+
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actually persist data this never has any data so
+                # skip this for now.
+                self._log.debug("No xact handle. Skipping apply config")
+                return
+
+            self._log.debug("Got resource mgr apply config (xact: %s) (action: %s)",
+                            xact, action)
+
+            self._resource_manager.apply_config(self._res_mgr_cfg)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg):
+            self._log.debug("Got resource manager configuration: %s", msg)
+
+            mgmt_domain = msg.mgmt_domain
+            if mgmt_domain.has_field("name"):
+                self._res_mgr_cfg.mgmt_domain.name = mgmt_domain.name
+
+            mission_control = msg.mission_control
+            if mission_control.has_field("mgmt_ip"):
+                self._res_mgr_cfg.mission_control.mgmt_ip = mission_control.mgmt_ip
+
+            if msg.has_field("pools"):
+                self._res_mgr_cfg.pools.from_dict(msg.pools.as_dict())
+
+            acg.handle.prepare_complete_ok(xact_info.handle)
+
+        self._log.debug("Registering for Resource Mgr config using xpath: %s",
+                        ResourceMgrDtsConfigHandler.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_handler) as acg:
+            self._pool_reg = acg.register(
+                xpath=ResourceMgrDtsConfigHandler.XPATH,
+                flags=rwdts.Flag.SUBSCRIBER,
+                on_prepare=on_prepare
+            )
+
+
+class CloudAccountDtsHandler(object):
+    XPATH = "C,/rw-launchpad:cloud-account"
+    log_hdl = None
+
+    def __init__(self, dts, log, cal_account):
+        self.dts = dts
+        self.log = log
+        self.cal_account = cal_account
+        self.reg = None
+
+    def add_account(self, account):
+        self.log.info("adding cloud account: {}".format(account))
+        self.cal_account.account = rwcal.CloudAccount.from_dict(account.as_dict())
+        self.cal_account.cal = self.load_cal_plugin(account)
+
+    def delete_account(self, account_id):
+        self.log.info("deleting cloud account: {}".format(account_id))
+        self.cal_account.account = None
+        self.cal_account.cal = None
+
+    def update_account(self, account):
+        self.log.info("updating cloud account: {}".format(account))
+        self.cal_account.account = rwcal.CloudAccount.from_dict(account.as_dict())
+        self.cal_account.cal = self.load_cal_plugin(account)
+
+    def load_cal_plugin(self, account):
+        try:
+            plugin = rw_peas.PeasPlugin(
+                getattr(account, account.account_type).plugin_name,
+                'RwCal-1.0'
+            )
+
+        except AttributeError as e:
+            raise PluginLoadingError(str(e))
+
+        engine, info, ext = plugin()
+
+        # Initialize the CAL interface
+        cal = plugin.get_interface("Cloud")
+        cal.init(CloudAccountDtsHandler.log_hdl)
+
+        return cal
+
+    @asyncio.coroutine
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            self.log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
+
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actually persist data this never has any data so
+                # skip this for now.
+                self.log.debug("No xact handle. 
Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_account(cfg.name) + + # Handle Adds + for cfg in add_cfgs: + self.add_account(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_account(cfg) + + self.log.debug("Registering for Cloud Account config using xpath: %s", + CloudAccountDtsHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=CloudAccountDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class CloudAccount(object): + def __init__(self): + self.cal = None + self.account = None + + +class IwpTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super(IwpTasklet, self).__init__(*args, **kwargs) + + self._dts = None + + self._resource_manager = None + self._resource_mgr_config_hdl = None + + self._cloud_account = CloudAccount() + + def start(self): + super(IwpTasklet, self).start() + self.log.info("Starting IwpTasklet") + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS( + self.tasklet_info, + RwLaunchpadYang.get_schema(), + self.loop, + self.on_dts_state_change + ) + + CloudAccountDtsHandler.log_hdl = self.log_hdl + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in IWP stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + self._resource_manager = ResourceManager( + self._log, + self._loop, + self._dts + ) + + self.log.debug("creating resource mgr config request handler") + self._resource_mgr_config_hdl = ResourceMgrDtsConfigHandler( + self._dts, + self.log, + self._resource_manager, + ) + self._resource_mgr_config_hdl.register() + + self.log.debug("creating resource request handler") + self._resource_req_hdl = ResourceRequestHandler( + self._dts, + self.loop, + self.log, + self._resource_manager, + self._cloud_account, + ) + self._resource_req_hdl.register() + + self.log.debug("creating cloud account handler") + self.account_handler = CloudAccountDtsHandler(self._dts, self.log, self._cloud_account) + yield from self.account_handler.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py new file mode 100755 index 0000000..f1401b9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwiwp/rwiwptasklet.py @@ -0,0 +1,30 @@ 
+ +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwiwptasklet + +class Tasklet(rift.tasklets.rwiwptasklet.IwpTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt new file mode 100644 index 0000000..5a0afe7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwlaunchpad) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/archive.py + rift/tasklets/${TASKLET_NAME}/checksums.py + rift/tasklets/${TASKLET_NAME}/convert.py + rift/tasklets/${TASKLET_NAME}/datacenters.py + rift/tasklets/${TASKLET_NAME}/message.py + rift/tasklets/${TASKLET_NAME}/tasklet.py + rift/tasklets/${TASKLET_NAME}/uploader.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py new file mode 100644 index 0000000..2e19300 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .tasklet import LaunchpadTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py new file mode 100644 index 0000000..470f554 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/archive.py @@ -0,0 +1,268 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import os +import re +import shutil +import tempfile + +from . import checksums +from . import convert +from . import message + + +class ArchiveError(Exception): + pass + + +class ArchiveInvalidPath(message.ErrorMessage): + def __init__(self, filename): + msg = "unable to match checksum filename {} to contents of archive" + super().__init__("archive-error", msg.format(filename)) + + +class LaunchpadArchive(object): + def __init__(self, tar, log): + self._descriptors = dict() + self._descriptors['images'] = list() + self._descriptors['pnfd'] = list() + self._descriptors['vnfd'] = list() + self._descriptors['vld'] = list() + self._descriptors['nsd'] = list() + self._descriptors['vnffgd'] = list() + self._descriptors['schema/libs'] = list() + self._descriptors['schema/yang'] = list() + self._descriptors['schema/fxs'] = list() + + self._checksums = dict() + self._manifest = None + + self.log = log + self.tarfile = tar + self.prefix = os.path.commonprefix(self.tarfile.getnames()) + + # There must be a checksums.txt file in the archive + if os.path.join(self.prefix, 'checksums.txt') not in tar.getnames(): + self.log.message(message.OnboardMissingChecksumsFile()) + raise ArchiveError() + + # Iterate through the paths in the checksums files and validate them. 
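+        # (Each line of checksums.txt pairs a checksum with an archive-relative
+        # path -- "<checksum> <relative/path>" -- presumably the same layout
+        # that PackageArchive.write_checksums() emits below; the digest
+        # algorithm is whatever checksums.checksum() implements.)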
+ # Note that any file in the archive that is not included in the + # checksums file will be ignored. + fd = tar.extractfile(os.path.join(self.prefix, 'checksums.txt')) + archive_checksums = checksums.ArchiveChecksums.from_file_desc(fd) + + def validate_checksums(): + archive_files = {info.name for info in self.tarfile.getmembers() if info.isfile()} + + # Identify files in the checksums.txt file that cannot be located in + # the archive. + for filename in archive_checksums: + if os.path.join(self.prefix, filename) not in archive_files: + self.log.message(message.OnboardMissingFile(filename)) + raise ArchiveError() + + # Use the checksums to validate the remaining files in the archive + for filename in archive_checksums: + path = os.path.join(self.prefix, filename) + if checksums.checksum(self.tarfile.extractfile(path)) != archive_checksums[filename]: + self.log.message(message.OnboardChecksumMismatch(filename)) + raise ArchiveError() + + # Disable checksum validations for onboard performance issues + # validate_checksums() + + def assign_manifest(filename): + self._manifest = filename + + patterns = [ + (re.compile(r"images/([^/]+)"), self._descriptors["images"].append), + (re.compile(r"pnfd/([^/]+)"), self._descriptors["pnfd"].append), + (re.compile(r"vnfd/([^/]+)"), self._descriptors["vnfd"].append), + (re.compile(r"vld/([^/]+)"), self._descriptors["vld"].append), + (re.compile(r"nsd/([^/]+)"), self._descriptors["nsd"].append), + (re.compile(r"vnffgd/([^/]+)"), self._descriptors["vnffgd"].append), + (re.compile(r"schema/libs/([^/]+)"), self._descriptors["schema/libs"].append), + (re.compile(r"schema/yang/([^/]+)"), self._descriptors["schema/yang"].append), + (re.compile(r"schema/fxs/([^/]+)"), self._descriptors["schema/fxs"].append), + (re.compile(r"manifest.xml"), assign_manifest), + ] + + # Iterate through the recognized patterns and assign files accordingly + for filename in archive_checksums: + relname = os.path.relpath(filename) + for pattern, store in patterns: + if pattern.match(relname): + store(relname) + self._checksums[relname] = archive_checksums[filename] + break + + else: + raise message.MessageException(ArchiveInvalidPath(filename)) + + @property + def checksums(self): + """A dictionary of the file checksums""" + return self._checksums + + @property + def pnfds(self): + """A list of PNFDs in the archive""" + return self._descriptors['pnfd'] + + @property + def vnfds(self): + """A list of VNFDs in the archive""" + return self._descriptors['vnfd'] + + @property + def vlds(self): + """A list of VLDs in the archive""" + return self._descriptors['vld'] + + @property + def vnffgds(self): + """A list of VNFFGDs in the archive""" + return self._descriptors['vnffgd'] + + @property + def nsds(self): + """A list of NSDs in the archive""" + return self._descriptors['nsd'] + + @property + def images(self): + """A list of images in the archive""" + return self._descriptors['images'] + + @property + def filenames(self): + """A list of all the files in the archive""" + return self.pnfds + self.vnfds + self.vlds + self.vnffgds + self.nsds + self.images + + def extract(self, dest): + # Ensure that the destination directory exists + if not os.path.exists(dest): + os.makedirs(dest) + + for filename in self.filenames: + # Create the full name to perform the lookup for the TarInfo in the + # archive. + fullname = os.path.join(self.prefix, filename) + member = self.tarfile.getmember(fullname) + + # Make sure that any preceeding directories in the path have been + # created. 
+ dirname = os.path.dirname(filename) + if not os.path.exists(os.path.join(dest, dirname)): + os.makedirs(os.path.join(dest, dirname)) + + # Copy the contents of the file to the correct path + with open(os.path.join(dest, filename), 'wb') as dst: + src = self.tarfile.extractfile(member) + shutil.copyfileobj(src, dst, 10 * 1024 * 1024) + src.close() + +class PackageArchive(object): + def __init__(self): + self.images = dict() + self.vnfds = list() + self.nsds = list() + self.vlds = list() + self.checksums = dict() + + def add_image(self, image, chksum=None): + if image.name not in self.images: + if chksum is None: + with open(image.location, 'r+b') as fp: + self.checksums["images/" + image.name] = checksums.checksum(fp) + + else: + self.checksums["images/" + image.name] = chksum + + self.images[image.name] = image + + def add_vld(self, vld): + self.vlds.append(vld) + + def add_vnfd(self, vnfd): + self.vnfds.append(vnfd) + + def add_nsd(self, nsd): + self.nsds.append(nsd) + + def create_archive(self, archive_name, dest=None): + if dest is None: + dest = tempfile.gettempdir() + + if archive_name.endswith(".tar.gz"): + archive_name = archive_name[:-7] + + archive_path = os.path.join(dest, archive_name) + + if os.path.exists(archive_path): + shutil.rmtree(archive_path) + + os.makedirs(archive_path) + + def write_descriptors(descriptors, converter, name): + if descriptors: + os.makedirs(os.path.join(archive_path, name)) + + path = "{}/{{}}.xml".format(os.path.join(archive_path, name)) + for desc in descriptors: + xml = converter.to_xml_string(desc) + open(path.format(desc.id), 'w').write(xml) + + key = os.path.relpath(path.format(desc.id), archive_path) + self.checksums[key] = checksums.checksum_string(xml) + + def write_images(): + if self.images: + image_path = os.path.join(archive_path, "images") + os.makedirs(image_path) + + for image in self.images.values(): + shutil.copy2(image.location, image_path) + + def write_checksums(): + with open(os.path.join(archive_path, "checksums.txt"), "w") as fp: + for path, chksum in self.checksums.items(): + fp.write("{} {}\n".format(chksum, path)) + + # Start by writing the descriptors to the archive + write_descriptors(self.nsds, convert.NsdYangConverter(), "nsd") + write_descriptors(self.vlds, convert.VldYangConverter(), "vld") + write_descriptors(self.vnfds, convert.VnfdYangConverter(), "vnfd") + + # Copy the images to the archive + write_images() + + # Finally, write the checksums file + write_checksums() + + # Construct a tarball + cmd = "tar zcf {dest}/{name}.tar.gz.partial -C {dest} {name} &>/dev/null" + os.system(cmd.format(name=archive_name, dest=dest)) + + # Rename to final name + cmd = "mv {dest}/{name}.tar.gz.partial {dest}/{name}.tar.gz" + os.system(cmd.format(name=archive_name, dest=dest)) + + shutil.rmtree(archive_path) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py new file mode 100644 index 0000000..93c1ce2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/checksums.py @@ -0,0 +1,65 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import hashlib +import re + +def checksum_string(s): + return hashlib.md5(s.encode('utf-8')).hexdigest() + + +def checksum(fd): + """ Calculate a md5 checksum of fd file handle + + Arguments: + fd: A file descriptor return from open() call + + Returns: + A md5 checksum of the file + + """ + current = hashlib.md5() + while True: + data = fd.read(2 ** 16) + if len(data) == 0: + return current.hexdigest() + current.update(data) + + +class ArchiveChecksums(dict): + @classmethod + def from_file_desc(cls, fd): + checksum_pattern = re.compile(r"(\S+)\s+(\S+)") + checksums = dict() + + for line in (line.decode('utf-8').strip() for line in fd if line): + + # Skip comments + if line.startswith('#'): + continue + + # Skip lines that do not contain the pattern we are looking for + result = checksum_pattern.search(line) + if result is None: + continue + + chksum, filepath = result.groups() + checksums[filepath] = chksum + + return cls(checksums) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py new file mode 100644 index 0000000..1b2ce63 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert.py @@ -0,0 +1,97 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
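+
+# The converter classes below wrap the GI-generated YANG types so descriptors
+# can be moved between protobuf objects and their XML/JSON serializations.
+# A minimal usage sketch (the input path is illustrative):
+#
+#   converter = VnfdYangConverter()
+#   vnfd = converter.from_xml_file("vnfd/ping_vnfd.xml")
+#   xml_data = converter.to_xml_string(vnfd)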
+ + +# +# + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import ( + NsdYang, + RwYang, + VldYang, + VnfdYang, + ) + + +class GenericYangConverter(object): + model = None + + def __init__(self): + cls = self.__class__ + + if cls.model is None: + cls.model = RwYang.model_create_libncx() + cls.model.load_schema_ypbc(cls.yang_namespace().get_schema()) + + @classmethod + def yang_namespace(cls): + return cls.YANG_NAMESPACE + + @classmethod + def yang_class(cls): + return cls.YANG_CLASS + + def from_xml_string(self, xml): + cls = self.__class__ + obj = cls.yang_class()() + obj.from_xml_v2(cls.model, xml) + return obj + + def from_xml_file(self, filename): + with open(filename, 'r') as fp: + xml = fp.read() + + cls = self.__class__ + obj = cls.yang_class()() + obj.from_xml_v2(cls.model, xml) + return obj + + def to_xml_string(self, obj): + return obj.to_xml_v2(self.__class__.model) + + def from_json_string(self, json): + cls = self.__class__ + obj = cls.yang_class()() + obj.from_json(cls.model, json) + return obj + + def from_json_file(self, filename): + with open(filename, 'r') as fp: + json = fp.read() + + cls = self.__class__ + obj = cls.yang_class()() + obj.from_json(cls.model, json) + return obj + + def to_json_string(self, obj): + return obj.to_json(self.__class__.model) + + +class VnfdYangConverter(GenericYangConverter): + YANG_NAMESPACE = VnfdYang + YANG_CLASS = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd + + +class NsdYangConverter(GenericYangConverter): + YANG_NAMESPACE = NsdYang + YANG_CLASS = NsdYang.YangData_Nsd_NsdCatalog_Nsd + + +class VldYangConverter(GenericYangConverter): + YANG_NAMESPACE = VldYang + YANG_CLASS = VldYang.YangData_Vld_VldCatalog_Vld \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py new file mode 100644 index 0000000..a6d1950 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py @@ -0,0 +1,134 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio + +from gi.repository import ( + RwDts, + RwLaunchpadYang, +) + +import rift.openmano.openmano_client as openmano_client +import rift.tasklets + + +class DataCenterPublisher(object): + """ + This class is reponsible for exposing the data centers associated with an + openmano cloud account. 
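+    The list is assembled on demand in the on_prepare callback below and
+    published at the D,/rw-launchpad:datacenters xpath.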
+ """ + + XPATH = "D,/rw-launchpad:datacenters" + + def __init__(self, tasklet): + """Creates an instance of a DataCenterPublisher + + Arguments: + tasklet - the tasklet that this publisher is registered for + + """ + self.tasklet = tasklet + self.reg = None + + @property + def dts(self): + """The DTS instance used by this tasklet""" + return self.tasklet.dts + + @property + def log(self): + """The logger used by this tasklet""" + return self.tasklet.log + + @property + def loop(self): + """The event loop used by this tasklet""" + return self.tasklet.loop + + @property + def accounts(self): + """The known openmano cloud accounts""" + accounts = list() + for acc in self.tasklet.cloud_accounts: + if acc.account_type == "openmano": + accounts.append(acc.account_msg) + + return accounts + + @asyncio.coroutine + def register(self): + """Registers the publisher with DTS""" + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + try: + # Create a datacenters instance to hold all of the cloud + # account data. + datacenters = RwLaunchpadYang.DataCenters() + + # Iterate over the known openmano accounts and populate cloud + # account instances with the corresponding data center info + for account in self.accounts: + try: + cloud_account = RwLaunchpadYang.CloudAccount() + cloud_account.name = account.name + + # Create a client for this cloud account to query for + # the associated data centers + client = openmano_client.OpenmanoCliAPI( + self.log, + account.openmano.host, + account.openmano.port, + account.openmano.tenant_id, + ) + + # Populate the cloud account with the data center info + for uuid, name in client.datacenter_list(): + cloud_account.datacenters.append( + RwLaunchpadYang.DataCenter( + uuid=uuid, + name=name, + ) + ) + + datacenters.cloud_accounts.append(cloud_account) + + except Exception as e: + self.log.exception(e) + + xact_info.respond_xpath( + RwDts.XactRspCode.MORE, + 'D,/rw-launchpad:datacenters', + datacenters, + ) + + xact_info.respond_xpath(RwDts.XactRspCode.ACK) + + except Exception as e: + self.log.exception(e) + raise + + handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + + with self.dts.group_create() as group: + self.reg = group.register( + xpath=DataCenterPublisher.XPATH, + handler=handler, + flags=RwDts.Flag.PUBLISHER, + ) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py new file mode 100644 index 0000000..10df8f5 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py @@ -0,0 +1,346 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+#
+#
+
+import logging
+import time
+
+
+class MessageException(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+
+
+class Message(object):
+    """
+    Messages are events that describe stages of the onboarding process, and
+    any event that may occur during the onboarding process.
+    """
+
+    def __init__(self, level, name, text):
+        self._level = level
+        self._name = name
+        self._text = text
+        self._timestamp = time.time()
+
+    def __repr__(self):
+        # logging.getLevelName is the public, python3-safe way to map a level
+        # to its name (logging._levelNames no longer exists in python3).
+        return "{} {}:{}:{}".format(
+                self.timestamp,
+                logging.getLevelName(self.level),
+                self.name,
+                self.text,
+                )
+
+    @property
+    def level(self):
+        return self._level
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def text(self):
+        return self._text
+
+    @property
+    def timestamp(self):
+        return self._timestamp
+
+    def log(self, logger):
+        logger.log(self.level, self.text)
+
+
+class WarningMessage(Message):
+    """
+    A warning is a message that does not prevent the onboarding process from
+    continuing, but may not reflect the intention of the user who initiated
+    the process.
+    """
+
+    def __init__(self, name, text):
+        super().__init__(logging.WARNING, name, text)
+
+
+class ErrorMessage(Message):
+    """
+    An error message alerts the user to an event that prevents the onboarding
+    process from continuing.
+    """
+
+    def __init__(self, name, text):
+        super().__init__(logging.ERROR, name, text)
+
+
+class StatusMessage(Message):
+    """
+    A status message informs the user of an expected stage in the onboarding
+    process.
+    """
+
+    def __init__(self, name, text):
+        super().__init__(logging.INFO, name, text)
+
+    def log(self, logger):
+        pass
+
+
+class Logger(object):
+    """
+    This class augments a standard Python logger so that messages can be
+    passed to it. Messages are recorded so that the uploader application can
+    provide this information to the client, and the messages are also recorded
+    on the server via the standard logging facilities.
+ """ + + def __init__(self, logger, messages): + self._rift_logger = logger + self._messages = messages + + @property + def messages(self): + return self._messages + + def message(self, msg): + msg.log(self._rift_logger) + self._messages.append(msg) + + def debug(self, msg): + self._rift_logger.debug(msg) + + def info(self, msg): + self._rift_logger.info(msg) + + def error(self, msg): + self._rift_logger.error(msg) + + def fatal(self, msg): + self._rift_logger.fatal(msg) + + def warn(self, msg): + self._rift_logger.warn(msg) + + def warning(self, msg): + self._rift_logger.warning(msg) + + def critical(self, msg): + self._rift_logger.critical(msg) + + def exception(self, exc): + self._rift_logger.exception(exc) + + +class OnboardStart(StatusMessage): + def __init__(self): + super().__init__("onboard-started", "onboarding process started") + + +class OnboardError(ErrorMessage): + def __init__(self, msg): + super().__init__("onboard-error", msg) + + +class OnboardWarning(ErrorMessage): + def __init__(self, msg): + super().__init__("onboard-warning", msg) + + +class OnboardPackageUpload(StatusMessage): + def __init__(self): + super().__init__("onboard-pkg-upload", "uploading package") + + +class OnboardImageUpload(StatusMessage): + def __init__(self): + super().__init__("onboard-img-upload", "uploading image") + + +class OnboardPackageValidation(StatusMessage): + def __init__(self): + super().__init__("onboard-pkg-validation", "package contents validation") + + +class OnboardDescriptorValidation(StatusMessage): + def __init__(self): + super().__init__("onboard-dsc-validation", "descriptor validation") + + +class OnboardDescriptorError(OnboardError): + def __init__(self, filename): + super().__init__("unable to onboard {}".format(filename)) + + +class OnboardDescriptorOnboard(StatusMessage): + def __init__(self): + super().__init__("onboard-dsc-onboard", "onboarding descriptors") + + +class OnboardSuccess(StatusMessage): + def __init__(self): + super().__init__("onboard-success", "onboarding process successfully completed") + + +class OnboardFailure(StatusMessage): + def __init__(self): + super().__init__("onboard-failure", "onboarding process failed") + + +class OnboardMissingContentType(OnboardError): + def __init__(self): + super().__init__("missing content-type header") + + +class OnboardUnsupportedMediaType(OnboardError): + def __init__(self): + super().__init__("multipart/form-data required") + + +class OnboardMissingContentBoundary(OnboardError): + def __init__(self): + super().__init__("missing content boundary") + + +class OnboardMissingTerminalBoundary(OnboardError): + def __init__(self): + super().__init__("Unable to find terminal content boundary") + + +class OnboardUnreadableHeaders(OnboardError): + def __init__(self): + super().__init__("Unable to read message headers") + + +class OnboardUnreadablePackage(OnboardError): + def __init__(self): + super().__init__("Unable to read package") + + +class OnboardMissingChecksumsFile(OnboardError): + def __init__(self): + super().__init__("Package does not contain checksums.txt") + + +class OnboardChecksumMismatch(OnboardError): + def __init__(self, filename): + super().__init__("checksum mismatch for {}".format(filename)) + + +class OnboardMissingAccount(OnboardError): + def __init__(self): + super().__init__("no account information available") + + +class OnboardMissingFile(OnboardWarning): + def __init__(self, filename): + super().__init__("{} is not in the archive".format(filename)) + + +class OnboardInvalidPath(OnboardWarning): + def 
__init__(self, filename): + super().__init__("{} is not a valid package path".format(filename)) + + +class ExportStart(StatusMessage): + def __init__(self): + super().__init__("export-started", "export process started") + + +class ExportSuccess(StatusMessage): + def __init__(self): + super().__init__("export-success", "export process successfully completed") + + +class ExportFailure(StatusMessage): + def __init__(self): + super().__init__("export-failure", "export process failed") + + + + +class UpdateError(ErrorMessage): + def __init__(self, msg): + super().__init__("update-error", msg) + + +class UpdateMissingAccount(UpdateError): + def __init__(self): + super().__init__("no account information available") + +class UpdateMissingContentType(UpdateError): + def __init__(self): + super().__init__("missing content-type header") + + +class UpdateUnsupportedMediaType(UpdateError): + def __init__(self): + super().__init__("multipart/form-data required") + + +class UpdateMissingContentBoundary(UpdateError): + def __init__(self): + super().__init__("missing content boundary") + + +class UpdateStart(StatusMessage): + def __init__(self): + super().__init__("update-started", "update process started") + + +class UpdateSuccess(StatusMessage): + def __init__(self): + super().__init__("update-success", "updating process successfully completed") + + +class UpdateFailure(StatusMessage): + def __init__(self): + super().__init__("update-failure", "updating process failed") + + +class UpdatePackageUpload(StatusMessage): + def __init__(self): + super().__init__("update-pkg-upload", "uploading package") + + +class UpdateDescriptorError(UpdateError): + def __init__(self, filename): + super().__init__("unable to update {}".format(filename)) + + +class UpdateDescriptorUpdated(StatusMessage): + def __init__(self): + super().__init__("update-dsc-updated", "updated descriptors") + + +class UpdateUnreadableHeaders(UpdateError): + def __init__(self): + super().__init__("Unable to read message headers") + + +class UpdateUnreadablePackage(UpdateError): + def __init__(self): + super().__init__("Unable to read package") + + +class UpdateChecksumMismatch(UpdateError): + def __init__(self, filename): + super().__init__("checksum mismatch for {}".format(filename)) + + +class UpdateNewDescriptor(UpdateError): + def __init__(self, filename): + super().__init__("{} contains a new descriptor".format(filename)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py new file mode 100644 index 0000000..caa4e15 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py @@ -0,0 +1,511 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
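+
+# The catalog handlers in this module reconcile DTS transactions by comparing
+# the pre- and post-transaction element sets (see get_add_delete_update_cfgs
+# below). The set arithmetic, sketched with plain dicts standing in for
+# config elements keyed by id:
+#
+#   xact = {"a": 1, "b": 2}  # elements proposed by the transaction
+#   curr = {"b": 1, "c": 3}  # elements currently committed
+#   adds    = xact.keys() - curr.keys()                    # {"a"}
+#   deletes = curr.keys() - xact.keys()                    # {"c"}
+#   updates = {k for k in xact.keys() & curr.keys()
+#              if xact[k] != curr[k]}                      # {"b"}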
+ + +# +# + +import asyncio +import logging +import math +import mmap +import os +import re +import tarfile +import tempfile +import sys + +import tornado +import tornado.httputil +import tornado.httpserver +import tornado.platform.asyncio + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwLaunchpadYang as rwlaunchpad, + RwcalYang as rwcal, + RwTypes, +) + +import rift.tasklets +import rift.mano.cloud + +from . import uploader +from . import datacenters + + +def get_add_delete_update_cfgs(dts_member_reg, xact, key_name): + # Unforunately, it is currently difficult to figure out what has exactly + # changed in this xact without Pbdelta support (RIFT-4916) + # As a workaround, we can fetch the pre and post xact elements and + # perform a comparison to figure out adds/deletes/updates + xact_cfgs = list(dts_member_reg.get_xact_elements(xact)) + curr_cfgs = list(dts_member_reg.elements) + + xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs} + curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs} + + # Find Adds + added_keys = set(xact_key_map) - set(curr_key_map) + added_cfgs = [xact_key_map[key] for key in added_keys] + + # Find Deletes + deleted_keys = set(curr_key_map) - set(xact_key_map) + deleted_cfgs = [curr_key_map[key] for key in deleted_keys] + + # Find Updates + updated_keys = set(curr_key_map) & set(xact_key_map) + updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]] + + return added_cfgs, deleted_cfgs, updated_cfgs + + +class CatalogDtsHandler(object): + def __init__(self, tasklet, app): + self.app = app + self.reg = None + self.tasklet = tasklet + + @property + def log(self): + return self.tasklet.log + + @property + def dts(self): + return self.tasklet.dts + + +class VldCatalogDtsHandler(CatalogDtsHandler): + XPATH = "C,/vld:vld-catalog/vld:vld" + + def add_vld(self, vld): + self.log.debug('vld-catalog-handler:add:{}'.format(vld.id)) + if vld.id not in self.tasklet.vld_catalog: + self.tasklet.vld_catalog[vld.id] = vld + else: + self.log.error("vld already in catalog: {}".format(vld.id)) + + def update_vld(self, vld): + self.log.debug('vld-catalog-handler:update:{}'.format(vld.id)) + if vld.id in self.tasklet.vld_catalog: + self.tasklet.vld_catalog[vld.id] = vld + else: + self.log.error("unrecognized VLD: {}".format(vld.id)) + + def delete_vld(self, vld_id): + self.log.debug('vld-catalog-handler:delete:{}'.format(vld_id)) + if vld_id in self.tasklet.vld_catalog: + del self.tasklet.vld_catalog[vld_id] + else: + self.log.error("unrecognized VLD: {}".format(vld_id)) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. 
Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="id", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_vld(cfg.id) + + # Handle Adds + for cfg in add_cfgs: + self.add_vld(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_vld(cfg) + + self.log.debug("Registering for VLD catalog") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=VldCatalogDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class NsdCatalogDtsHandler(CatalogDtsHandler): + XPATH = "C,/nsd:nsd-catalog/nsd:nsd" + + def add_nsd(self, nsd): + self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id)) + if nsd.id not in self.tasklet.nsd_catalog: + self.tasklet.nsd_catalog[nsd.id] = nsd + else: + self.log.error("nsd already in catalog: {}".format(nsd.id)) + + def update_nsd(self, nsd): + self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id)) + if nsd.id in self.tasklet.nsd_catalog: + self.tasklet.nsd_catalog[nsd.id] = nsd + else: + self.log.error("unrecognized NSD: {}".format(nsd.id)) + + def delete_nsd(self, nsd_id): + self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id)) + if nsd_id in self.tasklet.nsd_catalog: + del self.tasklet.nsd_catalog[nsd_id] + else: + self.log.error("unrecognized NSD: {}".format(nsd_id)) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. 
Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="id", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_nsd(cfg.id) + + # Handle Adds + for cfg in add_cfgs: + self.add_nsd(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_nsd(cfg) + + self.log.debug("Registering for NSD catalog") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=NsdCatalogDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class VnfdCatalogDtsHandler(CatalogDtsHandler): + XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd" + + def add_vnfd(self, vnfd): + self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id)) + if vnfd.id not in self.tasklet.vnfd_catalog: + self.tasklet.vnfd_catalog[vnfd.id] = vnfd + + else: + self.log.error("VNFD already in catalog: {}".format(vnfd.id)) + + def update_vnfd(self, vnfd): + self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id)) + if vnfd.id in self.tasklet.vnfd_catalog: + self.tasklet.vnfd_catalog[vnfd.id] = vnfd + + else: + self.log.error("unrecognized VNFD: {}".format(vnfd.id)) + + def delete_vnfd(self, vnfd_id): + self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id)) + if vnfd_id in self.tasklet.vnfd_catalog: + del self.tasklet.vnfd_catalog[vnfd_id] + + else: + self.log.error("unrecognized VNFD: {}".format(vnfd_id)) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. Skipping apply config") + return + + add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self.reg, + xact=xact, + key_name="id", + ) + + # Handle Deletes + for cfg in delete_cfgs: + self.delete_vnfd(cfg.id) + + # Handle Adds + for cfg in add_cfgs: + self.add_vnfd(cfg) + + # Handle Updates + for cfg in update_cfgs: + self.update_vnfd(cfg) + + self.log.debug("Registering for VNFD catalog") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=VnfdCatalogDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class LaunchpadConfigDtsHandler(object): + XPATH = "C,/rw-launchpad:launchpad-config" + + def __init__(self, dts, log, launchpad): + self.dts = dts + self.log = log + self.task = launchpad + self.reg = None + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. 
Skipping apply config") + return + + cfg = list(self.reg.get_xact_elements(xact))[0] + self.task.set_mode(cfg.operational_mode) + + self.log.debug("Registering for Launchpad Config") + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=LaunchpadConfigDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class CloudAccountHandlers(object): + def __init__(self, dts, log, log_hdl, loop, app): + self._log = log + self._log_hdl = log_hdl + self._dts = dts + self._loop = loop + self._app = app + + self._log.debug("creating cloud account config handler") + self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber( + self._dts, self._log, self._log_hdl, + rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=self.on_cloud_account_added, + on_delete_apply=self.on_cloud_account_deleted, + ) + ) + + self._log.debug("creating cloud account opdata handler") + self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler( + self._dts, self._log, self._loop, + ) + + def on_cloud_account_deleted(self, account_name): + self._log.debug("cloud account deleted") + self._app.accounts = list(self.cloud_cfg_handler.accounts.values()) + self.cloud_operdata_handler.delete_cloud_account(account_name) + + def on_cloud_account_added(self, account): + self._log.debug("cloud account added") + self._app.accounts = list(self.cloud_cfg_handler.accounts.values()) + self._log.debug("accounts: %s", self._app.accounts) + self.cloud_operdata_handler.add_cloud_account(account) + + @asyncio.coroutine + def register(self): + self.cloud_cfg_handler.register() + yield from self.cloud_operdata_handler.register() + + +class LaunchpadTasklet(rift.tasklets.Tasklet): + UPLOAD_MAX_BODY_SIZE = 1e10 + UPLOAD_PORT = "4567" + + def __init__(self, *args, **kwargs): + super(LaunchpadTasklet, self).__init__(*args, **kwargs) + self.app = None + self.server = None + + self.account_handler = None + self.config_handler = None + self.nsd_catalog_handler = None + self.vld_catalog_handler = None + self.vnfd_catalog_handler = None + self.cloud_handler = None + self.datacenter_handler = None + + self.nsd_catalog = dict() + self.vld_catalog = dict() + self.vnfd_catalog = dict() + + self.mode = rwlaunchpad.OperationalMode.STANDALONE + + @property + def cloud_accounts(self): + if self.cloud_handler is None: + return list() + + return list(self.cloud_handler.cloud_cfg_handler.accounts.values()) + + def start(self): + super(LaunchpadTasklet, self).start() + self.log.info("Starting LaunchpadTasklet") + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self.dts = rift.tasklets.DTS( + self.tasklet_info, + rwlaunchpad.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self.dts) + + def stop(self): + try: + self.server.stop() + self.dts.deinit() + except Exception: + print("Caught Exception in LP stop:", sys.exc_info()[0]) + raise + def set_mode(self, mode): + """ Sets the mode of this launchpad""" + self.mode = mode + + @asyncio.coroutine + def init(self): + io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop) + self.app = uploader.UploaderApplication(self) + + manifest = self.tasklet_info.get_pb_manifest() + ssl_cert = manifest.bootstrap_phase.rwsecurity.cert + ssl_key = manifest.bootstrap_phase.rwsecurity.key + ssl_options = { + "certfile" : ssl_cert, + "keyfile" : ssl_key, + } + + if 
manifest.bootstrap_phase.rwsecurity.use_ssl: + self.server = tornado.httpserver.HTTPServer( + self.app, + max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE, + io_loop=io_loop, + ssl_options=ssl_options, + ) + + else: + self.server = tornado.httpserver.HTTPServer( + self.app, + max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE, + io_loop=io_loop, + ) + + self.log.debug("creating VLD catalog handler") + self.vld_catalog_handler = VldCatalogDtsHandler(self, self.app) + yield from self.vld_catalog_handler.register() + + self.log.debug("creating NSD catalog handler") + self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app) + yield from self.nsd_catalog_handler.register() + + self.log.debug("creating VNFD catalog handler") + self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app) + yield from self.vnfd_catalog_handler.register() + + self.log.debug("creating launchpad config handler") + self.lp_config_handler = LaunchpadConfigDtsHandler(self.dts, self.log, self) + yield from self.lp_config_handler.register() + + self.log.debug("creating datacenter handler") + self.datacenter_handler = datacenters.DataCenterPublisher(self) + yield from self.datacenter_handler.register() + + self.cloud_handler = CloudAccountHandlers( + self.dts, self.log, self.log_hdl, self.loop, self.app + ) + yield from self.cloud_handler.register() + + @asyncio.coroutine + def run(self): + self.server.listen(LaunchpadTasklet.UPLOAD_PORT) + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Handle DTS state change + + Take action according to current DTS state to transition application + into the corresponding application state + + Arguments + state - current dts state + + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self.dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py new file mode 100644 index 0000000..d400cc4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py @@ -0,0 +1,1379 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
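+
+# The streaming handlers below accept multipart/form-data POSTs, spool the
+# body to disk, and then search it for the part whose Content-Disposition
+# names the "descriptor" field. The part being searched for looks roughly
+# like this (boundary and filename are illustrative):
+#
+#   --boundary123
+#   Content-Disposition: form-data; name="descriptor"; filename="pkg.tar.gz"
+#
+#   <tarball bytes>
+#   --boundary123--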
+ + +# +# + +import collections +import mmap +import os +import shutil +import tarfile +import tempfile +import threading +import uuid +import xml.etree.ElementTree as ET +import json + +import requests +import tornado +import tornado.escape +import tornado.ioloop +import tornado.web +import tornado.httputil + +import gi +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + RwLaunchpadYang as rwlaunchpad, + RwYang, + RwcalYang as rwcal, + NsdYang, + VnfdYang, + ) +import rift.mano.cloud + +from . import archive +from . import checksums +from . import convert +from . import message +from .message import ( + ExportFailure, + ExportStart, + ExportSuccess, + MessageException, + OnboardDescriptorError, + OnboardDescriptorOnboard, + OnboardDescriptorValidation, + OnboardFailure, + OnboardImageUpload, + OnboardInvalidPath, + OnboardMissingAccount, + OnboardMissingContentBoundary, + OnboardMissingContentType, + OnboardMissingTerminalBoundary, + OnboardPackageUpload, + OnboardPackageValidation, + OnboardStart, + OnboardSuccess, + OnboardUnreadableHeaders, + OnboardUnreadablePackage, + OnboardUnsupportedMediaType, + UpdateChecksumMismatch, + UpdateDescriptorError, + UpdateDescriptorUpdated, + UpdateFailure, + UpdateMissingAccount, + UpdateMissingContentBoundary, + UpdateMissingContentType, + UpdateNewDescriptor, + UpdatePackageUpload, + UpdateStart, + UpdateSuccess, + UpdateUnreadableHeaders, + UpdateUnreadablePackage, + UpdateUnsupportedMediaType, + ) + + +class UnreadableHeadersError(Exception): + pass + + +class UnreadablePackageError(Exception): + pass + + +class HttpMessageError(Exception): + def __init__(self, code, msg): + self.code = code + self.msg = msg + + +class OnboardError(Exception): + def __init__(self, msg): + self.msg = msg + + +class UpdateError(Exception): + def __init__(self, msg): + self.msg = msg + + +class RequestHandler(tornado.web.RequestHandler): + def options(self, *args, **kargs): + pass + + def set_default_headers(self): + self.set_header('Access-Control-Allow-Origin', '*') + self.set_header('Access-Control-Allow-Headers', 'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization') + self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE') + + +def boundary_search(fd, boundary): + """ + Use the Boyer-Moore-Horpool algorithm to find matches to a message boundary + in a file-like object. + + Arguments: + fd - a file-like object to search + boundary - a string defining a message boundary + + Returns: + An array of indices corresponding to matches in the file + + """ + # It is easier to work with a search pattern that is reversed with this + # algorithm + needle = ''.join(reversed(boundary)).encode() + + # Create a lookup to efficiently determine how far we can skip through the + # file based upon the characters in the pattern. + lookup = dict() + for i, c in enumerate(needle[1:], start=1): + if c not in lookup: + lookup[c] = i + + blength = len(boundary) + indices = list() + + # A buffer that same length as the pattern is used to read characters from + # the file. Note that characters are added from the left to make for a + # straight forward comparison with the reversed orientation of the needle. + buffer = collections.deque(maxlen=blength) + buffer.extendleft(fd.read(blength)) + + # Iterate through the file and construct an array of the indices where + # matches to the boundary occur. 
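+    # A tail byte that does not occur in the boundary is absent from the
+    # lookup table, so the scan skips len(boundary) bytes at once; that skip
+    # is what makes this faster than a naive byte-by-byte comparison.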
+    index = 0
+    while True:
+        tail = buffer[0]
+
+        # If the "tail" of the buffer matches the first character in the
+        # needle, perform a character by character check.
+        if tail == needle[0]:
+            for x, y in zip(buffer, needle):
+                if x != y:
+                    break
+
+            else:
+                # Success! Record the index of the beginning of the match
+                indices.append(index)
+
+        # Determine how far to skip based upon the "tail" character
+        skip = lookup.get(tail, blength)
+        chunk = fd.read(skip)
+
+        if chunk == b'':
+            break
+
+        # Push the chunk into the buffer and update the index
+        buffer.extendleft(chunk)
+        index += skip
+
+    return indices
+
+
+def extract_package(log, fd, boundary, pkgfile):
+    """Extract tarball from multipart message on disk
+
+    Arguments:
+        log - the logger used to report progress
+        fd - A file object that the package can be read from
+        boundary - a string defining the boundary of different parts in the
+                multipart message.
+        pkgfile - the path that the extracted tarball is written to
+
+    """
+    log.debug("extracting archive from data")
+
+    # Find the indices of the message boundaries
+    boundaries = boundary_search(fd, boundary)
+    if not boundaries:
+        raise UnreadablePackageError()
+
+    # check that the message has a terminal boundary
+    fd.seek(boundaries[-1])
+    terminal = fd.read(len(boundary) + 2)
+    if terminal != boundary.encode() + b'--':
+        raise OnboardError(OnboardMissingTerminalBoundary())
+
+    log.debug("search for part containing archive")
+    # find the part of the message that contains the descriptor
+    for alpha, bravo in zip(boundaries[:-1], boundaries[1:]):
+        # Move to the beginning of the message part (and trim the
+        # boundary)
+        fd.seek(alpha)
+        fd.readline()
+
+        # Extract the headers from the beginning of the message
+        headers = tornado.httputil.HTTPHeaders()
+        while fd.tell() < bravo:
+            line = fd.readline()
+            if not line.strip():
+                break
+
+            headers.parse_line(line.decode('utf-8'))
+
+        else:
+            raise UnreadableHeadersError()
+
+        # extract the content disposition and options or move on to the next
+        # part of the message.
+        try:
+            content_disposition = headers['content-disposition']
+            disposition, options = tornado.httputil._parse_header(content_disposition)
+        except KeyError:
+            continue
+
+        # If this is not form-data, it is not what we are looking for
+        if disposition != 'form-data':
+            continue
+
+        # If there is no descriptor in the options, this data does not
+        # represent a descriptor archive.
+        if options.get('name', '') != 'descriptor':
+            continue
+
+        # Write the archive section to disk
+        with open(pkgfile + ".partial", 'wb') as tp:
+            log.debug("writing archive ({}) to filesystem".format(pkgfile))
+
+            remaining = bravo - fd.tell()
+            while remaining > 0:
+                length = min(remaining, 1024)
+                tp.write(fd.read(length))
+                remaining -= length
+
+            tp.flush()
+
+        # If the data ends with trailing carriage-return and line-feed
+        # characters, gzip can issue warnings or errors. Here, we check the
+        # last two bytes of the data and remove them if they correspond to
+        # '\r\n'. The file was opened in binary mode, so the comparison must
+        # be against the bytes literal b"\r\n" (a str comparison would never
+        # match).
+        with open(pkgfile + ".partial", "rb+") as tp:
+            tp.seek(-2, 2)
+            if tp.read(2) == b"\r\n":
+                tp.seek(-2, 2)
+                tp.truncate()
+
+        log.debug("finished writing archive")
+
+        # Strip the ".partial" suffix from the filename
+        shutil.move(pkgfile + ".partial", pkgfile)
+
+        return
+
+    raise UnreadablePackageError()
+
+
+@tornado.web.stream_request_body
+class StreamingUploadHandler(RequestHandler):
+    def initialize(self, log, loop):
+        """Initialize the handler
+
+        Arguments:
+            log  - the logger that this handler should use
+            loop - the tasklet's ioloop
+
+        """
+        self.transaction_id = str(uuid.uuid4())
+
+        self.loop = loop
+        self.log = self.application.get_logger(self.transaction_id)
+
+        self.package_name = None
+        self.package_fp = None
+        self.boundary = None
+
+        self.log.debug('created handler (transaction_id = {})'.format(self.transaction_id))
+
+    def msg_missing_account(self):
+        raise NotImplementedError()
+
+    def msg_missing_content_type(self):
+        raise NotImplementedError()
+
+    def msg_unsupported_media_type(self):
+        raise NotImplementedError()
+
+    def msg_missing_content_boundary(self):
+        raise NotImplementedError()
+
+    def msg_start(self):
+        raise NotImplementedError()
+
+    def msg_success(self):
+        raise NotImplementedError()
+
+    def msg_failure(self):
+        raise NotImplementedError()
+
+    def msg_package_upload(self):
+        raise NotImplementedError()
+
+    @tornado.gen.coroutine
+    def prepare(self):
+        """Prepare the handler for a request
+
+        The prepare function is the first part of a request transaction. It
+        creates a temporary file that uploaded data can be written to.
+
+        """
+        if self.request.method != "POST":
+            return
+
+        self.log.message(self.msg_start())
+
+        try:
+            # Retrieve the content type and parameters from the request
+            content_type = self.request.headers.get('content-type', None)
+            if content_type is None:
+                raise HttpMessageError(400, self.msg_missing_content_type())
+
+            content_type, params = tornado.httputil._parse_header(content_type)
+
+            if "multipart/form-data" != content_type.lower():
+                raise HttpMessageError(415, self.msg_unsupported_media_type())
+
+            if "boundary" not in params:
+                raise HttpMessageError(400, self.msg_missing_content_boundary())
+
+            self.boundary = params["boundary"]
+            self.package_fp = tempfile.NamedTemporaryFile(
+                    prefix="pkg-",
+                    delete=False,
+                    )
+
+            self.package_name = self.package_fp.name
+
+            self.log.debug('writing to {}'.format(self.package_name))
+
+        except HttpMessageError as e:
+            self.log.message(e.msg)
+            self.log.message(self.msg_failure())
+
+            raise tornado.web.HTTPError(e.code, e.msg.name)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.log.message(self.msg_failure())
+
+    @tornado.gen.coroutine
+    def data_received(self, data):
+        """Write data to the current file
+
+        Arguments:
+            data - a chunk of data to write to file
+
+        """
+        self.package_fp.write(data)
+
+    def post(self):
+        """Handle a post request
+
+        The function is called after any data associated with the body of the
+        request has been received.
+
+        """
+        self.package_fp.close()
+        self.log.message(self.msg_package_upload())
+
+
+class UploadHandler(StreamingUploadHandler):
+    """
+    This handler is used to upload archives that contain VNFDs, NSDs, and PNFDs
+    to the launchpad. This is a streaming handler that writes uploaded archives
+    to disk without loading them all into memory.
+ """ + + def msg_missing_account(self): + return OnboardMissingAccount() + + def msg_missing_content_type(self): + return OnboardMissingContentType() + + def msg_unsupported_media_type(self): + return OnboardUnsupportedMediaType() + + def msg_missing_content_boundary(self): + return OnboardMissingContentBoundary() + + def msg_start(self): + return OnboardStart() + + def msg_success(self): + return OnboardSuccess() + + def msg_failure(self): + return OnboardFailure() + + def msg_package_upload(self): + return OnboardPackageUpload() + + def post(self): + """Handle a post request + + The function is called after any data associated with the body of the + request has been received. + + """ + super().post() + + filesize = os.stat(self.package_name).st_size + self.log.debug('wrote {} bytes to {}'.format(filesize, self.package_name)) + + self.application.onboard( + self.package_name, + self.boundary, + self.transaction_id, + auth=self.request.headers.get('authorization', None), + ) + + self.set_status(200) + self.write(tornado.escape.json_encode({ + "transaction_id": self.transaction_id, + })) + + +class UpdateHandler(StreamingUploadHandler): + def msg_missing_account(self): + return UpdateMissingAccount() + + def msg_missing_content_type(self): + return UpdateMissingContentType() + + def msg_unsupported_media_type(self): + return UpdateUnsupportedMediaType() + + def msg_missing_content_boundary(self): + return UpdateMissingContentBoundary() + + def msg_start(self): + return UpdateStart() + + def msg_success(self): + return UpdateSuccess() + + def msg_failure(self): + return UpdateFailure() + + def msg_package_upload(self): + return UpdatePackageUpload() + + def post(self): + """Handle a post request + + The function is called after any data associated with the body of the + request has been received. 
+ + """ + super().post() + + filesize = os.stat(self.package_name).st_size + self.log.debug('wrote {} bytes to {}'.format(filesize, self.package_name)) + + self.application.update( + self.package_name, + self.boundary, + self.transaction_id, + auth=self.request.headers.get('authorization', None), + ) + + self.set_status(200) + self.write(tornado.escape.json_encode({ + "transaction_id": self.transaction_id, + })) + + +class StateHandler(RequestHandler): + def initialize(self, log, loop): + self.log = log + self.loop = loop + + def success(self, messages): + success = self.__class__.SUCCESS + return any(isinstance(msg, success) for msg in messages) + + def failure(self, messages): + failure = self.__class__.FAILURE + return any(isinstance(msg, failure) for msg in messages) + + def started(self, messages): + started = self.__class__.STARTED + return any(isinstance(msg, started) for msg in messages) + + def status(self, messages): + if self.failure(messages): + return "failure" + elif self.success(messages): + return "success" + return "pending" + + def notifications(self, messages): + notifications = { + "errors": list(), + "events": list(), + "warnings": list(), + } + + for msg in messages: + if isinstance(msg, message.StatusMessage): + notifications["events"].append({ + 'value': msg.name, + 'text': msg.text, + 'timestamp': msg.timestamp, + }) + continue + + elif isinstance(msg, message.WarningMessage): + notifications["warnings"].append({ + 'value': msg.text, + 'timestamp': msg.timestamp, + }) + continue + + elif isinstance(msg, message.ErrorMessage): + notifications["errors"].append({ + 'value': msg.text, + 'timestamp': msg.timestamp, + }) + continue + + self.log.warning('unrecognized message: {}'.format(msg)) + + return notifications + + def get(self, transaction_id): + if transaction_id not in self.application.messages: + raise tornado.web.HTTPError(404, "unrecognized transaction ID") + + messages = self.application.messages[transaction_id] + messages.sort(key=lambda m: m.timestamp) + + if not self.started(messages): + raise tornado.web.HTTPError(404, "unrecognized transaction ID") + + notifications = self.notifications(messages) + notifications["status"] = self.status(messages) + + self.write(tornado.escape.json_encode(notifications)) + + +class ExportStateHandler(StateHandler): + STARTED = ExportStart + SUCCESS = ExportSuccess + FAILURE = ExportFailure + + +class UploadStateHandler(StateHandler): + STARTED = OnboardStart + SUCCESS = OnboardSuccess + FAILURE = OnboardFailure + + +class UpdateStateHandler(StateHandler): + STARTED = UpdateStart + SUCCESS = UpdateSuccess + FAILURE = UpdateFailure + + +class UpdatePackage(threading.Thread): + def __init__(self, log, app, accounts, filename, boundary, pkg_id, auth, use_ssl , ssl_cert, ssl_key): + super().__init__() + self.app = app + self.log = log + self.auth = auth + self.pkg_id = pkg_id + self.accounts = accounts + self.filename = filename + self.boundary = boundary + self.updates_dir = os.path.join( + os.environ['RIFT_ARTIFACTS'], + "launchpad/updates", + ) + self.pkg_dir = os.path.join( + self.updates_dir, + self.pkg_id, + ) + self.use_ssl = use_ssl + self.ssl_cert = ssl_cert + self.ssl_key = ssl_key + + # Get the IO loop from the import main thred + self.io_loop = tornado.ioloop.IOLoop.current() + + def run(self): + try: + arch = self.extract_package() + self.validate_images(arch) + self.validate_descriptors(arch) + + try: + self.update_images(arch) + finally: + self.remove_images(arch) + + self.update_descriptors(arch) + + 
self.log.message(UpdateSuccess()) + + except UpdateError as e: + self.log.message(e.msg) + self.log.message(UpdateFailure()) + + except Exception as e: + self.log.exception(e) + if str(e): + self.log.message(message.UpdateError(str(e))) + self.log.message(UpdateFailure()) + + finally: + self.remove_images(arch) + os.remove(self.filename) + + def validate_images(self, arch): + for filename in arch.images: + with open(os.path.join(self.pkg_dir, filename), 'r+b') as fp: + chksum = checksums.checksum(fp) + + if chksum != arch.checksums[filename]: + raise UpdateError(UpdateChecksumMismatch(filename)) + + def remove_images(self, arch): + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + for image in arch.images: + try: + os.remove(os.path.join(pkg_dir, image)) + except OSError: + pass + + def validate_descriptors(self, arch): + self.validate_descriptor_checksums(arch) + self.validate_descriptor_existence(arch) + + def validate_descriptor_checksums(self, arch): + def checksum_comparison(filename): + with open(os.path.join(self.pkg_dir, filename), 'r+b') as fp: + chksum = checksums.checksum(fp) + + if chksum != arch.checksums[filename]: + raise UpdateError(UpdateChecksumMismatch(filename)) + + for filename in arch.vnfds: + checksum_comparison(filename) + + for filename in arch.nsds: + checksum_comparison(filename) + + def validate_descriptor_existence(self, arch): + def validate_descriptor_existence_vnfd(): + converter = convert.VnfdYangConverter() + + descriptor_ids = set() + for desc in self.app.tasklet.vnfd_catalog_handler.reg.elements: + self.log.debug("validating descriptor: {}".format(desc.id)) + descriptor_ids.add(desc.id) + + for filename in arch.vnfds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the VNFD descriptor object from the XML/JSON data. We + # use this to determine the ID of the VNFD, which is a + # necessary part of the URL. + if 'xml' in filename: + vnfd = converter.from_xml_string(data) + elif 'json' in filename: + vnfd = converter.from_json_string(data) + + if vnfd.id not in descriptor_ids: + raise UpdateError(UpdateNewDescriptor(filename)) + + def validate_descriptor_existence_nsd(): + converter = convert.NsdYangConverter() + + descriptor_ids = set() + for desc in self.app.tasklet.nsd_catalog_handler.reg.elements: + self.log.debug("validating descriptor: {}".format(desc.id)) + descriptor_ids.add(desc.id) + + for filename in arch.nsds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the NSD descriptor object from the XML data. We use + # this to determine the ID of the NSD, which is a necessary + # part of the URL. 
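+                # The serialization format is inferred from a substring of
+                # the filename ('xml' vs 'json').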
+ if 'xml' in filename: + vnfd = converter.from_xml_string(data) + elif 'json' in filename: + vnfd = converter.from_json_string(data) + + if vnfd.id not in descriptor_ids: + raise UpdateError(UpdateNewDescriptor(filename)) + + done = threading.Condition() + error = None + + # Define a callback that can be executed in the main thread in order to + # safely interact with the tasklet + def callback(): + nonlocal error + + done.acquire() + + try: + validate_descriptor_existence_vnfd() + validate_descriptor_existence_nsd() + + except UpdateError as e: + error = e + + except Exception as e: + error = UpdateError(str(e)) + + finally: + done.notify() + done.release() + + self.io_loop.add_callback(callback) + + done.acquire() + done.wait() + done.release() + + if error is not None: + raise error + + def update_images(self, arch): + if not arch.images: + return + + self.log.debug("cloud accounts: {}".format(self.accounts)) + + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + + account_images = {} + for account in self.accounts: + self.log.debug("getting image list for account {}".format(account.name)) + account_images[account] = [] + try: + account_images[account] = account.get_image_list() + except rift.mano.cloud.CloudAccountCalError as e: + self.log.warning("could not get image list for account {}".format(account.name)) + continue + + for filename in arch.images: + self.log.debug('uploading image: {}'.format(filename)) + + image = rwcal.ImageInfoItem() + image.name = os.path.basename(filename) + image.location = os.path.join(pkg_dir, filename) + image.checksum = arch.checksums[filename] + + for account in self.accounts: + # Find images on the cloud account which have the same name + matching_images = [i for i in account_images[account] if i.name == image.name] + matching_checksum = [i for i in matching_images if i.checksum == image.checksum] + if len(matching_checksum) > 0: + self.log.debug("found matching image with checksum, not uploading to {}".format(account.name)) + continue + + self.log.debug("uploading to account {}: {}".format(account.name, image)) + try: + image.id = account.create_image(filename) + except rift.mano.cloud.CloudAccountCalError as e: + self.log.error("error when uploading image {} to cloud account: {}".format( + filename, str(e))) + else: + self.log.debug('uploaded image to account{}: {}'.format(account.name, filename)) + + self.log.message(OnboardImageUpload()) + + def update_descriptors(self, arch): + self.update_descriptors_vnfd(arch) + self.update_descriptors_nsd(arch) + + self.log.message(UpdateDescriptorUpdated()) + self.log.debug("update complete") + + def update_descriptors_vnfd(self, arch): + converter = convert.VnfdYangConverter() + + auth = ('admin', 'admin') + + for filename in arch.vnfds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the VNFD descriptor object from the XML/JSON data. We use + # this to determine the ID of the VNFD, which is a necessary part + # of the URL. 
+ if 'xml' in filename: + vnfd = converter.from_xml_string(data) + + # Remove the top-level element of the XML (the 'catalog' element) + tree = ET.fromstring(data) + data = ET.tostring(tree.getchildren()[0]) + headers = {"content-type": "application/vnd.yang.data+xml"} + elif 'json' in filename: + vnfd = converter.from_json_string(data) + + # Remove the top-level element of the JSON (the 'catalog' element) + key = "vnfd:vnfd-catalog" + if key in data: + newdict = json.loads(data) + if (key in newdict): + data = json.dumps(newdict[key]) + headers = {"content-type": "application/vnd.yang.data+json"} + + # Add authorization header if it has been specified + if self.auth is not None: + headers['authorization'] = self.auth + + # Send request to restconf + + if self.use_ssl: + url = "https://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}" + response = requests.put( + url.format(vnfd.id), + data=data, + headers=headers, + auth=auth, + verify=False, + cert=(self.ssl_cert, self.ssl_key), + ) + else: + url = "http://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}" + response = requests.put( + url.format(vnfd.id), + data=data, + headers=headers, + auth=auth, + ) + + if not response.ok: + self.log.error(response.text) + raise UpdateError(UpdateDescriptorError(filename)) + + self.log.debug('successfully updated: {}'.format(filename)) + + def update_descriptors_nsd(self, arch): + converter = convert.NsdYangConverter() + + auth = ('admin', 'admin') + + for filename in arch.nsds: + # Read the XML/JSON from file + filepath = os.path.join(self.pkg_dir, filename) + with open(filepath) as fp: + data = fp.read() + + # Construct the NSD descriptor object from the XML/JSON data. We use + # this to determine the ID of the NSD, which is a necessary part + # of the URL. + if 'xml' in filename: + nsd = converter.from_xml_string(data) + + # Remove the top-level element of the XML (the 'catalog' element) + tree = ET.fromstring(data) + data = ET.tostring(tree.getchildren()[0]) + headers = {"content-type": "application/vnd.yang.data+xml"} + elif 'json' in filename: + nsd = converter.from_json_string(data) + + # Remove the top-level element of the JSON (the 'catalog' element) + key = "nsd:nsd-catalog" + if key in data: + newdict = json.loads(data) + if (key in newdict): + data = json.dumps(newdict[key]) + headers = {"content-type": "application/vnd.yang.data+json"} + + # Add authorization header if it has been specified + if self.auth is not None: + headers['authorization'] = self.auth + + # Send request to restconf + + if self.use_ssl: + url = "https://127.0.0.1:8008/api/config/nsd-catalog/nsd/{}" + response = requests.put( + url.format(nsd.id), + data=data, + headers=headers, + auth=auth, + verify=False, + cert=(self.ssl_cert, self.ssl_key), + ) + else: + url = "http://127.0.0.1:8008/api/config/nsd-catalog/nsd/{}" + response = requests.put( + url.format(nsd.id), + data=data, + headers=headers, + auth=auth, + ) + + if not response.ok: + self.log.error(response.text) + raise UpdateError(UpdateDescriptorError(filename)) + + self.log.debug('successfully updated: {}'.format(filename)) + + def extract_package(self): + """Extract tarball from multipart message on disk + + The tarball contained in the message may be very large; Too large to + load into memory without possibly affecting the behavior of the + webserver. So the message is memory mapped and parsed in order to + extract just the tarball, and then to extract the contents of the + tarball. 
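+
+        Schematically, the message on disk is ordinary multipart/form-data
+        (the boundary value and field names below are illustrative):
+
+            --------------------------e28fa6969d4f4322
+            Content-Disposition: form-data; name="package"; filename="pkg.tar.gz"
+            Content-Type: application/octet-stream
+
+            <tarball bytes>
+            --------------------------e28fa6969d4f4322--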
+ + Arguments: + filename - The name of a file that contains a multipart message + boundary - a string defining the boundary of different parts in the + multipart message. + + """ + # Ensure the updates directory exists + try: + os.makedirs(self.updates_dir, exist_ok=True) + except FileExistsError as e: + pass + + try: + pkgpath = os.path.join(self.updates_dir, self.pkg_id) + pkgfile = pkgpath + ".tar.gz" + with open(self.filename, 'r+b') as fp: + # A memory mapped representation of the file is used to reduce + # the memory footprint of the running application. + mapped = mmap.mmap(fp.fileno(), 0) + extract_package( + self.log, + mapped, + self.boundary, + pkgfile, + ) + + # Process the package archive + tar = tarfile.open(pkgfile, mode="r:gz") + arc = archive.LaunchpadArchive(tar, self.log) + self.log.debug("archive extraction complete") + + arc.extract(pkgpath) + + return arc + + except MessageException as e: + raise OnboardError(e.msg) + + except UnreadableHeadersError: + raise UpdateError(UpdateUnreadableHeaders()) + + except UnreadablePackageError: + raise UpdateError(UpdateUnreadablePackage()) + + +class OnboardPackage(threading.Thread): + def __init__(self, log, app, accounts, filename, boundary, pkg_id, auth, use_ssl, ssl_cert, ssl_key): + super().__init__() + self.app = app + self.log = log + self.auth = auth + self.pkg_id = pkg_id + self.accounts = accounts + self.filename = filename + self.boundary = boundary + self.io_loop = tornado.ioloop.IOLoop.current() + self.use_ssl = use_ssl + self.ssl_cert = ssl_cert + self.ssl_key = ssl_key + + def run(self): + try: + arch = self.extract_package() + + try: + self.upload_images(arch) + finally: + self.remove_images(arch) + + self.onboard_descriptors(arch) + + self.log.message(OnboardSuccess()) + + except OnboardError as e: + self.log.message(e.msg) + self.log.message(OnboardFailure()) + + except Exception as e: + self.log.exception(e) + if str(e): + self.log.message(message.OnboardError(str(e))) + self.log.message(OnboardFailure()) + + finally: + os.remove(self.filename) + + def remove_images(self, arch): + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + for image in arch.images: + try: + os.remove(os.path.join(pkg_dir, image)) + except OSError: + pass + + def upload_images(self, arch): + if not arch.images: + return + + self.log.debug("cloud accounts: {}".format(self.accounts)) + + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/packages', self.pkg_id) + + account_images = {} + for account in self.accounts: + self.log.debug("getting image list for account {}".format(account.name)) + account_images[account] = [] + try: + account_images[account] = account.get_image_list() + except rift.mano.cloud.CloudAccountCalError as e: + self.log.warning("could not get image list for account {}".format(account.name)) + continue + + for filename in arch.images: + self.log.debug('uploading image: {}'.format(filename)) + + image = rwcal.ImageInfoItem() + image.name = os.path.basename(filename) + image.location = os.path.join(pkg_dir, filename) + image.checksum = arch.checksums[filename] + + for account in self.accounts: + # Find images on the cloud account which have the same name + matching_images = [i for i in account_images[account] if i.name == image.name] + matching_checksum = [i for i in matching_images if i.checksum == image.checksum] + if len(matching_checksum) > 0: + self.log.debug("found matching image with checksum, not uploading to {}".format(account.name)) + continue + + 
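+                # No image with a matching name and checksum exists on this
+                # account, so upload the local copy from the package directory.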
self.log.debug("uploading to account {}: {}".format(account.name, image)) + try: + image.id = account.create_image(image) + except rift.mano.cloud.CloudAccountCalError as e: + self.log.error("error when uploading image {} to cloud account: {}".format( + filename, str(e))) + else: + self.log.debug('uploaded image to account{}: {}'.format(account.name, filename)) + + self.log.message(OnboardImageUpload()) + + def onboard_descriptors(self, arch): + + pkg_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], "launchpad/packages", self.pkg_id) + + def post(url, data, headers): + auth = ('admin', 'admin') + + if self.use_ssl: + response = requests.post(url, data=data, headers=headers, auth=auth, verify=False, cert=(self.ssl_cert, self.ssl_key)) + else: + response = requests.post(url, data=data, headers=headers, auth=auth) + if not response.ok: + self.log.error(response.text) + raise OnboardError(OnboardDescriptorError(filename)) + + self.log.debug('successfully uploaded: {}'.format(filename)) + + self.log.message(OnboardDescriptorValidation()) + + def prepare_xml(filename): + # Read the uploaded XML + with open(filename, 'r') as fp: + data = fp.read() + + # Remove the top-level element of the XML (the 'catalog' element) + tree = ET.fromstring(data) + data = ET.tostring(tree.getchildren()[0]) + + return data + + json_toplevel_keys = ["vnfd:vnfd-catalog", "nsd:nsd-catalog"] + + def prepare_json(filename): + # Read the uploaded JSON + with open(filename, 'r') as fp: + data = fp.read() + # Remove the top-level element of the JSON (the 'catalog' element) + for key in json_toplevel_keys: + if key in data: + newdict = json.loads(data) + if (key in newdict): + newstr = json.dumps(newdict[key]) + return newstr + + return data + + endpoints = ( + ("vnfd-catalog", arch.vnfds), + ("pnfd-catalog", arch.pnfds), + ("vld-catalog", arch.vlds), + ("nsd-catalog", arch.nsds), + ("vnffgd-catalog", arch.vnffgds), + ) + + if self.use_ssl: + url = "https://127.0.0.1:8008/api/config/{catalog}" + else: + url = "http://127.0.0.1:8008/api/config/{catalog}" + + try: + for catalog, filenames in endpoints: + for filename in filenames: + path = os.path.join(pkg_dir, filename) + if 'xml' in filename: + data = prepare_xml(path) + headers = {"content-type": "application/vnd.yang.data+xml"} + elif 'json' in filename: + data = prepare_json(path) + headers = {"content-type": "application/vnd.yang.data+json"} + + # Add authorization header if it has been specified + if self.auth is not None: + headers['authorization'] = self.auth + + post(url.format(catalog=catalog), data, headers) + + self.log.message(OnboardDescriptorOnboard()) + self.log.debug("onboard complete") + + except Exception: + # TODO: At this point we need to roll back all of the descriptors + # that were successfully onboarded. + self.log.error("Unable to onboard {}".format(filename)) + raise + + def extract_package(self): + """Extract tarball from multipart message on disk + + The tarball contained in the message may be very large; Too large to + load into memory without possibly affecting the behavior of the + webserver. So the message is memory mapped and parsed in order to + extract just the tarball, and then to extract the contents of the + tarball. + + Arguments: + filename - The name of a file that contains a multipart message + boundary - a string defining the boundary of different parts in the + multipart message. 
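+
+        A sketch of what the body below does (not additional behavior;
+        pkgfile is the destination path derived from the package id):
+
+            with open(self.filename, 'r+b') as fp:
+                mapped = mmap.mmap(fp.fileno(), 0)   # map, do not load
+                extract_package(self.log, mapped, self.boundary, pkgfile)
+
+            tar = tarfile.open(pkgfile, mode="r:gz")
+            arc = archive.LaunchpadArchive(tar, self.log)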
+ + """ + # Ensure the packages directory exists + packages = os.path.join(os.environ["RIFT_ARTIFACTS"], "launchpad/packages") + try: + os.makedirs(packages, exist_ok=True) + except FileExistsError as e: + pass + + try: + pkgpath = os.path.join(packages, self.pkg_id) + pkgfile = pkgpath + ".tar.gz" + with open(self.filename, 'r+b') as fp: + # A memory mapped representation of the file is used to reduce + # the memory footprint of the running application. + mapped = mmap.mmap(fp.fileno(), 0) + extract_package( + self.log, + mapped, + self.boundary, + pkgfile, + ) + + # Process the package archive + tar = tarfile.open(pkgfile, mode="r:gz") + arc = archive.LaunchpadArchive(tar, self.log) + self.log.debug("archive extraction complete") + + arc.extract(pkgpath) + + return arc + + except MessageException as e: + raise OnboardError(e.msg) + + except UnreadableHeadersError: + raise OnboardError(OnboardUnreadableHeaders()) + + except UnreadablePackageError: + raise OnboardError(OnboardUnreadablePackage()) + + +class ExportHandler(RequestHandler): + def initialize(self, log, loop): + self.loop = loop + self.transaction_id = str(uuid.uuid4()) + self.log = message.Logger( + log, + self.application.messages[self.transaction_id], + ) + + def get(self): + self.log.message(ExportStart()) + + # Parse the IDs + ids_query = self.get_query_argument("ids") + ids = [id.strip() for id in ids_query.split(',')] + + # Retrieve the list of the descriptors + descriptors = list() + for id in ids: + if id in self.application.vnfd_catalog: + descriptors.append(self.application.vnfd_catalog[id]) + continue + + if id in self.application.nsd_catalog: + descriptors.append(self.application.nsd_catalog[id]) + continue + + raise tornado.web.HTTPError(400, "unknown descriptor: {}".format(id)) + + pkg = archive.PackageArchive() + + # Add the VNFDs to the package + for desc in descriptors: + if isinstance(desc, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd): + pkg.add_vnfd(desc) + + # Add any NSDs to the package + for desc in descriptors: + if isinstance(desc, NsdYang.YangData_Nsd_NsdCatalog_Nsd): + pkg.add_nsd(desc) + + # Create a closure to create the actual package and run it in a + # separate thread + def run(): + pkg.create_archive( + self.transaction_id, + dest=self.application.export_dir, + ) + + self.application.tasklet.loop.run_in_executor(None, run) + + self.log.message(ExportSuccess()) + + self.write(tornado.escape.json_encode({ + "transaction_id": self.transaction_id, + })) + + +class UploaderApplication(tornado.web.Application): + def __init__(self, tasklet): + self.tasklet = tasklet + self.accounts = [] + self.messages = collections.defaultdict(list) + self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports') + + manifest = tasklet.tasklet_info.get_pb_manifest() + self.use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl + self.ssl_cert = manifest.bootstrap_phase.rwsecurity.cert + self.ssl_key = manifest.bootstrap_phase.rwsecurity.key + + attrs = dict(log=self.log, loop=self.loop) + + super(UploaderApplication, self).__init__([ + (r"/api/update", UpdateHandler, attrs), + (r"/api/upload", UploadHandler, attrs), + (r"/api/export", ExportHandler, attrs), + (r"/api/upload/([^/]+)/state", UploadStateHandler, attrs), + (r"/api/update/([^/]+)/state", UpdateStateHandler, attrs), + (r"/api/export/([^/]+)/state", ExportStateHandler, attrs), + (r"/api/export/([^/]+.tar.gz)", tornado.web.StaticFileHandler, { + "path": self.export_dir, + }) + ]) + + @property + def log(self): + return self.tasklet.log + + 
@property + def loop(self): + return self.tasklet.loop + + def get_logger(self, transaction_id): + return message.Logger(self.log, self.messages[transaction_id]) + + def onboard(self, package, boundary, transaction_id, auth=None): + log = message.Logger(self.log, self.messages[transaction_id]) + + pkg_id = str(uuid.uuid1()) + OnboardPackage( + log, + self, + self.accounts, + package, + boundary, + pkg_id, + auth, + self.use_ssl, + self.ssl_cert, + self.ssl_key, + ).start() + + def update(self, package, boundary, transaction_id, auth=None): + log = message.Logger(self.log, self.messages[transaction_id]) + + pkg_id = str(uuid.uuid1()) + UpdatePackage( + log, + self, + self.accounts, + package, + boundary, + pkg_id, + auth, + self.use_ssl, + self.ssl_cert, + self.ssl_key, + ).start() + + @property + def vnfd_catalog(self): + return self.tasklet.vnfd_catalog + + @property + def nsd_catalog(self): + return self.tasklet.nsd_catalog + + @property + def vld_catalog(self): + return self.tasklet.vld_catalog + + def get_vlds(self, vld_ids): + vlds = list() + for id in vld_ids: + vlds.append(self.vld_catalog[id]) + + return vlds \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py new file mode 100755 index 0000000..a8cb871 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwlaunchpad + +class Tasklet(rift.tasklets.rwlaunchpad.LaunchpadTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt new file mode 100644 index 0000000..9abde92 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Creation Date: 2015/10/30 +# + +include(rift_plugin) + +set(TASKLET_NAME rwmonitor) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/core.py + rift/tasklets/${TASKLET_NAME}/tasklet.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py new file mode 100644 index 0000000..1f96a0e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .tasklet import MonitorTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py new file mode 100644 index 0000000..4d653b2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py @@ -0,0 +1,542 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
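+
+# This module implements NFVI metrics aggregation: per-VDU samples obtained
+# from a monitoring (rwmon) plugin are rolled up to VNF level and then to NS
+# level, and the NS-level aggregate is published over DTS. See
+# NfviMetricsAggregator and RecordManager below.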
+ + +# +# + +import asyncio +import collections +import time + +import gi +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwDts', '1.0') +from gi.repository import ( + RwVnfrYang, + RwNsrYang, + RwDts, + ) + +import rift.tasklets + + +class NfviMetricsAggregator(object): + def __init__(self, + tasklet, + cloud_account=None, + nfvi_monitor=None, + ): + """Create an instance of NfviMetricsAggregator + + Arguments: + tasklet - a tasklet object that provides access to DTS, + logging, the asyncio ioloop, and monitoring state + cloud_account - a cloud account + nfvi_monitor - an NFVI monitor plugin + + """ + self.tasklet = tasklet + self.nfvi_monitor = nfvi_monitor + self.cloud_account = cloud_account + + @property + def dts(self): + return self.tasklet.dts + + @property + def log(self): + return self.tasklet.log + + @property + def loop(self): + return self.tasklet.loop + + @property + def records(self): + return self.tasklet.records + + @property + def polling_period(self): + return self.tasklet.polling_period + + @asyncio.coroutine + def request_vdu_metrics(self, vdur): + try: + # self.log.debug('request_vdu_metrics: {}'.format(vdur.vim_id)) + + # Create uninitialized metric structure + vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + + # No metrics can be collected if the monitor has not been + # specified. + if self.nfvi_monitor is None: + return vdu_metrics + + # Retrieve the NFVI metrics for this VDU + try: + _, metrics = yield from self.loop.run_in_executor( + self.tasklet.executor, + self.nfvi_monitor.nfvi_metrics, + self.cloud_account, + vdur.vim_id, + ) + + except Exception as e: + self.log.exception(e) + return vdu_metrics + + # VCPU + vdu_metrics.vcpu.total = vdur.vm_flavor.vcpu_count + vdu_metrics.vcpu.utilization = metrics.vcpu.utilization + + # Memory (in bytes) + vdu_metrics.memory.used = metrics.memory.used + vdu_metrics.memory.total = 1e6 * vdur.vm_flavor.memory_mb + vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total + + # Storage + vdu_metrics.storage.used = metrics.storage.used + vdu_metrics.storage.total = 1e9 * vdur.vm_flavor.storage_gb + vdu_metrics.storage.utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total + + # Network (incoming) + vdu_metrics.network.incoming.packets = metrics.network.incoming.packets + vdu_metrics.network.incoming.packet_rate = metrics.network.incoming.packet_rate + vdu_metrics.network.incoming.bytes = metrics.network.incoming.bytes + vdu_metrics.network.incoming.byte_rate = metrics.network.incoming.byte_rate + + # Network (outgoing) + vdu_metrics.network.outgoing.packets = metrics.network.outgoing.packets + vdu_metrics.network.outgoing.packet_rate = metrics.network.outgoing.packet_rate + vdu_metrics.network.outgoing.bytes = metrics.network.outgoing.bytes + vdu_metrics.network.outgoing.byte_rate = metrics.network.outgoing.byte_rate + + # External ports + vdu_metrics.external_ports.total = len(vdur.external_interface) + + # Internal ports + vdu_metrics.internal_ports.total = len(vdur.internal_interface) + + # TODO publish the metrics at the VDU-level + + return vdu_metrics + + except Exception as e: + self.log.exception(e) + raise + + @asyncio.coroutine + def request_vnf_metrics(self, vnfr_id): + try: + # self.log.debug('request_vnf_metrics: {}'.format(vnfr_id)) + + # For each VDU contained within the VNF, create a task to + # retrieve the NFVI metrics associated with that VDU. 
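+            # One asyncio task per VDU allows the executor-backed NFVI
+            # queries to run concurrently; the results are aggregated once
+            # asyncio.wait returns.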
+ tasks = list() + for vdu in self.records.vdurs(vnfr_id): + task = self.loop.create_task(self.request_vdu_metrics(vdu)) + tasks.append(task) + + vnf_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_NfviMetrics() + + # If there is no pending data, early out + if not tasks: + return vnf_metrics + + # Wait for the tasks to complete. Aggregate the results and + # return them. + yield from asyncio.wait(tasks, loop=self.loop) + + # TODO aggregated the metrics + for task in tasks: + vdu_metrics = task.result() + + # VCPU + vnf_metrics.vcpu.total += vdu_metrics.vcpu.total + vnf_metrics.vcpu.utilization += vdu_metrics.vcpu.total * vdu_metrics.vcpu.utilization + + # Memory (in bytes) + vnf_metrics.memory.used += vdu_metrics.memory.used + vnf_metrics.memory.total += vdu_metrics.memory.total + vnf_metrics.memory.utilization += vdu_metrics.memory.used + + # Storage + vnf_metrics.storage.used += vdu_metrics.storage.used + vnf_metrics.storage.total += vdu_metrics.storage.total + vnf_metrics.storage.utilization += vdu_metrics.storage.used + + # Network (incoming) + vnf_metrics.network.incoming.packets += vdu_metrics.network.incoming.packets + vnf_metrics.network.incoming.packet_rate += vdu_metrics.network.incoming.packet_rate + vnf_metrics.network.incoming.bytes += vdu_metrics.network.incoming.bytes + vnf_metrics.network.incoming.byte_rate += vdu_metrics.network.incoming.byte_rate + + # Network (outgoing) + vnf_metrics.network.outgoing.packets += vdu_metrics.network.outgoing.packets + vnf_metrics.network.outgoing.packet_rate += vdu_metrics.network.outgoing.packet_rate + vnf_metrics.network.outgoing.bytes += vdu_metrics.network.outgoing.bytes + vnf_metrics.network.outgoing.byte_rate += vdu_metrics.network.outgoing.byte_rate + + # External ports + vnf_metrics.external_ports.total += vdu_metrics.external_ports.total + + # Internal ports + vnf_metrics.internal_ports.total += vdu_metrics.internal_ports.total + + + # TODO find out the correct way to determine the number of + # active and inactive VMs in a VNF + vnf_metrics.vm.active_vm = len(tasks) + vnf_metrics.vm.inactive_vm = 0 + + # VCPU (note that VCPU utilization if a weighted average) + if vnf_metrics.vcpu.total > 0: + vnf_metrics.vcpu.utilization /= vnf_metrics.vcpu.total + + # Memory (in bytes) + if vnf_metrics.memory.total > 0: + vnf_metrics.memory.utilization *= 100.0 / vnf_metrics.memory.total + + # Storage + if vnf_metrics.storage.total > 0: + vnf_metrics.storage.utilization *= 100.0 / vnf_metrics.storage.total + + # TODO publish the VNF-level metrics + + return vnf_metrics + + except Exception as e: + self.log.exception(e) + raise + + @asyncio.coroutine + def request_ns_metrics(self, ns_instance_config_ref): + try: + # self.log.debug('request_ns_metrics: {}'.format(ns_instance_config_ref)) + + # Create a task for each VNFR to retrieve the NFVI metrics + # associated with that VNFR. + vnfrs = self.records.vnfr_ids(ns_instance_config_ref) + tasks = list() + for vnfr in vnfrs: + task = self.loop.create_task(self.request_vnf_metrics(vnfr)) + tasks.append(task) + + ns_metrics = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_NfviMetrics() + + # If there are any VNFR tasks, wait for them to finish + # before beginning the next iteration. 
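+            # The per-VNF results are then summed below; note that VCPU
+            # utilization is aggregated as a weighted average, e.g. one VNF
+            # with 2 vCPUs at 50% and another with 6 vCPUs at 90% yield
+            # (2*50 + 6*90) / 8 = 80% overall.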
+ if tasks: + yield from asyncio.wait(tasks, loop=self.loop) + + # Aggregate the VNFR metrics + for task in tasks: + vnf_metrics = task.result() + + ns_metrics.vm.active_vm += vnf_metrics.vm.active_vm + ns_metrics.vm.inactive_vm += vnf_metrics.vm.inactive_vm + + # VCPU + ns_metrics.vcpu.total += vnf_metrics.vcpu.total + ns_metrics.vcpu.utilization += vnf_metrics.vcpu.total * vnf_metrics.vcpu.utilization + + # Memory (in bytes) + ns_metrics.memory.used += vnf_metrics.memory.used + ns_metrics.memory.total += vnf_metrics.memory.total + ns_metrics.memory.utilization += vnf_metrics.memory.used + + # Storage + ns_metrics.storage.used += vnf_metrics.storage.used + ns_metrics.storage.total += vnf_metrics.storage.total + ns_metrics.storage.utilization += vnf_metrics.storage.used + + # Network (incoming) + ns_metrics.network.incoming.packets += vnf_metrics.network.incoming.packets + ns_metrics.network.incoming.packet_rate += vnf_metrics.network.incoming.packet_rate + ns_metrics.network.incoming.bytes += vnf_metrics.network.incoming.bytes + ns_metrics.network.incoming.byte_rate += vnf_metrics.network.incoming.byte_rate + + # Network (outgoing) + ns_metrics.network.outgoing.packets += vnf_metrics.network.outgoing.packets + ns_metrics.network.outgoing.packet_rate += vnf_metrics.network.outgoing.packet_rate + ns_metrics.network.outgoing.bytes += vnf_metrics.network.outgoing.bytes + ns_metrics.network.outgoing.byte_rate += vnf_metrics.network.outgoing.byte_rate + + # External ports + ns_metrics.external_ports.total += vnf_metrics.external_ports.total + + # Internal ports + ns_metrics.internal_ports.total += vnf_metrics.internal_ports.total + + # VCPU (note that VCPU utilization if a weighted average) + if ns_metrics.vcpu.total > 0: + ns_metrics.vcpu.utilization /= ns_metrics.vcpu.total + + # Memory (in bytes) + if ns_metrics.memory.total > 0: + ns_metrics.memory.utilization *= 100.0 / ns_metrics.memory.total + + # Storage + if ns_metrics.storage.total > 0: + ns_metrics.storage.utilization *= 100.0 / ns_metrics.storage.total + + return ns_metrics + + except Exception as e: + self.log.exception(e) + raise + + @asyncio.coroutine + def publish_nfvi_metrics(self, ns_instance_config_ref): + nfvi_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='{}']/rw-nsr:nfvi-metrics" + nfvi_xpath = nfvi_xpath.format(ns_instance_config_ref) + + registration_handle = yield from self.dts.register( + xpath=nfvi_xpath, + handler=rift.tasklets.DTS.RegistrationHandler(), + flags=(RwDts.Flag.PUBLISHER | RwDts.Flag.NO_PREP_READ), + ) + + self.log.debug('preparing to publish NFVI metrics for {}'.format(ns_instance_config_ref)) + + try: + # Create the initial metrics element + ns_metrics = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_NfviMetrics() + registration_handle.create_element(nfvi_xpath, ns_metrics) + + prev = time.time() + while True: + # Use a simple throttle to regulate the frequency that the + # VDUs are sampled at. 
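+                # For example, with polling_period = 1.0: if the previous
+                # iteration took 0.25s we sleep for the remaining 0.75s; if
+                # it took 1.0s or longer we sample again without sleeping.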
+                curr = time.time()
+
+                if curr - prev < self.polling_period:
+                    pause = self.polling_period - (curr - prev)
+                    yield from asyncio.sleep(pause, loop=self.loop)
+
+                prev = time.time()
+
+                # Retrieve the NS NFVI metrics
+                ns_metrics = yield from self.request_ns_metrics(ns_instance_config_ref)
+
+                # Check that the NSR still exists
+                if not self.records.has_nsr(ns_instance_config_ref):
+                    break
+
+                # Publish the NSR metrics
+                registration_handle.update_element(nfvi_xpath, ns_metrics)
+
+        except Exception as e:
+            self.log.exception(e)
+            raise
+
+        finally:
+            # Make sure that the NFVI metrics are removed from the operational
+            # data
+            yield from registration_handle.delete_element(nfvi_xpath)
+            self.log.debug('deleted: {}'.format(nfvi_xpath))
+
+            # Now that we are done with the registration handle, tell DTS to
+            # deregister it
+            registration_handle.deregister()
+
+        self.log.debug('finished publishing NFVI metrics for {}'.format(ns_instance_config_ref))
+
+
+class RecordManager(object):
+    """
+    There are two mappings that this class is responsible for maintaining. The
+    first is a mapping from the set of NSR IDs to the VNFR IDs contained within
+    the network service,
+
+    nsr-id
+     |-- vnfr-id-1
+     |-- vnfr-id-2
+     |-- ...
+     \-- vnfr-id-n
+
+    The second maps the set of VNFR IDs to the VDUR structures contained within
+    those network functions,
+
+    vnfr-id
+     |-- vdur-1
+     |-- vdur-2
+     |-- ...
+     \-- vdur-m
+
+
+    Note that the VDURs can be identified by the vim-id contained in the VDUR
+    structure.
+
+    It is important to understand that the current model of the system does not
+    have a direct connection from an NSR to a VNFR or VDUR. This means that the
+    NSR structure does not contain any VNFR/VDUR information, and it would be
+    necessary to query DTS in order to retrieve VNFR/VDUR information. This
+    class manages the two mappings to keep track of the NSRs and VNFRs so that
+    it is unnecessary to query DTS in order to determine which VNFRs/VDURs are
+    contained within a given NSR. On the other hand, a VNFR does in fact
+    contain VDUR information.
+
+    Finally, note that it is necessary to retain the mapping from the VNFR to
+    the VDUR because NFVI metric aggregation needs to publish aggregate
+    information at both the NS and VNF levels.
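+
+    A minimal usage sketch (nsr and vnfr stand in for the record structures
+    received over DTS; the names are illustrative):
+
+        records = RecordManager()
+        records.add_nsr(nsr)      # index nsr-id -> constituent VNFR IDs
+        records.add_vnfr(vnfr)    # index vnfr-id -> VDURs, keyed by vim-id
+
+        for vdur in records.vdurs(vnfr.id):
+            pass                  # aggregate per-VDU NFVI metrics here
+
+        records.remove_vnfr(vnfr.id)
+        records.remove_nsr(nsr.ns_instance_config_ref)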
+ + """ + + def __init__(self): + self._nsr_to_vnfrs = dict() + self._vnfr_to_vdurs = dict() + + # A mapping from the VDURs VIM ID to the VDUR structure + self._vdurs = dict() + + def add_nsr(self, nsr): + """Add an NSR to the manager + + Arguments: + nsr - an NSR structure + + """ + if nsr.constituent_vnfr_ref: + if nsr.ns_instance_config_ref not in self._nsr_to_vnfrs: + self._nsr_to_vnfrs[nsr.ns_instance_config_ref] = set() + + mapping = self._nsr_to_vnfrs[nsr.ns_instance_config_ref] + mapping.update(nsr.constituent_vnfr_ref) + + def add_vnfr(self, vnfr): + """Add a VNFR to the manager + + Arguments: + vnfr - a VNFR structure + + """ + # Create a list of VDURs filtering out the VDURs that have not been + # assigned a vim-id + vdurs = [vdur for vdur in vnfr.vdur if vdur.vim_id is not None] + + # There are no valid VDURs, early out now + if not vdurs: + return + + # Create a set for the VNFR if necessary + if vnfr.id not in self._vnfr_to_vdurs: + self._vnfr_to_vdurs[vnfr.id] = set() + + # Update the vnfr-id mapping + mapping = self._vnfr_to_vdurs[vnfr.id] + mapping.update(vdur.vim_id for vdur in vdurs) + + # Update the vdur mapping + self._vdurs.update((vdur.vim_id, vdur) for vdur in vdurs) + + def has_nsr(self, nsr_id): + """Returns True if the specified NSR ID is in the record manager + + Arguments: + nsr_id - the ID of the NSR to check + + Returns: + a boolean indicating whether the record manager contains the NSR + + """ + return nsr_id in self._nsr_to_vnfrs + + def has_vnfr(self, vnfr_id): + """Returns True if the specified VNFR ID is in the record manager + + Arguments: + vnfr_id - the ID of the VNFR to check + + Returns: + a boolean indicating whether the record manager contains the VNFR + + """ + return vnfr_id in self._vnfr_to_vdurs + + def remove_vnfr(self, vnfr_id): + """Remove the specified VNFR + + The VNFR will be removed along with any of the associated VDURs. + + Arguments: + vnfr_id - the ID of the VNFR to remove + + """ + if vnfr_id not in self._vnfr_to_vdurs: + return + + # Construct a set of VDURs to be deleted from the dict of vdurs + vdur_ids = self._vnfr_to_vdurs[vnfr_id] + vdur_ids &= set(self._vdurs.keys()) + + # Remove the VDUR structures + for vdur_id in vdur_ids: + del self._vdurs[vdur_id] + + # Remove the mapping from the VNFR to the VDURs + del self._vnfr_to_vdurs[vnfr_id] + + def remove_nsr(self, nsr_id): + """Removes the specified NSR + + Note that none of the VNFRs associated with the NSR are removed; This + is related to the separation between the NSR and VNFR in the yang model + (see above). The removal of VNFRs is assumed to be a separate action. 
+ + Arguments: + nsr_id - the ID of the NSR to remove + + """ + del self._nsr_to_vnfrs[nsr_id] + + def vdurs(self, vnfr_id): + """Return a list of the VDURs associated with a VNFR + + Arguments: + vnfr_id - the ID of the VNFR + + Returns: + a list of VDURs + + """ + vdurs = self._vnfr_to_vdurs.get(vnfr_id, set()) + return [self._vdurs[id] for id in vdurs] + + def vdur_ids(self, vnfr_id): + """Return a list of the VDUR IDs associated with a VNFR + + Arguments: + vnfr_id - the ID of the VNFR + + Returns: + a list of VDUR IDs + + """ + return list(self._vnfr_to_vdurs.get(vnfr_id, list())) + + def vnfr_ids(self, nsr_id): + """Return a list of the VNFR IDs associated with a NSR + + Arguments: + nsr_id - the ID of the NSR + + Returns: + a list of VNFR IDs + + """ + return list(self._nsr_to_vnfrs.get(nsr_id, list())) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py new file mode 100644 index 0000000..3ed50e1 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py @@ -0,0 +1,443 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# +# + +import asyncio +import collections +import concurrent.futures +import os +import time +import uuid +import sys + +import gi +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwLog', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwMonitorYang', '1.0') +gi.require_version('RwmonYang', '1.0') +gi.require_version('RwNsdYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwYang', '1.0') +from gi.repository import ( + NsrYang, + RwBaseYang, + RwDts as rwdts, + RwLaunchpadYang, + RwLog as rwlog, + RwcalYang as rwcal, + RwMonitorYang as rwmonitor, + RwmonYang as rwmon, + RwNsdYang as rwnsd, + RwTypes, + RwYang, + VnfrYang, +) + +import rift.tasklets +import rift.mano.cloud + +import rw_peas + +from .core import (NfviMetricsAggregator, RecordManager) + + +class DtsHandler(object): + def __init__(self, tasklet): + self.reg = None + self.tasklet = tasklet + + @property + def log(self): + return self.tasklet.log + + @property + def log_hdl(self): + return self.tasklet.log_hdl + + @property + def dts(self): + return self.tasklet.dts + + @property + def loop(self): + return self.tasklet.loop + + @property + def classname(self): + return self.__class__.__name__ + + +class NsInstanceOpdataSubscriber(DtsHandler): + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + @asyncio.coroutine + def register(self): + def handle_create(msg): + self.tasklet.records.add_nsr(msg) + self.tasklet.start_ns_monitor(msg) + + def handle_update(msg): + self.tasklet.records.add_nsr(msg) + + def handle_delete(msg): + self.tasklet.records.remove_nsr(msg.ns_instance_config_ref) + + def ignore(msg): + pass + + dispatch = { + rwdts.QueryAction.CREATE: handle_create, + rwdts.QueryAction.UPDATE: handle_update, + rwdts.QueryAction.DELETE: handle_delete, + } + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + try: + # Disabling the following comments since they are too frequent + # self.log.debug("{}:on_prepare:msg {}".format(self.classname, msg)) + + if msg is not None: + dispatch.get(action, ignore)(msg) + + except Exception as e: + self.log.exception(e) + + finally: + # Disabling the following comments since they are too frequent + # self.log.debug("{}:on_prepare complete".format(self.classname)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + handler = rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare, + ) + + with self.dts.group_create() as group: + group.register( + xpath=NsInstanceOpdataSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + handler=handler, + ) + + +class VnfrCatalogSubscriber(DtsHandler): + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + @asyncio.coroutine + def register(self): + def handle_create(msg): + self.log.debug("{}:handle_create:{}".format(self.classname, msg)) + self.tasklet.records.add_vnfr(msg) + + def handle_update(msg): + self.log.debug("{}:handle_update:{}".format(self.classname, msg)) + self.tasklet.records.add_vnfr(msg) + + def handle_delete(msg): + self.tasklet.records.remove_vnfr(msg) + + def ignore(msg): + pass + + dispatch = { + rwdts.QueryAction.CREATE: handle_create, + rwdts.QueryAction.UPDATE: handle_update, + rwdts.QueryAction.DELETE: handle_delete, + } + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + try: + self.log.debug("{}:on_prepare".format(self.classname)) + self.log.debug("{}:on_preparef:msg 
{}".format(self.classname, msg)) + + xpath = ks_path.to_xpath(VnfrYang.get_schema()) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath) + + dispatch.get(action, ignore)(msg) + + except Exception as e: + self.log.exception(e) + + finally: + self.log.debug("{}:on_prepare complete".format(self.classname)) + + handler = rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare, + ) + + with self.dts.group_create() as group: + group.register( + xpath=VnfrCatalogSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + handler=handler, + ) + + +class NfviPollingPeriodSubscriber(DtsHandler): + XPATH = "C,/nsr:ns-instance-config" + + @asyncio.coroutine + def register(self): + def on_apply(dts, acg, xact, action, _): + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self.log.debug("No xact handle. Skipping apply config") + return + + xact_config = list(self.reg.get_xact_elements(xact)) + for config in xact_config: + if config.nfvi_polling_period is not None: + self.tasklet.polling_period = config.nfvi_polling_period + self.log.debug("new polling period: {}".format(self.tasklet.polling_period)) + + self.log.debug( + "Registering for NFVI polling period config using xpath: %s", + NfviPollingPeriodSubscriber.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=on_apply, + ) + + with self.dts.appconf_group_create(acg_handler) as acg: + self.reg = acg.register( + xpath=NfviPollingPeriodSubscriber.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + + +class CloudAccountDtsHandler(DtsHandler): + def __init__(self, tasklet): + super().__init__(tasklet) + self._cloud_cfg_subscriber = None + + def on_account_added_apply(self, account): + self.log.info("adding cloud account: {}".format(account)) + self.tasklet.cloud_accounts[account.name] = account.cal_account_msg + self.tasklet.account_nfvi_monitors[account.name] = self.load_nfvi_monitor_plugin(account.cal_account_msg) + + def on_account_deleted_apply(self, account_name): + self.log.info("deleting cloud account: {}".format(account_name)) + if account_name in self.tasklet.cloud_accounts: + del self.tasklet.cloud_accounts[account_name] + + if account_name in self.tasklet.account_nfvi_monitors: + del self.tasklet.account_nfvi_monitors[account_name] + + @asyncio.coroutine + def on_account_updated_prepare(self, account): + raise NotImplementedError("Monitor does not support updating cloud account") + + def load_nfvi_monitor_plugin(self, cloud_account): + if cloud_account.account_type == "openstack": + self.log.debug('loading ceilometer plugin for NFVI metrics') + plugin = rw_peas.PeasPlugin( + "rwmon_ceilometer", + 'RwMon-1.0', + ) + + else: + self.log.debug('loading mock plugin for NFVI metrics') + plugin = rw_peas.PeasPlugin( + "rwmon_mock", + 'RwMon-1.0', + ) + + impl = plugin.get_interface("Monitoring") + impl.init(self.log_hdl) + + # Check that the plugin is available on this platform + _, available = impl.nfvi_metrics_available(cloud_account) + if not available: + self.log.warning('NFVI monitoring unavailable on this host') + return None + + return impl + + def register(self): + self.log.debug("creating cloud account config handler") + self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber( + self.dts, self.log, self.log_hdl, + rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=self.on_account_added_apply, + on_delete_apply=self.on_account_deleted_apply, + 
on_update_prepare=self.on_account_updated_prepare, + ) + ) + self._cloud_cfg_subscriber.register() + + +class MonitorTasklet(rift.tasklets.Tasklet): + """ + The MonitorTasklet is responsible for sampling NFVI mettrics (via a CAL + plugin) and publishing the aggregate information. + """ + + DEFAULT_POLLING_PERIOD = 1.0 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.nsr_subscriber = NsInstanceOpdataSubscriber(self) + self.vnfr_subscriber = VnfrCatalogSubscriber(self) + self.cloud_cfg_subscriber = CloudAccountDtsHandler(self) + self.poll_period_subscriber = NfviPollingPeriodSubscriber(self) + self.cloud_account_handler = CloudAccountDtsHandler(self) + + self.vnfrs = collections.defaultdict(list) + self.vdurs = collections.defaultdict(list) + + self.monitors = dict() + self.cloud_accounts = {} + self.account_nfvi_monitors = {} + + self.records = RecordManager() + self.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16) + + def start(self): + super().start() + self.log.info("Starting MonitoringTasklet") + + self.log.debug("Registering with dts") + self.dts = rift.tasklets.DTS( + self.tasklet_info, + rwmonitor.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self.dts) + + def stop(self): + try: + self.dts.deinit() + except Exception: + print("Caught Exception in RWMON stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + self.log.debug("creating cloud account handler") + self.cloud_cfg_subscriber.register() + + self.log.debug("creating NFVI poll period subscriber") + yield from self.poll_period_subscriber.register() + + self.log.debug("creating network service record subscriber") + yield from self.nsr_subscriber.register() + + self.log.debug("creating vnfr subscriber") + yield from self.vnfr_subscriber.register() + + def on_cloud_account_created(self, cloud_account): + pass + + def on_cloud_account_deleted(self, cloud_account): + pass + + @asyncio.coroutine + def run(self): + pass + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Handle DTS state change + + Take action according to current DTS state to transition application + into the corresponding application state + + Arguments + state - current dts state + + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self.dts.handle.set_state(next_state) + + def start_ns_monitor(self, ns_instance_opdata_msg): + ns_instance_config_ref = ns_instance_opdata_msg.ns_instance_config_ref + nsr_cloud_account = ns_instance_opdata_msg.cloud_account + + if nsr_cloud_account not in self.cloud_accounts: + self.log.error("cloud account %s has not been configured", nsr_cloud_account) + return + + if nsr_cloud_account not in self.account_nfvi_monitors: + self.log.warning("No NFVI monitoring available for cloud account %s", + nsr_cloud_account) + return + + cloud_account = self.cloud_accounts[nsr_cloud_account] + nfvi_monitor = self.account_nfvi_monitors[nsr_cloud_account] + + try: + if 
ns_instance_config_ref not in self.monitors: + aggregator = NfviMetricsAggregator( + tasklet=self, + cloud_account=cloud_account, + nfvi_monitor=nfvi_monitor, + ) + + # Create a task to run the aggregator independently + coro = aggregator.publish_nfvi_metrics(ns_instance_config_ref) + task = self.loop.create_task(coro) + self.monitors[ns_instance_config_ref] = task + + msg = 'started monitoring NFVI metrics for {}' + self.log.info(msg.format(ns_instance_config_ref)) + + except Exception as e: + self.log.exception(e) + raise + + def stop_ns_monitor(self, ns_instance_config_ref): + if ns_instance_config_ref not in self.monitors: + msg = "Trying the destroy non-existent monitor for {}" + self.log.error(msg.format(ns_instance_config_ref)) + + else: + self.monitors[ns_instance_config_ref].cancel() + del self.monitors[ns_instance_config_ref] \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py new file mode 100755 index 0000000..24b37db --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwmonitor/rwmonitor.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwmonitor + +class Tasklet(rift.tasklets.rwmonitor.MonitorTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt new file mode 100644 index 0000000..9265ce1 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/CMakeLists.txt @@ -0,0 +1,38 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwnsmtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/rwnsm_conman.py + rift/tasklets/${TASKLET_NAME}/rwnsmplugin.py + rift/tasklets/${TASKLET_NAME}/openmano_nsm.py + rift/tasklets/${TASKLET_NAME}/cloud.py + rift/tasklets/${TASKLET_NAME}/config_value_pool.py + rift/tasklets/${TASKLET_NAME}/publisher.py + rift/tasklets/${TASKLET_NAME}/xpath.py + rift/tasklets/${TASKLET_NAME}/rwnsmconfigplugin.py + rift/tasklets/${TASKLET_NAME}/rwnsm_conagent.py + rift/tasklets/${TASKLET_NAME}/jujuconf_nsm.py + rift/tasklets/${TASKLET_NAME}/juju_intf.py + rift/tasklets/${TASKLET_NAME}/rwvnffgmgr.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py new file mode 100644 index 0000000..3fd29de --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwnsmtasklet import NsmTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py new file mode 100644 index 0000000..48c0770 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py @@ -0,0 +1,211 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import asyncio
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang as rwcal,
+    RwTypes,
+    ProtobufC,
+    )
+
+import rift.tasklets
+import rift.mano.cloud
+
+from . import openmano_nsm
+from . import rwnsmplugin
+
+
+class CloudAccountNotFoundError(Exception):
+    pass
+
+
+class RwNsPlugin(rwnsmplugin.NsmPluginBase):
+    """
+    RW Implementation of the NsmPluginBase
+    """
+    def __init__(self, dts, log, loop, publisher, cloud_account):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+    def create_nsr(self, nsr_msg, nsd):
+        """
+        Create Network service record
+        """
+        pass
+
+    @asyncio.coroutine
+    def deploy(self, nsr):
+        pass
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, xact):
+        """
+        Instantiate NSR with the passed nsr id
+        """
+        yield from nsr.instantiate(xact)
+
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr, xact):
+        """
+        Instantiate the VNF record within the NSR
+        """
+        yield from vnfr.instantiate(nsr, xact)
+
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vlr, xact):
+        """
+        Instantiate the virtual link within the NSR
+        """
+        yield from vlr.instantiate(xact)
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr, xact):
+        """
+        Terminate the network service
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr, xact):
+        """
+        Terminate the VNF
+        """
+        yield from vnfr.terminate(xact)
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr, xact):
+        """
+        Terminate the virtual link
+        """
+        yield from vlr.terminate(xact)
+
+
+class NsmPlugins(object):
+    """ NSM Plugins """
+    def __init__(self):
+        self._plugin_classes = {
+            "openmano": openmano_nsm.OpenmanoNsPlugin,
+        }
+
+    @property
+    def plugins(self):
+        """ Plugin info """
+        return self._plugin_classes
+
+    def __getitem__(self, name):
+        """ Get item """
+        return self._plugin_classes[name]
+
+    def register(self, plugin_name, plugin_class, *args):
+        """ Register a plugin to this Nsm """
+        self._plugin_classes[plugin_name] = plugin_class
+
+    def deregister(self, plugin_name, plugin_class, *args):
+        """ Deregister a plugin from this Nsm """
+        if plugin_name in self._plugin_classes:
+            del self._plugin_classes[plugin_name]
+
+    def class_by_plugin_name(self, name):
+        """ Get class by plugin name """
+        return self._plugin_classes[name]
+
+
+class CloudAccountNsmPluginSelector(object):
+    def __init__(self, dts, log, log_hdl, loop, records_publisher):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        self._records_publisher = records_publisher
+
+        self._nsm_plugins = NsmPlugins()
+
+        self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
+            self._dts,
+            self._log,
+            self._log_hdl,
+            rift.mano.cloud.CloudAccountConfigCallbacks(
+                on_add_apply=self._on_cloud_account_added,
+                on_delete_apply=self._on_cloud_account_deleted,
+            )
+        )
+
+        self._cloud_plugins = {}
+        self._plugin_instances = {}
+
+    def _on_cloud_account_added(self, cloud_account):
+        self._log.debug("Got nsm plugin cloud account: %s", cloud_account)
+        try:
+            nsm_cls = self._nsm_plugins.class_by_plugin_name(
+                cloud_account.account_type
+            )
+        except KeyError as e:
+            self._log.debug(
+                "Cloud account nsm plugin not found: %s. Using standard rift nsm.",
+                cloud_account.name
+            )
+            nsm_cls = RwNsPlugin
+
+        # Check to see if the plugin was already instantiated
+        if nsm_cls in self._plugin_instances:
+            self._log.debug("Cloud account nsm plugin already instantiated. Using existing.")
+            self._cloud_plugins[cloud_account.name] = self._plugin_instances[nsm_cls]
+            return
+
+        # Otherwise, instantiate a new plugin using the cloud account
+        self._log.debug("Instantiating new cloud account using class: %s", nsm_cls)
+        nsm_instance = nsm_cls(self._dts, self._log, self._loop,
+                               self._records_publisher, cloud_account.account_msg)
+        self._plugin_instances[nsm_cls] = nsm_instance
+
+        self._cloud_plugins[cloud_account.name] = nsm_instance
+
+    def _on_cloud_account_deleted(self, account_name):
+        del self._cloud_plugins[account_name]
+
+    def get_cloud_account_plugin_instance(self, account_name):
+        if account_name not in self._cloud_plugins:
+            msg = "Account %s was not configured" % account_name
+            self._log.error(msg)
+            raise CloudAccountNotFoundError(msg)
+
+        instance = self._cloud_plugins[account_name]
+        self._log.debug("Got NSM plugin instance for account %s: %s",
+                        account_name, instance)
+
+        return instance
+
+    def get_cloud_account_sdn_name(self, account_name):
+        if account_name in self._cloud_sub.accounts:
+            self._log.debug("Cloud account msg is %s",
+                            self._cloud_sub.accounts[account_name].account_msg)
+            if self._cloud_sub.accounts[account_name].account_msg.has_field("sdn_account"):
+                sdn_account = self._cloud_sub.accounts[account_name].account_msg.sdn_account
+                self._log.info("SDN associated with Cloud name %s is %s", account_name, sdn_account)
+                return sdn_account
+            else:
+                self._log.debug("No SDN Account associated with Cloud name %s", account_name)
+                return None
+
+
+    @asyncio.coroutine
+    def register(self):
+        self._cloud_sub.register()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py
new file mode 100644
index 0000000..8134a18
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py
@@ -0,0 +1,152 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
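A minimal usage sketch for the ParameterValuePool class defined below. Everything here is illustrative: the logger, the pool name, the port range, and the /tmp fallback for RIFT_ARTIFACTS are assumptions, not part of the submitted module.

    import logging
    import os

    os.environ.setdefault("RIFT_ARTIFACTS", "/tmp")  # backing files go under $RIFT_ARTIFACTS/parameter_pools/
    logger = logging.getLogger("pool-demo")

    pool = ParameterValuePool(logger, "mgmt-port-pool", range(9000, 9100))
    value = pool.get_next_unused_value()   # peek at the next free value (9000 on a first run)
    pool.add_used_value(value)             # commit it; the used list is pickled immediately
    pool.remove_used_value(value)          # hand the value back when it is no longer needed

Because the used-value list is pickled to disk on every change, a pool recreated with the same name after a restart resumes allocation where it left off.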
+
+import os
+import pickle
+import uuid
+
+
+class ParameterValueError(Exception):
+    pass
+
+
+class ParameterValuePool(object):
+    def __init__(self, log, name, value_iter):
+        self._log = log
+        self._name = name
+
+        self._used_pool_values = []
+        self._available_pool_values = list(value_iter)
+
+        self._backing_filepath = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "parameter_pools",
+            self._name
+        )
+
+        self._read_used_pool_values()
+
+    def _save_used_pool_values(self):
+        dir_path = os.path.dirname(self._backing_filepath)
+        if not os.path.exists(dir_path):
+            try:
+                os.makedirs(dir_path, exist_ok=True)
+            except OSError as e:
+                self._log.error("Could not create directory for save used pool: %s", str(e))
+
+        try:
+            with open(self._backing_filepath, "wb") as hdl:
+                pickle.dump(self._used_pool_values, hdl)
+        except OSError as e:
+            self._log.error(
+                "Could not open the parameter value pool file: %s",
+                str(e))
+        except pickle.PickleError as e:
+            self._log.error(
+                "Could not pickle the used parameter value pool: %s",
+                str(e))
+
+    def _read_used_pool_values(self):
+        try:
+            with open(self._backing_filepath, 'rb') as hdl:
+                self._used_pool_values = pickle.load(hdl)
+
+        except (OSError, EOFError):
+            self._log.warning("Could not read from backing file: %s",
+                              self._backing_filepath)
+            self._used_pool_values = []
+
+        except pickle.PickleError as e:
+            self._log.warning("Could not unpickle the used parameter value pool from %s: %s",
+                              self._backing_filepath, str(e))
+            self._used_pool_values = []
+
+        for value in self._used_pool_values:
+            self._available_pool_values.remove(value)
+
+    def get_next_unused_value(self):
+        if len(self._available_pool_values) == 0:
+            raise ParameterValueError("No more parameter values to allocate")
+
+        next_value = self._available_pool_values[0]
+        self._log.debug("Got next value for parameter pool %s: %s", self._name, next_value)
+
+        return next_value
+
+    def add_used_value(self, value):
+        value = int(value)
+
+        if len(self._available_pool_values) == 0:
+            raise ParameterValueError("No more parameter values to allocate")
+
+        if value in self._used_pool_values:
+            raise ParameterValueError(
+                "Primitive value of {} was already used for pool name: {}".format(
+                    value,
+                    self._name,
+                )
+            )
+
+        if value != self._available_pool_values[0]:
+            raise ParameterValueError(
+                "Parameter value {} is not the next in the available list".format(value)
+            )
+
+        self._available_pool_values.pop(0)
+        self._used_pool_values.append(value)
+        self._save_used_pool_values()
+
+    def remove_used_value(self, value):
+        if value not in self._used_pool_values:
+            self._log.warning("Primitive value of %s was never allocated for pool name: %s",
+                              value, self._name
+                              )
+            return
+
+        self._used_pool_values.remove(value)
+        self._available_pool_values.insert(0, value)
+        self._save_used_pool_values()
+
+
+if __name__ == "__main__":
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("config_value_pool.py")
+    name = str(uuid.uuid4())
+    param_pool = ParameterValuePool(logger, name, range(1000, 2000))
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1000
+
+    param_pool.add_used_value(a)
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1001
+    param_pool.add_used_value(a)
+
+    # Values 1000 and 1001 were persisted as used, so a fresh pool instance
+    # backed by the same file resumes at 1002.
+    param_pool = ParameterValuePool(logger, name, range(1000, 2000))
+    a = param_pool.get_next_unused_value()
+    assert a == 1002
+
+    try:
+        param_pool.add_used_value(1004)
+    except ParameterValueError:
+        pass
+    else:
+        assert False
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1002
+
+
param_pool.add_used_value(1002) + + param_pool = ParameterValuePool(logger, name, range(1005, 2000)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py new file mode 100644 index 0000000..07efb52 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/juju_intf.py @@ -0,0 +1,634 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +# Part of the code taken from +# https://github.com/chuckbutler/juju_action_api_class/blob/master/juju_actions.py + +import asyncio +import jujuclient +import os +import ssl +import sys +import time + + +class Action(object): + def __init__(self, data): + # I am undecided if we need this + # model_id = "" + self.uuid = data['action']['tag'] + self.data = data # straight from juju api + self.juju_status = data['status'] + + @classmethod + def from_data(cls, data): + o = cls(data=data) + return o + + +def get_service_units(status): + results = {} + services = status.get('Services', {}) + for svc_name, svc_data in services.items(): + units = svc_data['Units'] or {} + sub_to = svc_data['SubordinateTo'] + if not units and sub_to: + for sub in sub_to: + for unit_name, unit_data in \ + (services[sub].get('Units') or {}).items(): + for sub_name, sub_data in \ + (unit_data['Subordinates'] or {}).items(): + if sub_name.startswith(svc_name): + units[sub_name] = sub_data + results[svc_name] = units + return results + + +class ApiEnvironment(jujuclient.Environment): + def actions_available(self, service=None): + args = { + "Type": 'Action', + "Request": 'ServicesCharmActions', + "Params": { + "Entities": [] + } + } + + services = self.status().get('Services', {}) + service_names = [service] if service else services + for name in service_names: + args['Params']['Entities'].append( + { + "Tag": 'service-' + name + } + ) + + return self._rpc(args) + + def actions_list_all(self, service=None): + args = { + "Type": 'Action', + "Request": 'ListAll', + "Params": { + "Entities": [] + } + } + + service_units = get_service_units(self.status()) + service_names = [service] if service else service_units.keys() + units = [] + + for name in service_names: + units += service_units[name].keys() + + for unit in set(units): + args['Params']['Entities'].append( + { + "Tag": "unit-%s" % unit.replace('/', '-'), + } + ) + + return self._rpc(args) + + def actions_enqueue(self, action, receivers, params=None): + args = { + "Type": "Action", + "Request": "Enqueue", + "Params": { + "Actions": [] + } + } + + for receiver in receivers: + args['Params']['Actions'].append({ + "Receiver": receiver, + "Name": action, + "Parameters": params or {}, + }) + + return self._rpc(args) + + def actions_cancel(self, uuid): + return self._rpc({ + 'Type': 'Action', + 'Request': 'Cancel', + "Params": { + "Entities": [{'Tag': 'action-' + uuid}] + } + }) + + +def 
_parse_action_specs(api_results): + results = {} + + r = api_results['results'] + for service in r: + servicetag = service['servicetag'] + service_name = servicetag[8:] # remove 'service-' prefix + specs = {} + if service['actions']['ActionSpecs']: + for spec_name, spec_def in \ + service['actions']['ActionSpecs'].items(): + specs[spec_name] = ActionSpec(spec_name, spec_def) + results[service_name] = specs + return results + + +def _parse_action_properties(action_properties_dict): + results = {} + + d = action_properties_dict + for prop_name, prop_def in d.items(): + results[prop_name] = ActionProperty(prop_name, prop_def) + return results + + +class Dict(dict): + def __getattr__(self, name): + return self[name] + + +class ActionSpec(Dict): + def __init__(self, name, data_dict): + params = data_dict['Params'] + super(ActionSpec, self).__init__( + name=name, + title=params['title'], + description=params['description'], + properties=_parse_action_properties(params['properties']) + ) + + +class ActionProperty(Dict): + types = { + 'string': str, + 'integer': int, + 'boolean': bool, + 'number': float, + } + type_checks = { + str: 'string', + int: 'integer', + bool: 'boolean', + float: 'number', + } + + def __init__(self, name, data_dict): + super(ActionProperty, self).__init__( + name=name, + description=data_dict.get('description', ''), + default=data_dict.get('default', ''), + type=data_dict.get( + 'type', self._infer_type(data_dict.get('default'))), + ) + + def _infer_type(self, default): + if default is None: + return 'string' + for _type in self.type_checks: + if isinstance(default, _type): + return self.type_checks[_type] + return 'string' + + def to_python(self, value): + f = self.types.get(self.type) + return f(value) if f else value + + +class JujuApi(object): + + def __init__ (self, log, server, port, user, secret, loop): + ''' Connect to the juju host ''' + self.log = log + self.server = server + self.user = user + self.port = port + self.secret = secret + self.loop = loop + endpoint = 'wss://%s:%d' % (server.split()[0], int(port)) + self.endpoint = endpoint + self.env = ApiEnvironment(endpoint) + self.env.login(secret, user=user) + self.deploy_timeout = 600 + # Check python version and setup up SSL + if sys.version_info >= (3,4): + # This is needed for python 3.4 above as by default certificate + # validation is enabled + ssl._create_default_https_context = ssl._create_unverified_context + + def reconnect(self): + ''' Reconnect on error cases''' + self.log.info("Juju: try reconnect to endpoint {}". + format(self.endpoint)) + try: + self.env.close() + del self.env + except Exception as e: + self.log.debug("Juju: env close threw e {}". + format(e)) + self.log.exception(e) + + try: + self.env = ApiEnvironment(self.endpoint) + self.env.login(self.secret, user=self.user) + self.log.info("Juju: reconnected to endpoint {}". + format(self.endpoint)) + except Exception as e: + self.log.error("Juju: exception in reconnect e={}".format(e)) + self.log.exception(e) + + + def get_status(self): + try: + status = self.env.status() + return status + except Exception as e: + self.log.error("Juju: exception while getting status: {}".format(e)) + self.log.exception(e) + self.reconnect() + return None + + def get_annotations(self, services): + ''' + Return dict of (servicename: annotations) for each servicename + in `services`. 
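+
+        Example (illustrative; 'mysql' is a hypothetical service name):
+            get_annotations(['mysql'])
+            # => {'mysql': {<annotation key/value pairs>}}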
+ ''' + if not services: + return None + + d = {} + for s in services: + d[s] = self.env.get_annotation(s, 'service')['Annotations'] + return d + + def get_actions(self, service=None): + return self.env.actions_list_all(service) + + def get_action_status(self, action_tag): + ''' + responds with the action status, which is one of three values: + + - completed + - pending + - failed + + @param action_tag - the action UUID return from the enqueue method + eg: action-3428e20d-fcd7-4911-803b-9b857a2e5ec9 + ''' + try: + receiver = self.get_actions() + except Exception as e: + self.log.error("Juju: exception is get actions: {}".format(e)) + self.log.exception(e) + + try: + for receiver in receiver['actions']: + if 'actions' in receiver.keys(): + for action_record in receiver['actions']: + if 'action' in action_record.keys(): + if action_record['action']['tag'] == action_tag: + return action_record['status'] + except Exception as e: + self.log.error("Juju: exception in get action status {}".format(e)) + self.log.exception(e) + + def cancel_action(self, uuid): + return self.env.actions_cancel(uuid) + + def get_service_units(self): + return get_service_units(self.get_status()) + + def get_action_specs(self): + results = self.env.actions_available() + return _parse_action_specs(results) + + def enqueue_action(self, action, receivers, params): + try: + result = self.env.actions_enqueue(action, receivers, params) + resp = Action.from_data(result['results'][0]) + return resp + except Exception as e: + self.log.error("Juju: Exception enqueing action {} on units {} with params {}: {}". + format(action, receivers, params, e)) + self.log.exception(e) + return None + + @asyncio.coroutine + def is_deployed(self, service): + return self._is_deployed(service) + + def _is_deployed(self, service, status=None): + status = self.get_service_status(service, status=status) + if status not in ['terminated', 'NA']: + return True + + return False + + def get_service_status(self, service, status=None): + ''' Get service status: + maintenance : The unit is not yet providing services, but is actively doing stuff. + unknown : Service has finished an event but the charm has not called status-set yet. + waiting : Service is unable to progress to an active state because of dependency. + blocked : Service needs manual intervention to get back to the Running state. + active : Service correctly offering all the services. + None : Service is not deployed + *** Make sure this is NOT a asyncio coroutine function *** + ''' + try: + #self.log.debug ("In get service status for service %s, %s" % (service, services)) + if status is None: + status = self.get_status() + if status: + srv_status = status['Services'][service]['Status']['Status'] + return srv_status + except KeyError as e: + self.log.info("Juju: Did not find service {}, e={}".format(service, e)) + except Exception as e: + self.log.error("Juju: exception checking service status for {}, e {}". 
+                           format(service, e))
+
+        return 'NA'
+
+    def is_service_active(self, service):
+        if self.get_service_status(service) == 'active':
+            self.log.debug("Juju: service is active for %s " % service)
+            return True
+
+        return False
+
+    def is_service_blocked(self, service):
+        if self.get_service_status(service) == 'blocked':
+            return True
+
+        return False
+
+    def is_service_up(self, service):
+        # Call get_service_status with the service name; comparing the bound
+        # method itself (as before) never matches a status string
+        if self.get_service_status(service) in ['active', 'blocked']:
+            return True
+
+        return False
+
+    def is_service_in_error(self, service):
+        # Call get_service_status and return the result explicitly
+        if self.get_service_status(service) == 'error':
+            self.log.debug("Juju: service is in error state for %s" % service)
+            return True
+
+        return False
+
+    def wait_for_service(self, service):
+        # Check if the agent for the unit is up, wait for units does not wait for service to be up
+        # TBD: Should add a timeout, so we do not wait endlessly
+        # Runs blocking (callers invoke it from an executor thread), so sleep
+        # with time.sleep() rather than yielding on the asyncio loop
+        waiting = True
+        delay = 5  # seconds
+        self.log.debug("Juju: In wait for service %s" % service)
+        while waiting:
+            if self.is_service_up(service):
+                return
+            time.sleep(delay)
+
+    @asyncio.coroutine
+    def apply_config(self, service, config):
+        return self._apply_config(service, config)
+
+    def _apply_config(self, service, config):
+        if config is None or len(config) == 0:
+            self.log.warn("Juju: Empty config passed for service %s" % service)
+            return False
+        if not self._is_deployed(service):
+            self.log.warn("Juju: Charm service %s not deployed" % (service))
+            return False
+        self.log.debug("Juju: Config for {} updated to: {}".format(service, config))
+        try:
+            # Try to fix error on service, most probably due to config issue
+            if self.is_service_in_error(service):
+                self.resolve_error(service)
+            self.env.set_config(service, config)
+            return True
+        except Exception as e:
+            self.log.error("Juju: exception setting config for {} with {}, e {}".
+                           format(service, config, e))
+            self.reconnect()
+        return False
+
+    @asyncio.coroutine
+    def set_parameter(self, service, parameter, value):
+        return self.apply_config(service, {parameter: value})
+
+    @asyncio.coroutine
+    def deploy_service(self, charm, service, config=None, wait=False):
+        self._deploy_service(charm, service, config=config, wait=wait)
+
+    def _deploy_service(self, charm, service, config=None, wait=False):
+        self.log.debug("Juju: Deploy service for charm %s with service %s" %
+                       (charm, service))
+        if self._is_deployed(service):
+            self.log.info("Juju: Charm service %s already deployed" % (service))
+            if config:
+                self._apply_config(service, config)
+            return 'deployed'
+        series = "trusty"
+        deploy_to = "lxc:0"
+        directory = "usr/rift/charms/%s/%s" % (series, charm)
+        prefix = ''
+        try:
+            prefix = os.environ.get('RIFT_INSTALL')
+        except KeyError:
+            self.log.info("Juju: RIFT_INSTALL not set in environment")
+        directory = "%s/%s" % (prefix, directory)
+
+        try:
+            self.log.debug("Juju: Local charm settings: dir=%s, series=%s" %
+                           (directory, series))
+            result = self.env.add_local_charm_dir(directory, series)
+            url = result['CharmURL']
+
+        except Exception as e:
+            self.log.critical('Juju: Error setting local charm directory {}: {}'.
+                              format(service, e))
+            self.log.exception(e)
+            self.reconnect()
+            return 'error'
+
+        try:
+            self.log.debug("Juju: Deploying using: service={}, url={}, to={}, config={}".
+                           format(service, url, deploy_to, config))
+            if config:
+                self.env.deploy(service, url, machine_spec=deploy_to, config=config)
+            else:
+                self.env.deploy(service, url, machine_spec=deploy_to)
+        except Exception as e:
+            self.log.warn('Juju: Error deploying {}: {}'.format(service, e))
+            if not self._is_deployed(service):
+                self.log.critical("Juju: Service {} is not deployed".format(service))
+                self.reconnect()
+                return 'error'
+
+        if wait:
+            # Wait for the deployed units to start
+            try:
+                self.log.debug("Juju: Waiting for charm %s to come up" % service)
+                self.env.wait_for_units(timeout=self.deploy_timeout)
+            except Exception as e:
+                self.log.critical('Juju: Error starting all units for {}: {}'.
+                                  format(service, e))
+                self.log.exception(e)
+                self.reconnect()
+                return 'error'
+
+        self.wait_for_service(service)
+        return 'deploying'
+
+    @asyncio.coroutine
+    def execute_actions(self, service, action, params, wait=False, bail=False):
+        return self._execute_actions(service, action, params, wait=wait, bail=bail)
+
+    def _execute_actions(self, service, action, params, wait=False, bail=False):
+        tags = []
+        try:
+            services = get_service_units(self.env.status())
+            depl_units = services[service]
+        except KeyError as e:
+            self.log.error("Juju: Unable to get service units for {}, e={}".
+                           format(service, e))
+            return tags
+        except Exception as e:
+            self.log.error("Juju: Error on getting service details for service {}, e={}".
+                           format(service, e))
+            self.log.exception(e)
+            self.reconnect()
+            return tags
+
+        # Go through each unit deployed and apply the actions to the unit
+        for unit, status in depl_units.items():
+            self.log.debug("Juju: Execute on unit {} with {}".
+                           format(unit, status))
+            idx = int(unit[unit.index('/')+1:])
+            self.log.debug("Juju: Unit index is %d" % idx)
+
+            unit_name = "unit-%s-%d" % (service, idx)
+            self.log.debug("Juju: Sending action: {}, {}, {}".
+                           format(action, unit_name, params))
+            try:
+                result = self.enqueue_action(action, [unit_name], params)
+                if result:
+                    tags.append(result.uuid)
+                else:
+                    self.log.error("Juju: Error applying the action {} on {} with params {}".
+                                   format(action, unit, params))
+            except Exception as e:
+                self.log.error("Juju: Error applying the action {} on {} with params {}, e={}".
+                               format(action, unit, params, e))
+                self.log.exception(e)
+                self.reconnect()
+
+            # act_status = 'pending'
+            # #self.log.debug("Juju: Action %s status is %s on %s" % (action, act_status, unit))
+            # while wait and ((act_status == 'pending') or (act_status == 'running')):
+            #     act_status = self.get_action_status(result.uuid)
+            #     self.log.debug("Juju: Action %s status is %s on %s" % (action, act_status, unit))
+            #     if bail and (act_status == 'failed'):
+            #         self.log.error("Juju: Error applying action %s on %s with %s" % (action, unit, params))
+            #         raise RuntimeError("Juju: Error applying action %s on %s with %s" % (action, unit, params))
+            #     yield from asyncio.sleep(1, loop=self.loop)
+
+        return tags
+
+    def get_service_units_status(self, service, status):
+        units_status = {}
+        if status is None:
+            return units_status
+        try:
+            units = get_service_units(status)[service]
+            for name, data in units.items():
+                # Action rpc require unit name as unit-service-index
+                # while resolved API require unit name as service/index
+                #idx = int(name[name.index('/')+1:])
+                #unit = "unit-%s-%d" % (service, idx)
+                units_status.update({name: data['Workload']['Status']})
+        except KeyError:
+            pass
+        except Exception as e:
+            self.log.error("Juju: service unit status for service {}, e={}".
+ format(service, e)) + self.log.exception(e) + self.log.debug("Juju: service unit status for service {}: {}". + format(service, units_status)) + return units_status + + def resolve_error(self, service, status=None): + if status is None: + status = self.get_status() + + if status is None: + return + + srv_status = self.get_service_status(service, status) + if srv_status and srv_status not in ['terminated', 'NA']: + units = self.get_service_units_status(service, status) + for unit, ustatus in units.items(): + if ustatus == 'error': + self.log.info("Juju: Found unit %s with status %s" % + (unit, ustatus)) + try: + # Takes the unit name as service_name/idx unlike action + self.env.resolved(unit) + except Exception as e: + self.log.debug("Juju: Exception when running resolve on unit {}: {}". + format(unit, e)) + self.log.exception(e) + + + @asyncio.coroutine + def destroy_service(self, service): + self._destroy_service(service) + + def _destroy_service(self, service): + ''' Destroy juju service + *** Do NOT add aysncio yield on this function, run in separate thread *** + ''' + self.log.debug("Juju: Destroy charm service: %s" % service) + status = self.get_status() + if status is None: + return + + srv_status = self.get_service_status(service, status) + count = 0 + while srv_status and srv_status not in ['terminated', 'NA']: + count += 1 + self.log.debug("Juju: service %s is in %s state, count %d" % + (service, srv_status, count)) + if count > 25: + self.log.error("Juju: Not able to destroy service %s, status %s after %d tries" % + (service, srv_status, count)) + break + + self.resolve_error(service, status) + + try: + self.env.destroy_service(service) + except Exception as e: + self.log.debug("Juju: Exception when running destroy on service {}: {}". + format(service, e)) + self.log.exception(e) + self.reconnect() + + time.sleep(3) + status = self.get_status() + if status is None: + return + srv_status = self.get_service_status(service, status) + + self.log.debug("Destroyed service %s (%s)" % (service, srv_status)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py new file mode 100644 index 0000000..9654cda --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/jujuconf_nsm.py @@ -0,0 +1,726 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import concurrent.futures +import re +import tempfile +import yaml + +from gi.repository import ( + RwDts as rwdts, +) + +from . import juju_intf +from . import rwnsmconfigplugin + + +# Charm service name accepts only a to z and -. 
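+# For example (worked by hand from the function below):
+#     get_vnf_unique_name("ns1", "vnf_a", 2)  ==>  "nsb-vnf-a-c"
+# each digit maps to a letter ('1' -> 'b', '2' -> 'c') and every other
+# non-alphabetic character becomes '-'.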
+def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index): + name = "{}-{}-{}".format(nsr_name, vnfr_short_name, member_vnf_index) + new_name = '' + for c in name: + if c.isdigit(): + c = chr(97 + int(c)) + elif not c.isalpha(): + c = "-" + new_name += c + return new_name.lower() + +class JujuExecuteHelper(object): + ''' Run Juju API calls that dwe do not need to wait for response ''' + def __init__(self, log, loop): + self._log = log + self._loop = loop + self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + + @property + def loop(self): + return self._loop + + @property + def log(self): + return self._log + + @property + def executor(self): + return self._executor + + @asyncio.coroutine + def deploy_service(self, api, charm, service): + self.log.debug("Deploying service using %s as %s" % (charm, service)) + try: + rc = yield from self.loop.run_in_executor( + self.executor, + api._deploy_service, + charm, service + ) + self.log.info("Deploy service {} returned {}".format(service, rc)) + except Exception as e: + self.log.error("Error deploying service {}, e={}". + format(service, e)) + self.log.debug("Deployed service using %s as %s " % (charm, service)) + + @asyncio.coroutine + def destroy_service(self, api, service): + self.log.debug("Destroying service %s" % (service)) + rc = yield from self.loop.run_in_executor( + self.executor, + api._destroy_service, + service + ) + self.log.debug("Destroyed service {} ({})".format(service, rc)) + + +class JujuNsmConfigPlugin(rwnsmconfigplugin.NsmConfigPluginBase): + """ + Juju implementation of the NsmConfPluginBase + """ + def __init__(self, dts, log, loop, publisher, account): + rwnsmconfigplugin.NsmConfigPluginBase.__init__(self, dts, log, loop, publisher, account) + self._name = account.name + self._ip_address = account.juju.ip_address + self._port = account.juju.port + self._user = account.juju.user + self._secret = account.juju.secret + self._juju_vnfs = {} + self._helper = JujuExecuteHelper(log, loop) + self._tasks = {} + + @asyncio.coroutine + def _get_api(self): + # Create an juju api instance + try: + self._log.debug("Juju config agent: Create API for {}:{}". + format(self._ip_address, self._port)) + api = yield from self._loop.run_in_executor( + None, + juju_intf.JujuApi, + self._log, self._ip_address, + self._port, self._user, self._secret, + self.loop + ) + if not isinstance(api, juju_intf.JujuApi): + self._log.error("Juju config agent: Did not get JujuApi instance: {}". + format(api)) + api = None + except Exception as e: + self._log.critical("Juju config agent: Instantiate API exception: {}". + format(e)) + self._log.exception(e) + api = None + + return api + + def _get_api_blocking(self): + # Create an juju api instance + try: + self._log.debug("Juju config agent: Blocking create API for {}:{}". + format(self._ip_address, self._port)) + api = juju_intf.JujuApi(self._log, self._ip_address, + self._port, self._user, self._secret, + self.loop) + if not isinstance(api, juju_intf.JujuApi): + self._log.error("Juju config agent: Did not get JujuApi instance blocking: {}". + format(api)) + api = None + except Exception as e: + self._log.critical("Juju config agent: Instantiate API exception blocking: {}". 
+                               format(e))
+            self._log.exception(e)
+            api = None
+
+        return api
+
+    # TBD: Do a better job, similar to config manager
+    def xlate(self, tag, tags):
+        # TBD
+        if tag is None:
+            return tag
+        val = tag
+        if re.search('<.*>', tag):
+            self._log.debug("Juju config agent: Xlate value %s" % tag)
+            try:
+                if tag == '<rw_mgmt_ip>':
+                    val = tags['rw_mgmt_ip']
+            except KeyError as e:
+                self._log.info("Juju config agent: Did not get a value for tag %s, e=%s" % (tag, e))
+        return val
+
+    @asyncio.coroutine
+    def notify_create_nsr(self, nsr, nsd):
+        """
+        Notification of create Network service record
+        """
+        pass
+
+
+    @asyncio.coroutine
+    def notify_create_vls(self, nsr, vld, vlr):
+        """
+        Notification of create VL record
+        """
+        pass
+
+    @asyncio.coroutine
+    def notify_create_vnfr(self, nsr, vnfr):
+        """
+        Notification of create Network VNF record
+        Returns True if configured using config_agent
+        """
+        # Deploy the charm if specified for the vnf
+        self._log.debug("Juju config agent: create vnfr nsr=%s vnfr=%s" % (nsr, vnfr.name))
+        self._log.debug("Juju config agent: Const = %s" % (vnfr._const_vnfd))
+        try:
+            vnf_config = vnfr._const_vnfd.vnf_configuration
+            self._log.debug("Juju config agent: vnf_configuration = %s", vnf_config)
+            if vnf_config.config_type != 'juju':
+                return False
+            charm = vnf_config.juju.charm
+            self._log.debug("Juju config agent: charm = %s", charm)
+        except Exception as e:
+            self._log.debug("Juju config agent: vnf_configuration error for vnfr {}: {}".
+                            format(vnfr.name, e))
+            return False
+
+        # Prepare unique name for this VNF
+        vnf_unique_name = get_vnf_unique_name(vnfr._nsr_name, vnfr.vnfd.name, vnfr.member_vnf_index)
+        if vnf_unique_name in self._juju_vnfs:
+            self._log.warn("Juju config agent: Service %s already deployed" % (vnf_unique_name))
+
+        self._juju_vnfs.update({vnfr.id: {'name': vnf_unique_name, 'charm': charm,
+                                          'nsr_id': nsr, 'member_vnf_index': vnfr.member_vnf_index,
+                                          'xpath': vnfr.xpath, 'tags': {},
+                                          'active': False, 'config': vnf_config,
+                                          'vnfr_name': vnfr.name}})
+        self._log.debug("Juju config agent: Charm %s for vnf %s to be deployed as %s" %
+                        (charm, vnfr.name, vnf_unique_name))
+
+        try:
+            if vnf_unique_name not in self._tasks:
+                self._tasks[vnf_unique_name] = {}
+            api = yield from self._get_api()
+            if api:
+                self._tasks[vnf_unique_name]['deploy'] = self.loop.create_task(
+                    self._helper.deploy_service(api, charm, vnf_unique_name)
+                )
+                self._log.debug("Juju config agent: Deployed service %s" % vnf_unique_name)
+            else:
+                self._log.error("Juju config agent: Unable to get API for deploy")
+        except Exception as e:
+            self._log.critical("Juju config agent: Unable to deploy service {} for charm {}: {}".
+ format(vnf_unique_name, charm, e)) + self.log.exception(e) + + return True + + @asyncio.coroutine + def notify_instantiate_ns(self, nsr): + """ + Notification of NSR instantiationwith the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_instantiate_vnf(self, nsr, vnfr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_instantiate_vl(self, nsr, vlr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_nsr_active(self, nsr, vnfrs): + """ Notify instantiate of the virtual link""" + for vnf in vnfrs.values(): + self._log.debug("Juju config agent: ns active VNF %s" % vnf.name) + try: + if vnf.id in self._juju_vnfs.keys(): + #self._log.debug("Juju config agent: Fetching VNF: %s in NS %s", vnf.name, nsr) + # vnfr = yield from self.fetch_vnfr(vnf.xpath) + + # Check if the deploy is done + if self.check_task_status(self._juju_vnfs[vnf.id]['name'], 'deploy'): + # apply initial config for the vnfr + yield from self.apply_initial_config(vnf.id, vnf) + else: + self._log.info("Juju config agent: service not yet deployed for %s" % vnf.name) + except Exception as e: + self._log.error("Juju config agent: ns active VNF {}, e {}".format(vnf.name, e)) + self._log.exception(e) + + @asyncio.coroutine + def notify_terminate_ns(self, nsr): + """ + Notification of Terminate the network service + """ + pass + + @asyncio.coroutine + def notify_terminate_vnf(self, nsr, vnfr, xact): + """ + Notification of Terminate the network service + """ + self._log.debug("Juju config agent: Terminate VNFr {}, current vnfrs={}". + format(vnfr.name, self._juju_vnfs)) + try: + api = yield from self._get_api() + vnf = self._juju_vnfs[vnfr.id] + service = vnf['name'] + if api: + self._log.debug ("Juju config agent: Terminating VNFr %s, %s" % + (vnfr.name, service)) + self._tasks[service]['destroy'] = self.loop.create_task( + self._helper.destroy_service(api, service) + ) + else: + self._log.error("Juju: Unable to get API for terminate") + del self._juju_vnfs[vnfr.id] + self._log.debug ("Juju config agent: current vnfrs={}". + format(self._juju_vnfs)) + if service in self._tasks: + tasks = [] + for action in self._tasks[service].keys(): + #if self.check_task_status(service, action): + tasks.append(action) + del tasks + except KeyError as e: + self._log.debug ("Juju config agent: Termiating charm service for VNFr {}, e={}". + format(vnfr.name, e)) + except Exception as e: + self._log.error("Juju config agent: Exception terminating charm service for VNFR {}: {}". + format(vnfr.name, e)) + + @asyncio.coroutine + def notify_terminate_vl(self, nsr, vlr, xact): + """ + Notification of Terminate the virtual link + """ + pass + + def check_task_status(self, service, action): + #self.log.debug("Juju config agent: check task status for %s, %s" % (service, action)) + try: + task = self._tasks[service][action] + if task.done(): + self.log.debug("Juju config agent: Task for %s, %s done" % (service, action)) + e = task.exception() + if e: + self.log.error("Juju config agent: Error in task for {} and {} : {}". + format(service, action, e)) + r= task.result() + if r: + self.log.debug("Juju config agent: Task for {} and {}, returned {}". + format(service, action,r)) + return True + else: + self.log.debug("Juju config agent: task {}, {} not done". + format(service, action)) + return False + except KeyError as e: + self.log.error("Juju config agent: KeyError for task for {} and {}: {}". 
+ format(service, action, e)) + except Exception as e: + self.log.error("Juju config agent: Error for task for {} and {}: {}". + format(service, action, e)) + return True + + @asyncio.coroutine + def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output): + self._log.debug("Juju config agent: VNF config primititve {} for nsr {}, vnfr_id {}". + format(primitive, nsr_id, vnfr_id)) + output.execution_status = "failed" + output.execution_id = '' + api = None + try: + vnfr = self._juju_vnfs[vnfr_id] + except KeyError: + self._log.error("Juju config agent: Did not find VNFR %s in juju plugin" % vnfr_id) + return + + try: + service = vnfr['name'] + vnf_config = vnfr['config'] + self._log.debug("VNF config %s" % vnf_config) + configs = vnf_config.config_primitive + for config in configs: + if config.name == primitive.name: + self._log.debug("Juju config agent: Found the config primitive %s" % config.name) + params = {} + for parameter in primitive.parameter: + if parameter.value: + val = self.xlate(parameter.value, vnfr['tags']) + # TBD do validation of the parameters + data_type = 'string' + found = False + for ca_param in config.parameter: + if ca_param.name == parameter.name: + data_type = ca_param.data_type + found = True + break + if data_type == 'integer': + val = int(parameter.value) + if not found: + self._log.warn("Juju config agent: Did not find parameter {} for {}". + format(parameter, config.name)) + params.update({parameter.name: val}) + if config.name == 'config': + if len(params): + self._log.debug("Juju config agent: applying config with params {} for service {}". + format(params, service)) + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API handle present for {}". + format(vnfr['name'])) + return + + rc = yield from self._loop.run_in_executor( + None, + api._apply_config, + service, + params + ) + if rc: + output.execution_status = "completed" + self._log.debug("Juju config agent: applied config {} on {}". + format(params, service)) + # Removing this as clearwater has fixed its config hook + # Sleep for sometime for the config to take effect + # self._log.debug("Juju config agent: Wait sometime for config to take effect") + # yield from self._loop.run_in_executor( + # None, + # time.sleep, + # 30 + # ) + # self._log.debug("Juju config agent: Wait over for config to take effect") + else: + output.execution_status = 'failed' + self._log.error("Juju config agent: Error applying config {} on service {}". + format(params, service)) + else: + self._log.warn("Juju config agent: Did not find valid paramaters for config : {}". + format(primitive.parameter)) + else: + self._log.debug("Juju config agent: Execute action {} on service {} with params {}". + format(config.name, service, params)) + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API handle present for {}". + format(vnfr['name'])) + return + tags = yield from self._loop.run_in_executor( + None, + api._execute_actions, + service, config.name, params + ) + if len(tags): + output.execution_id = tags[0] + output.execution_status = api.get_action_status(tags[0]) + self._log.debug("Juju config agent: excute action {} on service {} returned {}". + format(config.name, service, output.execution_status)) + else: + self._log.error("Juju config agent: error executing action {} for {} with {}". 
+ format(config.name, service, params)) + output.execution_id = '' + output.execution_status = 'failed' + break + except KeyError as e: + self._log.info("VNF %s does not have config primititves, e=%s" % (vnfr_id, e)) + + @asyncio.coroutine + def apply_config(self, rpc_ip, nsr, vnfrs): + """Hook: Runs the user defined script. Feeds all the necessary data + for the script thro' yaml file. + + Args: + rpc_ip (YangInput_Nsr_ExecNsConfigPrimitive): The input data. + nsr (NetworkServiceRecord): Description + vnfrs (dict): VNFR ID => VirtualNetworkFunctionRecord + """ + def get_meta(vnfrs): + unit_names, initial_params, vnfr_index_map = {}, {}, {} + + for vnfr_id, juju_vnf in self._juju_vnfs.items(): + # Only add vnfr info for vnfs in this particular nsr + if vnfr_id not in nsr.vnfrs: + continue + # Vnfr -> index ref + vnfr_index_map[vnfr_id] = juju_vnf['member_vnf_index'] + + # Unit name + unit_names[vnfr_id] = juju_vnf['name'] + + # Flatten the data for simplicity + param_data = {} + for primitive in juju_vnf['config'].initial_config_primitive: + for parameter in primitive.parameter: + value = self.xlate(parameter.value, juju_vnf['tags']) + param_data[parameter.name] = value + + initial_params[vnfr_id] = param_data + + + return unit_names, initial_params, vnfr_index_map + + for vnfr_id, vnf in self._juju_vnfs.items(): + print (vnf['config'].as_dict()) + + unit_names, init_data, vnfr_index_map = get_meta(vnfrs) + + # The data consists of 4 sections + # 1. Account data + # 2. The input passed. + # 3. Juju unit names (keyed by vnfr ID). + # 4. Initial config data (keyed by vnfr ID). + data = dict() + data['config_agent'] = dict( + name=self._name, + host=self._ip_address, + port=self._port, + user=self._user, + secret=self._secret + ) + data["rpc_ip"] = rpc_ip.as_dict() + data["unit_names"] = unit_names + data["init_config"] = init_data + data["vnfr_index_map"] = vnfr_index_map + + tmp_file = None + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + tmp_file.write(yaml.dump(data, default_flow_style=True) + .encode("UTF-8")) + + self._log.debug("Juju config agent: Creating a temp file: {} with input data".format( + tmp_file.name)) + + cmd = "{} {}".format(rpc_ip.user_defined_script, tmp_file.name) + self._log.debug("Juju config agent: Running the CMD: {}".format(cmd)) + + coro = asyncio.create_subprocess_shell(cmd, loop=self._loop) + process = yield from coro + task = self._loop.create_task(process.wait()) + + return task + + @asyncio.coroutine + def fetch_vnfr(self, vnfr_path): + """ Fetch VNFR record """ + vnfr = None + self._log.debug("Juju config agent: Fetching VNFR with key %s", vnfr_path) + res_iter = yield from self._dts.query_read(vnfr_path, rwdts.Flag.MERGE) + + for ent in res_iter: + res = yield from ent + vnfr = res.result + + return vnfr + + @asyncio.coroutine + def apply_initial_config(self, vnf_id, vnf): + """ + Apply the initial configuration + Expect config directives mostly, not actions + Actions in initial config may not work based on charm design + """ + try: + tags = [] + vnfr = self._juju_vnfs[vnf_id] + api = None + + vnf_cat = yield from self.fetch_vnfr(vnf.xpath) + if vnf_cat and vnf_cat.mgmt_interface.ip_address: + vnfr['tags'].update({'rw_mgmt_ip': vnf_cat.mgmt_interface.ip_address}) + config = {} + try: + for primitive in vnfr['config'].initial_config_primitive: + self._log.debug("Initial config %s" % (primitive)) + if primitive.name == 'config': + for param in primitive.parameter: + if vnfr['tags']: + val = self.xlate(param.value, vnfr['tags']) + 
config.update({param.name: val}) + except KeyError as e: + self._log.exception("Juju config agent: Initial config error: config=%s" % config) + config = None + + self._log.debug("Juju config agent: Applying initial config for {} as {}". + format(vnfr['name'], config)) + if config: + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API available for apply initial config") + return + yield from self._loop.run_in_executor( + None, + api._apply_config, + vnfr['name'], + config + ) + + # Apply any actions specified as part of initial config + for primitive in vnfr['config'].initial_config_primitive: + self._log.debug("Juju config agent: Initial config %s" % (primitive)) + if primitive.name != 'config': + action = primitive.name + params = {} + for param in primitive.parameter: + val = self.xlate(param.value, self._juju_vnfs[vnf_id]['tags']) + params.update({param.name: val}) + + self._log.debug("Juju config agent: Action %s with params %s" % (action, params)) + if api is None: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: No API available for apply initial config actions") + return + tag = yield from self._loop.run_in_executor( + None, + api._execute_actions, + vnfr['name'], + action, params + ) + tags.append(tag) + + except KeyError as e: + self._log.info("Juju config agent: VNFR %s not managed by Juju" % (vnf_id)) + except Exception as e: + self._log.exception("Juju config agent: Exception juju apply_initial_config for VNFR {}: {}". + format(vnf_id, e)) + return tags + + def is_vnfr_managed(self, vnfr_id): + try: + if vnfr_id in self._juju_vnfs: + return True + except Exception as e: + self._log.debug("Juju config agent: Is VNFR {} managed: {}". + format(vnfr_id, e)) + return False + + def is_service_active(self, service): + """ Is the juju service active """ + resp = False + try: + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: Unable to get API for checking service is active") + return resp + + for vnf in self._juju_vnfrs: + if vnf['name'] == service and api: + # Check if deploy is over + if self.check_task_status(service, 'deploy'): + resp = yield from self._loop.run_in_executor( + None, + api.is_service_active, + service + ) + self._log.debug("Juju config agent: Is the service %s active? %s", service, resp) + return resp + except KeyError: + self._log.error("Juju config agent: Check active unknown service ", service) + except Exception as e: + self._log.error("Juju config agent: Caught exception when checking for service is active: ", e) + self._log.exception(e) + return resp + + @asyncio.coroutine + def is_configured(self, vnfr_id): + try: + if self._juju_vnfs[vnfr_id]['active']: + return True + + service = self._juju_vnfs[vnfr_id]['name'] + resp = self.is_service_active(service) + self._juju_vnfs[vnfr_id]['active'] = resp + self._log.debug("Juju config agent: Service state for {} is {}". + format(service, resp)) + return resp + except KeyError: + self._log.debug("Juju config agent: VNFR id {} not found in config agent". + format(vnfr_id)) + return True + except Exception as e: + self._log.error("Juju config agent: VNFR id {} is_configured: {}". 
+ format(vnfr_id, e)) + return False + + @asyncio.coroutine + def get_status(self, vnfr_id): + resp = 'unknown' + try: + vnfr = self._juju_vnfs[vnfr_id] + if vnfr['active']: + return 'configured' + + service = vnfr['name'] + # Check if deploy is over + if self.check_task_status(service, 'deploy'): + api = yield from self._get_api() + if api is None: + self._log.error("Juju config agent: API not created for get status") + return 'failed' + + resp = yield from self._loop.run_in_executor( + None, + api.get_service_status, + service + ) + self._log.debug("Juju config agent: Service status for {} is {}". + format(service, resp)) + status = 'configuring' + if resp in ['active', 'blocked']: + vnfr['active'] = True + status = 'configured' + elif resp in ['error', 'NA']: + status = 'failed' + return status + except KeyError as e: + self._log.debug("Juju config agent: VNFR id {} not found in config agent, e={}". + format(vnfr_id, e)) + return 'configured' + except Exception as e: + self._log.error("Juju config agent: VNFR id {} gt_status, e={}". + format(vnfr_id, e)) + self._log.exception(e) + return resp + + def get_action_status(self, execution_id): + ''' Get the action status for an execution ID + *** Make sure this is NOT a asyncio coroutine function *** + ''' + api = self._get_api_blocking() + if api is None: + self._log.error("Juju config agent: Unable to get API in get_action_status") + return None + try: + return api.get_action_status(execution_id) + except Exception as e: + self._log.exception("Juju config agent: Error fetching execution status for %s", + execution_id) + self._log.exception(e) + return None \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py new file mode 100644 index 0000000..56b0346 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py @@ -0,0 +1,573 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import os +import sys +import time +import yaml + +import rift.openmano.rift2openmano as rift2openmano +import rift.openmano.openmano_client as openmano_client +from . 
import rwnsmplugin + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +DUMP_OPENMANO_DIR = os.path.join( + os.environ["RIFT_ARTIFACTS"], + "openmano_descriptors" + ) + + +def dump_openmano_descriptor(name, descriptor_str): + filename = "{}_{}.yaml".format( + time.strftime("%Y%m%d-%H%M%S"), + name + ) + + filepath = os.path.join( + DUMP_OPENMANO_DIR, + filename + ) + + try: + if not os.path.exists(DUMP_OPENMANO_DIR): + os.makedirs(DUMP_OPENMANO_DIR) + + with open(filepath, 'w') as hdl: + hdl.write(descriptor_str) + + except OSError as e: + print("Failed to dump openmano descriptor: %s" % str(e)) + + return filepath + + +class OpenmanoVnfr(object): + def __init__(self, log, loop, cli_api, vnfr): + self._log = log + self._loop = loop + self._cli_api = cli_api + self._vnfr = vnfr + self._vnfd_id = vnfr.vnfd.id + + self._vnf_id = None + + self._created = False + + @property + def vnfd(self): + return rift2openmano.RiftVNFD(self._vnfr.vnfd) + + @property + def vnfr(self): + return self._vnfr + + @property + def rift_vnfd_id(self): + return self._vnfd_id + + @property + def openmano_vnfd_id(self): + return self._vnf_id + + @property + def openmano_vnfd(self): + self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id) + openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd) + return openmano_vnfd + + @property + def openmano_vnfd_yaml(self): + return yaml.safe_dump(self.openmano_vnfd, default_flow_style=False) + + @asyncio.coroutine + def create(self): + self._log.debug("Creating openmano vnfd") + openmano_vnfd = self.openmano_vnfd + name = openmano_vnfd["vnf"]["name"] + + # If the name already exists, get the openmano vnfd id + name_uuid_map = yield from self._loop.run_in_executor( + None, + self._cli_api.vnf_list, + ) + + if name in name_uuid_map: + vnf_id = name_uuid_map[name] + self._log.debug("Vnf already created. Got existing openmano vnfd id: %s", vnf_id) + self._vnf_id = vnf_id + return + + self._vnf_id, _ = yield from self._loop.run_in_executor( + None, + self._cli_api.vnf_create, + self.openmano_vnfd_yaml, + ) + + fpath = dump_openmano_descriptor( + "{}_vnf".format(name), + self.openmano_vnfd_yaml + ) + + self._log.debug("Dumped Openmano VNF descriptor to: %s", fpath) + + self._created = True + + @asyncio.coroutine + def delete(self): + if not self._created: + return + + self._log.debug("Deleting openmano vnfd") + if self._vnf_id is None: + self._log.warning("Openmano vnf id not set. 
Cannot delete.") + return + + yield from self._loop.run_in_executor( + None, + self._cli_api.vnf_delete, + self._vnf_id, + ) + + +class OpenmanoNsr(object): + TIMEOUT_SECS = 120 + + def __init__(self, log, loop, publisher, cli_api, http_api, nsd_msg, nsr_config_msg): + self._log = log + self._loop = loop + self._publisher = publisher + self._cli_api = cli_api + self._http_api = http_api + + self._nsd_msg = nsd_msg + self._nsr_config_msg = nsr_config_msg + + self._vnfrs = [] + + self._nsd_uuid = None + self._nsr_uuid = None + + self._created = False + + self._monitor_task = None + + @property + def nsd(self): + return rift2openmano.RiftNSD(self._nsd_msg) + + @property + def vnfds(self): + return {v.rift_vnfd_id: v.vnfd for v in self._vnfrs} + + @property + def vnfrs(self): + return self._vnfrs + + @property + def openmano_nsd_yaml(self): + self._log.debug("Converting nsd %s from rift to openmano", self.nsd.id) + openmano_nsd = rift2openmano.rift2openmano_nsd(self.nsd, self.vnfds) + return yaml.safe_dump(openmano_nsd, default_flow_style=False) + + @asyncio.coroutine + def add_vnfr(self, vnfr): + vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr) + yield from vnfr.create() + self._vnfrs.append(vnfr) + + @asyncio.coroutine + def delete(self): + if not self._created: + self._log.debug("NSD wasn't created. Skipping delete.") + return + + self._log.debug("Deleting openmano nsr") + + yield from self._loop.run_in_executor( + None, + self._cli_api.ns_delete, + self._nsd_uuid, + ) + + self._log.debug("Deleting openmano vnfrs") + for vnfr in self._vnfrs: + yield from vnfr.delete() + + @asyncio.coroutine + def create(self): + self._log.debug("Creating openmano scenario") + name_uuid_map = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_list, + ) + + if self._nsd_msg.name in name_uuid_map: + self._log.debug("Found existing openmano scenario") + self._nsd_uuid = name_uuid_map[self._nsd_msg.name] + return + + + # Use the nsd uuid as the scenario name to rebind to existing + # scenario on reload or to support muliple instances of the name + # nsd + self._nsd_uuid, _ = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_create, + self.openmano_nsd_yaml, + self._nsd_msg.name + ) + fpath = dump_openmano_descriptor( + "{}_nsd".format(self._nsd_msg.name), + self.openmano_nsd_yaml, + ) + + self._log.debug("Dumped Openmano NS descriptor to: %s", fpath) + + self._created = True + + @asyncio.coroutine + def instance_monitor_task(self): + self._log.debug("Starting Instance monitoring task") + + vnfr_uuid_map = {} + start_time = time.time() + active_vnfs = [] + + while True: + yield from asyncio.sleep(1, loop=self._loop) + + try: + instance_resp_json = yield from self._loop.run_in_executor( + None, + self._http_api.get_instance, + self._nsr_uuid, + ) + + self._log.debug("Got instance response: %s for NSR ID %s", + instance_resp_json, + self._nsr_uuid) + + except openmano_client.InstanceStatusError as e: + self._log.error("Could not get NS instance status: %s", str(e)) + continue + + def all_vms_active(vnf): + for vm in vnf["vms"]: + vm_status = vm["status"] + vm_uuid = vm["uuid"] + if vm_status != "ACTIVE": + self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status) + return False + + return True + + def any_vm_active_nomgmtip(vnf): + for vm in vnf["vms"]: + vm_status = vm["status"] + vm_uuid = vm["uuid"] + if vm_status != "ACTIVE": + self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status) + return False + + return True + + def 
any_vms_error(vnf): + for vm in vnf["vms"]: + vm_status = vm["status"] + vm_vim_info = vm["vim_info"] + vm_uuid = vm["uuid"] + if vm_status == "ERROR": + self._log.error("VM Error: %s (vim_info: %s)", vm_uuid, vm_vim_info) + return True + + return False + + def get_vnf_ip_address(vnf): + if "ip_address" in vnf: + return vnf["ip_address"].strip() + return None + + def get_ext_cp_info(vnf): + cp_info_list = [] + for vm in vnf["vms"]: + if "interfaces" not in vm: + continue + + for intf in vm["interfaces"]: + if "external_name" not in intf: + continue + + if not intf["external_name"]: + continue + + ip_address = intf["ip_address"] + if ip_address is None: + ip_address = "0.0.0.0" + + cp_info_list.append((intf["external_name"], ip_address)) + + return cp_info_list + + def get_vnf_status(vnfr): + # When we create an openmano descriptor we use __ + # to come up with openmano constituent VNF name. Use this + # knowledge to map the vnfr back. + openmano_vnfr_suffix = "__{}".format( + vnfr.vnfr.vnfr.member_vnf_index_ref + ) + + for vnf in instance_resp_json["vnfs"]: + if vnf["vnf_name"].endswith(openmano_vnfr_suffix): + return vnf + + self._log.warning("Could not find vnf status with name that ends with: %s", + openmano_vnfr_suffix) + return None + + for vnfr in self._vnfrs: + if vnfr in active_vnfs: + # Skipping, so we don't re-publish the same VNF message. + continue + + vnfr_msg = vnfr.vnfr.vnfr.deep_copy() + vnfr_msg.operational_status = "init" + + try: + vnf_status = get_vnf_status(vnfr) + self._log.debug("Found VNF status: %s", vnf_status) + if vnf_status is None: + self._log.error("Could not find VNF status from openmano") + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + return + + # If there was a VNF that has a errored VM, then just fail the VNF and stop monitoring. + if any_vms_error(vnf_status): + self._log.debug("VM was found to be in error state. Marking as failed.") + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + return + + if all_vms_active(vnf_status): + vnf_ip_address = get_vnf_ip_address(vnf_status) + + if vnf_ip_address is None: + self._log.warning("No IP address obtained " + "for VNF: {}, will retry.".format( + vnf_status['vnf_name'])) + continue + + self._log.debug("All VMs in VNF are active. Marking as running.") + vnfr_msg.operational_status = "running" + + self._log.debug("Got VNF ip address: %s", vnf_ip_address) + vnfr_msg.mgmt_interface.ip_address = vnf_ip_address + vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = vnf_ip_address + + # Add connection point information for the config manager + cp_info_list = get_ext_cp_info(vnf_status) + for (cp_name, cp_ip) in cp_info_list: + cp = vnfr_msg.connection_point.add() + cp.name = cp_name + cp.short_name = cp_name + cp.ip_address = cp_ip + + yield from self._publisher.publish_vnfr(None, vnfr_msg) + active_vnfs.append(vnfr) + + if (time.time() - start_time) > OpenmanoNsr.TIMEOUT_SECS: + self._log.error("NSR timed out before reaching running state") + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + return + + except Exception as e: + vnfr_msg.operational_status = "failed" + yield from self._publisher.publish_vnfr(None, vnfr_msg) + self._log.exception("Caught exception publishing vnfr info: %s", str(e)) + return + + if len(active_vnfs) == len(self._vnfrs): + self._log.info("All VNF's are active. 
Exiting NSR monitoring task") + return + + @asyncio.coroutine + def deploy(self): + if self._nsd_uuid is None: + raise ValueError("Cannot deploy an uncreated nsd") + + self._log.debug("Deploying openmano scenario") + + name_uuid_map = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_instance_list, + ) + + + openmano_datacenter = None + if self._nsr_config_msg.has_field("om_datacenter"): + openmano_datacenter = self._nsr_config_msg.om_datacenter + + if self._nsr_config_msg.name in name_uuid_map: + self._log.debug("Found existing instance with nsr name: %s", self._nsr_config_msg.name) + self._nsr_uuid = name_uuid_map[self._nsr_config_msg.name] + + else: + self._nsr_uuid = yield from self._loop.run_in_executor( + None, + self._cli_api.ns_instantiate, + self._nsd_uuid, + self._nsr_config_msg.name, + openmano_datacenter + ) + + self._monitor_task = asyncio.ensure_future( + self.instance_monitor_task(), loop=self._loop + ) + + @asyncio.coroutine + def terminate(self): + if self._nsr_uuid is None: + self._log.warning("Cannot terminate an un-instantiated nsr") + return + + if self._monitor_task is not None: + self._monitor_task.cancel() + self._monitor_task = None + + self._log.debug("Terminating openmano nsr") + yield from self._loop.run_in_executor( + None, + self._cli_api.ns_terminate, + self._nsr_uuid, + ) + + +class OpenmanoNsPlugin(rwnsmplugin.NsmPluginBase): + """ + RW Implentation of the NsmPluginBase + """ + def __init__(self, dts, log, loop, publisher, cloud_account): + self._dts = dts + self._log = log + self._loop = loop + self._publisher = publisher + + self._cli_api = None + self._http_api = None + self._openmano_nsrs = {} + + self._set_cloud_account(cloud_account) + + def _set_cloud_account(self, cloud_account): + self._log.debug("Setting openmano plugin cloud account: %s", cloud_account) + self._cli_api = openmano_client.OpenmanoCliAPI( + self.log, + cloud_account.openmano.host, + cloud_account.openmano.port, + cloud_account.openmano.tenant_id, + ) + + self._http_api = openmano_client.OpenmanoHttpAPI( + self.log, + cloud_account.openmano.host, + cloud_account.openmano.port, + cloud_account.openmano.tenant_id, + ) + + def create_nsr(self, nsr_config_msg, nsd_msg): + """ + Create Network service record + """ + openmano_nsr = OpenmanoNsr( + self._log, + self._loop, + self._publisher, + self._cli_api, + self._http_api, + nsd_msg, + nsr_config_msg + ) + self._openmano_nsrs[nsr_config_msg.id] = openmano_nsr + + @asyncio.coroutine + def deploy(self, nsr_msg): + openmano_nsr = self._openmano_nsrs[nsr_msg.ns_instance_config_ref] + yield from openmano_nsr.create() + yield from openmano_nsr.deploy() + + @asyncio.coroutine + def instantiate_ns(self, nsr, xact): + """ + Instantiate NSR with the passed nsr id + """ + yield from nsr.instantiate(xact) + + @asyncio.coroutine + def instantiate_vnf(self, nsr, vnfr, xact): + """ + Instantiate NSR with the passed nsr id + """ + openmano_nsr = self._openmano_nsrs[nsr.id] + yield from openmano_nsr.add_vnfr(vnfr) + + # Mark the VNFR as running + # TODO: Create a task to monitor nsr/vnfr status + vnfr_msg = vnfr.vnfr.deep_copy() + vnfr_msg.operational_status = "init" + + self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg) + yield from self._publisher.publish_vnfr(xact, vnfr_msg) + + @asyncio.coroutine + def instantiate_vl(self, nsr, vlr, xact): + """ + Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def terminate_ns(self, nsr, xact): + """ + Terminate the network service + """ + nsr_id = nsr.id 
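+        # Teardown sequence (mirrors the calls below): terminate the
+        # openmano NS instance, delete the scenario and its VNFDs, then
+        # unpublish the constituent VNFRs and drop the NSR from the
+        # plugin's bookkeeping map.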
+        openmano_nsr = self._openmano_nsrs[nsr_id]
+        yield from openmano_nsr.terminate()
+        yield from openmano_nsr.delete()
+
+        for vnfr in openmano_nsr.vnfrs:
+            self._log.debug("Unpublishing VNFR: %s", vnfr.vnfr)
+            yield from self._publisher.unpublish_vnfr(xact, vnfr.vnfr)
+
+        del self._openmano_nsrs[nsr_id]
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr, xact):
+        """
+        Terminate the VNF
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr, xact):
+        """
+        Terminate the virtual link
+        """
+        pass
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
new file mode 100644
index 0000000..3c8e4f9
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
@@ -0,0 +1,228 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+
+import asyncio
+
+from gi.repository import (
+    RwDts as rwdts,
+    )
+import rift.tasklets
+
+
+class NsrOpDataDtsHandler(object):
+    """ The network service op data DTS handler """
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return the registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register as publisher for the NSR op data path """
+        self._log.debug("Registering Nsr op data path %s as publisher",
+                        NsrOpDataDtsHandler.XPATH)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create an NS record in DTS with the path and message
+        """
+        self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg, flags=rwdts.Flag.REPLACE):
+        """
+        Update an NS record in DTS with the path and message
+        """
+        self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh)
+        self.regh.update_element(path, msg, flags)
+        self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete an NS record in DTS with the path
+        """
+        self._log.debug("Deleting NSR xact:%s, path:%s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted NSR xact:%s, path:%s", xact, path)
+
+
+class VnfrPublisherDtsHandler(object):
+    """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' with DTS """
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._regh = None
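+
+    # Typical usage (illustrative sketch only; the variable names below
+    # are hypothetical): the NSM tasklet registers this publisher once,
+    # then pushes VNFR state through the coroutines that follow.
+    #
+    #     vnfr_pub = VnfrPublisherDtsHandler(dts, log, loop)
+    #     yield from vnfr_pub.register()
+    #     yield from vnfr_pub.create(xact, vnfr_xpath, vnfr_msg)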
+ + @property + def regh(self): + """ Return registration handle""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for Vvnfr create/update/delete/read requests from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + self._log.debug( + "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + raise NotImplementedError( + "%s action on VirtualNetworkFunctionRecord not supported", + action) + + self._log.debug("Registering for VNFR using xpath: %s", + VnfrPublisherDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler() + with self._dts.group_create() as group: + self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.CACHE),) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VNFR record in DTS with path and message + """ + self._log.debug("Creating VNFR xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VNFR record in DTS with path and message + """ + self._log.debug("Updating VNFR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VNFR record in DTS with path and message + """ + self._log.debug("Deleting VNFR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VNFR xact = %s, %s", xact, path) + + +class VlrPublisherDtsHandler(object): + """ registers 'D,/vlr:vlr-catalog/vlr:vlr """ + XPATH = "D,/vlr:vlr-catalog/vlr:vlr" + + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self._regh = None + + @property + def regh(self): + """ Return registration handle""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for vlr create/update/delete/read requests from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + self._log.debug( + "Got vlr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + raise NotImplementedError( + "%s action on VirtualLinkRecord not supported", + action) + + self._log.debug("Registering for VLR using xpath: %s", + VlrPublisherDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler() + with self._dts.group_create() as group: + self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.CACHE),) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VLR record in DTS with path and message + """ + self._log.debug("Creating VLR xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created VLR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VLR record in DTS with path and message + """ + self._log.debug("Updating VLR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VLR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VLR record 
in DTS with path and message + """ + self._log.debug("Deleting VLR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VLR xact = %s, %s", xact, path) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py new file mode 100644 index 0000000..543cef0 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conagent.py @@ -0,0 +1,244 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +import asyncio +import rift.tasklets + +from . import rwnsmconfigplugin +from . import jujuconf_nsm +import rift.mano.config_agent + +class ConfigAgentExistsError(Exception): + pass + +class ConfigAccountHandler(object): + def __init__(self, dts, log, loop, on_add_config_agent): + self._log = log + self._dts = dts + self._loop = loop + self._on_add_config_agent = on_add_config_agent + + self._log.debug("creating config account handler") + self.cloud_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber( + self._dts, self._log, + rift.mano.config_agent.ConfigAgentCallbacks( + on_add_apply=self.on_config_account_added, + on_delete_apply=self.on_config_account_deleted, + on_update_prepare=self.on_config_account_update, + ) + ) + + def on_config_account_deleted(self, account_name): + self._log.debug("config account deleted") + self._log.debug(account_name) + self._log.error("Config agent update not supported yet") + + def on_config_account_added(self, account): + self._log.debug("config account added") + self._log.debug(account.as_dict()) + self._on_add_config_agent(account) + + @asyncio.coroutine + def on_config_account_update(self, account): + self._log.debug("config account being updated") + self._log.debug(account.as_dict()) + self._log.error("Config agent update not supported yet") + + @asyncio.coroutine + def register(self): + self.cloud_cfg_handler.register() + +class RwNsConfigPlugin(rwnsmconfigplugin.NsmConfigPluginBase): + """ + Default Implementation of the NsmConfPluginBase + """ + @asyncio.coroutine + def notify_create_nsr(self, nsr, nsd): + """ + Notification of create Network service record + """ + pass + + @asyncio.coroutine + def apply_config(self, config, nsr, vnfrs): + """ + Notification of configuration of Network service record + """ + pass + + @asyncio.coroutine + def notify_create_vls(self, nsr, vld): + """ + Notification of create Network service record + """ + pass + + @asyncio.coroutine + def notify_create_vnfr(self, nsr, vnfr): + """ + Notification of create Network service record + """ + pass + + @asyncio.coroutine + def notify_instantiate_ns(self, nsr): + """ + Notification of NSR instantiationwith the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_instantiate_vnf(self, nsr, vnfr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def 
notify_instantiate_vl(self, nsr, vlr, xact): + """ + Notification of Instantiate NSR with the passed nsr id + """ + pass + + @asyncio.coroutine + def notify_nsr_active(self, nsr, vnfrs): + """ Notify instantiate of the virtual link""" + pass + + @asyncio.coroutine + def notify_terminate_ns(self, nsr): + """ + Notification of Terminate the network service + """ + pass + + @asyncio.coroutine + def notify_terminate_vnf(self, nsr, vnfr, xact): + """ + Notification of Terminate the network service + """ + pass + + @asyncio.coroutine + def notify_terminate_vl(self, nsr, vlr, xact): + """ + Notification of Terminate the virtual link + """ + pass + + @asyncio.coroutine + def apply_initial_config(self, vnfr_id, vnf): + """Apply initial configuration""" + pass + + @asyncio.coroutine + def get_config_status(self, vnfr_id): + """Get the status for the VNF""" + pass + + def get_action_status(self, execution_id): + """Get the action exection status""" + pass + + @asyncio.coroutine + def is_configured(self, vnfr_if): + return True + + @asyncio.coroutine + def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output): + """Apply config primitive on a VNF""" + pass + + def is_vnfr_managed(self, vnfr_id): + return False + +class NsmConfigPlugins(object): + """ NSM Config Agent Plugins """ + def __init__(self): + self._plugin_classes = { + "juju": jujuconf_nsm.JujuNsmConfigPlugin, + } + + @property + def plugins(self): + """ Plugin info """ + return self._plugin_classes + + def __getitem__(self, name): + """ Get item """ + print("%s", self._plugin_classes) + return self._plugin_classes[name] + + def register(self, plugin_name, plugin_class, *args): + """ Register a plugin to this Nsm""" + self._plugin_classes[plugin_name] = plugin_class + + def deregister(self, plugin_name, plugin_class, *args): + """ Deregister a plugin to this Nsm""" + if plugin_name in self._plugin_classes: + del self._plugin_classes[plugin_name] + + def class_by_plugin_name(self, name): + """ Get class by plugin name """ + return self._plugin_classes[name] + + +class NsmConfigAgent(object): + def __init__(self, dts, log, loop, records_publisher, on_config_nsm_plugin): + self._dts = dts + self._log = log + self._loop = loop + + self._records_publisher = records_publisher + self._on_config_nsm_plugin = on_config_nsm_plugin + self._config_plugins = NsmConfigPlugins() + self._config_handler = ConfigAccountHandler( + self._dts, self._log, self._loop, self._on_config_agent) + self._plugin_instances = {} + + def _set_plugin_instance(self, instance): + self._on_config_nsm_plugin(instance) + + def _on_config_agent(self, config_agent): + self._log.debug("Got nsm plugin config agent account: %s", config_agent) + try: + nsm_cls = self._config_plugins.class_by_plugin_name( + config_agent.account_type) + except KeyError as e: + self._log.debug( + "Config agent nsm plugin type not found: {}. Using default plugin, e={}". + format(config_agent.account_type, e)) + nsm_cls = RwNsConfigPlugin + + # Check to see if the plugin was already instantiated + if nsm_cls in self._plugin_instances: + self._log.debug("Config agent nsm plugin already instantiated. 
Using existing.") + self._set_plugin_instance(self._plugin_instances[nsm_cls]) + + # Otherwise, instantiate a new plugin using the config agent account + self._log.debug("Instantiting new config agent using class: %s", nsm_cls) + nsm_instance = nsm_cls(self._dts, self._log, self._loop, self._records_publisher, config_agent) + self._plugin_instances[nsm_cls] = nsm_instance + + self._set_plugin_instance(self._plugin_instances[nsm_cls]) + + @asyncio.coroutine + def register(self): + self._log.debug("Registering for config agent nsm plugin manager") + yield from self._config_handler.register() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py new file mode 100644 index 0000000..f1823e8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py @@ -0,0 +1,314 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import time +import ncclient +import ncclient.asyncio_manager +import re + +import gi +gi.require_version('RwYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwConmanYang', '1.0') +from gi.repository import ( + RwYang, + RwNsmYang as nsmY, + NsrYang as nsrY, + RwDts as rwdts, + RwTypes, + RwConmanYang as conmanY +) + +import rift.tasklets + +class ROSOConnectionError(Exception): + pass + +class ROServiceOrchif(object): + + def __init__(self, log, loop, parent): + self._log = log + self._loop = loop + self._parent = parent + self._manager = None + try: + self._model = RwYang.Model.create_libncx() + self._model.load_schema_ypbc(nsmY.get_schema()) + self._model.load_schema_ypbc(conmanY.get_schema()) + except Exception as e: + self._log.error("Error generating models %s", str(e)) + + + @asyncio.coroutine + def connect(self): + @asyncio.coroutine + def update_ns_cfg_state(self): + xpath="/cm-state" + while True: + try: + response = yield from self._manager.get(filter=('xpath', xpath)) + response_xml = response.data_xml.decode() + cm_state = conmanY.CmOpdata() + cm_state.from_xml_v2(self._model, response_xml) + cm_state_d = cm_state.as_dict() + #print("##>> Got NSR config state from RIFT-CM:", cm_state_d) + # Go in loop and update state for each NS + if cm_state_d and 'cm_nsr' in cm_state_d: + for nsr in cm_state_d['cm_nsr']: + if 'cm_vnfr' in nsr: + # Fill in new state to all vnfrs + for vnfr in nsr['cm_vnfr']: + vnfrid = vnfr['id'] + if vnfrid in self._parent.nsm._vnfrs: + # Need a consistent derivable way of checking state (hard coded for now) + if (vnfr['state'] == 'ready'): + if not self._parent.nsm._vnfrs[vnfrid].is_configured(): + yield from self._parent.nsm._vnfrs[vnfrid].set_config_status(nsrY.ConfigStates.CONFIGURED) + elif vnfr['state'] != 'ready_no_cfg': + if self._parent.nsm._vnfrs[vnfrid]._config_status != 
nsrY.ConfigStates.CONFIGURING:
+                                                yield from self._parent.nsm._vnfrs[vnfrid].set_config_status(nsrY.ConfigStates.CONFIGURING)
+
+                except Exception as e:
+                    self._log.error("Failed to get NS cfg state (may have been terminated) e=%s", str(e))
+                    return
+                yield from asyncio.sleep(5, loop=self._loop)
+
+        so_endp = self._parent.cm_endpoint
+        try:
+            self._log.info("Attempting Resource Orchestrator netconf connection.")
+            self._manager = yield from ncclient.asyncio_manager.asyncio_connect(loop=self._loop,
+                                                                                host=so_endp['cm_ip_address'],
+                                                                                port=so_endp['cm_port'],
+                                                                                username=so_endp['cm_username'],
+                                                                                password=so_endp['cm_password'],
+                                                                                allow_agent=False,
+                                                                                look_for_keys=False,
+                                                                                hostkey_verify=False)
+            self._log.info("Connected to Service Orchestrator netconf @%s", so_endp['cm_ip_address'])
+            # Start the executor loop to monitor configuration status for this NS
+            yield from self._loop.create_task(update_ns_cfg_state(self))
+            return True
+        except Exception as e:
+            self._log.error("Netconf connection to Service Orchestrator ip %s failed: %s",
+                            so_endp['cm_ip_address'], str(e))
+            return False
+
+    @staticmethod
+    def wrap_netconf_config_xml(xml):
+        # Wrap the payload in a netconf config element; the xc namespace
+        # is declared here because add_attribute() below injects an
+        # xc:operation='delete' attribute into the payload.
+        xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
+        return xml
+
+    def send_nsr_update(self, nsrid):
+
+        self._log.debug("Attempting to send NSR id: %s", nsrid)
+        msg = conmanY.SoConfig()
+        addnsr = msg.nsr.add()
+        addnsr.id = nsrid
+        xml = msg.to_xml_v2(self._model)
+        netconf_xml = self.wrap_netconf_config_xml(xml)
+
+        try:
+            response = yield from self._manager.edit_config(target='running', config=netconf_xml)
+            self._log.info("Received edit config response: %s", str(response))
+        except ncclient.transport.errors.SSHError as e:
+            so_endp = self._parent.cm_endpoint
+            self._log.error("Applying configuration %s to SO(%s) failed: %s",
+                            netconf_xml, so_endp['cm_ip_address'], str(e))
+        return
+
+    def send_nsr_delete(self, nsrid):
+        self._log.debug("Attempting to send delete NSR id: %s", nsrid)
+        msg = conmanY.SoConfig()
+        addnsr = msg.nsr.add()
+        addnsr.id = nsrid
+        xml = msg.to_xml_v2(self._model)
+        delete_path = '/cm-config/nsr[id=\'{}\']'.format(nsrid)
+
+        def _xpath_strip_keys(xpath):
+            '''Strip key-value pairs from the supplied xpath (copied from automation)
+
+            Arguments:
+                xpath - xpath to be stripped of keys
+
+            Returns:
+                an xpath without keys
+            '''
+            RE_CAPTURE_KEY_VALUE = re.compile(r'\[[^=]*?\=[\"\']?([^\'\"\]]*?)[\'\"]?\]')
+            return re.sub(RE_CAPTURE_KEY_VALUE, '', xpath)
+
+        # In lieu of protobuf delta support, try to place the attribute in the correct place
+        def add_attribute(xpath, xml):
+            xpath = xpath.lstrip('/')
+            xpath = _xpath_strip_keys(xpath)
+            xpath_elems = xpath.split('/')
+            pos = 0
+            for elem in xpath_elems:
+                pos = xml.index(elem, pos)
+                pos = xml.index('>', pos)
+            if xml[pos-1] == '/':
+                pos -= 1
+            xml = xml[:pos] + " xc:operation='delete'" + xml[pos:]
+            return xml
+
+        xml = add_attribute(delete_path, xml)
+        # print('>>>>> delete xml=\n{}\n\n'.format(xml))
+        netconf_xml = self.wrap_netconf_config_xml(xml)
+
+        try:
+            response = yield from self._manager.edit_config(target='running', config=netconf_xml)
+            self._log.info("Received delete config response: %s", str(response))
+        except ncclient.transport.errors.SSHError as e:
+            self._log.error("Deleting CM config for NSR id=%s failed: %s",
+                            nsrid, str(e))
+        return
+
+class ROServiceOrchConfig(object):
+    def __init__(self, log, loop, dts, parent):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self.nsm = parent
+        self._ro_config_xpath = "C,/ro-config/cm-endpoint"
+        self.soif = None
+        self._active_nsr = []
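+        # _active_nsr buffers NSR ids that arrive before the netconf
+        # session to the SO is up; initiate_connection() replays any
+        # buffered ids once connected (see notify_nsr_up below).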
self.cm_endpoint = {} + self._log.debug("Initialized ROServiceOrchConfig, cm_endpoint = %s", self.cm_endpoint) + + def is_ready(self): + return True + + @asyncio.coroutine + def register(self): + """ Register for Nsd cm-endpoint requests from dts """ + + @asyncio.coroutine + def initiate_connection(): + loop_cnt = 60 + #Create SO interface object + self._log.debug("Inside initiate_connection routine") + self.soif = ROServiceOrchif(self._log, self._loop, self) + for i in range(loop_cnt): + connect_status = yield from self.soif.connect() + if connect_status: + self._log.debug("Successfully connected to netconf") + for nsrid in self._active_nsr: + self._log.debug("Sending nsr-id : %s to SO from pending list", nsrid) + yield from self.soif.send_nsr_update(nsrid) + self._active_nsr.pop(nsrid) + self._log.debug("Deleting nsr-id : %s from pending list", nsrid) + break + else: + self._log.error("Can not connect to SO. Retrying!") + + self._log.debug("Sleeping for 1 second in initiate_connection()") + yield from asyncio.sleep(1, loop = self._loop) + else: + raise ROSOConnectionError("Failed to connect to Service Orchestrator within 60") + return + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + ro_config = nsmY.RoConfig() + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + self._log.debug("Got nsr apply cfg (xact:%s) (action:%s) (cm_endpoint:%s)", + xact, action, self.cm_endpoint) + + # Verify that cm_endpoint is complete, we may get only default values if this is confd re-apply + so_complete = True + for field in ro_config.cm_endpoint.fields: + if field not in self.cm_endpoint: + so_complete = False + + # Create future for connect + if so_complete is True and self.soif is None: + asyncio.ensure_future(initiate_connection(), loop = self._loop) + + return + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for ro-config """ + + self._log.debug("ro-config received msg %s", msg) + + action = xact_info.handle.get_query_action() + # Save msg as dictionary + msg_dict = msg.as_dict() + + self._log.info("ro-config received msg %s action %s - dict = %s", msg, action, msg_dict) + + # Save configuration infomration + # Might be able to save entire msg_dict + for key, val in msg_dict.items(): + self.cm_endpoint[key] = val + + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug( + "Registering for NSD config using xpath: %s", + self._ro_config_xpath + ) + + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + self._regh = acg.register(xpath=self._ro_config_xpath, + flags=rwdts.Flag.SUBSCRIBER, + on_prepare=on_prepare) + + + @asyncio.coroutine + def notify_nsr_up(self, nsrid): + self._log.info("Notifying NSR id = %s!", nsrid) + + if self.soif is None: + self._log.warning("No SO interface created yet! 
Buffering the nsr-id")
+            self._active_nsr.append(nsrid)
+        else:
+            # Send NSR id as configuration
+            try:
+                yield from self.soif.send_nsr_update(nsrid)
+            except Exception as e:
+                self._log.error("Failed to send NSR id to SO: %s", str(e))
+                return
+
+
+    @asyncio.coroutine
+    def notify_nsr_down(self, nsrid):
+        self._log.info("Notifying NSR id = %s DOWN!", nsrid)
+
+        if self.soif is None:
+            self._log.warning("No SO interface created yet! Find and delete the nsr-id from the queue")
+        else:
+            # Send NSR id as configuration
+            try:
+                yield from self.soif.send_nsr_delete(nsrid)
+            except Exception as e:
+                self._log.error("Failed to send NSR id to SO: %s", str(e))
+                return
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py
new file mode 100644
index 0000000..1347d62
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmconfigplugin.py
@@ -0,0 +1,183 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+
+import asyncio
+import abc
+
+
+class NsmConfigPluginBase(object):
+    """
+    Abstract base class for the NSM configuration agent plugin.
+    There will be a single instance of this plugin for each plugin type.
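+
+    Concrete plugins (for example the Juju plugin registered in
+    NsmConfigPlugins in rwnsm_conagent.py) implement the notify_* and
+    apply_* coroutines; NsmConfigAgent creates one plugin instance per
+    configured config agent account and dispatches to it via invoke().
+    RwNsConfigPlugin in rwnsm_conagent.py is the default no-op
+    implementation used when no matching agent type is found.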
+ """ + + def __init__(self, dts, log, loop, publisher, config_agent): + self._dts = dts + self._log = log + self._loop = loop + self._publisher = publisher + self._config_agent = config_agent + + @property + def dts(self): + return self._dts + + @property + def log(self): + return self._log + + @property + def loop(self): + return self._loop + + @property + def nsm(self): + return self._nsm + + + @abc.abstractmethod + @asyncio.coroutine + def notify_create_nsr(self, nsr, nsd): + """ Notification on creation of an NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def apply_config(self, config, nsrs, vnfrs): + """ Notification on configuration of an NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_create_vls(self, nsr, vld): + """ Notification on creation of an VL """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_create_vnfr(self, nsr, vnfr): + """ Notification on creation of an VNFR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_instantiate_ns(self, nsr): + """ Notification for instantiate of the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_instantiate_vnf(self, nsr, vnfr, xact): + """ Notify instantiation of the virtual network function """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_instantiate_vl(self, nsr, vl, xact): + """ Notify instantiate of the virtual link""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_nsr_active(self, nsr, vnfrs): + """ Notify instantiate of the virtual link""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_terminate_ns(self, nsr): + """Notify termination of the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_terminate_vnf(self, nsr, vnfr, xact): + """Notify termination of the VNF """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def notify_terminate_vl(self, nsr, vlr, xact): + """Notify termination of the Virtual Link Record""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def apply_initial_config(self, vnfr_id, vnf): + """Apply initial configuration""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_config_status(self, vnfr_id): + """Get the status for the VNF""" + pass + + @abc.abstractmethod + def get_action_status(self, execution_id): + """Get the action exection status""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def is_configured(self, vnfr_if): + """ Check if the agent is configured for the VNFR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output): + """Apply config primitive on a VNF""" + pass + + @abc.abstractmethod + def is_vnfr_managed(self, vnfr_id): + """ Check if VNR is managed by config agent """ + pass + + @asyncio.coroutine + def invoke(self, method, *args): + self._log.debug("Config agent plugin: method {} with args {}: {}". 
+                        format(method, args, self))
+        # TBD - find a better way than string comparison to dispatch the method
+        if method == 'notify_create_nsr':
+            yield from self.notify_create_nsr(args[0], args[1])
+        elif method == 'notify_create_vls':
+            yield from self.notify_create_vls(args[0], args[1], args[2])
+        elif method == 'notify_create_vnfr':
+            yield from self.notify_create_vnfr(args[0], args[1])
+        elif method == 'notify_instantiate_ns':
+            yield from self.notify_instantiate_ns(args[0])
+        elif method == 'notify_instantiate_vnf':
+            yield from self.notify_instantiate_vnf(args[0], args[1], args[2])
+        elif method == 'notify_instantiate_vl':
+            yield from self.notify_instantiate_vl(args[0], args[1], args[2])
+        elif method == 'notify_nsr_active':
+            yield from self.notify_nsr_active(args[0], args[1])
+        elif method == 'notify_terminate_ns':
+            yield from self.notify_terminate_ns(args[0])
+        elif method == 'notify_terminate_vnf':
+            yield from self.notify_terminate_vnf(args[0], args[1], args[2])
+        elif method == 'notify_terminate_vl':
+            yield from self.notify_terminate_vl(args[0], args[1], args[2])
+        elif method == 'apply_initial_config':
+            yield from self.apply_initial_config(args[0], args[1])
+        elif method == 'apply_config':
+            yield from self.apply_config(args[0], args[1], args[2])
+        else:
+            self._log.error("Unknown method %s invoked on config agent plugin",
+                            method)
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py
new file mode 100755
index 0000000..14a2466
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py
@@ -0,0 +1,114 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+
+import asyncio
+import abc
+
+
+class NsmPluginBase(object):
+    """
+    Abstract base class for the NSM plugin.
+    There will be a single instance of this plugin for each plugin type.
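+
+    Concrete plugins (for example OpenmanoNsPlugin earlier in this
+    submission) implement the deploy/instantiate_*/terminate_*
+    coroutines below; the NSM drives the network service lifecycle
+    through this interface. A minimal no-op plugin would look roughly
+    like this (hypothetical sketch)::
+
+        class NoopNsPlugin(NsmPluginBase):
+            @asyncio.coroutine
+            def deploy(self, nsr_msg):
+                pass  # nothing to deploy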
+ """ + + def __init__(self, dts, log, loop, nsm, plugin_name, dts_publisher): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._plugin_name = plugin_name + self._dts_publisher = dts_publisher + + @property + def dts(self): + return self._dts + + @property + def log(self): + return self._log + + @property + def loop(self): + return self._loop + + @property + def nsm(self): + return self._nsm + + def create_nsr(self, nsr): + """ Create an NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def deploy(self, nsr_msg): + pass + + @abc.abstractmethod + @asyncio.coroutine + def instantiate_ns(self, nsr, xact): + """ Instantiate the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def instantiate_vnf(self, nsr, vnfr, xact): + """ Instantiate the virtual network function """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def instantiate_vl(self, nsr, vl, xact): + """ Instantiate the virtual link""" + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_nsr(self, nsr_path): + """ Get the NSR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_vnfr(self, vnfr_path): + """ Get the VNFR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def get_vlr(self, vlr_path): + """ Get the VLR """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def terminate_ns(self, nsr, xact): + """Terminate the network service """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def terminate_vnf(self, vnfr, xact): + """Terminate the VNF """ + pass + + @abc.abstractmethod + @asyncio.coroutine + def terminate_vl(self, vlr, xact): + """Terminate the Virtual Link Record""" + pass \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py new file mode 100755 index 0000000..10b275b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py @@ -0,0 +1,3185 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# + +# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 +import asyncio +import logging +import uuid +import sys +import time + +from enum import Enum +from collections import deque +from collections import defaultdict + +import gi +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwVlrYang', '1.0') +from gi.repository import ( + RwNsrYang, + NsrYang, + RwVlrYang, + VnfrYang, + RwVnfrYang, + RwNsmYang, + RwDts as rwdts, + RwTypes, + ProtobufC, +) + +import rift.mano.config_agent +import rift.tasklets + +from . import rwnsm_conman as conman +from . import cloud +from . import publisher +from . import xpath +from . import rwnsm_conagent as conagent +from . import config_value_pool +from . 
import rwvnffgmgr + + +class NetworkServiceRecordState(Enum): + """ Network Service Record State """ + INIT = 101 + VL_INIT_PHASE = 102 + VNF_INIT_PHASE = 103 + VNFFG_INIT_PHASE = 104 + RUNNING = 105 + TERMINATE = 106 + TERMINATE_RCVD = 107 + VL_TERMINATE_PHASE = 108 + VNF_TERMINATE_PHASE = 109 + VNFFG_TERMINATE_PHASE = 110 + TERMINATED = 111 + FAILED = 112 + + +class NetworkServiceRecordError(Exception): + """ Network Service Record Error """ + pass + + +class NetworkServiceDescriptorError(Exception): + """ Network Service Descriptor Error """ + pass + + +class VirtualNetworkFunctionRecordError(Exception): + """ Virtual Network Function Record Error """ + pass + + +class NetworkServiceDescriptorNotFound(Exception): + """ Cannot find Network Service Descriptor""" + pass + + +class NetworkServiceDescriptorRefCountExists(Exception): + """ Network Service Descriptor reference count exists """ + pass + + +class NetworkServiceDescriptorUnrefError(Exception): + """ Failed to unref a network service descriptor """ + pass + + +class NsrInstantiationFailed(Exception): + """ Failed to instantiate network service """ + pass + + +class VnfInstantiationFailed(Exception): + """ Failed to instantiate virtual network function""" + pass + +class VnffgInstantiationFailed(Exception): + """ Failed to instantiate virtual network function""" + pass + +class VnfDescriptorError(Exception): + """Failed to instantiate virtual network function""" + pass + + +class VlRecordState(Enum): + """ VL Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class VnffgRecordState(Enum): + """ VNFFG Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class VnffgRecord(object): + """ Vnffg Records class""" + def __init__(self, dts, log, loop, vnffgmgr, nsr, nsr_name, vnffgd_msg, sdn_account_name): + + self._dts = dts + self._log = log + self._loop = loop + self._vnffgmgr = vnffgmgr + self._nsr = nsr + self._nsr_name = nsr_name + self._vnffgd_msg = vnffgd_msg + if sdn_account_name is None: + self._sdn_account_name = '' + else: + self._sdn_account_name = sdn_account_name + + self._vnffgr_id = str(uuid.uuid4()) + self._vnffgr_rsp_id = list() + self._vnffgr_state = VnffgRecordState.INIT + + @property + def id(self): + """ VNFFGR id """ + return self._vnffgr_id + + @property + def state(self): + """ state of this VNF """ + return self._vnffgr_state + + def fetch_vnffgr(self): + """ + Get VNFFGR message to be published + """ + + if self._vnffgr_state == VnffgRecordState.INIT: + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + "operational_status": 'init', + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + elif self._vnffgr_state == VnffgRecordState.TERMINATED: + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + "operational_status": 'terminated', + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + else: + try: + vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id) + except Exception: + self._log.exception("Fetching VNFFGR for VNFFG with id %s failed", self._vnffgr_id) + self._vnffgr_state = 
VnffgRecordState.FAILED + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + "operational_status": 'failed', + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + + return vnffgr + + @asyncio.coroutine + def vnffgr_create_msg(self): + """ Virtual Link Record message for Creating VLR in VNS """ + vnffgr_dict = {"id": self._vnffgr_id, + "nsd_id": self._nsr.nsd_id, + "vnffgd_id_ref": self._vnffgd_msg.id, + "vnffgd_name_ref": self._vnffgd_msg.name, + "sdn_account": self._sdn_account_name, + } + vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict) + for rsp in self._vnffgd_msg.rsp: + vnffgr_rsp = vnffgr.rsp.add() + vnffgr_rsp.id = str(uuid.uuid4()) + vnffgr_rsp.name = self._nsr.name + '.' + rsp.name + self._vnffgr_rsp_id.append(vnffgr_rsp.id) + vnffgr_rsp.vnffgd_rsp_id_ref = rsp.id + vnffgr_rsp.vnffgd_rsp_name_ref = rsp.name + for rsp_cp_ref in rsp.vnfd_connection_point_ref: + vnfd = [self._nsr._vnfds[vnfd_id] for vnfd_id in self._nsr._vnfds.keys() if vnfd_id == rsp_cp_ref.vnfd_id_ref] + if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'): + self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type) + else: + self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref) + continue + + vnfr_cp_ref = vnffgr_rsp.vnfr_connection_point_ref.add() + vnfr_cp_ref.member_vnf_index_ref = rsp_cp_ref.member_vnf_index_ref + vnfr_cp_ref.hop_number = rsp_cp_ref.order + vnfr_cp_ref.vnfd_id_ref =rsp_cp_ref.vnfd_id_ref + vnfr_cp_ref.service_function_type = vnfd[0].service_function_type + for nsr_vnfr in self._nsr.vnfrs.values(): + if (nsr_vnfr.vnfd.id == vnfr_cp_ref.vnfd_id_ref and + nsr_vnfr.member_vnf_index == vnfr_cp_ref.member_vnf_index_ref): + vnfr_cp_ref.vnfr_id_ref = nsr_vnfr.id + vnfr_cp_ref.vnfr_name_ref = nsr_vnfr.name + vnfr_cp_ref.vnfr_connection_point_ref = rsp_cp_ref.vnfd_connection_point_ref + + vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath) + self._log.debug(" Received VNFR is %s", vnfr) + while vnfr.operational_status != 'running': + self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status) + if vnfr.operational_status == 'failed': + self._log.error("Fetching VNFR for %s failed", vnfr.id) + raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id)) + yield from asyncio.sleep(2, loop=self._loop) + vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath) + self._log.debug("Received VNFR is %s", vnfr) + + vnfr_cp_ref.connection_point_params.mgmt_address = vnfr.mgmt_interface.ip_address + for cp in vnfr.connection_point: + if cp.name == vnfr_cp_ref.vnfr_connection_point_ref: + vnfr_cp_ref.connection_point_params.port_id = cp.connection_point_id + vnfr_cp_ref.connection_point_params.name = self._nsr.name + '.' 
+ cp.name + for vdu in vnfr.vdur: + for ext_intf in vdu.external_interface: + if ext_intf.name == vnfr_cp_ref.vnfr_connection_point_ref: + vnfr_cp_ref.connection_point_params.vm_id = vdu.vim_id + self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id, + vnfr_cp_ref.connection_point_params.vm_id) + break + + vnfr_cp_ref.connection_point_params.address = cp.ip_address + vnfr_cp_ref.connection_point_params.port = 50000 + for vdu in vnfr.vdur: + pass + self._log.info("VNFFGR msg to be sent is %s", vnffgr) + return vnffgr + + @asyncio.coroutine + def instantiate(self, xact): + """ Instantiate this VNFFG """ + + self._log.info("Instaniating VNFFGR with vnffgd %s xact %s", + self._vnffgd_msg, xact) + vnffgr_request = yield from self.vnffgr_create_msg() + + try: + vnffgr = self._vnffgmgr.create_vnffgr(vnffgr_request,self._vnffgd_msg.classifier) + except Exception: + self._log.exception("VNFFG instantiation failed") + self._vnffgr_state = VnffgRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFFGR %s failure" % (self.id, vnffgr_request.id)) + + self._vnffgr_state = VnffgRecordState.INSTANTIATION_PENDING + + if vnffgr.operational_status == 'failed': + self._log.error("NS Id:%s VNFFG creation failed for vnffgr id %s", self.id, vnffgr.id) + self._vnffgr_state = VnffgRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFFGR %s failure" % (self.id, vnffgr.id)) + + self._log.info("Instantiated VNFFGR :%s",vnffgr) + self._vnffgr_state = VnffgRecordState.ACTIVE + + self._log.info("Invoking update_nsr_state to update NSR state for NSR ID: %s", self._nsr.id) + yield from self._nsr.update_nsr_state() + + def vnffgr_in_vnffgrm(self): + """ Is there a VNFR record in VNFM """ + if (self._vnffgr_state == VnffgRecordState.ACTIVE or + self._vnffgr_state == VnffgRecordState.INSTANTIATION_PENDING or + self._vnffgr_state == VnffgRecordState.FAILED): + return True + + return False + + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VNFFGR """ + if not self.vnffgr_in_vnffgrm(): + self._log.error("Ignoring terminate request for id %s in state %s", + self.id, self._vnffgr_state) + return + + self._log.info("Terminating VNFFGR id:%s", self.id) + self._vnffgr_state = VnffgRecordState.TERMINATE_PENDING + + self._vnffgmgr.terminate_vnffgr(self._vnffgr_id) + + self._vnffgr_state = VnffgRecordState.TERMINATED + self._log.debug("Terminated VNFFGR id:%s", self.id) + + +class VirtualLinkRecord(object): + """ Virtual Link Records class""" + def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name): + + self._dts = dts + self._log = log + self._loop = loop + self._nsr_name = nsr_name + self._vld_msg = vld_msg + self._cloud_account_name = cloud_account_name + + self._vlr_id = str(uuid.uuid4()) + self._state = VlRecordState.INIT + + @property + def xpath(self): + """ path for this object """ + return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id) + + @property + def id(self): + """ VLR id """ + return self._vlr_id + + @property + def nsr_name(self): + """ Get NSR name for this VL """ + return self.nsr_name + + @property + def vld_msg(self): + """ Virtual Link Desciptor """ + return self._vld_msg + + @property + def name(self): + """ + Get the name for this VLR. + VLR name is "nsr name:VLD name" + """ + if self.vld_msg.name == "multisite": + # This is a temporary hack to identify manually provisioned inter-site network + return self.vld_msg.name + else: + return self._nsr_name + "." 
+ self.vld_msg.name + + @property + def cloud_account_name(self): + """ Cloud account that this VLR should be created in """ + return self._cloud_account_name + + @staticmethod + def vlr_xpath(vlr): + """ Get the VLR path from VLR """ + return (VirtualLinkRecord.XPATH + "[vlr:id = '{}']").format(vlr.id) + + @property + def vlr_msg(self): + """ Virtual Link Record message for Creating VLR in VNS """ + vld_fields = ["short_name", + "vendor", + "description", + "version", + "type_yang", + "provider_network"] + + vld_copy_dict = {k: v for k, v in self.vld_msg.as_dict().items() + if k in vld_fields} + vlr_dict = {"id": self._vlr_id, + "name": self.name, + "cloud_account": self.cloud_account_name, + } + + vlr_dict.update(vld_copy_dict) + + vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict) + return vlr + + def create_nsr_vlr_msg(self, vnfrs): + """ The VLR message""" + nsr_vlr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr() + nsr_vlr.vlr_ref = self._vlr_id + + for conn in self.vld_msg.vnfd_connection_point_ref: + for vnfr in vnfrs: + if (vnfr.vnfd.id == conn.vnfd_id_ref and + vnfr.member_vnf_index == conn.member_vnf_index_ref): + cp_entry = nsr_vlr.vnfr_connection_point_ref.add() + cp_entry.vnfr_id = vnfr.id + cp_entry.connection_point = conn.vnfd_connection_point_ref + + return nsr_vlr + + @asyncio.coroutine + def instantiate(self, xact): + """ Instantiate this VL """ + + self._log.debug("Instaniating VLR key %s, vld %s xact %s", + self.xpath, self._vld_msg, xact) + vlr = None + self._state = VlRecordState.INSTANTIATION_PENDING + with self._dts.transaction(flags=0) as xact: + block = xact.block_create() + block.add_query_create(self.xpath, self.vlr_msg) + self._log.debug("Executing VL create path:%s msg:%s", + self.xpath, self.vlr_msg) + res_iter = yield from block.execute(now=True) + for ent in res_iter: + res = yield from ent + vlr = res.result + + if vlr is None: + self._state = VlRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to empty response" % self.id) + + if vlr.operational_status == 'failed': + self._log.debug("NS Id:%s VL creation failed for vlr id %s", self.id, vlr.id) + self._state = VlRecordState.FAILED + raise NsrInstantiationFailed("Failed NS %s instantiation due to VL %s failure" % (self.id, vlr.id)) + + self._log.info("Instantiated VL with xpath %s and vlr:%s", + self.xpath, vlr) + self._state = VlRecordState.ACTIVE + + def vlr_in_vns(self): + """ Is there a VLR record in VNS """ + if (self._state == VlRecordState.ACTIVE or + self._state == VlRecordState.INSTANTIATION_PENDING or + self._state == VlRecordState.FAILED): + return True + + return False + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VL """ + if not self.vlr_in_vns(): + self._log.debug("Ignoring terminate request for id %s in state %s", + self.id, self._state) + return + + self._log.debug("Terminating VL id:%s", self.id) + self._state = VlRecordState.TERMINATE_PENDING + block = xact.block_create() + block.add_query_delete(self.xpath) + yield from block.execute(flags=0, now=True) + self._state = VlRecordState.TERMINATED + self._log.debug("Terminated VL id:%s", self.id) + + +class VnfRecordState(Enum): + """ Vnf Record State """ + INIT = 101 + INSTANTIATION_PENDING = 102 + ACTIVE = 103 + TERMINATE_PENDING = 104 + TERMINATED = 105 + FAILED = 106 + + +class VirtualNetworkFunctionRecord(object): + """ Virtual Network Function Record class""" + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop, vnfd, const_vnfd, 
nsr_name, cloud_account_name): + self._dts = dts + self._log = log + self._loop = loop + self._vnfd = vnfd + self._nsr_name = nsr_name + self._const_vnfd = const_vnfd + self._cloud_account_name = cloud_account_name + + try: + self._config_type = const_vnfd.vnf_configuration.config_type + except: + self._config_type = 'none' + self._config_status = NsrYang.ConfigStates.INIT + self._mon_params = {} + self._state = VnfRecordState.INIT + self._vnfr_id = str(uuid.uuid4()) + self._vnfr = self.vnfr_msg + self._log.debug("Set VNFR {} config type to {}". + format(self.name, self._config_type)) + + @property + def id(self): + """ VNFR id """ + return self._vnfr_id + + @property + def xpath(self): + """ VNFR xpath """ + return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id) + + @property + def mon_param_xpath(self): + """ VNFR monitoring param xpath """ + return self.xpath + "/vnfr:monitoring-param" + + @property + def vnfr(self): + """ VNFR xpath """ + return self._vnfr + + @property + def vnfd(self): + """ vnfd """ + return self._vnfd + + @property + def active(self): + """ Is this VNF actve """ + return True if self._state == VnfRecordState.ACTIVE else False + + @property + def state(self): + """ state of this VNF """ + return self._state + + @property + def member_vnf_index(self): + """ Member VNF index """ + return self._const_vnfd.member_vnf_index + + @property + def nsr_name(self): + """ NSR name""" + return self._nsr_name + + @property + def name(self): + """ Name of this VNFR """ + return self._nsr_name + "." + self.vnfd.name + "." + str(self.member_vnf_index) + + @staticmethod + def vnfr_xpath(vnfr): + """ Get the VNFR path from VNFR """ + return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id) + + @property + def config_type(self): + return self._config_type + + @property + def config_status(self): + self._log.debug("Map VNFR {} config status {} ({})". + format(self.name, self._config_status, self._config_type)) + if self._config_type == 'none': + return 'config_not_needed' + if self._config_status == NsrYang.ConfigStates.CONFIGURED: + return 'configured' + if self._config_status == NsrYang.ConfigStates.FAILED: + return 'failed' + return 'configuring' + + @property + def vnfr_msg(self): + """ VNFR message for this VNFR """ + vnfd_fields = ["short_name", + "vendor", + "description", + "version", + "type_yang"] + vnfd_copy_dict = {k: v for k, v in self._vnfd.as_dict().items() + if k in vnfd_fields} + vnfr_dict = {"id": self.id, + "vnfd_ref": self.vnfd.id, + "name": self.name, + "cloud_account": self._cloud_account_name, + "config_status": self.config_status, + } + vnfr_dict.update(vnfd_copy_dict) + vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict) + vnfr.member_vnf_index_ref = self.member_vnf_index + vnfr.vnf_configuration.from_dict(self._const_vnfd.vnf_configuration.as_dict()) + + if self._vnfd.mgmt_interface.has_field("port"): + vnfr.mgmt_interface.port = self._vnfd.mgmt_interface.port + + # UI expects the monitoring param field to exist + vnfr.monitoring_param = [] + + self._log.debug("Get vnfr_msg for VNFR {} : {}". + format(self.name, vnfr)) + return vnfr + + @property + def msg(self): + """ message for this VNFR """ + return self.id + + @asyncio.coroutine + def update_vnfm(self): + self._vnfr = self.vnfr_msg + # Publish only after VNFM has the VNFR created + if self._config_status != NsrYang.ConfigStates.INIT: + self._log.debug("Send an update to VNFM for VNFR {} with {}". 
+ format(self.name, self.vnfr)) + yield from self._dts.query_update(self.xpath, + 0, + self.vnfr) + + @asyncio.coroutine + def set_config_status(self, status): + self._log.debug("Update VNFR {} from {} ({}) to {}". + format(self.name, self._config_status, + self._config_type, status)) + if self._config_status == NsrYang.ConfigStates.CONFIGURED: + self._log.error("Updating already configured VNFR {}". + format(self.name)) + + if self._config_status != status: + self._config_status = status + self._log.debug("Updated VNFR {} status to {}". + format(self.name, status)) + try: + yield from self.update_vnfm() + except Exception as e: + self._log.error("Exception updating VNFM with new status {} of VNFR {}: {}". + format(status, self.name, e)) + self._log.exception(e) + + def is_configured(self): + if self._config_type == 'none': + return True + + if self._config_status == NsrYang.ConfigStates.CONFIGURED: + return True + return False + + @asyncio.coroutine + def instantiate(self, nsr, xact): + """ Instantiate this VL """ + + self._log.debug("Instaniating VNFR key %s, vnfd %s, xact %s", + self.xpath, self._vnfd, xact) + + self._log.debug("Create VNF with xpath %s and vnfr %s", + self.xpath, self.vnfr) + + self._state = VnfRecordState.INSTANTIATION_PENDING + + def find_vlr_for_cp(conn): + """ Find VLR for the given connection point """ + for vlr in nsr.vlrs: + for vnfd_cp in vlr.vld_msg.vnfd_connection_point_ref: + if (vnfd_cp.vnfd_id_ref == self._vnfd.id and + vnfd_cp.vnfd_connection_point_ref == conn.name and + vnfd_cp.member_vnf_index_ref == self.member_vnf_index): + self._log.debug("Found VLR for cp_name:%s and vnf-index:%d", + conn.name, self.member_vnf_index) + return vlr + return None + + # For every connection point in the VNFD fill in the identifier + for conn_p in self._vnfd.connection_point: + cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint() + cpr.name = conn_p.name + cpr.type_yang = conn_p.type_yang + vlr_ref = find_vlr_for_cp(conn_p) + if vlr_ref is None: + msg = "Failed to find VLR for cp = %s" % conn_p.name + self._log.debug("%s", msg) +# raise VirtualNetworkFunctionRecordError(msg) + continue + + cpr.vlr_ref = vlr_ref.id + self.vnfr.connection_point.append(cpr) + self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s", + cpr, self.vnfr.id, self.vnfr.vnfd_ref) + + yield from self._dts.query_create(self.xpath, + # 0, # this is sub + 0, # this is sub + self.vnfr) + + self._log.info("Created VNF with xpath %s and vnfr %s", + self.xpath, self.vnfr) + + self._log.info("Instantiated VNFR with xpath %s and vnfd %s, vnfr %s", + self.xpath, self._vnfd, self.vnfr) + + @asyncio.coroutine + def update(self, vnfr): + """ Update this VNFR""" + curr_vnfr = self._vnfr + self._vnfr = vnfr + if vnfr.operational_status == "running": + if curr_vnfr.operational_status != "running": + yield from self.is_active() + elif vnfr.operational_status == "failed": + yield from self.instantiation_failed() + + @asyncio.coroutine + def is_active(self): + """ This VNFR is active """ + self._log.debug("VNFR %s is active", self._vnfr_id) + self._state = VnfRecordState.ACTIVE + + @asyncio.coroutine + def instantiation_failed(self): + """ This VNFR instantiation failed""" + self._log.error("VNFR %s instantiation failed", self._vnfr_id) + self._state = VnfRecordState.FAILED + + def vnfr_in_vnfm(self): + """ Is there a VNFR record in VNFM """ + if (self._state == VnfRecordState.ACTIVE or + self._state == VnfRecordState.INSTANTIATION_PENDING or + self._state == VnfRecordState.FAILED): + return 
True + + return False + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this VNF """ + if not self.vnfr_in_vnfm(): + self._log.debug("Ignoring terminate request for id %s in state %s", + self.id, self._state) + return + + self._log.debug("Terminating VNF id:%s", self.id) + self._state = VnfRecordState.TERMINATE_PENDING + block = xact.block_create() + block.add_query_delete(self.xpath) + yield from block.execute(flags=0, now=True) + self._state = VnfRecordState.TERMINATED + self._log.debug("Terminated VNF id:%s", self.id) + + @asyncio.coroutine + def get_monitoring_param(self): + """ Fetch monitoring params """ + res_iter = yield from self._dts.query_read(self.mon_param_xpath, rwdts.Flag.MERGE) + monp_list = [] + for ent in res_iter: + res = yield from ent + monp = res.result + if monp.id in self._mon_params: + if monp.has_field("value_integer"): + self._mon_params[monp.id].value_integer = monp.value_integer + if monp.has_field("value_decimal"): + self._mon_params[monp.id].value_decimal = monp.value_decimal + if monp.has_field("value_string"): + self._mon_params[monp.id].value_string = monp.value_string + else: + self._mon_params[monp.id] = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_VnfMonitoringParam_MonitoringParam.from_dict(monp.as_dict()) + monp_list.append(self._mon_params[monp.id]) + return monp_list + + +class NetworkServiceStatus(object): + """ A class representing the Network service's status """ + MAX_EVENTS_RECORDED = 10 + """ Network service Status class""" + def __init__(self, dts, log, loop): + self._dts = dts + self._log = log + self._loop = loop + + self._state = NetworkServiceRecordState.INIT + self._events = deque([]) + + def record_event(self, evt, evt_desc): + """ Record an event """ + self._log.debug("Recording event - evt %s, evt_descr %s len = %s", + evt, evt_desc, len(self._events)) + if len(self._events) >= NetworkServiceStatus.MAX_EVENTS_RECORDED: + self._events.popleft() + self._events.append((int(time.time()), evt, evt_desc)) + + def set_state(self, state): + """ set the state of this status object """ + self._state = state + + def yang_str(self): + """ Return the state as a yang enum string """ + state_to_str_map = {"INIT": "init", + "VL_INIT_PHASE": "vl_init_phase", + "VNF_INIT_PHASE": "vnf_init_phase", + "VNFFG_INIT_PHASE": "vnffg_init_phase", + "RUNNING": "running", + "TERMINATE_RCVD": "terminate_rcvd", + "TERMINATE": "terminate", + "VL_TERMINATE_PHASE": "vl_terminate_phase", + "VNF_TERMINATE_PHASE": "vnf_terminate_phase", + "VNFFG_TERMINATE_PHASE": "vnffg_terminate_phase", + "TERMINATED": "terminated", + "FAILED": "failed"} + return state_to_str_map[self._state.name] + + @property + def state(self): + """ State of this status object """ + return self._state + + @property + def msg(self): + """ Network Service Record as a message""" + event_list = [] + idx = 1 + for entry in self._events: + event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents() + event.id = idx + idx += 1 + event.timestamp, event.event, event.description = entry + event_list.append(event) + return event_list + + +class NetworkServiceRecord(object): + """ Network service record """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + def __init__(self, dts, log, loop, nsm, nsm_plugin, config_agent_plugins, nsr_cfg_msg,sdn_account_name): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._nsr_cfg_msg = nsr_cfg_msg + self._nsm_plugin = nsm_plugin + self._config_agent_plugins = config_agent_plugins + self._sdn_account_name = 
sdn_account_name + + self._nsd = None + self._nsr_msg = None + self._nsr_regh = None + self._vlrs = [] + self._vnfrs = {} + self._vnfds = {} + self._vnffgrs = {} + self._param_pools = {} + self._create_time = int(time.time()) + self._op_status = NetworkServiceStatus(dts, log, loop) + self._mon_params = defaultdict(NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_VnfMonitoringParam) + self._config_status = NsrYang.ConfigStates.CONFIGURING + self._config_update = None + self._job_id = 0 + + # Initalise the state to init + # The NSR moves through the following transitions + # 1. INIT -> VLS_READY once all the VLs in the NSD are created + # 2. VLS_READY - VNFS_READY when all the VNFs in the NSD are created + # 3. VNFS_READY - READY when the NSR is published + + self.set_state(NetworkServiceRecordState.INIT) + + self.substitute_input_parameters = InputParameterSubstitution(self._log) + + @property + def nsm_plugin(self): + """ NSM Plugin """ + return self._nsm_plugin + + @property + def config_agent_plugins(self): + """ Config agent plugin list """ + return self._config_agent_plugins + + def set_state(self, state): + """ Set state for this NSR""" + self._log.debug("Setting state to %s", state) + self._op_status.set_state(state) + + @property + def id(self): + """ Get id for this NSR""" + return self._nsr_cfg_msg.id + + @property + def name(self): + """ Name of this network service record """ + return self._nsr_cfg_msg.name + + @property + def nsd_id(self): + """ Get nsd id for this NSR""" + return self._nsr_cfg_msg.nsd_ref + + @property + def cloud_account_name(self): + return self._nsr_cfg_msg.cloud_account + + @property + def state(self): + """State of this NetworkServiceRecord""" + return self._op_status.state + + def record_event(self, evt, evt_desc, state=None): + """ Record an event """ + self._op_status.record_event(evt, evt_desc) + if state is not None: + self.set_state(state) + + @property + def active(self): + """ Is this NSR active ?""" + return True if self._op_status.state == NetworkServiceRecordState.RUNNING else False + + @property + def vlrs(self): + """ VLRs associated with this NSR""" + return self._vlrs + + @property + def vnfrs(self): + """ VNFRs associated with this NSR""" + return self._vnfrs + + @property + def vnffgrs(self): + """ VNFFGRs associated with this NSR""" + return self._vnffgrs + + @property + def param_pools(self): + """ Parameter value pools associated with this NSR""" + return self._param_pools + + @property + def nsd(self): + """ NSD for this NSR """ + return self._nsd + + @property + def nsd_msg(self): + return self._nsd.msg + + @property + def job_id(self): + ''' Get a new job id for config primitive''' + self._job_id += 1 + return self._job_id + + @property + def config_status(self): + """ Config status for NSR """ + return self._config_status + + def __str__(self): + return "NSR(name={}, nsd_id={}, cloud_account={})".format( + self.name, self.nsd_id, self.cloud_account_name + ) + + @asyncio.coroutine + def invoke_config_agent_plugins(self, method, *args): + # Invoke the methods on all config agent plugins registered + for agent in self._config_agent_plugins: + try: + self._log.debug("Invoke %s on %s" % (method, agent)) + yield from agent.invoke(method, *args) + except Exception: + self._log.warning("Error invoking %s on %s : %s" % + (method, agent, sys.exc_info())) + pass + + @asyncio.coroutine + def instantiate_vls(self, xact): + """ + This function instantiates VLs for every VL in this Network Service + """ + self._log.debug("Instantiating %d VLs in NSD id 
%s", len(self._vlrs), + self.id) + for vlr in self._vlrs: + yield from self.nsm_plugin.instantiate_vl(self, vlr, xact) + yield from self.invoke_config_agent_plugins('notify_instantiate_vl', self.id, vlr, xact) + + @asyncio.coroutine + def create(self, xact): + """ Create this network service""" + yield from self.invoke_config_agent_plugins('notify_create_nsr', self.id, self._nsd) + # Create virtual links for all the external vnf + # connection points in this NS + yield from self.create_vls() + # Create VNFs in this network service + yield from self.create_vnfs(xact) + # Create VNFFG for network service + yield from self.create_vnffgs() + + self.create_param_pools() + + @asyncio.coroutine + def create_vnffgs(self): + """ This function creates VNFFGs for every VNFFG in the NSD + associated with this NSR""" + + for vnffgd in self.nsd.msg.vnffgd: + self._log.debug("Found vnffgd %s in nsr id %s", vnffgd, self.id) + vnffgr = VnffgRecord(self._dts, + self._log, + self._loop, + self._nsm._vnffgmgr, + self, + self.name, + vnffgd, + self._sdn_account_name + ) + self._vnffgrs[vnffgr.id] = vnffgr + + @asyncio.coroutine + def create_vls(self): + """ This function creates VLs for every VLD in the NSD + associated with this NSR""" + + for vld in self.nsd.msg.vld: + self._log.debug("Found vld %s in nsr id %s", vld, self.id) + vlr = VirtualLinkRecord(self._dts, + self._log, + self._loop, + self.name, + vld, + self.cloud_account_name + ) + self._vlrs.append(vlr) + yield from self.invoke_config_agent_plugins('notify_create_vls', self.id, vld, vlr) + + def is_vnfr_config_agent_managed(self, vnfr): + if vnfr.config_type == 'none': + return False + + for agent in self._config_agent_plugins: + try: + if agent.is_vnfr_managed(vnfr.id): + return True + except Exception as e: + self._log.debug("Check if VNFR {} is config agent managed: {}". 
+
+    @asyncio.coroutine
+    def create_vnfs(self, xact):
+        """
+        This function creates VNFs for every VNF in the NSD
+        associated with this NSR
+        """
+
+        self._log.debug("Creating %u VNFs associated with this NS id %s",
+                        len(self.nsd.msg.constituent_vnfd), self.id)
+
+        # Fetch the VNFD associated with this VNF
+        @asyncio.coroutine
+        def fetch_vnfd(vnfd_ref):
+            """ Fetch vnfd for the passed vnfd ref """
+            return (yield from self._nsm.get_vnfd(vnfd_ref, xact))
+
+        for const_vnfd in self.nsd.msg.constituent_vnfd:
+            vnfd = None
+            vnfd_id = const_vnfd.vnfd_id_ref
+            if vnfd_id in self._vnfds:
+                vnfd = self._vnfds[vnfd_id]
+            else:
+                vnfd = yield from fetch_vnfd(vnfd_id)
+                self._vnfds[vnfd_id] = vnfd
+            if vnfd is None:
+                self._log.debug("NS instantiation failed for NSR id %s: "
+                                "cannot find VNF descriptor with VNFD id %s",
+                                self.id, vnfd_id)
+                err = ("Failed NS instantiation - VNF descriptor not found: "
+                       "nsr id %s, vnfd id %s" % (self.id, vnfd_id))
+                raise NetworkServiceRecordError(err)
+
+            vnfr = VirtualNetworkFunctionRecord(self._dts,
+                                                self._log,
+                                                self._loop,
+                                                vnfd,
+                                                const_vnfd,
+                                                self.name,
+                                                self.cloud_account_name,
+                                                )
+            if vnfr.id in self._vnfrs:
+                err = "VNF with VNFR id %s already in vnf list" % (vnfr.id,)
+                raise NetworkServiceRecordError(err)
+
+            self._vnfrs[vnfr.id] = vnfr
+            self._nsm.vnfrs[vnfr.id] = vnfr
+
+            yield from self.invoke_config_agent_plugins('notify_create_vnfr',
+                                                        self.id,
+                                                        vnfr)
+            yield from vnfr.set_config_status(NsrYang.ConfigStates.INIT)
+
+            self._log.debug("Added VNFR %s to NSM VNFR list with id %s",
+                            vnfr.name,
+                            vnfr.id)
+
+    def create_param_pools(self):
+        for param_pool in self.nsd.msg.parameter_pool:
+            self._log.debug("Found parameter pool %s in nsr id %s", param_pool, self.id)
+
+            start_value = param_pool.range.start_value
+            end_value = param_pool.range.end_value
+            if end_value < start_value:
+                raise NetworkServiceRecordError(
+                    "Parameter pool {} has invalid range (start: {}, end: {})".format(
+                        param_pool.name, start_value, end_value
+                    )
+                )
+
+            self._param_pools[param_pool.name] = config_value_pool.ParameterValuePool(
+                self._log,
+                param_pool.name,
+                range(start_value, end_value)
+            )
+
+    @asyncio.coroutine
+    def fetch_vnfr(self, vnfr_path):
+        """ Fetch VNFR record """
+        vnfr = None
+        self._log.debug("Fetching VNFR with key %s while instantiating %s",
+                        vnfr_path, self.id)
+        res_iter = yield from self._dts.query_read(vnfr_path, rwdts.Flag.MERGE)
+
+        for ent in res_iter:
+            res = yield from ent
+            vnfr = res.result
+
+        return vnfr
+
+    @asyncio.coroutine
+    def instantiate_vnfs(self, xact):
+        """
+        This function instantiates VNFs for every VNF in this Network Service
+        """
+        self._log.debug("Instantiating %u VNFs in NS %s",
+                        len(self.nsd.msg.constituent_vnfd), self.id)
+        for vnf in self._vnfrs.values():
+            self._log.debug("Instantiating VNF: %s in NS %s", vnf, self.id)
+            yield from self.nsm_plugin.instantiate_vnf(self, vnf, xact)
+            vnfr = yield from self.fetch_vnfr(vnf.xpath)
+            if vnfr.operational_status == 'failed':
+                self._log.debug("Instantiation of VNF %s failed", vnf.id)
+                raise VnfInstantiationFailed("Failed to instantiate vnf %s", vnf.id)
+            yield from self.invoke_config_agent_plugins('notify_instantiate_vnf', self.id, vnf, xact)
+
+    @asyncio.coroutine
+    def instantiate_vnffgs(self, xact):
+        """
+        This function instantiates VNFFGs for every VNFFG in this Network Service
+        """
+        self._log.debug("Instantiating %u VNFFGs in NS %s",
+                        len(self.nsd.msg.vnffgd), self.id)
+        for vnffg in self._vnffgrs.values():
+            self._log.debug("Instantiating VNFFG: %s in NS %s", vnffg, self.id)
VNFFG: %s in NS %s", vnffg, self.id) + yield from vnffg.instantiate(xact) + #vnffgr = vnffg.fetch_vnffgr() + #if vnffgr.operational_status == 'failed': + if vnffg.state == VnffgRecordState.FAILED: + self._log.debug("Instatiation of VNFFG %s failed", vnffg.id) + raise VnffgInstantiationFailed("Failed to instantiate vnffg %s", vnffg.id) + + @asyncio.coroutine + def publish(self): + """ This function publishes this NSR """ + self._nsr_msg = self.create_msg() + self._log.debug("Publishing the NSR with xpath %s and nsr %s", + self.nsr_xpath, + self._nsr_msg) + with self._dts.transaction() as xact: + yield from self._nsm.nsr_handler.update(xact, self.nsr_xpath, self._nsr_msg) + self._log.info("Published the NSR with xpath %s and nsr %s", + self.nsr_xpath, + self._nsr_msg) + + @asyncio.coroutine + def unpublish(self, xact): + """ Unpublish this NSR object """ + self._log.debug("Unpublishing Network service id %s", self.id) + yield from self._nsm.nsr_handler.delete(xact, self.nsr_xpath) + + @property + def nsr_xpath(self): + """ Returns the xpath associated with this NSR """ + return( + "D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" + ).format(self.id) + + @staticmethod + def xpath_from_nsr(nsr): + """ Returns the xpath associated with this NSR op data""" + return (NetworkServiceRecord.XPATH + + "[nsr:ns-instance-config-ref = '{}']").format(nsr.id) + + @property + def nsd_xpath(self): + """ Return NSD config xpath.""" + return( + "C,/nsd:nsd-catalog" + + "/nsd:nsd[nsd:id = '{}']" + ).format(self.nsd_id) + + @asyncio.coroutine + def instantiate(self, xact): + """"Instantiates a NetworkServiceRecord. + + This function instantiates a Network service + which involves the following steps, + + * Fetch the NSD associated with NSR from DTS. + * Merge the NSD withe NSR config to begin instantiating the NS. + * Instantiate every VL in NSD by sending create VLR request to DTS. + * Instantiate every VNF in NSD by sending create VNF reuqest to DTS. 
+    @asyncio.coroutine
+    def instantiate(self, xact):
+        """Instantiate a NetworkServiceRecord.
+
+        This function instantiates a Network service,
+        which involves the following steps:
+
+        * Fetch the NSD associated with the NSR from DTS.
+        * Merge the NSD with the NSR config to begin instantiating the NS.
+        * Instantiate every VL in the NSD by sending a create VLR request to DTS.
+        * Instantiate every VNF in the NSD by sending a create VNF request to DTS.
+        * Publish the NSR details to DTS.
+
+        Arguments:
+            nsr: The NSR configuration request containing nsr-id and nsd_ref
+            xact: The transaction under which this instantiation needs to be
+                  completed
+
+        Raises:
+            NetworkServiceRecordError if the NSR creation fails
+
+        Returns:
+            No return value
+        """
+
+        self._log.debug("Instantiating NS - %s xact - %s", self, xact)
+
+        # Move the state to INIT
+        self.set_state(NetworkServiceRecordState.INIT)
+
+        event_descr = "Instantiation Request Received NSR Id:%s" % self.id
+        self.record_event("instantiating", event_descr)
+
+        # Find the NSD
+        self._nsd = self._nsm.get_nsd_ref(self.nsd_id)
+        event_descr = "Fetched NSD with descriptor id %s" % self.nsd_id
+        self.record_event("nsd-fetched", event_descr)
+
+        if self._nsd is None:
+            msg = "Failed to fetch NSD with nsd-id [%s] for nsr-id %s"
+            self._log.debug(msg, self.nsd_id, self.id)
+            raise NetworkServiceRecordError(self)
+
+        self._log.debug("Got nsd result %s", self._nsd)
+
+        # Substitute any input parameters
+        self.substitute_input_parameters(self._nsd._nsd, self._nsr_cfg_msg)
+
+        # Create the record
+        yield from self.create(xact)
+
+        # Publish the NSR to DTS
+        yield from self.publish()
+        yield from self.invoke_config_agent_plugins('notify_instantiate_ns', self.id)
+
+        @asyncio.coroutine
+        def do_instantiate():
+            """
+            Instantiate network service
+            """
+            self._log.debug("Instantiating VLs nsr id [%s] nsd id [%s]",
+                            self.id, self.nsd_id)
+
+            # instantiate the VLs
+            event_descr = ("Instantiating %s external VLs for NSR id %s" %
+                           (len(self.nsd.msg.vld), self.id))
+            self.record_event("begin-external-vls-instantiation", event_descr)
+
+            self.set_state(NetworkServiceRecordState.VL_INIT_PHASE)
+
+            try:
+                yield from self.instantiate_vls(xact)
+            except Exception:
+                self._log.exception("VL instantiation failed")
+                yield from self.instantiation_failed()
+                return
+
+            # Publish the NSR to DTS
+            yield from self.publish()
+
+            event_descr = ("Finished instantiating %s external VLs for NSR id %s" %
+                           (len(self.nsd.msg.vld), self.id))
+            self.record_event("end-external-vls-instantiation", event_descr)
+
+            # Move the state to VNF_INIT_PHASE
+            self.set_state(NetworkServiceRecordState.VNF_INIT_PHASE)
+
+            self._log.debug("Instantiating VNFs ...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+            # instantiate the VNFs
+            event_descr = ("Instantiating %s VNFS for NSR id %s" %
+                           (len(self.nsd.msg.constituent_vnfd), self.id))
+
+            self.record_event("begin-vnf-instantiation", event_descr)
+
+            try:
+                yield from self.instantiate_vnfs(xact)
+            except Exception:
+                self._log.exception("VNF instantiation failed")
+                yield from self.instantiation_failed()
+                return
+
+            self._log.debug(" Finished instantiating %d VNFs for NSR id %s",
+                            len(self.nsd.msg.constituent_vnfd), self.id)
+
+            event_descr = ("Finished instantiating %s VNFs for NSR id %s" %
+                           (len(self.nsd.msg.constituent_vnfd), self.id))
+            self.record_event("end-vnf-instantiation", event_descr)
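+            # Editorial sketch: do_instantiate() drives the NSR through its
+            # phases in a fixed order - VLs first, then VNFs, then the
+            # optional VNFFGs, then the plugin deploy step - republishing the
+            # NSR after each phase. A condensed model of that sequencing
+            # (hypothetical names, illustration only):
+            #
+            #     phases = [("vl", self.instantiate_vls),
+            #               ("vnf", self.instantiate_vnfs),
+            #               ("vnffg", self.instantiate_vnffgs)]
+            #     for name, phase in phases:
+            #         try:
+            #             yield from phase(xact)
+            #         except Exception:
+            #             yield from self.instantiation_failed()
+            #             return
+            #         yield from self.publish()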
nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + # instantiate the VNFs + event_descr = ("Instantiating %s VNFFGS for NSR id %s" % + (len(self.nsd.msg.vnffgd), self.id)) + + self.record_event("begin-vnffg-instantiation", event_descr) + + try: + yield from self.instantiate_vnffgs(xact) + except Exception: + self._log.exception("VNFFG instantiation failed") + yield from self.instantiation_failed() + return + + self._log.debug(" Finished instantiating %d VNFFGs for NSR id %s", + len(self.nsd.msg.vnffgd), self.id) + event_descr = ("Finished instantiating %s VNFFGDs for NSR id %s" % + (len(self.nsd.msg.vnffgd), self.id)) + self.record_event("end-vnffg-instantiation", event_descr) + + + # Give the plugin a chance to deploy the network service now that all + # virtual links and vnfs are instantiated + try: + yield from self.nsm_plugin.deploy(self._nsr_msg) + except Exception: + self._log.exception("NSM deploy failed") + yield from self.instantiation_failed() + return + + self._log.debug("Publishing NSR...... nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + # Publish the NSR to DTS + yield from self.publish() + + event_descr = ("NSR in running state for NSR id %s" % self.id) + self.record_event("ns-running", event_descr) + + self._log.debug("Published NSR...... nsr[%s], nsd[%s]", + self.id, self.nsd_id) + + self._loop.create_task(do_instantiate()) + + @asyncio.coroutine + def get_vnfr_config_status(self, vnfr): + if vnfr.is_configured(): + return NsrYang.ConfigStates.CONFIGURED + + if self.is_vnfr_config_agent_managed(vnfr): + # Check if config agent has finished configuring + status = NsrYang.ConfigStates.CONFIGURED + for agent in self._config_agent_plugins: + try: + rc = yield from agent.get_status(vnfr.id) + self._log.debug("VNFR {} config agent status is {}". + format(vnfr.name, rc)) + if rc == 'configuring': + status = NsrYang.ConfigStates.CONFIGURING + break + elif rc == 'failed': + status == NsrYang.ConfigStates.FAILED + break + + except Exception as e: + self._log.debug("Exception in is_vnfr_config_agent_managed for {}: {}". + format(vnfr.name, e)) + status = NsrYang.ConfigStates.CONFIGURING + yield from vnfr.set_config_status(status) + else: + # Rift Configuration Manager + status = vnfr._config_status + + if status in [NsrYang.ConfigStates.CONFIGURED, NsrYang.ConfigStates.FAILED]: + if self.is_vnfr_config_agent_managed(vnfr): + # Re-apply initial config + self._log.debug("VNF active. Apply initial config for vnfr {}".format(vnfr.name)) + yield from self.invoke_config_agent_plugins('apply_initial_config', + vnfr.id, vnfr) + + return status + + @asyncio.coroutine + def update_config_status(self): + ''' Check if all VNFRs are configured ''' + self._log.debug("Check all VNFRs are configured for ns %s" % self.name) + + if self._config_status in [NsrYang.ConfigStates.CONFIGURED, NsrYang.ConfigStates.FAILED]: + return + + # Handle reload scenarios + for vnfr in self._vnfrs.values(): + if self.is_vnfr_config_agent_managed(vnfr): + yield from vnfr.set_config_status(NsrYang.ConfigStates.CONFIGURING) + + while True: + config_status = NsrYang.ConfigStates.CONFIGURED + for vnfr in self._vnfrs.values(): + config_status = yield from self.get_vnfr_config_status(vnfr) + if config_status == NsrYang.ConfigStates.CONFIGURING: + break + self._config_status = config_status + if config_status in [NsrYang.ConfigStates.CONFIGURED, NsrYang.ConfigStates.FAILED]: + self._log.debug("Publish config status for NS {}: {}". 
+ format(self.name, config_status)) + yield from self.publish() + return + else: + yield from asyncio.sleep(10, loop=self._loop) + + + @asyncio.coroutine + def is_active(self): + """ This NS is active """ + self._log.debug("Network service %s is active ", self.id) + self.set_state(NetworkServiceRecordState.RUNNING) + + # Publish the NSR to DTS + yield from self.publish() + yield from self._nsm.so_obj.notify_nsr_up(self.id) + yield from self.invoke_config_agent_plugins('notify_nsr_active', self.id, self._vnfrs) + self._config_update = self._loop.create_task(self.update_config_status()) + self._log.debug("Created tasklet %s" % self._config_update) + + @asyncio.coroutine + def instantiation_failed(self): + """ The NS instantiation failed""" + self._log.debug("Network service %s instantiation failed", self.id) + self.set_state(NetworkServiceRecordState.FAILED) + + event_descr = "Instantiation of NS %s failed" % self.id + self.record_event("ns-failed", event_descr) + + # Publish the NSR to DTS + yield from self.publish() + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate a NetworkServiceRecord.""" + def terminate_vnfrs(xact): + """ Terminate VNFRS in this network service """ + self._log.debug("Terminating VNFs in network service %s", self.id) + for vnfr in self.vnfrs.values(): + yield from self.nsm_plugin.terminate_vnf(vnfr, xact) + yield from self.invoke_config_agent_plugins('notify_terminate_vnf', self.id, vnfr, xact) + + def terminate_vnffgrs(xact): + """ Terminate VNFFGRS in this network service """ + self._log.debug("Terminating VNFFGRs in network service %s", self.id) + for vnffgr in self.vnffgrs.values(): + yield from vnffgr.terminate(xact) + + + def terminate_vlrs(xact): + """ Terminate VLRs in this netork service """ + self._log.debug("Terminating VLs in network service %s", self.id) + for vlr in self.vlrs: + yield from self.nsm_plugin.terminate_vl(vlr, xact) + yield from self.invoke_config_agent_plugins('notify_terminate_vl', self.id, vlr, xact) + + self._log.debug("Terminating network service id %s", self.id) + + # Move the state to TERMINATE + self.set_state(NetworkServiceRecordState.TERMINATE) + event_descr = "Terminate being processed for NS Id:%s" % self.id + self.record_event("terminate", event_descr) + + # Move the state to VNF_TERMINATE_PHASE + self._log.debug("Terminating VNFFGs in NS ID: %s",self.id) + self.set_state(NetworkServiceRecordState.VNFFG_TERMINATE_PHASE) + event_descr = "Terminating VNFFGS in NS Id:%s" % self.id + self.record_event("terminating-vnffgss", event_descr) + yield from terminate_vnffgrs(xact) + + # Move the state to VNF_TERMINATE_PHASE + self.set_state(NetworkServiceRecordState.VNF_TERMINATE_PHASE) + event_descr = "Terminating VNFS in NS Id:%s" % self.id + self.record_event("terminating-vnfs", event_descr) + yield from terminate_vnfrs(xact) + + # Move the state to VL_TERMINATE_PHASE + self.set_state(NetworkServiceRecordState.VL_TERMINATE_PHASE) + event_descr = "Terminating VLs in NS Id:%s" % self.id + self.record_event("terminating-vls", event_descr) + yield from terminate_vlrs(xact) + + yield from self.nsm_plugin.terminate_ns(self, xact) + + # Move the state to TERMINATED + self.set_state(NetworkServiceRecordState.TERMINATED) + event_descr = "Terminated NS Id:%s" % self.id + self.record_event("terminated", event_descr) + self._loop.create_task(self._nsm.so_obj.notify_nsr_down(self.id)) + yield from self.invoke_config_agent_plugins('notify_terminate_ns', self.id) + self._log.debug("Checking tasklet %s" % (self._config_update)) + if 
self._config_update: + self._config_update.print_stack() + self._config_update.cancel() + self._config_update = None + + def enable(self): + """"Enable a NetworkServiceRecord.""" + pass + + def disable(self): + """"Disable a NetworkServiceRecord.""" + pass + + def map_config_status(self): + self._log.debug("Config status for ns {} is {}". + format(self.name, self._config_status)) + if self._config_status == NsrYang.ConfigStates.CONFIGURING: + return 'configuring' + if self._config_status == NsrYang.ConfigStates.FAILED: + return 'failed' + return 'configured' + + def create_msg(self): + """ The network serice record as a message """ + nsr_dict = {"ns_instance_config_ref": self.id} + nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict) + nsr.cloud_account = self.cloud_account_name + nsr.name_ref = self.name + nsr.nsd_name_ref = self.nsd.name + nsr.operational_events = self._op_status.msg + nsr.operational_status = self._op_status.yang_str() + nsr.config_status = self.map_config_status() + nsr.create_time = self._create_time + for vnfr_id in self.vnfrs: + nsr.constituent_vnfr_ref.append(self.vnfrs[vnfr_id].msg) + for vlr in self.vlrs: + nsr.vlr.append(vlr.create_nsr_vlr_msg(self.vnfrs.values())) + for vnffgr in self.vnffgrs.values(): + nsr.vnffgr.append(vnffgr.fetch_vnffgr()) + return nsr + + def all_vnfs_active(self): + """ Are all VNFS in this NS active? """ + for _, vnfr in self.vnfrs.items(): + if vnfr.active is not True: + return False + return True + + @asyncio.coroutine + def update_nsr_state(self): + """ Re-evaluate this NS's state """ + curr_state = self._op_status.state + new_state = NetworkServiceRecordState.RUNNING + self._log.info("Received update_nsr_state for nsr: %s, curr-state: %s",self.id,curr_state) + #Check all the VNFRs are present + for _, vnfr in self.vnfrs.items(): + if vnfr.state == VnfRecordState.ACTIVE: + pass + elif vnfr.state == VnfRecordState.FAILED: + event_descr = "Instantiation of VNF %s failed" % vnfr.id + self.record_event("vnf-failed", event_descr) + new_state = NetworkServiceRecordState.FAILED + break + else: + new_state = curr_state + + # If new state is RUNNIG; check VNFFGRs are also active + if new_state == NetworkServiceRecordState.RUNNING: + for _, vnffgr in self.vnffgrs.items(): + self._log.info("Checking vnffgr state for nsr %s is: %s",self.id,vnffgr.state) + if vnffgr.state == VnffgRecordState.ACTIVE: + pass + elif vnffgr.state == VnffgRecordState.FAILED: + event_descr = "Instantiation of VNFFGR %s failed" % vnffgr.id + self.record_event("vnffg-failed", event_descr) + new_state = NetworkServiceRecordState.FAILED + break + else: + self._log.info("VNFFGR %s in NSR %s is still not active; current state is: %s", + vnffgr.id, self.state, vnffgr.state) + new_state = curr_state + + if new_state != curr_state: + self._log.debug("Changing state of Network service %s from %s to %s", + self.id, curr_state, new_state) + if new_state == NetworkServiceRecordState.RUNNING: + yield from self.is_active() + elif new_state == NetworkServiceRecordState.FAILED: + yield from self.instantiation_failed() + + @asyncio.coroutine + def get_monitoring_param(self): + """ Get monitoring params for this network service """ + vnfrs = list(self.vnfrs.values()) + monp_list = [] + for vnfr in vnfrs: + self._mon_params[vnfr.id].vnfr_id_ref = vnfr.id + self._mon_params[vnfr.id].monitoring_param = yield from vnfr.get_monitoring_param() + monp_list.append(self._mon_params[vnfr.id]) + + return monp_list + + +class InputParameterSubstitution(object): + """ + This class is 
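responsible for substituting input parameters into an NSD.
+    """
+
+    # Editorial sketch: input parameters arrive as (xpath, value) pairs on
+    # the NSR config and are written onto the NSD via xpath.setxattr(), but
+    # only when the NSD whitelists that xpath under input-parameter-xpath.
+    # The core of the check, condensed (illustration only):
+    #
+    #     allowed = {p.xpath for p in nsd.input_parameter_xpath}
+    #     for param in nsr_config.input_parameter:
+    #         if param.xpath in allowed:
+    #             xpath.setxattr(nsd, param.xpath, param.value)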
+
+    def __init__(self, log):
+        """Create an instance of InputParameterSubstitution
+
+        Arguments:
+            log - a logger for this object to use
+
+        """
+        self.log = log
+
+    def __call__(self, nsd, nsr_config):
+        """Substitute input parameters from the NSR config into the NSD
+
+        This call modifies the provided NSD with the input parameters that are
+        contained in the NSR config.
+
+        Arguments:
+            nsd - a GI NSD object
+            nsr_config - a GI NSR config object
+
+        """
+        if nsd is None or nsr_config is None:
+            return
+
+        # Create a lookup of the xpath elements that this descriptor allows
+        # to be modified
+        optional_input_parameters = set()
+        for input_parameter in nsd.input_parameter_xpath:
+            optional_input_parameters.add(input_parameter.xpath)
+
+        # Apply the input parameters to the descriptor
+        if nsr_config.input_parameter:
+            for param in nsr_config.input_parameter:
+                if param.xpath not in optional_input_parameters:
+                    msg = "Tried to set an invalid input parameter ({})"
+                    self.log.error(msg.format(param.xpath))
+                    continue
+
+                self.log.debug(
+                    "input-parameter:{} = {}".format(
+                        param.xpath,
+                        param.value,
+                    )
+                )
+
+                try:
+                    xpath.setxattr(nsd, param.xpath, param.value)
+
+                except Exception as e:
+                    self.log.exception(e)
+
+
+class NetworkServiceDescriptor(object):
+    """
+    Network service descriptor class
+    """
+
+    def __init__(self, dts, log, loop, nsd):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._nsd = nsd
+        self._ref_count = 0
+
+    @property
+    def id(self):
+        """ Returns nsd id """
+        return self._nsd.id
+
+    @property
+    def name(self):
+        """ Returns name of nsd """
+        return self._nsd.name
+
+    @property
+    def ref_count(self):
+        """ Returns reference count"""
+        return self._ref_count
+
+    def in_use(self):
+        """ Returns whether nsd is in use or not """
+        return True if self.ref_count > 0 else False
+
+    def ref(self):
+        """ Take a reference on this object """
+        self._ref_count += 1
+
+    def unref(self):
+        """ Release reference on this object """
+        if self.ref_count < 1:
+            msg = ("Unref on a NSD object - nsd id %s, ref_count = %s" %
+                   (self.id, self.ref_count))
+            self._log.critical(msg)
+            raise NetworkServiceDescriptorError(msg)
+        self._ref_count -= 1
+
+    @property
+    def msg(self):
+        """ Return the message associated with this NetworkServiceDescriptor"""
+        return self._nsd
+
+    @staticmethod
+    def path_for_id(nsd_id):
+        """ Return path for the passed nsd_id"""
+        return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']".format(nsd_id)
+
+    def path(self):
+        """ Return the path associated with this NetworkServiceDescriptor"""
+        return NetworkServiceDescriptor.path_for_id(self.id)
+
+    def update(self, nsd):
+        """ Update the NSD descriptor """
+        if self.in_use():
+            self._log.error("Cannot update descriptor %s in use", self.id)
+            raise NetworkServiceDescriptorError("Cannot update descriptor in use %s" % self.id)
+        self._nsd = nsd
+
+
+class NsdDtsHandler(object):
+    """ The network service descriptor DTS handler """
+    XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsd create/update/delete/read requests from dts """
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the configuration"""
+            self._log.debug("Got nsd apply cfg (xact:%s) 
(action:%s)", + xact, action) + # Create/Update an NSD record + for cfg in self._regh.get_xact_elements(xact): + # Only interested in those NSD cfgs whose ID was received in prepare callback + if cfg.id in acg.scratch['nsds']: + self._nsm.update_nsd(cfg) + + del acg._scratch['nsds'][:] + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for NSD config """ + + self._log.info("Got nsd prepare - config received nsd id %s, msg %s", + msg.id, msg) + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + # Delete an NSD record + self._log.debug("Deleting NSD with id %s", msg.id) + if self._nsm.nsd_in_use(msg.id): + self._log.debug("Cannot delete NSD in use - %s", msg.id) + err = "Cannot delete an NSD in use - %s" % msg.id + raise NetworkServiceDescriptorRefCountExists(err) + self._nsm.delete_nsd(msg.id) + else: + # Handle actual adds/updates in apply_callback, + # just check if NSD in use in prepare_callback + if self._nsm.nsd_in_use(msg.id): + self._log.debug("Cannot modify an NSD in use - %s", msg.id) + err = "Cannot modify an NSD in use - %s" % msg.id + raise NetworkServiceDescriptorRefCountExists(err) + + # Add this NSD to scratch to create/update in apply callback + acg._scratch['nsds'].append(msg.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug( + "Registering for NSD config using xpath: %s", + NsdDtsHandler.XPATH, + ) + + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + # Need a list in scratch to store NSDs to create/update later + acg._scratch['nsds'] = list() + self._regh = acg.register( + xpath=NsdDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class VnfdDtsHandler(object): + """ DTS handler for VNFD config changes """ + XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._regh = None + + @property + def regh(self): + """ DTS registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for VNFD configuration""" + + @asyncio.coroutine + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got NSM VNFD apply (xact: %s) (action: %s)(scr: %s)", + xact, action, scratch) + + # Create/Update a VNFD record + for cfg in self._regh.get_xact_elements(xact): + # Only interested in those VNFD cfgs whose ID was received in prepare callback + if cfg.id in acg.scratch['vnfds']: + self._nsm.update_vnfd(cfg) + + for cfg in self._regh.elements: + if cfg.id in acg.scratch['deleted_vnfds']: + yield from self._nsm.delete_vnfd(cfg.id) + + del acg._scratch['vnfds'][:] + del acg._scratch['deleted_vnfds'][:] + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ on prepare callback """ + self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)", + ks_path.to_xpath(RwNsmYang.get_schema()), xact_info.query_action, msg) + # RIFT-10161 + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + # Handle deletes in prepare_callback, but adds/updates in apply_callback + if fref.is_field_deleted(): + self._log.debug("Adding msg to deleted field") + acg._scratch['deleted_vnfds'].append(msg.id) + else: + # Add this VNFD to 
scratch to create/update in apply callback + acg._scratch['vnfds'].append(msg.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug( + "Registering for VNFD config using xpath: %s", + VnfdDtsHandler.XPATH, + ) + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + # Need a list in scratch to store VNFDs to create/update later + acg._scratch['vnfds'] = list() + acg._scratch['deleted_vnfds'] = list() + self._regh = acg.register( + xpath=VnfdDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class NsrDtsHandler(object): + """ The network service DTS handler """ + XPATH = "C,/nsr:ns-instance-config/nsr:nsr" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for Nsr create/update/delete/read requests from dts """ + + def on_init(acg, xact, scratch): + """ On init callback """ + + def on_deinit(acg, xact, scratch): + """ On deinit callback """ + pass + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)", + xact, action, scratch) + + def handle_create_nsr(): + """ Handle create nsr requests """ + # Do some validations + if not msg.has_field("nsd_ref"): + err = "NSD reference not provided" + self._log.error(err) + raise NetworkServiceRecordError(err) + + self._log.info("Creating NetworkServiceRecord %s from nsd_id %s", + msg.id, msg.nsd_ref) + + nsr = self.nsm.create_nsr(msg) + return nsr + + @asyncio.coroutine + def begin_instantiation(nsr, xact): + """ Begin instantiation """ + self._log.info("Beginning NS instantiation: %s", nsr.id) + yield from self._nsm.instantiate_ns(nsr.id, xact) + + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. 
Skipping apply config") + xact = None + + for msg in self.regh.get_xact_elements(xact): + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + self._log.error("Ignoring delete in apply - msg:%s", msg) + continue + + if msg.id not in self._nsm.nsrs: + nsr = handle_create_nsr() + self._loop.create_task(begin_instantiation(nsr, xact)) + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare calllback from DTS for NSR """ + + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + self._log.debug( + "Got Nsr prepare callback (xact:%s info: %s, %s:%s)", + xact, xact_info, xpath, msg) + + @asyncio.coroutine + def delete_instantiation(ns_id): + """ Delete instantiation """ + with self._dts.transaction() as xact: + yield from self._nsm.terminate_ns(ns_id, xact) + + def handle_delete_nsr(): + """ Handle delete NSR requests """ + self._log.info("Delete req for NSR Id: %s received", msg.id) + # Terminate the NSR instance + nsr = self._nsm.get_ns_by_nsr_id(msg.id) + + nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD) + event_descr = "Terminate rcvd for NS Id:%s" % msg.id + nsr.record_event("terminate-rcvd", event_descr) + + self._loop.create_task(delete_instantiation(msg.id)) + + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + self._log.info("Delete NSR received in prepare to terminate NS:%s", msg.id) + try: + handle_delete_nsr() + except Exception: + self._log.exception("Failed to terminate NS:%s", msg.id) + + else: + # Ensure the Cloud account has been specified if this is an NSR create + if msg.id not in self._nsm.nsrs: + if not msg.has_field("cloud_account"): + raise NsrInstantiationFailed("Cloud account not specified in NSR") + + acg.handle.prepare_complete_ok(xact_info.handle) + + self._log.debug("Registering for NSR config using xpath: %s", + NsrDtsHandler.XPATH,) + + acg_hdl = rift.tasklets.AppConfGroup.Handler( + on_init=on_init, + on_deinit=on_deinit, + on_apply=on_apply, + ) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + self._regh = acg.register(xpath=NsrDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class NsrOpDataDtsHandler(object): + """ The network service op data DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + self._regh = None + + @property + def regh(self): + """ Return the registration handle""" + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for Nsr op data publisher registration""" + self._log.debug("Registering Nsr op data path %s as publisher", + NsrOpDataDtsHandler.XPATH) + + hdl = rift.tasklets.DTS.RegistrationHandler() + handlers = rift.tasklets.Group.Handler() + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ| rwdts.Flag.FILE_DATASTORE) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create an NS record in DTS with the path and message + """ + self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Created NSR xact = %s, 
%s:%s", xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg, flags=rwdts.Flag.REPLACE): + """ + Update an NS record in DTS with the path and message + """ + self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh) + self.regh.update_element(path, msg, flags) + self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Update an NS record in DTS with the path and message + """ + self._log.debug("Deleting NSR xact:%s, path:%s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted NSR xact:%s, path:%s", xact, path) + + +class VnfrDtsHandler(object): + """ The virtual network service DTS handler """ + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for vnfr create/update/delete/ advises from dts """ + + def on_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vnfr commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + self._log.debug( + "Got vnfr on_prepare cb (xact_info: %s, action: %s): %s:%s", + xact_info, action, ks_path, msg + ) + + if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE: + yield from self._nsm.update_vnfr(msg) + elif action == rwdts.QueryAction.DELETE: + schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + self._log.debug("Deleting VNFR with id %s", path_entry.key00.id) + self._nsm.delete_vnfr(path_entry.key00.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath) + + self._log.debug("Registering for VNFR using xpath: %s", + VnfrDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit, + on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=VnfrDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.SUBSCRIBER),) + + +class NsMonitorDtsHandler(object): + """ The Network service Monitor DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr/nsr:vnf-monitoring-param" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @staticmethod + def vnf_mon_param_xpath(nsr_id, vnfr_id): + """ VNF monitoring xpath """ + return ("D,/nsr:ns-instance-opdata" + + "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" + + "/nsr:vnf-monitoring-param" + + "[nsr:vnfr-id-ref = '{}']").format(nsr_id, vnfr_id) + + @asyncio.coroutine + def register(self): + """ Register for NS monitoring read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + if action == rwdts.QueryAction.READ: + schema = 
RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + try: + monp_list = yield from self._nsm.get_monitoring_param( + path_entry.key00.ns_instance_config_ref) + for nsr_id, vnf_monp_list in monp_list: + for monp in vnf_monp_list: + vnf_xpath = NsMonitorDtsHandler.vnf_mon_param_xpath( + nsr_id, + monp.vnfr_id_ref + ) + xact_info.respond_xpath(rwdts.XactRspCode.MORE, + vnf_xpath, + monp) + except Exception: + self._log.exception("##### Caught exception while collection mon params #####") + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=NsMonitorDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER, + ) + + +class NsdRefCountDtsHandler(object): + """ The NSD Ref Count DTS handler """ + XPATH = "D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + @asyncio.coroutine + def register(self): + """ Register for NSD ref count read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwNsrYang.get_schema()) + + if action == rwdts.QueryAction.READ: + schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount.schema() + path_entry = schema.keyspec_to_entry(ks_path) + nsd_list = yield from self._nsm.get_nsd_refcount(path_entry.key00.nsd_id_ref) + for xpath, msg in nsd_list: + xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE, + xpath=xpath, + msg=msg) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + raise NetworkServiceRecordError("Not supported operation %s" % action) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=NsdRefCountDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER,) + + +class NsManagerRPCHandler(object): + """ The Network service Monitor DTS handler """ + EXEC_NS_CONF_XPATH = "I,/nsr:exec-ns-config-primitive" + EXEC_NS_CONF_O_XPATH = "O,/nsr:exec-ns-config-primitive" + + GET_NS_CONF_XPATH = "I,/nsr:get-ns-config-primitive-values" + GET_NS_CONF_O_XPATH = "O,/nsr:get-ns-config-primitive-values" + + def __init__(self, dts, log, loop, nsm): + self._dts = dts + self._log = log + self._loop = loop + self._nsm = nsm + + self._ns_regh = None + self._vnf_regh = None + self._get_ns_conf_regh = None + + self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop, nsm) + + @property + def reghs(self): + """ Return registration handles """ + return (self._ns_regh, self._vnf_regh, self._get_ns_conf_regh) + + @property + def nsm(self): + """ Return the NS manager instance """ + return self._nsm + + def prepare_meta(self, rpc_ip): + + try: + nsr_id = rpc_ip.nsr_id_ref + nsr = self._nsm.nsrs[nsr_id] + vnfrs = {} + for vnf in rpc_ip.vnf_list: + vnfr_id = vnf.vnfr_id_ref + vnfrs[vnfr_id] = self._nsm.vnfrs[vnfr_id] + + return nsr, vnfrs + except KeyError as e: + raise ValueError("Record not found", str(e)) + + def _get_ns_cfg_primitive(self, nsr_id, ns_cfg_name): + try: + nsr = 
self._nsm.nsrs[nsr_id] + except KeyError: + raise ValueError("NSR id %s not found" % nsr_id) + + nsd_msg = self._nsm.get_nsd(nsr.nsd_id).msg + + def get_nsd_cfg_prim(name): + for ns_cfg_prim in nsd_msg.config_primitive: + if ns_cfg_prim.name == name: + return ns_cfg_prim + + raise ValueError("Could not find ns_cfg_prim %s in nsr id %s" % (name, nsr_id)) + + ns_cfg_prim_msg = get_nsd_cfg_prim(ns_cfg_name) + ret_cfg_prim_msg = ns_cfg_prim_msg.deep_copy() + + return ret_cfg_prim_msg + + def _get_vnf_primitive(self, nsr_id, vnf_index, primitive_name): + try: + nsr = self._nsm.nsrs[nsr_id] + except KeyError: + raise ValueError("NSR id %s not found" % nsr_id) + + nsd_msg = self._nsm.get_nsd(nsr.nsd_id).msg + for vnf in nsd_msg.constituent_vnfd: + if vnf.member_vnf_index != vnf_index: + continue + + for primitive in vnf.vnf_configuration.config_primitive: + if primitive.name == primitive_name: + return primitive + + raise ValueError("Could not find vnf index %s primitive %s in nsr id %s" % + (vnf_index, primitive_name, nsr_id)) + + @asyncio.coroutine + def register(self): + """ Register for NS monitoring read from dts """ + yield from self.job_manager.register() + + @asyncio.coroutine + def on_ns_config_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts exec-ns-config-primitive""" + assert action == rwdts.QueryAction.RPC + rpc_ip = msg + rpc_op = NsrYang.YangOutput_Nsr_ExecNsConfigPrimitive() + + ns_cfg_prim_name = rpc_ip.name + nsr_id = rpc_ip.nsr_id_ref + nsr = self._nsm.nsrs[nsr_id] + + nsd_cfg_prim_msg = self._get_ns_cfg_primitive(nsr_id, ns_cfg_prim_name) + + def find_nsd_vnf_prim_param_pool(vnf_index, vnf_prim_name, param_name): + for vnf_prim_group in nsd_cfg_prim_msg.vnf_primitive_group: + if vnf_prim_group.member_vnf_index_ref != vnf_index: + continue + + for vnf_prim in vnf_prim_group.primitive: + if vnf_prim.name != vnf_prim_name: + continue + + for pool_param in vnf_prim.pool_parameters: + if pool_param.name != param_name: + continue + + try: + nsr_param_pool = nsr.param_pools[pool_param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % vnf_prim.parameter_pool) + + self._log.debug("Found parameter pool %s for vnf index(%s), vnf_prim_name(%s), param_name(%s)", + nsr_param_pool, vnf_index, vnf_prim_name, param_name) + return nsr_param_pool + + self._log.debug("Could not find parameter pool for vnf index(%s), vnf_prim_name(%s), param_name(%s)", + vnf_index, vnf_prim_name, param_name) + return None + + rpc_op.nsr_id_ref = nsr_id + rpc_op.name = ns_cfg_prim_name + + nsr, vnfrs = self.prepare_meta(rpc_ip) + rpc_op.job_id = nsr.job_id + + # Give preference to user defined script. 
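+            # Editorial sketch: when the NSD names a user_defined_script,
+            # the whole primitive is handed to every registered config-agent
+            # plugin as one tracked job; otherwise each vnf-primitive is
+            # dispatched individually and any pool-backed parameter values
+            # are marked used. Roughly (hypothetical names, illustration
+            # only):
+            #
+            #     if nsd_cfg_prim_msg.has_field("user_defined_script"):
+            #         tasks = [agent.apply_config(rpc_ip, nsr, vnfrs)
+            #                  for agent in config_agents]
+            #     else:
+            #         for vnf in rpc_ip.vnf_list:
+            #             dispatch_vnf_primitives(vnf)   # per-VNF path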
+ if nsd_cfg_prim_msg.has_field("user_defined_script"): + rpc_ip.user_defined_script = nsd_cfg_prim_msg.user_defined_script + + + tasks = [] + for config_plugin in self.nsm.config_agent_plugins: + task = yield from config_plugin.apply_config( + rpc_ip, + nsr, + vnfrs) + tasks.append(task) + + self.job_manager.add_job(rpc_op, tasks) + else: + for vnf in rpc_ip.vnf_list: + vnf_op = rpc_op.vnf_out_list.add() + vnf_member_idx = vnf.member_vnf_index_ref + vnfr_id = vnf.vnfr_id_ref + vnf_op.vnfr_id_ref = vnfr_id + vnf_op.member_vnf_index_ref = vnf_member_idx + for primitive in vnf.vnf_primitive: + op_primitive = vnf_op.vnf_out_primitive.add() + op_primitive.name = primitive.name + op_primitive.execution_id = '' + op_primitive.execution_status = 'completed' + self._log.debug("%s:%s Got primitive %s:%s", + nsr_id, vnf.member_vnf_index_ref, primitive.name, primitive.parameter) + + nsd_vnf_primitive = self._get_vnf_primitive( + nsr_id, + vnf_member_idx, + primitive.name + ) + for param in nsd_vnf_primitive.parameter: + if not param.has_field("parameter_pool"): + continue + + try: + nsr_param_pool = nsr.param_pools[param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % param.parameter_pool) + nsr_param_pool.add_used_value(param.value) + + for config_plugin in self.nsm.config_agent_plugins: + yield from config_plugin.vnf_config_primitive(nsr_id, + vnfr_id, + primitive, + op_primitive) + + self.job_manager.add_job(rpc_op) + + # Get NSD + # Find Config Primitive + # For each vnf-primitive with parameter pool + # Find parameter pool + # Add used value to the pool + self._log.debug("RPC output: {}".format(rpc_op)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + NsManagerRPCHandler.EXEC_NS_CONF_O_XPATH, + rpc_op) + + @asyncio.coroutine + def on_get_ns_config_values_prepare(xact_info, action, ks_path, msg): + assert action == rwdts.QueryAction.RPC + nsr_id = msg.nsr_id_ref + nsr = self._nsm.nsrs[nsr_id] + cfg_prim_name = msg.name + + rpc_op = NsrYang.YangOutput_Nsr_GetNsConfigPrimitiveValues() + + ns_cfg_prim_msg = self._get_ns_cfg_primitive(nsr_id, cfg_prim_name) + + # Get pool values for NS-level parameters + for ns_param in ns_cfg_prim_msg.parameter: + if not ns_param.has_field("parameter_pool"): + continue + + try: + nsr_param_pool = nsr.param_pools[ns_param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % ns_param.parameter_pool) + + new_ns_param = rpc_op.ns_parameter.add() + new_ns_param.name = ns_param.name + new_ns_param.value = str(nsr_param_pool.get_next_unused_value()) + + + # Get pool values for NS-level parameters + for vnf_prim_group in ns_cfg_prim_msg.vnf_primitive_group: + rsp_prim_group = rpc_op.vnf_primitive_group.add() + rsp_prim_group.member_vnf_index_ref = vnf_prim_group.member_vnf_index_ref + if vnf_prim_group.has_field("vnfd_id_ref"): + rsp_prim_group.vnfd_id_ref = vnf_prim_group.vnfd_id_ref + + for index, vnf_prim in enumerate(vnf_prim_group.primitive): + rsp_prim = rsp_prim_group.primitive.add() + rsp_prim.name = vnf_prim.name + rsp_prim.index = index + vnf_primitive = self._get_vnf_primitive( + nsr_id, + vnf_prim_group.member_vnf_index_ref, + vnf_prim.name + ) + for param in vnf_primitive.parameter: + if not param.has_field("parameter_pool"): + continue + + try: + nsr_param_pool = nsr.param_pools[param.parameter_pool] + except KeyError: + raise ValueError("Parameter pool %s does not exist in nsr" % vnf_prim.parameter_pool) + + vnf_param = rsp_prim.parameter.add() + vnf_param.name = 
param.name + vnf_param.value = str(nsr_param_pool.get_next_unused_value()) + + self._log.debug("RPC output: {}".format(rpc_op)) + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + NsManagerRPCHandler.GET_NS_CONF_O_XPATH, rpc_op) + + hdl_ns = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_ns_config_prepare,) + hdl_ns_get = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_get_ns_config_values_prepare,) + + with self._dts.group_create() as group: + self._ns_regh = group.register(xpath=NsManagerRPCHandler.EXEC_NS_CONF_XPATH, + handler=hdl_ns, + flags=rwdts.Flag.PUBLISHER, + ) + self._get_ns_conf_regh = group.register(xpath=NsManagerRPCHandler.GET_NS_CONF_XPATH, + handler=hdl_ns_get, + flags=rwdts.Flag.PUBLISHER, + ) + + +class NsManager(object): + """ The Network Service Manager class""" + def __init__(self, dts, log, loop, + nsr_handler, vnfr_handler, vlr_handler, cloud_plugin_selector,vnffgmgr): + self._dts = dts + self._log = log + self._loop = loop + self._nsr_handler = nsr_handler + self._vnfr_pub_handler = vnfr_handler + self._vlr_pub_handler = vlr_handler + self._vnffgmgr = vnffgmgr + + self._cloud_plugin_selector = cloud_plugin_selector + + self._nsrs = {} + self._nsds = {} + self._vnfds = {} + self._vnfrs = {} + + self._so_obj = conman.ROServiceOrchConfig(log, loop, dts, self) + + self._nsd_dts_handler = NsdDtsHandler(dts, log, loop, self) + self._vnfd_dts_handler = VnfdDtsHandler(dts, log, loop, self) + + self._dts_handlers = [self._nsd_dts_handler, + VnfrDtsHandler(dts, log, loop, self), + NsMonitorDtsHandler(dts, log, loop, self), + NsdRefCountDtsHandler(dts, log, loop, self), + NsrDtsHandler(dts, log, loop, self), + self._vnfd_dts_handler, + NsManagerRPCHandler(dts, log, loop, self), + self._so_obj] + + self._config_agent_plugins = [] + + @property + def log(self): + """ Log handle """ + return self._log + + @property + def loop(self): + """ Loop """ + return self._loop + + @property + def dts(self): + """ DTS handle """ + return self._dts + + @property + def nsr_handler(self): + """" NSR handler """ + return self._nsr_handler + + @property + def so_obj(self): + """" So Obj handler """ + return self._so_obj + + @property + def nsrs(self): + """ NSRs in this NSM""" + return self._nsrs + + @property + def nsds(self): + """ NSDs in this NSM""" + return self._nsds + + @property + def vnfds(self): + """ VNFDs in this NSM""" + return self._vnfds + + @property + def vnfrs(self): + """ VNFRs in this NSM""" + return self._vnfrs + + @property + def nsr_pub_handler(self): + """ NSR publication handler """ + return self._nsr_handler + + @property + def vnfr_pub_handler(self): + """ VNFR publication handler """ + return self._vnfr_pub_handler + + @property + def vlr_pub_handler(self): + """ VLR publication handler """ + return self._vlr_pub_handler + + @property + def config_agent_plugins(self): + """ Config agent plugins""" + return self._config_agent_plugins + + def set_config_agent_plugin(self, plugin_instance): + """ Sets the plugin to use for the NSM config agents""" + self._log.debug("Set NSM config agent plugin instance: %s", plugin_instance) + if plugin_instance not in self._config_agent_plugins: + self._config_agent_plugins.append(plugin_instance) + + @asyncio.coroutine + def register(self): + """ Register all static DTS handlers """ + for dts_handle in self._dts_handlers: + yield from dts_handle.register() + + def get_ns_by_nsr_id(self, nsr_id): + """ get NSR by nsr id """ + if nsr_id not in self._nsrs: + raise NetworkServiceRecordError("NSR id %s not found" % nsr_id) + + 
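+        return self._nsrs[nsr_id]
+
+    # Editorial sketch: create_nsr() below resolves the NSR's cloud account
+    # to an NSM plugin instance plus an SDN account name before constructing
+    # the record, and rejects a duplicate NSR id up front. Condensed flow
+    # (illustration only):
+    #
+    #     plugin = selector.get_cloud_account_plugin_instance(account)
+    #     sdn_name = selector.get_cloud_account_sdn_name(account)
+    #     nsr = NetworkServiceRecord(..., plugin, agents, nsr_msg, sdn_name)
+    #     plugin.create_nsr(nsr_msg, nsd_msg)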
+
+    def create_nsr(self, nsr_msg):
+        """ Create an NSR instance """
+        if nsr_msg.id in self._nsrs:
+            msg = "NSR id %s already exists" % nsr_msg.id
+            self._log.error(msg)
+            raise NetworkServiceRecordError(msg)
+
+        self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s",
+                       nsr_msg.id,
+                       nsr_msg.nsd_ref)
+
+        nsm_plugin = self._cloud_plugin_selector.get_cloud_account_plugin_instance(
+                nsr_msg.cloud_account
+                )
+        sdn_account_name = self._cloud_plugin_selector.get_cloud_account_sdn_name(nsr_msg.cloud_account)
+
+        nsr = NetworkServiceRecord(self._dts,
+                                   self._log,
+                                   self._loop,
+                                   self,
+                                   nsm_plugin,
+                                   self._config_agent_plugins,
+                                   nsr_msg,
+                                   sdn_account_name
+                                   )
+        self._nsrs[nsr_msg.id] = nsr
+        nsm_plugin.create_nsr(nsr_msg, self.get_nsd(nsr_msg.nsd_ref).msg)
+
+        return nsr
+
+    def delete_nsr(self, nsr_id):
+        """
+        Delete NSR with the passed nsr id
+        """
+        del self._nsrs[nsr_id]
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr_id, xact):
+        """ Instantiate an NS instance """
+        self._log.debug("Instantiating Network service id %s", nsr_id)
+        if nsr_id not in self._nsrs:
+            err = "NSR id %s not found " % nsr_id
+            self._log.error(err)
+            raise NetworkServiceRecordError(err)
+
+        nsr = self._nsrs[nsr_id]
+        yield from nsr.nsm_plugin.instantiate_ns(nsr, xact)
+
+    @asyncio.coroutine
+    def update_vnfr(self, vnfr):
+        """ Create/Update a VNFR """
+        vnfr_state = self._vnfrs[vnfr.id].state
+        self._log.debug("Updating VNFR with state %s: vnfr %s", vnfr_state, vnfr)
+        yield from self._vnfrs[vnfr.id].update(vnfr)
+        nsr = self.find_nsr_for_vnfr(vnfr.id)
+        yield from nsr.update_nsr_state()
+        return self._vnfrs[vnfr.id]
+
+    def find_nsr_for_vnfr(self, vnfr_id):
+        """ Find the NSR which has the passed vnfr id """
+        for nsr in list(self.nsrs.values()):
+            for vnfr in list(nsr.vnfrs.values()):
+                if vnfr.id == vnfr_id:
+                    return nsr
+        return None
+
+    def delete_vnfr(self, vnfr_id):
+        """ Delete VNFR with the passed id """
+        del self._vnfrs[vnfr_id]
+
+    def get_nsd_ref(self, nsd_id):
+        """ Get network service descriptor for the passed nsd_id
+        and take a reference on it """
+        nsd = self.get_nsd(nsd_id)
+        nsd.ref()
+        return nsd
+
+    @asyncio.coroutine
+    def get_nsr_config(self, nsd_id):
+        xpath = "C,/nsr:ns-instance-config"
+        results = yield from self._dts.query_read(xpath, rwdts.Flag.MERGE)
+
+        for result in results:
+            entry = yield from result
+            ns_instance_config = entry.result
+
+            for nsr in ns_instance_config.nsr:
+                if nsr.nsd_ref == nsd_id:
+                    return nsr
+
+        return None
+
+    @asyncio.coroutine
+    def nsd_unref_by_nsr_id(self, nsr_id):
+        """ Unref the network service descriptor based on NSR id """
+        self._log.debug("NSR Unref called for Nsr Id:%s", nsr_id)
+        if nsr_id in self._nsrs:
+            nsr = self._nsrs[nsr_id]
+            nsd = self.get_nsd(nsr.nsd_id)
+            self._log.debug("Releasing ref on NSD %s held by NSR %s - Curr %d",
+                            nsd.id, nsr.id, nsd.ref_count)
+            nsd.unref()
+        else:
+            self._log.error("Cannot find NSD for NSR id %s", nsr_id)
+            raise NetworkServiceDescriptorUnrefError("No Nsd for nsr id %s" % nsr_id)
+
+    @asyncio.coroutine
+    def nsd_unref(self, nsd_id):
+        """ Unref the network service descriptor associated with the id """
+        nsd = self.get_nsd(nsd_id)
+        nsd.unref()
+
+    def get_nsd(self, nsd_id):
+        """ Get network service descriptor for the passed nsd_id """
+        if nsd_id not in self._nsds:
+            self._log.error("Cannot find NSD id:%s", nsd_id)
+            raise NetworkServiceDescriptorError("Cannot find NSD id:%s", nsd_id)
+
+        return self._nsds[nsd_id]
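get_nsd_ref and nsd_unref_by_nsr_id above only touch the descriptor through ref()/unref()/ref_count; NetworkServiceDescriptor itself is defined elsewhere in this file. A minimal sketch of that reference-counting contract (illustrative, under the assumption that unref below zero is an error):

    class RefCountedDescriptor(object):
        """Illustrative stand-in for the ref-count surface used above."""

        def __init__(self, desc_id):
            self.id = desc_id
            self._ref_count = 0

        @property
        def ref_count(self):
            return self._ref_count

        def ref(self):
            self._ref_count += 1

        def unref(self):
            if self._ref_count == 0:
                raise ValueError("unref() on descriptor %s with no references" % self.id)
            self._ref_count -= 1

        def in_use(self):
            return self._ref_count > 0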
descriptor """ + self._log.debug("Create network service descriptor - %s", nsd_msg) + if nsd_msg.id in self._nsds: + self._log.error("Cannot create NSD %s -NSD ID already exists", nsd_msg) + raise NetworkServiceDescriptorError("NSD already exists-%s", nsd_msg.id) + + nsd = NetworkServiceDescriptor( + self._dts, + self._log, + self._loop, + nsd_msg, + ) + self._nsds[nsd_msg.id] = nsd + + return nsd + + def update_nsd(self, nsd): + """ update the Network service descriptor """ + self._log.debug("Update network service descriptor - %s", nsd) + if nsd.id not in self._nsds: + self._log.debug("No NSD found - creating NSD id = %s", nsd.id) + self.create_nsd(nsd) + else: + self._log.debug("Updating NSD id = %s, nsd = %s", nsd.id, nsd) + self._nsds[nsd.id].update(nsd) + + def delete_nsd(self, nsd_id): + """ Delete the Network service descriptor with the passed id """ + self._log.debug("Deleting the network service descriptor - %s", nsd_id) + if nsd_id not in self._nsds: + self._log.debug("Delete NSD failed - cannot find nsd-id %s", nsd_id) + raise NetworkServiceDescriptorNotFound("Cannot find %s", nsd_id) + + if nsd_id not in self._nsds: + self._log.debug("Cannot delete NSD id %s reference exists %s", + nsd_id, + self._nsds[nsd_id].ref_count) + raise NetworkServiceDescriptorRefCountExists( + "Cannot delete :%s, ref_count:%s", + nsd_id, + self._nsds[nsd_id].ref_count) + + del self._nsds[nsd_id] + + def get_vnfd_config(self, xact): + vnfd_dts_reg = self._vnfd_dts_handler.regh + for cfg in vnfd_dts_reg.get_xact_elements(xact): + self.create_vnfd(cfg) + + @asyncio.coroutine + def get_vnfd(self, vnfd_id, xact): + """ Get virtual network function descriptor for the passed vnfd_id""" + if vnfd_id not in self._vnfds: + self._log.error("Cannot find VNFD id:%s", vnfd_id) + self.get_vnfd_config(xact) + + if vnfd_id not in self._vnfds: + self._log.error("Cannot find VNFD id:%s", vnfd_id) + raise VnfDescriptorError("Cannot find VNFD id:%s", vnfd_id) + + return self._vnfds[vnfd_id] + + def create_vnfd(self, vnfd): + """ Create a virtual network function descriptor """ + self._log.debug("Create virtual network function descriptor - %s", vnfd) + if vnfd.id in self._vnfds: + self._log.error("Cannot create VNFD %s -VNFD ID already exists", vnfd) + raise VnfDescriptorError("VNFD already exists-%s", vnfd.id) + + self._vnfds[vnfd.id] = vnfd + return self._vnfds[vnfd.id] + + def update_vnfd(self, vnfd): + """ Update the virtual network function descriptor """ + self._log.debug("Update virtual network function descriptor- %s", vnfd) + + # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511 + for ivld in vnfd.internal_vld: + ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref)) + + if vnfd.id not in self._vnfds: + self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id) + self.create_vnfd(vnfd) + else: + self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd) + self._vnfds[vnfd.id] = vnfd + + @asyncio.coroutine + def delete_vnfd(self, vnfd_id): + """ Delete the virtual network function descriptor with the passed id """ + self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id) + if vnfd_id not in self._vnfds: + self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id) + raise VnfDescriptorError("Cannot find %s", vnfd_id) + + del self._vnfds[vnfd_id] + + def nsd_in_use(self, nsd_id): + """ Is the NSD with the passed id in use """ + self._log.debug("Is this NSD in use - msg:%s", nsd_id) + if nsd_id in self._nsds: + 
+
+    @asyncio.coroutine
+    def publish_nsr(self, xact, path, msg):
+        """ Publish an NSR """
+        self._log.debug("Publish NSR with path %s, msg %s",
+                        path, msg)
+        yield from self.nsr_handler.update(xact, path, msg)
+
+    @asyncio.coroutine
+    def unpublish_nsr(self, xact, path):
+        """ Unpublish an NSR """
+        self._log.debug("Publishing delete NSR with path %s", path)
+        yield from self.nsr_handler.delete(path, xact)
+
+    def vnfr_is_ready(self, vnfr_id):
+        """ VNFR with the id is ready """
+        self._log.debug("VNFR id %s ready", vnfr_id)
+        if vnfr_id not in self._vnfrs:
+            err = "Did not find VNFR ID with id %s" % vnfr_id
+            self._log.critical(err)
+            raise VirtualNetworkFunctionRecordError(err)
+        self._vnfrs[vnfr_id].is_ready()
+
+    @asyncio.coroutine
+    def get_monitoring_param(self, nsr_id):
+        """ Get the monitoring params based on the passed ks_path """
+        monp_list = []
+        if nsr_id is None or nsr_id == "":
+            nsrs = list(self._nsrs.values())
+            for nsr in nsrs:
+                if nsr.active:
+                    monp = yield from nsr.get_monitoring_param()
+                    monp_list.append((nsr.id, monp))
+        elif nsr_id in self._nsrs:
+            if self._nsrs[nsr_id].active:
+                monp = yield from self._nsrs[nsr_id].get_monitoring_param()
+                monp_list.append((nsr_id, monp))
+
+        return monp_list
+
+    @asyncio.coroutine
+    def get_nsd_refcount(self, nsd_id):
+        """ Get the nsd_list from this NSM """
+
+        def nsd_refcount_xpath(nsd_id):
+            """ xpath for ref count entry """
+            return (NsdRefCountDtsHandler.XPATH +
+                    "[rw-nsr:nsd-id-ref = '{}']").format(nsd_id)
+
+        nsd_list = []
+        if nsd_id is None or nsd_id == "":
+            for nsd in self._nsds.values():
+                nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+                nsd_msg.nsd_id_ref = nsd.id
+                nsd_msg.instance_ref_count = nsd.ref_count
+                nsd_list.append((nsd_refcount_xpath(nsd.id), nsd_msg))
+        elif nsd_id in self._nsds:
+            nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+            nsd_msg.nsd_id_ref = self._nsds[nsd_id].id
+            nsd_msg.instance_ref_count = self._nsds[nsd_id].ref_count
+            nsd_list.append((nsd_refcount_xpath(nsd_id), nsd_msg))
+
+        return nsd_list
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr_id, xact):
+        """
+        Terminate network service for the given NSR Id
+        """
+
+        # Terminate the instances/networks associated with this nw service
+        self._log.debug("Terminating the network service %s", nsr_id)
+        yield from self._nsrs[nsr_id].terminate(xact)
+
+        # Unref the NSD
+        yield from self.nsd_unref_by_nsr_id(nsr_id)
+
+        # Unpublish the NSR record
+        self._log.debug("Unpublishing the network service %s", nsr_id)
+        yield from self._nsrs[nsr_id].unpublish(xact)
+
+        # Finally delete the NS instance from this NS Manager
+        self._log.debug("Deleting the network service %s", nsr_id)
+        self.delete_nsr(nsr_id)
+
+
+class NsmRecordsPublisherProxy(object):
+    """ This class provides a publisher interface that allows plugin objects
+    to publish NSR/VNFR/VLR """
+
+    def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsr_pub_hdlr = nsr_pub_hdlr
+        self._vlr_pub_hdlr = vlr_pub_hdlr
+        self._vnfr_pub_hdlr = vnfr_pub_hdlr
+
+    @asyncio.coroutine
+    def publish_nsr(self, xact, nsr):
+        """ Publish an NSR """
+        path = NetworkServiceRecord.xpath_from_nsr(nsr)
+        return (yield from self._nsr_pub_hdlr.update(xact, path, nsr))
+
+    @asyncio.coroutine
+    def unpublish_nsr(self, xact, nsr):
+        """ Unpublish an NSR """
+        path = NetworkServiceRecord.xpath_from_nsr(nsr)
+        return (yield from
self._nsr_pub_hdlr.delete(xact, path)) + + @asyncio.coroutine + def publish_vnfr(self, xact, vnfr): + """ Publish an VNFR """ + path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr) + return (yield from self._vnfr_pub_hdlr.update(xact, path, vnfr)) + + @asyncio.coroutine + def unpublish_vnfr(self, xact, vnfr): + """ Unpublish a VNFR """ + path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr) + return (yield from self._vnfr_pub_hdlr.delete(xact, path)) + + @asyncio.coroutine + def publish_vlr(self, xact, vlr): + """ Publish a VLR """ + path = VirtualLinkRecord.vlr_xpath(vlr) + return (yield from self._vlr_pub_hdlr.update(xact, path, vlr)) + + @asyncio.coroutine + def unpublish_vlr(self, xact, vlr): + """ Unpublish a VLR """ + path = VirtualLinkRecord.vlr_xpath(vlr) + return (yield from self._vlr_pub_hdlr.delete(xact, path)) + + +class NsmTasklet(rift.tasklets.Tasklet): + """ + The network service manager tasklet + """ + def __init__(self, *args, **kwargs): + super(NsmTasklet, self).__init__(*args, **kwargs) + + self._dts = None + self._nsm = None + + self._cloud_plugin_selector = None + self._config_agent_mgr = None + self._vnffgmgr = None + + self._nsr_handler = None + self._vnfr_pub_handler = None + self._vlr_pub_handler = None + + self._records_publisher_proxy = None + + def start(self): + """ The task start callback """ + super(NsmTasklet, self).start() + self.log.info("Starting NsmTasklet") + + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwNsmYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in NSM stop:", sys.exc_info()[0]) + raise + + def on_instance_started(self): + """ Task instance started callback """ + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def init(self): + """ Task init callback """ + self.log.debug("Got instance started callback") + + self.log.debug("creating config account handler") + + self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop) + yield from self._nsr_pub_handler.register() + + self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop) + yield from self._vnfr_pub_handler.register() + + self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop) + yield from self._vlr_pub_handler.register() + + self._records_publisher_proxy = NsmRecordsPublisherProxy( + self._dts, + self.log, + self.loop, + self._nsr_pub_handler, + self._vnfr_pub_handler, + self._vlr_pub_handler, + ) + + # Register the NSM to receive the nsm plugin + # when cloud account is configured + self._cloud_plugin_selector = cloud.CloudAccountNsmPluginSelector( + self._dts, + self.log, + self.log_hdl, + self.loop, + self._records_publisher_proxy, + ) + yield from self._cloud_plugin_selector.register() + + self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts,self.log,self.log_hdl,self.loop) + yield from self._vnffgmgr.register() + + self._nsm = NsManager( + self._dts, + self.log, + self.loop, + self._nsr_pub_handler, + self._vnfr_pub_handler, + self._vlr_pub_handler, + self._cloud_plugin_selector, + self._vnffgmgr, + ) + + # Register the NSM to receive the nsm config agent plugin + # when config agent is configured + self._config_agent_mgr = conagent.NsmConfigAgent( + self._dts, + self.log, + self.loop, + self._records_publisher_proxy, + 
self._nsm.set_config_agent_plugin, + ) + yield from self._config_agent_mgr.register() + # RIFT-11780 : Must call NSM register after initializing config plugin + # During restart, there is race condition which causes the NS creation + # to occur before even config_plugin is registered. + yield from self._nsm.register() + + + @asyncio.coroutine + def run(self): + """ Task run callback """ + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self.log.debug("Changing state to %s", next_state) + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py new file mode 100755 index 0000000..3551079 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py @@ -0,0 +1,361 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
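The on_dts_state_change callback that closes the tasklet above is table-driven: one dict maps a DTS state to the application hook to run, another maps it to the next DTS state to request. The pattern in isolation (the state constants are stand-ins for rwdts.State values; the sketch is illustrative, not the tasklet's actual code):

    import asyncio

    INIT, REGN_COMPLETE, CONFIG, RUN = range(4)  # stand-ins for rwdts.State

    @asyncio.coroutine
    def on_state_change(state, handlers, transitions, set_state):
        # Run the application hook registered for this state, if any.
        handler = handlers.get(state, None)
        if handler is not None:
            yield from handler()

        # Then ask the bus to move to its next state, if one is defined.
        next_state = transitions.get(state, None)
        if next_state is not None:
            set_state(next_state)

    # e.g. handlers = {INIT: init_coro, RUN: run_coro}
    #      transitions = {INIT: REGN_COMPLETE, CONFIG: RUN}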
+ + +# +# + +import asyncio + +from gi.repository import ( + RwDts as rwdts, + RwsdnYang, + RwTypes, + ProtobufC, +) + +from gi.repository.RwTypes import RwStatus +import rw_peas +import rift.tasklets + +class SdnGetPluginError(Exception): + """ Error while fetching SDN plugin """ + pass + + +class SdnGetInterfaceError(Exception): + """ Error while fetching SDN interface""" + pass + + +class SdnAccountExistsError(Exception): + """ Error while creating SDN Account""" + pass + +class VnffgrDoesNotExist(Exception): + """ Error while fetching SDN interface""" + pass + +class VnffgrAlreadyExist(Exception): + """ Vnffgr already exists Error""" + pass + +class VnffgrCreationFailed(Exception): + """ Error while creating VNFFGR""" + pass + + +class VnffgrUpdateFailed(Exception): + """ Error while updating VNFFGR""" + pass + +class VnffgMgr(object): + """ Implements the interface to backend plugins to fetch topology """ + def __init__(self, dts, log, log_hdl, loop): + self._account = {} + self._dts = dts + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._sdn = {} + self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self) + self._log.error("Vnffmgr instantiated") + self._vnffgr_list = {} + + @asyncio.coroutine + def register(self): + yield from self._sdn_handler.register() + + def set_sdn_account(self,account): + if (account.name in self._account): + self._log.error("SDN Account is already set") + else: + sdn_account = RwsdnYang.SDNAccount() + sdn_account.from_dict(account.as_dict()) + sdn_account.name = account.name + self._account[account.name] = sdn_account + self._log.debug("Account set is %s , %s",type(self._account), self._account) + + def del_sdn_account(self, name): + self._log.debug("Account deleted is %s , %s", type(self._account), name) + del self._account[name] + + + def get_sdn_account(self, name): + """ + Creates an object for class RwsdnYang.SdnAccount() + """ + if (name in self._account): + return self._account[name] + else: + self._log.error("SDN account is not configured") + + + def get_sdn_plugin(self,name): + """ + Loads rw.sdn plugin via libpeas + """ + if (name in self._sdn): + return self._sdn[name] + account = self.get_sdn_account(name) + plugin_name = getattr(account, account.account_type).plugin_name + self._log.debug("SDN plugin being created") + plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0') + engine, info, extension = plugin() + + self._sdn[name] = plugin.get_interface("Topology") + try: + rc = self._sdn[name].init(self._log_hdl) + assert rc == RwStatus.SUCCESS + except: + self._log.error("ERROR:SDN plugin instantiation failed ") + else: + self._log.debug("SDN plugin successfully instantiated") + return self._sdn[name] + + def fetch_vnffgr(self,vnffgr_id): + if vnffgr_id not in self._vnffgr_list: + self._log.error("VNFFGR with id %s not present in VNFFGMgr", vnffgr_id) + msg = "VNFFGR with id {} not present in VNFFGMgr".format(vnffgr_id) + raise VnffgrDoesNotExist(msg) + self.update_vnffgrs(self._vnffgr_list[vnffgr_id].sdn_account) + vnffgr = self._vnffgr_list[vnffgr_id].deep_copy() + self._log.debug("VNFFGR for id %s is %s",vnffgr_id,vnffgr) + return vnffgr + + def create_vnffgr(self,vnffgr,classifier_list): + """ + """ + self._log.debug("Received VNFFG chain Create msg %s",vnffgr) + if vnffgr.id in self._vnffgr_list: + self._log.error("VNFFGR with id %s already present in VNFFGMgr", vnffgr.id) + vnffgr.operational_status = 'failed' + msg = "VNFFGR with id {} already present in VNFFGMgr".format(vnffgr.id) + raise 
VnffgrAlreadyExist(msg)
+
+        self._vnffgr_list[vnffgr.id] = vnffgr
+        vnffgr.operational_status = 'init'
+        if len(self._account) == 0:
+            self._log.error("SDN Account not configured")
+            vnffgr.operational_status = 'failed'
+            return
+        if vnffgr.sdn_account:
+            sdn_acct_name = vnffgr.sdn_account
+        else:
+            self._log.error("No SDN Account is associated with this VNFFGR; defaulting to the first configured account")
+            # TODO: Fail the VNFFGR creation if no SDN account is associated
+            #vnffgr.operational_status = 'failed'
+            #msg = "SDN Account is not associated to create VNFFGR"
+            #raise VnffgrCreationFailed(msg)
+            sdn_account = [sdn_account.name for _, sdn_account in self._account.items()]
+            sdn_acct_name = sdn_account[0]
+            vnffgr.sdn_account = sdn_acct_name
+        sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
+
+        for rsp in vnffgr.rsp:
+            vnffg = RwsdnYang.VNFFGChain()
+            vnffg.name = rsp.name
+
+            for index, cp_ref in enumerate(rsp.vnfr_connection_point_ref):
+                cpath = vnffg.vnf_chain_path.add()
+                cpath.order = cp_ref.hop_number
+                cpath.service_function_type = cp_ref.service_function_type
+                cpath.nsh_aware = True
+                cpath.transport_type = 'vxlan-gpe'
+
+                vnfr = cpath.vnfr_ids.add()
+                vnfr.vnfr_id = cp_ref.vnfr_id_ref
+                vnfr.vnfr_name = cp_ref.vnfr_name_ref
+                vnfr.mgmt_address = cp_ref.connection_point_params.mgmt_address
+                vnfr.mgmt_port = 5000
+
+                vdu = vnfr.vdu_list.add()
+                vdu.name = cp_ref.connection_point_params.name
+                vdu.port_id = cp_ref.connection_point_params.port_id
+                vdu.vm_id = cp_ref.connection_point_params.vm_id
+                vdu.address = cp_ref.connection_point_params.address
+                vdu.port = cp_ref.connection_point_params.port
+
+            self._log.debug("VNFFG chain msg is %s", vnffg)
+            rc, rs = sdn_plugin.create_vnffg_chain(self._account[sdn_acct_name], vnffg)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                vnffgr.operational_status = 'failed'
+                msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
+                raise VnffgrCreationFailed(msg)
+
+            self._log.info("VNFFG chain created successfully for rsp with id %s", rsp.id)
+
+        for classifier in classifier_list:
+            cl_rsp = [_rsp for _rsp in vnffgr.rsp if classifier.rsp_id_ref == _rsp.vnffgd_rsp_id_ref]
+            if len(cl_rsp) > 0:
+                cl_rsp_name = cl_rsp[0].name
+            else:
+                self._log.error("No RSP with id %s found; skipping creation of classifier %s", classifier.rsp_id_ref, classifier.name)
+                continue
+            vnffgcl = RwsdnYang.VNFFGClassifier()
+            vnffgcl.name = classifier.name
+            vnffgcl.rsp_name = cl_rsp_name
+            #vnffgcl.port_id = 'dfc3eb6b-3753-4183-93c8-df7c25723fd0'
+            #vnffgcl.vm_id = 'bd86ade8-03bf-4f03-aa3e-375a7cb5a629'
+            acl = vnffgcl.match_attributes.add()
+            acl.name = vnffgcl.name
+            acl.ip_proto = classifier.match_attributes.ip_proto
+            acl.source_ip_address = classifier.match_attributes.source_ip_address + '/32'
+            acl.source_port = classifier.match_attributes.source_port
+            acl.destination_ip_address = classifier.match_attributes.destination_ip_address + '/32'
+            acl.destination_port = classifier.match_attributes.destination_port
+
+            self._log.debug("Creating VNFFG Classifier %s for RSP: %s", vnffgcl.name, vnffgcl.rsp_name)
+            rc, rs = sdn_plugin.create_vnffg_classifier(self._account[sdn_acct_name], vnffgcl)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                self._log.error("VNFFG Classifier creation failed for Classifier %s for RSP ID: %s", classifier.name, classifier.rsp_id_ref)
+                #vnffgr.operational_status = 'failed'
+                #msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
+                #raise VnffgrCreationFailed(msg)
+
+        vnffgr.operational_status = 'running'
+        self.update_vnffgrs(vnffgr.sdn_account)
+        return vnffgr
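The classifier loop in create_vnffgr resolves each classifier to its rendered service path by matching rsp_id_ref against each RSP's vnffgd_rsp_id_ref, and skips classifiers that resolve to nothing. That lookup, reduced to its core (plain objects, illustrative):

    def resolve_classifier_rsp(classifier, rsps):
        """Return the name of the RSP the classifier points at, or None."""
        matches = [rsp for rsp in rsps
                   if rsp.vnffgd_rsp_id_ref == classifier.rsp_id_ref]
        return matches[0].name if matches else None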
+
+    def update_vnffgrs(self, sdn_acct_name):
+        """
+        Update VNFFGRs by reading data from the SDN plugin
+        """
+        sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
+        rc, rs = sdn_plugin.get_vnffg_rendered_paths(self._account[sdn_acct_name])
+        if rc != RwTypes.RwStatus.SUCCESS:
+            msg = "Reading of VNFFGR from SDN Plugin failed"
+            raise VnffgrUpdateFailed(msg)
+
+        vnffgr_list = [_vnffgr for _vnffgr in self._vnffgr_list.values()
+                       if _vnffgr.sdn_account == sdn_acct_name and _vnffgr.operational_status == 'running']
+
+        for _vnffgr in vnffgr_list:
+            for _vnffgr_rsp in _vnffgr.rsp:
+                vnffg_rsp_list = [vnffg_rsp for vnffg_rsp in rs.vnffg_rendered_path if vnffg_rsp.name == _vnffgr_rsp.name]
+                if vnffg_rsp_list is not None and len(vnffg_rsp_list) > 0:
+                    vnffg_rsp = vnffg_rsp_list[0]
+                    if len(vnffg_rsp.rendered_path_hop) != len(_vnffgr_rsp.vnfr_connection_point_ref):
+                        _vnffgr.operational_status = 'failed'
+                        self._log.error("Received hop count %d doesn't match the VNFFGD hop count %d",
+                                        len(vnffg_rsp.rendered_path_hop),
+                                        len(_vnffgr_rsp.vnfr_connection_point_ref))
+                        msg = "Fetching of VNFFGR with id {} failed".format(_vnffgr.id)
+                        raise VnffgrUpdateFailed(msg)
+                    _vnffgr_rsp.path_id = vnffg_rsp.path_id
+                    for index, rendered_hop in enumerate(vnffg_rsp.rendered_path_hop):
+                        for vnfr_cp_ref in _vnffgr_rsp.vnfr_connection_point_ref:
+                            if rendered_hop.vnfr_name == vnfr_cp_ref.vnfr_name_ref:
+                                vnfr_cp_ref.hop_number = rendered_hop.hop_number
+                                vnfr_cp_ref.service_index = rendered_hop.service_index
+                                vnfr_cp_ref.service_function_forwarder.name = rendered_hop.service_function_forwarder.name
+                                vnfr_cp_ref.service_function_forwarder.ip_address = rendered_hop.service_function_forwarder.ip_address
+                                vnfr_cp_ref.service_function_forwarder.port = rendered_hop.service_function_forwarder.port
+                else:
+                    _vnffgr.operational_status = 'failed'
+                    self._log.error("VNFFGR RSP with name %s in VNFFG %s not found", _vnffgr_rsp.name, _vnffgr.id)
+                    msg = "Fetching of VNFFGR with name {} failed".format(_vnffgr_rsp.name)
+                    raise VnffgrUpdateFailed(msg)
+
+    def terminate_vnffgr(self, vnffgr_id, sdn_account_name=None):
+        """
+        Delete the VNFFG chain
+        """
+        if vnffgr_id not in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s not present in VNFFGMgr during termination", vnffgr_id)
+            msg = "VNFFGR with id {} not present in VNFFGMgr during termination".format(vnffgr_id)
+            raise VnffgrDoesNotExist(msg)
+        self._log.info("Received VNFFG chain terminate for id %s", vnffgr_id)
+        if sdn_account_name is None:
+            sdn_account = [sdn_account.name for _, sdn_account in self._account.items()]
+            sdn_account_name = sdn_account[0]
+        sdn_plugin = self.get_sdn_plugin(sdn_account_name)
+        sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name], vnffgr_id)
+        sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name], vnffgr_id)
+        del self._vnffgr_list[vnffgr_id]
+
+
+class SDNAccountDtsHandler(object):
+    XPATH = "C,/rw-sdn:sdn-account"
+
+    def __init__(self, dts, log, parent):
+        self._dts = dts
+        self._log = log
+        self._parent = parent
+
+        self._sdn_account = {}
+
+    def _set_sdn_account(self, account):
+        self._log.info("Setting sdn account: {}".format(account))
+        if account.name in self._sdn_account:
+            self._log.error("SDN Account with name %s already exists. 
Ignoring config", account.name); + self._sdn_account[account.name] = account + self._parent.set_sdn_account(account) + + def _del_sdn_account(self, account_name): + self._log.info("Deleting sdn account: {}".format(account_name)) + del self._sdn_account[account_name] + + self._parent.del_sdn_account(account_name) + + @asyncio.coroutine + def register(self): + def apply_config(dts, acg, xact, action, _): + self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action) + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. Skipping apply config") + return RwTypes.RwStatus.SUCCESS + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for SDN Account config """ + + self._log.info("SDN Cloud account config received: %s", msg) + + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + if fref.is_field_deleted(): + # Delete the sdn account record + self._del_sdn_account(msg.name) + else: + if msg.name in self._sdn_account: + msg = "Cannot update a SDN account that already was set." + self._log.error(msg) + xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, + SDNAccountDtsHandler.XPATH, + msg) + raise SdnAccountExistsError(msg) + + # Set the sdn account record + self._set_sdn_account(msg) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + + self._log.debug("Registering for Sdn Account config using xpath: %s", + SDNAccountDtsHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + acg.register( + xpath=SDNAccountDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml new file mode 100644 index 0000000..7187897 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml @@ -0,0 +1,23 @@ + + + + + + + + + diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py new file mode 100755 index 0000000..b55511b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py @@ -0,0 +1,363 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
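The module below turns a restricted absolute xpath into a list of Attribute and ListElement tokens, converting dashes to underscores along the way. Assuming the tokenize() defined further down, a representative call looks like this (the xpath string itself is hypothetical):

    tokens = tokenize("/nsr:ns-instance-config/nsr:nsr[nsr:id='123']/nsr:name")
    # -> [Attribute(module='nsr', name='ns_instance_config'),
    #     ListElement(module='nsr', name='nsr', key='id', value='123'),
    #     Attribute(module='nsr', name='name')]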
+ +import collections +import re + + +class Attribute(collections.namedtuple("Attribute", "module name")): + def __repr__(self): + return "{}:{}".format(self.module, self.name) + + +class ListElement(collections.namedtuple("List", "module name key value")): + def __repr__(self): + return "{}:{}[{}={}]".format(self.module, self.name, self.key, self.value) + + +def tokenize(xpath): + """Return a list of tokens representing an xpath + + The types of xpaths that this selector supports is extremely limited. + The xpath is required to be an absolute path delimited by a + forward-slash. Each of the parts (elements between delimiters) is + treated as one of two possible types: + + - an attribute + - a list element + + An attribute is a normal python attribute on an object. A list element + is an element within a list, which is identified by a key value (like a + yang list, although this is more properly a dict in python). + + Each attribute is expected to have the form, + + : + + A valid variable name (or namespace) follows the python regular expression, + + [a-zA-Z0-9-_]+ + + A list entry has the form, + + :[:=] + + The expression in the square brackets is the key of the required + element, and the value that that key must have. + + Arguments: + xpath - a string containing an xpath expression + + Raises: + A ValueError is raised if the xpath cannot be parsed. + + Returns: + a list of tokens + + """ + # define the symbols that are valid for a variable name in yang + name = "[a-zA-Z0-9-_]+" + + # define a set of regular expressions for parsing the xpath + pattern_attribute = re.compile("({t}):({t})$".format(t=name)) + pattern_key_value = re.compile("^{t}:({t})\s*=\s*(.*)$".format(t=name)) + pattern_quote = re.compile("^[\'\"](.*)[\'\"]$") + pattern_list = re.compile("^(.*)\[(.*)\]$") + + def dash_to_underscore(text): + return text.replace('-', '_') + + # Iterate through the parts of the xpath (NB: because the xpaths are + # required to be absolute paths, the first character is going to be the + # forward slash. As a result, when the string is split, the first + # element with be an empty string). + tokens = list() + for part in xpath.split("/")[1:]: + + # Test the part to see if it is a attribute + result = pattern_attribute.match(part) + if result is not None: + module, name = result.groups() + + # Convert the dashes to underscores + name = dash_to_underscore(name) + module = dash_to_underscore(module) + + tokens.append(Attribute(module, name)) + + continue + + # Test the part to see if it is a list + result = pattern_list.match(part) + if result is not None: + attribute, keyvalue = result.groups() + + module, name = pattern_attribute.match(attribute).groups() + key, value = pattern_key_value.match(keyvalue).groups() + + # Convert the dashes to underscore (but not in the key value) + key = dash_to_underscore(key) + name = dash_to_underscore(name) + module = dash_to_underscore(module) + + result = pattern_quote.match(value) + if result is not None: + value = result.group(1) + + tokens.append(ListElement(module, name, key, value)) + + continue + + raise ValueError("cannot parse '{}'".format(part)) + + return tokens + + +class XPathAttribute(object): + """ + This class is used to represent a reference to an attribute. If you use + getattr on an attribute, it may give you the value of the attribute rather + than a reference to it. What is really wanted is a representation of the + attribute so that its value can be both retrieved and set. That is what + this class provides. 
+ """ + + def __init__(self, obj, name): + """Create an instance of XPathAttribute + + Arguments: + obj - the object containing the attribute + name - the name of an attribute + + Raises: + A ValueError is raised if the provided object does not have the + associated attribute. + + """ + if not hasattr(obj, name): + msg = "The provided object does not contain the associated attribute" + raise ValueError(msg) + + self.obj = obj + self.name = name + + def __repr__(self): + return self.value + + @property + def value(self): + return getattr(self.obj, self.name) + + @value.setter + def value(self, value): + """Set the value of the attribute + + Arguments: + value - the new value that the attribute should take + + Raises: + An TypeError is raised if the provided value cannot be cast the + current type of the attribute. + + """ + attr_type = type(self.value) + attr_value = value + + # The only way we can currently get the type of the atrribute is if it + # has an existing value. So if the attribute has an existing value, + # cast the value to the type of the attribute value. + if attr_type is not type(None): + try: + attr_value = attr_type(attr_value) + + except ValueError: + msg = "expected type '{}', but got '{}' instead" + raise TypeError(msg.format(attr_type.__name__, type(value).__name__)) + + setattr(self.obj, self.name, attr_value) + + +class XPathElement(XPathAttribute): + """ + This class is used to represent a reference to an element within a list. + Unlike scalar attributes, it is not entirely necessary to have this class + to represent the attribute because the element cannot be a simple scalar. + However, this class is used because it creates a uniform interface that can + be used by the setxattr and getxattr functions. + """ + + def __init__(self, container, key, value): + """Create an instance of XPathElement + + Arguments: + container - the object that contains the element + key - the name of the field that is used to identify the + element + value - the value of the key that identifies the element + + """ + self._container = container + self._value = value + self._key = key + + @property + def value(self): + for element in self._container: + if getattr(element, self._key) == self._value: + return element + + raise ValueError("specified element does not exist") + + @value.setter + def value(self, value): + existing = None + for element in self._container: + if getattr(element, self._key) == self._value: + existing = element + break + + if existing is not None: + self._container.remove(existing) + + self._container.append(value) + + +class XPathSelector(object): + def __init__(self, xpath): + """Creates an instance of XPathSelector + + Arguments: + xpath - a string containing an xpath expression + + """ + self._tokens = tokenize(xpath) + + + def __call__(self, obj): + """Returns a reference to an attribute on the provided object + + Using the defined xpath, an attribute is selected from the provided + object and returned. + + Arguments: + obj - a GI object + + Raises: + A ValueError is raised if the specified element in a list cannot be + found. + + Returns: + an XPathAttribute that reference the specified attribute + + """ + current = obj + for token in self._tokens[:-1]: + # If the object is contained within a list, we will need to iterate + # through the tokens until we find a token that is a field of the + # object. 
+ if token.name not in current.fields: + if current is obj: + continue + + raise ValueError('cannot find attribute {}'.format(token.name)) + + # If the token is a ListElement, try to find the matching element + if isinstance(token, ListElement): + for element in getattr(current, token.name): + if getattr(element, token.key) == token.value: + current = element + break + + else: + raise ValueError('unable to find {}'.format(token.value)) + + else: + # Attribute the variable matching the name of the token + current = getattr(current, token.name) + + # Process the final token + token = self._tokens[-1] + + # If the token represents a list element, find the element in the list + # and return an XPathElement + if isinstance(token, ListElement): + container = getattr(current, token.name) + for element in container: + if getattr(element, token.key) == token.value: + return XPathElement(container, token.key, token.value) + + else: + raise ValueError('unable to find {}'.format(token.value)) + + # Otherwise, return the object as an XPathAttribute + return XPathAttribute(current, token.name) + + @property + def tokens(self): + """The tokens in the xpath expression""" + return self._tokens + + +# A global cache to avoid repeated parsing of known xpath expressions +__xpath_cache = dict() + + +def reset_cache(): + global __xpath_cache + __xpath_cache = dict() + + +def getxattr(obj, xpath): + """Return an attribute on the provided object + + The xpath is parsed and used to identify an attribute on the provided + object. The object is expected to be a GI object where each attribute that + is accessible via an xpath expression is contained in the 'fields' + attribute of the object (NB: this is not true of GI lists, which do not + have a 'fields' attribute). + + A selector is create for each xpath and used to find the specified + attribute. The accepted xpath expressions are those supported by the + XPathSelector class. The parsed xpath expression is cached so that + subsequent parsing is unnecessary. However, selectors are stored in a + global dictionary and this means that this function is not thread-safe. + + Arguments: + obj - a GI object + xpath - a string containing an xpath expression + + Returns: + an attribute on the provided object + + """ + if xpath not in __xpath_cache: + __xpath_cache[xpath] = XPathSelector(xpath) + + return __xpath_cache[xpath](obj).value + + +def setxattr(obj, xpath, value): + """Set the attribute referred to by the xpath + + Arguments: + obj - a GI object + xpath - a string containing an xpath expression + value - the new value of the attribute + + """ + if xpath not in __xpath_cache: + __xpath_cache[xpath] = XPathSelector(xpath) + + __xpath_cache[xpath](obj).value = value \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py new file mode 100755 index 0000000..e56c32b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwnsmtasklet + +class Tasklet(rift.tasklets.rwnsmtasklet.NsmTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt new file mode 100644 index 0000000..7cd388f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwresmgrtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/rwresmgr_config.py + rift/tasklets/${TASKLET_NAME}/rwresmgr_core.py + rift/tasklets/${TASKLET_NAME}/rwresmgr_events.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py new file mode 100644 index 0000000..f93e30e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .rwresmgrtasklet import ResMgrTasklet diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py new file mode 100644 index 0000000..1494cff --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py @@ -0,0 +1,123 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import logging +import time +import uuid +from enum import Enum + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +from gi.repository.RwTypes import RwStatus +import rift.tasklets +import rift.mano.cloud + + +class ResourceMgrConfig(object): + XPATH_POOL_OPER_DATA = "D,/rw-resource-mgr:resource-pool-records" + def __init__(self, dts, log, rwlog_hdl, loop, parent): + self._dts = dts + self._log = log + self._rwlog_hdl = rwlog_hdl + self._loop = loop + self._parent = parent + + self._cloud_sub = None + + @asyncio.coroutine + def register(self): + yield from self.register_resource_pool_operational_data() + self.register_cloud_account_config() + + def register_cloud_account_config(self): + def on_add_cloud_account_apply(account): + self._log.debug("Received on_add_cloud_account: %s", account) + self._parent.add_cloud_account_config(account) + + def on_delete_cloud_account_apply(account_name): + self._log.debug("Received on_delete_cloud_account_apply: %s", account_name) + self._parent.delete_cloud_account_config(account_name) + + @asyncio.coroutine + def on_delete_cloud_account_prepare(account_name): + self._log.debug("Received on_delete_cloud_account_prepare: %s", account_name) + self._parent.delete_cloud_account_config(account_name, dry_run=True) + + @asyncio.coroutine + def on_update_cloud_account_prepare(account): + raise NotImplementedError( + "Resource manager does not support updating cloud account" + ) + + cloud_callbacks = rift.mano.cloud.CloudAccountConfigCallbacks( + on_add_apply=on_add_cloud_account_apply, + on_delete_apply=on_delete_cloud_account_apply, + on_delete_prepare=on_delete_cloud_account_prepare, + on_update_prepare=on_update_cloud_account_prepare, + ) + + self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber( + self._dts, self._log, self._rwlog_hdl, cloud_callbacks + ) + self._cloud_sub.register() + + @asyncio.coroutine + def register_resource_pool_operational_data(self): + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + self._log.debug("ResourceMgr providing resource-pool information") + msg = RwResourceMgrYang.ResourcePoolRecords() + + cloud_accounts = self._parent.get_cloud_account_names() + for cloud_account_name in 
cloud_accounts: + pools = self._parent.get_pool_list(cloud_account_name) + self._log.debug("Publishing information about cloud account %s %d resource pools", + cloud_account_name, len(pools)) + + cloud_account_msg = msg.cloud_account.add() + cloud_account_msg.name = cloud_account_name + for pool in pools: + pool_info = self._parent.get_pool_info(cloud_account_name, pool.name) + cloud_account_msg.records.append(pool_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, + ResourceMgrConfig.XPATH_POOL_OPER_DATA, + msg=msg,) + + self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s", + ResourceMgrConfig.XPATH_POOL_OPER_DATA) + + handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + response = yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA, + handler=handler, + flags=rwdts.Flag.PUBLISHER) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py new file mode 100644 index 0000000..c212abd --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py @@ -0,0 +1,1185 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
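Nearly every CAL call in the module below unpacks an (rc, result) pair, compares rc to RwStatus.SUCCESS, logs, and raises a ResMgr exception with a near-identical message. A hypothetical helper capturing that repetition (not part of the module; shown only to make the pattern explicit):

    def check_cal_result(rc, success, log, exc_cls, operation, account_name):
        """Raise exc_cls with a uniform message when a CAL call fails."""
        if rc != success:
            msg = "%s operation failed for cloud account: %s" % (operation, account_name)
            log.error(msg)
            raise exc_cls(msg)

    # Usage against the conventions below, e.g.:
    #   rc, rs = rwcal.get_image_list(account)
    #   check_cal_result(rc, RwStatus.SUCCESS, log, ResMgrCALOperationFailure,
    #                    "Get-image-info-list", account.name)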
+ +# +# + +import uuid +import collections +import asyncio + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +from gi.repository.RwTypes import RwStatus + +class ResMgrCALNotPresent(Exception): + pass + +class ResMgrCloudAccountNotFound(Exception): + pass + +class ResMgrCloudAccountExists(Exception): + pass + +class ResMgrCloudAccountInUse(Exception): + pass + +class ResMgrDuplicatePool(Exception): + pass + +class ResMgrPoolNotAvailable(Exception): + pass + +class ResMgrPoolOperationFailed(Exception): + pass + +class ResMgrDuplicateEventId(Exception): + pass + +class ResMgrUnknownEventId(Exception): + pass + +class ResMgrUnknownResourceId(Exception): + pass + +class ResMgrResourceIdBusy(Exception): + pass + +class ResMgrResourceIdNotAllocated(Exception): + pass + +class ResMgrNoResourcesAvailable(Exception): + pass + +class ResMgrResourcesInitFailed(Exception): + pass + +class ResMgrCALOperationFailure(Exception): + pass + + +class ResourceMgrCALHandler(object): + def __init__(self, log, log_hdl, account): + self._log = log + self._account = account.cal_account_msg + self._rwcal = account.cal + if account.account_type == 'aws': + self._subnets = ["172.31.97.0/24", "172.31.98.0/24", "172.31.99.0/24", "172.31.100.0/24", "172.31.101.0/24"] + else: + self._subnets = ["11.0.0.0/24", "12.0.0.0/24", "13.0.0.0/24", "14.0.0.0/24", "15.0.0.0/24"] + self._subnet_ptr = 0 + + def _select_link_subnet(self): + subnet = self._subnets[self._subnet_ptr] + self._subnet_ptr += 1 + if self._subnet_ptr == len(self._subnets): + self._subnet_ptr = 0 + return subnet + + def create_virtual_network(self, req_params): + rc, rsp = self._rwcal.get_virtual_link_list(self._account) + assert rc == RwStatus.SUCCESS + links = [vlink for vlink in rsp.virtual_link_info_list if vlink.name == req_params.name] + if links: + return links[0].virtual_link_id + + params = RwcalYang.VirtualLinkReqParams() + params.from_dict(req_params.as_dict()) + params.subnet = self._select_link_subnet() + rc, rs = self._rwcal.create_virtual_link(self._account, params) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-network-allocate operation failed for cloud account: %s", self._account.name) + raise ResMgrCALOperationFailure("Virtual-network allocate operationa failed for cloud account: %s" %(self._account.name)) + return rs + + + def delete_virtual_network(self, network_id): + rc = self._rwcal.delete_virtual_link(self._account, network_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-network-release operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + network_id) + raise ResMgrCALOperationFailure("Virtual-network release operation failed for cloud account: %s. ResourceId: %s" %(self._account.name, network_id)) + + def get_virtual_network_info(self, network_id): + rc, rs = self._rwcal.get_virtual_link(self._account, network_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-network-info operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + network_id) + raise ResMgrCALOperationFailure("Virtual-network-info operation failed for cloud account: %s. 
ResourceID: %s" %(self._account.name, network_id)) + return rs + + def create_virtual_compute(self, req_params): + rc, rsp = self._rwcal.get_vdu_list(self._account) + assert rc == RwStatus.SUCCESS + vdus = [vm for vm in rsp.vdu_info_list if vm.name == req_params.name] + if vdus: + return vdus[0].vdu_id + + params = RwcalYang.VDUInitParams() + params.from_dict(req_params.as_dict()) + + image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None + params.image_id = self.get_image_id_from_image_info(req_params.image_name, image_checksum) + + self._log.info("Creating virtual-compute, Name: %s, ImageID: %s, FlavorID: %s", params.name, params.image_id, params.flavor_id) + rc, rs = self._rwcal.create_vdu(self._account, params) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-create operation failed for cloud account: %s", self._account.name) + raise ResMgrCALOperationFailure("Virtual-compute-create operation failed for cloud account: %s" %(self._account.name)) + return rs + + def modify_virtual_compute(self, req_params): + rc = self._rwcal.modify_vdu(self._account, req_params) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-modify operation failed for cloud account: %s", self._account.name) + raise ResMgrCALOperationFailure("Virtual-compute-modify operation failed for cloud account: %s" %(self._account.name)) + + def delete_virtual_compute(self, compute_id): + rc = self._rwcal.delete_vdu(self._account, compute_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + compute_id) + raise ResMgrCALOperationFailure("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id)) + + def get_virtual_compute_info(self, compute_id): + rc, rs = self._rwcal.get_vdu(self._account, compute_id) + if rc != RwStatus.SUCCESS: + self._log.error("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s", + self._account.name, + compute_id) + raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s. 
ResourceID: %s" %(self._account.name, compute_id)) + return rs + + def get_compute_flavor_info_list(self): + rc, rs = self._rwcal.get_flavor_list(self._account) + if rc != RwStatus.SUCCESS: + self._log.error("Get-flavor-info-list operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(self._account.name)) + return rs.flavorinfo_list + + def create_compute_flavor(self, request): + flavor = RwcalYang.FlavorInfoItem() + flavor.name = str(uuid.uuid4()) + epa_types = ['vm_flavor', 'guest_epa'] + epa_dict = {k: v for k, v in request.as_dict().items() if k in epa_types} + flavor.from_dict(epa_dict) + + self._log.info("Creating flavor: %s", flavor) + rc, rs = self._rwcal.create_flavor(self._account, flavor) + if rc != RwStatus.SUCCESS: + self._log.error("Create-flavor operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(self._account.name)) + return rs + + def get_image_info_list(self): + rc, rs = self._rwcal.get_image_list(self._account) + if rc != RwStatus.SUCCESS: + self._log.error("Get-image-info-list operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Get-image-info-list operation failed for cloud account: %s" %(self._account.name)) + return rs.imageinfo_list + + def get_image_id_from_image_info(self, image_name, image_checksum=None): + self._log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s", + image_name, image_checksum, self._account.name + ) + image_list = self.get_image_info_list() + matching_images = [i for i in image_list if i.name == image_name] + + # If the image checksum was filled in then further filter the images by the checksum + if image_checksum is not None: + matching_images = [i for i in matching_images if i.checksum == image_checksum] + else: + self._log.warning("Image checksum not provided. 
Lookup using image name only.") + + if len(matching_images) == 0: + raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format( + image_name, image_checksum, self._account.name + )) + + elif len(matching_images) > 1: + unique_checksums = {i.checksum for i in matching_images} + if len(unique_checksums) > 1: + msg = ("Too many images with different checksums matched " + "image name of %s for cloud account: %s" % (image_name, self._account.name)) + raise ResMgrCALOperationFailure(msg) + + return matching_images[0].id + + def get_image_info(self, image_id): + rc, rs = self._rwcal.get_image(self._account, image_id) + if rc != RwStatus.SUCCESS: + self._log.error("Get-image-info-list operation failed for cloud account: %s", + self._account.name) + raise ResMgrCALOperationFailure("Get-image-info operation failed for cloud account: %s" %(self._account.name)) + return rs.imageinfo_list + + def dynamic_flavor_supported(self): + return getattr(self._account, self._account.account_type).dynamic_flavor_support + + +class Resource(object): + def __init__(self, resource_id, resource_type): + self._id = resource_id + self._type = resource_type + + @property + def resource_id(self): + return self._id + + @property + def resource_type(self): + return self._type + + def cleanup(self): + pass + + +class ComputeResource(Resource): + def __init__(self, resource_id, resource_type): + super(ComputeResource, self).__init__(resource_id, resource_type) + + +class NetworkResource(Resource): + def __init__(self, resource_id, resource_type): + super(NetworkResource, self).__init__(resource_id, resource_type) + + +class ResourcePoolInfo(object): + def __init__(self, name, pool_type, resource_type, max_size): + self.name = name + self.pool_type = pool_type + self.resource_type = resource_type + self.max_size = max_size + + @classmethod + def from_dict(cls, pool_dict): + return cls( + pool_dict["name"], + pool_dict["pool_type"], + pool_dict["resource_type"], + pool_dict["max_size"], + ) + + +class ResourcePool(object): + def __init__(self, log, loop, pool_info, resource_class, cal): + self._log = log + self._loop = loop + self._name = pool_info.name + self._pool_type = pool_info.pool_type + self._resource_type = pool_info.resource_type + self._cal = cal + self._resource_class = resource_class + + self._max_size = pool_info.max_size + + self._status = 'unlocked' + ### A Dictionary of all the resources in this pool, keyed by CAL resource-id + self._all_resources = {} + ### A List of free resources in this pool + self._free_resources = [] + ### A Dictionary of all the allocated resources in this pool, keyed by CAL resource-id + self._allocated_resources = {} + + @property + def name(self): + return self._name + + @property + def cal(self): + """ This instance's ResourceMgrCALHandler """ + return self._cal + + @property + def pool_type(self): + return self._pool_type + + @property + def resource_type(self): + return self._resource_type + + @property + def max_size(self): + return self._max_size + + @property + def status(self): + return self._status + + def in_use(self): + if len(self._allocated_resources) != 0: + return True + else: + return False + + def update_cal_handler(self, cal): + if self.in_use(): + raise ResMgrPoolOperationFailed( + "Cannot update CAL plugin for in use pool" + ) + + self._cal = cal + + def lock_pool(self): + self._log.info("Locking the pool :%s", self.name) + self._status = 'locked' + + def unlock_pool(self): + self._log.info("Unlocking the pool 
:%s", self.name) + self._status = 'unlocked' + + def add_resource(self, resource_info): + self._log.info("Adding static resource to Pool: %s, Resource-id: %s Resource-Type: %s", + self.name, + resource_info.resource_id, + self.resource_type) + + ### Add static resources to pool + resource = self._resource_class(resource_info.resource_id, 'static') + assert resource.resource_id == resource_info.resource_id + self._all_resources[resource.resource_id] = resource + self._free_resources.append(resource) + + def delete_resource(self, resource_id): + if resource_id not in self._all_resources: + self._log.error("Resource Id: %s not present in pool: %s. Delete operation failed", resource_id, self.name) + raise ResMgrUnknownResourceId("Resource Id: %s requested for release is not found" %(resource_id)) + + if resource_id in self._allocated_resources: + self._log.error("Resource Id: %s in use. Delete operation failed", resource_id) + raise ResMgrResourceIdBusy("Resource Id: %s requested for release is in use" %(resource_id)) + + self._log.info("Deleting resource: %s from pool: %s, Resource-Type", + resource_id, + self.name, + self.resource_type) + + resource = self._all_resources.pop(resource_id) + self._free_resources.remove(resource) + resource.cleanup() + del resource + + @asyncio.coroutine + def read_resource_info(self, resource_id): + if resource_id not in self._all_resources: + self._log.error("Resource Id: %s not present in pool: %s. Read operation failed", resource_id, self.name) + raise ResMgrUnknownResourceId("Resource Id: %s requested for read is not found" %(resource_id)) + + if resource_id not in self._allocated_resources: + self._log.error("Resource Id: %s not in use. Read operation failed", resource_id) + raise ResMgrResourceIdNotAllocated("Resource Id: %s not in use. Read operation failed" %(resource_id)) + + resource = self._allocated_resources[resource_id] + resource_info = self.get_resource_info(resource) + return resource_info + + def get_pool_info(self): + info = RwResourceMgrYang.ResourceRecordInfo() + self._log.info("Providing info for pool: %s", self.name) + info.name = self.name + if self.pool_type: + info.pool_type = self.pool_type + if self.resource_type: + info.resource_type = self.resource_type + if self.status: + info.pool_status = self.status + + info.total_resources = len(self._all_resources) + info.free_resources = len(self._free_resources) + info.allocated_resources = len(self._allocated_resources) + return info + + def cleanup(self): + for _, v in self._all_resources.items(): + v.cleanup() + + def _allocate_static_resource(self, request, resource_type): + unit_type = {'compute': 'VDU', 'network':'VirtualLink'} + match_found = False + resource = None + self._log.info("Doing resource match from pool :%s", self._free_resources) + for resource in self._free_resources: + resource_info = self.get_resource_info(resource) + self._log.info("Attempting to match %s-requirements for %s: %s with resource-id :%s", + resource_type, unit_type[resource_type],request.name, resource.resource_id) + if self.match_epa_params(resource_info, request): + if self.match_image_params(resource_info, request): + match_found = True + self._log.info("%s-requirements matched for %s: %s with resource-id :%s", + resource_type, unit_type[resource_type],request.name, resource.resource_id) + self.initialize_resource_in_cal(resource, request) + break + + if not match_found: + self._log.error("No match found for %s-requirements for %s: %s in pool: %s. 
%s instantiation failed", + resource_type, + unit_type[resource_type], + request.name, + self.name, + unit_type[resource_type]) + return None + else: + ### Move resource from free-list into allocated-list + self._log.info("Allocating the static resource with resource-id: %s for %s: %s", + resource.resource_id, + unit_type[resource_type],request.name) + self._free_resources.remove(resource) + self._allocated_resources[resource.resource_id] = resource + + return resource + + @asyncio.coroutine + def allocate_resource(self, request): + resource = yield from self.allocate_resource_in_cal(request) + resource_info = self.get_resource_info(resource) + return resource.resource_id, resource_info + + @asyncio.coroutine + def release_resource(self, resource_id): + self._log.debug("Releasing resource_id %s in pool %s", resource_id, self.name) + if resource_id not in self._allocated_resources: + self._log.error("Failed to release a resource with resource-id: %s in pool: %s. Resource not known", + resource_id, + self.name) + raise ResMgrUnknownResourceId("Failed to release resource with resource-id: %s. Unknown resource-id" %(resource_id)) + + ### Get resource object + resource = self._allocated_resources.pop(resource_id) + self.uninitialize_resource_in_cal(resource) + yield from self.release_cal_resource(resource) + + +class NetworkPool(ResourcePool): + def __init__(self, log, loop, pool_info, cal): + super(NetworkPool, self).__init__(log, loop, pool_info, NetworkResource, cal) + + @asyncio.coroutine + def allocate_resource_in_cal(self, request): + resource = None + if self.pool_type == 'static': + self._log.info("Attempting network resource allocation from static pool: %s", self.name) + ### Attempt resource allocation from static pool + resource = self._allocate_static_resource(request, 'network') + elif self.pool_type == 'dynamic': + ### Attempt resource allocation from dynamic pool + self._log.info("Attempting network resource allocation from dynamic pool: %s", self.name) + if len(self._free_resources) != 0: + self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources", + self.name, len(self._free_resources)) + resource = self._allocate_static_resource(request, 'network') + if resource is None: + self._log.info("Could not resource from static resources. Going for dynamic resource allocation") + ## Not static resource available. 
Attempt dynamic resource from pool + resource = yield from self.allocate_dynamic_resource(request) + if resource is None: + raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name)) + return resource + + @asyncio.coroutine + def allocate_dynamic_resource(self, request): + resource_id = self._cal.create_virtual_network(request) + resource = self._resource_class(resource_id, 'dynamic') + self._all_resources[resource_id] = resource + self._allocated_resources[resource_id] = resource + self._log.info("Successfully allocated virtual-network resource from CAL with resource-id: %s", resource_id) + return resource + + @asyncio.coroutine + def release_cal_resource(self, resource): + if resource.resource_type == 'dynamic': + self._log.debug("Deleting virtual network with network_id: %s", resource.resource_id) + self._cal.delete_virtual_network(resource.resource_id) + self._all_resources.pop(resource.resource_id) + self._log.info("Successfully released virtual-network resource in CAL with resource-id: %s", resource.resource_id) + else: + self._log.info("Successfully released virtual-network resource with resource-id: %s into available-list", resource.resource_id) + self._free_resources.append(resource) + + def get_resource_info(self, resource): + info = self._cal.get_virtual_network_info(resource.resource_id) + self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s", + resource.resource_id, str(info)) + response = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo() + response.from_dict(info.as_dict()) + response.pool_name = self.name + response.resource_state = 'active' + return response + + def get_info_by_id(self, resource_id): + info = self._cal.get_virtual_network_info(resource_id) + self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s", + resource_id, str(info)) + return info + + def match_image_params(self, resource_info, request_params): + return True + + def match_epa_params(self, resource_info, request_params): + if not hasattr(request_params, 'provider_network'): + ### Its a match if nothing is requested + return True + else: + required = getattr(request_params, 'provider_network') + + if not hasattr(resource_info, 'provider_network'): + ### Its no match + return False + else: + available = getattr(resource_info, 'provider_network') + + self._log.debug("Matching Network EPA params. Required: %s, Available: %s", required, available) + + if required.has_field('name') and required.name!= available.name: + self._log.debug("Provider Network mismatch. Required: %s, Available: %s", + required.name, + available.name) + return False + + self._log.debug("Matching EPA params physical network name") + + if required.has_field('physical_network') and required.physical_network != available.physical_network: + self._log.debug("Physical Network mismatch. Required: %s, Available: %s", + required.physical_network, + available.physical_network) + return False + + self._log.debug("Matching EPA params overlay type") + if required.has_field('overlay_type') and required.overlay_type != available.overlay_type: + self._log.debug("Overlay type mismatch. Required: %s, Available: %s", + required.overlay_type, + available.overlay_type) + return False + + self._log.debug("Matching EPA params SegmentationID") + if required.has_field('segmentation_id') and required.segmentation_id != available.segmentation_id: + self._log.debug("Segmentation-Id mismatch. 
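+    # A note on the matching style above: each provider_network field only
+    # constrains the match when the request actually sets it (hence the
+    # has_field() guards). Expressed over plain dicts the rule is roughly:
+    #
+    #     def fields_match(required, available, fields):
+    #         return all(field not in required or
+    #                    required[field] == available.get(field)
+    #                    for field in fields)
+    #
+    # This is an illustrative sketch only; the real code operates on the
+    # protobuf messages generated from the rw-resource-mgr yang model.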
+    def initialize_resource_in_cal(self, resource, request):
+        pass
+
+    def uninitialize_resource_in_cal(self, resource):
+        pass
+
+
+class ComputePool(ResourcePool):
+    def __init__(self, log, loop, pool_info, cal):
+        super(ComputePool, self).__init__(log, loop, pool_info, ComputeResource, cal)
+
+    @asyncio.coroutine
+    def allocate_resource_in_cal(self, request):
+        resource = None
+        if self.pool_type == 'static':
+            self._log.info("Attempting compute resource allocation from static pool: %s", self.name)
+            ### Attempt resource allocation from static pool
+            resource = self._allocate_static_resource(request, 'compute')
+        elif self.pool_type == 'dynamic':
+            ### Attempt resource allocation from dynamic pool
+            self._log.info("Attempting compute resource allocation from dynamic pool: %s", self.name)
+            if len(self._free_resources) != 0:
+                self._log.info("Dynamic pool: %s has %d static resources; attempting resource allocation from static resources",
+                               self.name,
+                               len(self._free_resources))
+                resource = self._allocate_static_resource(request, 'compute')
+            if resource is None:
+                self._log.info("Attempting dynamic resource allocation")
+                resource = yield from self.allocate_dynamic_resource(request)
+        if resource is None:
+            raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name))
+
+        requested_params = RwcalYang.VDUInitParams()
+        requested_params.from_dict(request.as_dict())
+        resource.requested_params = requested_params
+        return resource
+
+    @asyncio.coroutine
+    def allocate_dynamic_resource(self, request):
+        request.flavor_id = self.select_resource_flavor(request)
+        resource_id = self._cal.create_virtual_compute(request)
+        resource = self._resource_class(resource_id, 'dynamic')
+        self._all_resources[resource_id] = resource
+        self._allocated_resources[resource_id] = resource
+        self._log.info("Successfully allocated virtual-compute resource from CAL with resource-id: %s", resource_id)
+        return resource
+
+    @asyncio.coroutine
+    def release_cal_resource(self, resource):
+        if hasattr(resource, 'requested_params'):
+            delattr(resource, 'requested_params')
+        if resource.resource_type == 'dynamic':
+            self._cal.delete_virtual_compute(resource.resource_id)
+            self._all_resources.pop(resource.resource_id)
+            self._log.info("Successfully released virtual-compute resource in CAL with resource-id: %s", resource.resource_id)
+        else:
+            self._log.info("Successfully released virtual-compute resource with resource-id: %s into available-list", resource.resource_id)
+            self._free_resources.append(resource)
+
+    def get_resource_info(self, resource):
+        info = self._cal.get_virtual_compute_info(resource.resource_id)
+        self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
+                       resource.resource_id, str(info))
+        response = RwResourceMgrYang.VDUEventData_ResourceInfo()
+        response.from_dict(info.as_dict())
+        response.pool_name = self.name
+        response.resource_state = self._get_resource_state(info, resource.requested_params)
+        return response
+
+    def get_info_by_id(self, resource_id):
+        info = self._cal.get_virtual_compute_info(resource_id)
+        self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
+                       resource_id, str(info))
+        return info
+
+    def _get_resource_state(self, resource_info, requested_params):
+        if resource_info.state == 'failed':
+            self._log.error("Resource %s reached failed state.",
+                            resource_info.name)
+            return 'failed'
+
+        if resource_info.state != 'active':
+            self._log.info("Resource %s has not yet reached active state.",
+                           resource_info.name)
+            return 'pending'
+
+        if not resource_info.has_field('management_ip') or resource_info.management_ip == '':
+            self._log.info("Resource %s has no management IP assigned yet.",
+                           resource_info.name)
+            return 'pending'
+
+        if (requested_params.has_field('allocate_public_address')) and (requested_params.allocate_public_address == True):
+            if not resource_info.has_field('public_ip'):
+                self._log.warning("Resource %s is still waiting for a public IP; requested params: %s",
+                                  resource_info.name, requested_params)
+                return 'pending'
+
+        if (len(requested_params.connection_points) !=
+                len(resource_info.connection_points)):
+            return 'pending'
+
+        not_active = [c for c in resource_info.connection_points
+                      if c.state != 'active']
+
+        if not_active:
+            self._log.warning("Resource %s is still waiting for connection points to become active: %s",
+                              resource_info.name, resource_info)
+            return 'pending'
+
+        ## Find the connection_points which are in active state but do not have an IP address
+        no_address = [c for c in resource_info.connection_points
+                      if (c.state == 'active') and (not c.has_field('ip_address'))]
+
+        if no_address:
+            self._log.warning("Resource %s is still waiting for connection point IP addresses: %s",
+                              resource_info.name, resource_info)
+            return 'pending'
+
+        return 'active'
+
+    def select_resource_flavor(self, request):
+        flavors = self._cal.get_compute_flavor_info_list()
+        self._log.debug("Received %d flavors from RW.CAL", len(flavors))
+        flavor_id = None
+        match_found = False
+        for flv in flavors:
+            self._log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                           request.name, flv)
+            if self.match_epa_params(flv, request):
+                self._log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                               request.name, flv.name, flv.id)
+                match_found = True
+                flavor_id = flv.id
+                break
+
+        if not match_found:
+            ### Check if CAL account allows dynamic flavor creation
+            if self._cal.dynamic_flavor_supported():
+                self._log.info("Attempting to create a new flavor for the compute requirements of VDU: %s", request.name)
+                flavor_id = self._cal.create_compute_flavor(request)
+            else:
+                ### No match with existing flavors and CAL does not support dynamic flavor creation
+                self._log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", request.name)
+                raise ResMgrNoResourcesAvailable("No resource available with matching EPA attributes")
+        else:
+            ### Found flavor
+            self._log.info("Found flavor with id: %s for compute requirement for VDU: %s",
+                           flavor_id, request.name)
+        return flavor_id
+
+    def _match_vm_flavor(self, required, available):
+        if available.vcpu_count != required.vcpu_count:
+            self._log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+                            required.vcpu_count,
+                            available.vcpu_count)
+            return False
+        if available.memory_mb != required.memory_mb:
+            self._log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+                            required.memory_mb,
+                            available.memory_mb)
+            return False
+        if available.storage_gb != required.storage_gb:
+            self._log.debug("Storage requirement mismatch. 
Required: %d GB, Available: %d GB", + required.storage_gb, + available.storage_gb) + return False + self._log.debug("VM Flavor match found") + return True + + def _match_guest_epa(self, required, available): + if required.has_field('mempage_size'): + self._log.debug("Matching mempage_size") + if available.has_field('mempage_size') == False: + self._log.debug("Matching mempage_size failed. Not available in flavor") + return False + else: + if required.mempage_size != available.mempage_size: + self._log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size) + return False + + if required.has_field('cpu_pinning_policy'): + self._log.debug("Matching cpu_pinning_policy") + if required.cpu_pinning_policy != 'ANY': + if available.has_field('cpu_pinning_policy') == False: + self._log.debug("Matching cpu_pinning_policy failed. Not available in flavor") + return False + else: + if required.cpu_pinning_policy != available.cpu_pinning_policy: + self._log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy) + return False + + if required.has_field('cpu_thread_pinning_policy'): + self._log.debug("Matching cpu_thread_pinning_policy") + if available.has_field('cpu_thread_pinning_policy') == False: + self._log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor") + return False + else: + if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy: + self._log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy) + return False + + if required.has_field('trusted_execution'): + self._log.debug("Matching trusted_execution") + if required.trusted_execution == True: + if available.has_field('trusted_execution') == False: + self._log.debug("Matching trusted_execution failed. Not available in flavor") + return False + else: + if required.trusted_execution != available.trusted_execution: + self._log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution) + return False + + if required.has_field('numa_node_policy'): + self._log.debug("Matching numa_node_policy") + if available.has_field('numa_node_policy') == False: + self._log.debug("Matching numa_node_policy failed. Not available in flavor") + return False + else: + if required.numa_node_policy.has_field('node_cnt'): + self._log.debug("Matching numa_node_policy node_cnt") + if available.numa_node_policy.has_field('node_cnt') == False: + self._log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor") + return False + else: + if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt: + self._log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt) + return False + + if required.numa_node_policy.has_field('mem_policy'): + self._log.debug("Matching numa_node_policy mem_policy") + if available.numa_node_policy.has_field('mem_policy') == False: + self._log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor") + return False + else: + if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy: + self._log.debug("Matching numa_node_policy mem_policy failed. 
Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy) + return False + + if required.numa_node_policy.has_field('node'): + self._log.debug("Matching numa_node_policy nodes configuration") + if available.numa_node_policy.has_field('node') == False: + self._log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor") + return False + for required_node in required.numa_node_policy.node: + self._log.debug("Matching numa_node_policy nodes configuration for node %s", required_node) + numa_match = False + for available_node in available.numa_node_policy.node: + if required_node.id != available_node.id: + self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node) + continue + if required_node.vcpu != available_node.vcpu: + self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node) + continue + if required_node.memory_mb != available_node.memory_mb: + self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node) + continue + numa_match = True + if numa_match == False: + return False + return True + + def _match_vswitch_epa(self, required, available): + self._log.debug("VSwitch EPA match found") + return True + + def _match_hypervisor_epa(self, required, available): + self._log.debug("Hypervisor EPA match found") + return True + + def _match_host_epa(self, required, available): + return True + + def match_image_params(self, resource_info, request_params): + return True + + def match_epa_params(self, resource_info, request_params): + result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'), + getattr(resource_info, 'vm_flavor')) + if result == False: + self._log.debug("VM Flavor mismatched") + return False + + result = self._match_guest_epa(getattr(request_params, 'guest_epa'), + getattr(resource_info, 'guest_epa')) + if result == False: + self._log.debug("Guest EPA mismatched") + return False + + result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'), + getattr(resource_info, 'vswitch_epa')) + if result == False: + self._log.debug("Vswitch EPA mismatched") + return False + + result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'), + getattr(resource_info, 'hypervisor_epa')) + if result == False: + self._log.debug("Hypervisor EPA mismatched") + return False + + result = self._match_host_epa(getattr(request_params, 'host_epa'), + getattr(resource_info, 'host_epa')) + if result == False: + self._log.debug("Host EPA mismatched") + return False + + return True + + + def initialize_resource_in_cal(self, resource, request): + self._log.info("Initializing the compute-resource with id: %s in RW.CAL", resource.resource_id) + modify_params = RwcalYang.VDUModifyParams() + modify_params.vdu_id = resource.resource_id + modify_params.image_id = request.image_id + + for c_point in request.connection_points: + self._log.debug("Adding connection point for VDU: %s to virtual-compute with id: %s Connection point Name: %s", + request.name,resource.resource_id,c_point.name) + point = modify_params.connection_points_add.add() + point.name = c_point.name + point.virtual_link_id = c_point.virtual_link_id + self._cal.modify_virtual_compute(modify_params) + + def uninitialize_resource_in_cal(self, resource): + self._log.info("Un-initializing the compute-resource with id: %s in 
RW.CAL", resource.resource_id) + modify_params = RwcalYang.VDUModifyParams() + modify_params.vdu_id = resource.resource_id + resource_info = self.get_resource_info(resource) + for c_point in resource_info.connection_points: + self._log.debug("Removing connection point: %s from VDU: %s ", + c_point.name,resource_info.name) + point = modify_params.connection_points_remove.add() + point.connection_point_id = c_point.connection_point_id + self._cal.modify_virtual_compute(modify_params) + + +class ResourceMgrCore(object): + def __init__(self, dts, log, log_hdl, loop, parent): + self._log = log + self._log_hdl = log_hdl + self._dts = dts + self._loop = loop + self._parent = parent + self._cloud_cals = {} + # Dictionary of pool objects keyed by name + self._cloud_pool_table = {} + # Dictionary of tuples (resource_id, cloud_account_name, pool_name) keyed by event_id + self._resource_table = {} + self._pool_class = {'compute': ComputePool, + 'network': NetworkPool} + + def _get_cloud_pool_table(self, cloud_account_name): + if cloud_account_name not in self._cloud_pool_table: + msg = "Cloud account %s not found" % cloud_account_name + self._log.error(msg) + raise ResMgrCloudAccountNotFound(msg) + + return self._cloud_pool_table[cloud_account_name] + + def _get_cloud_cal_plugin(self, cloud_account_name): + if cloud_account_name not in self._cloud_cals: + msg = "Cloud account %s not found" % cloud_account_name + self._log.error(msg) + raise ResMgrCloudAccountNotFound(msg) + + return self._cloud_cals[cloud_account_name] + + def _add_default_cloud_pools(self, cloud_account_name): + self._log.debug("Adding default compute and network pools for cloud account %s", + cloud_account_name) + default_pools = [ + { + 'name': '____default_compute_pool', + 'resource_type': 'compute', + 'pool_type': 'dynamic', + 'max_size': 128, + }, + { + 'name': '____default_network_pool', + 'resource_type': 'network', + 'pool_type': 'dynamic', + 'max_size': 128, + }, + ] + + for pool_dict in default_pools: + pool_info = ResourcePoolInfo.from_dict(pool_dict) + self._log.info("Applying configuration for cloud account %s pool: %s", + cloud_account_name, pool_info.name) + + self.add_resource_pool(cloud_account_name, pool_info) + self.unlock_resource_pool(cloud_account_name, pool_info.name) + + def get_cloud_account_names(self): + """ Returns a list of configured cloud account names """ + return self._cloud_cals.keys() + + def add_cloud_account(self, account): + self._log.debug("Received CAL account. 
Account Name: %s, Account Type: %s", + account.name, account.account_type) + + ### Add cal handler to all the pools + if account.name in self._cloud_cals: + raise ResMgrCloudAccountExists("Cloud account already exists in res mgr: %s", + account.name) + + self._cloud_pool_table[account.name] = {} + + cal = ResourceMgrCALHandler(self._log, self._log_hdl, account) + self._cloud_cals[account.name] = cal + + self._add_default_cloud_pools(account.name) + + def update_cloud_account(self, account): + raise NotImplementedError("Update cloud account not implemented") + + def delete_cloud_account(self, account_name, dry_run=False): + cloud_pool_table = self._get_cloud_pool_table(account_name) + for pool in cloud_pool_table.values(): + if pool.in_use(): + raise ResMgrCloudAccountInUse("Cannot delete cloud which is currently in use") + + # If dry_run is specified, do not actually delete the cloud account + if dry_run: + return + + for pool in list(cloud_pool_table): + self.delete_resource_pool(account_name, pool) + + del self._cloud_pool_table[account_name] + del self._cloud_cals[account_name] + + def add_resource_pool(self, cloud_account_name, pool_info): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_info.name in cloud_pool_table: + raise ResMgrDuplicatePool("Pool with name: %s already exists", pool_info.name) + + cloud_cal = self._get_cloud_cal_plugin(cloud_account_name) + pool = self._pool_class[pool_info.resource_type](self._log, self._loop, pool_info, cloud_cal) + + cloud_pool_table[pool_info.name] = pool + + def delete_resource_pool(self, cloud_account_name, pool_name): + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + if pool_name not in cloud_pool_table: + self._log.error("Pool: %s not found for deletion", pool_name) + return + pool = cloud_pool_table[pool_name] + + if pool.in_use(): + # Can't delete a pool in use + self._log.error("Pool: %s in use. 
Cannot delete in-use pool", pool.name)
+            return
+
+        pool.cleanup()
+        del cloud_pool_table[pool_name]
+        self._log.info("Resource Pool: %s successfully deleted", pool_name)
+
+    def modify_resource_pool(self, cloud_account_name, pool):
+        pass
+
+    def lock_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.info("Pool: %s is not available for lock operation", pool_name)
+            return
+
+        pool = cloud_pool_table[pool_name]
+        pool.lock_pool()
+
+    def unlock_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.info("Pool: %s is not available for unlock operation", pool_name)
+            return
+
+        pool = cloud_pool_table[pool_name]
+        pool.unlock_pool()
+
+    def get_resource_pool_info(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name in cloud_pool_table:
+            pool = cloud_pool_table[pool_name]
+            return pool.get_pool_info()
+        else:
+            return None
+
+    def get_resource_pool_list(self, cloud_account_name):
+        return [v for _, v in self._get_cloud_pool_table(cloud_account_name).items()]
+
+    def _select_resource_pools(self, cloud_account_name, resource_type):
+        pools = [pool for pool in self.get_resource_pool_list(cloud_account_name) if pool.resource_type == resource_type and pool.status == 'unlocked']
+        if not pools:
+            raise ResMgrPoolNotAvailable("No %s pool found for resource allocation", resource_type)
+
+        return pools[0]
+
+    @asyncio.coroutine
+    def allocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type):
+        ### Check if event_id is unique or already in use
+        if event_id in self._resource_table:
+            r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+            self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s",
+                              event_id, pool_name)
+            # If the resource-type matches then return the same resource
+            cloud_pool_table = self._get_cloud_pool_table(request.cloud_account)
+            pool = cloud_pool_table[pool_name]
+            if pool.resource_type == resource_type:
+                info = pool.get_resource_info(r_id)
+                return info
+            else:
+                self._log.error("Event-id conflict. Duplicate event-id: %s", event_id)
+                raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name))
+
+        ### All OK, let's go ahead with resource allocation
+        pool = self._select_resource_pools(cloud_account_name, resource_type)
+        self._log.info("Selected pool %s for resource allocation", pool.name)
+
+        r_id, r_info = yield from pool.allocate_resource(request)
+
+        self._resource_table[event_id] = (r_id, cloud_account_name, pool.name)
+        return r_info
+
+    @asyncio.coroutine
+    def reallocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type, resource):
+        ### Check if event_id is unique or already in use
+        if event_id in self._resource_table:
+            r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+            self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s",
+                              event_id, pool_name)
+            # If the resource-type matches then return the same resource
+            cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+            pool = cloud_pool_table[pool_name]
+            if pool.resource_type == resource_type:
+                info = pool.get_resource_info(r_id)
+                return info
+            else:
+                self._log.error("Event-id conflict. 
Duplicate event-id: %s", event_id) + raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name)) + + r_info = None + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[resource.pool_name] + if pool.resource_type == resource_type: + if resource_type == 'network': + r_id = resource.virtual_link_id + r_info = pool.get_info_by_id(resource.virtual_link_id) + elif resource_type == 'compute': + r_id = resource.vdu_id + r_info = pool.get_info_by_id(resource.vdu_id) + + if r_info is None: + r_id, r_info = yield from pool.allocate_resource(request) + self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name) + return r_info + + self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name) + new_resource = pool._resource_class(r_id, 'dynamic') + if resource_type == 'compute': + requested_params = RwcalYang.VDUInitParams() + requested_params.from_dict(request.as_dict()) + new_resource.requested_params = requested_params + pool._all_resources[r_id] = new_resource + pool._allocated_resources[r_id] = new_resource + return r_info + + @asyncio.coroutine + def release_virtual_resource(self, event_id, resource_type): + ### Check if event_id exists + if event_id not in self._resource_table: + self._log.error("Received resource-release-request with unknown Event-id :%s", event_id) + raise ResMgrUnknownEventId("Received resource-release-request with unknown Event-id :%s" %(event_id)) + + ## All-OK, lets proceed with resource release + r_id, cloud_account_name, pool_name = self._resource_table.pop(event_id) + self._log.debug("Attempting to release virtual resource id %s from pool %s", + r_id, pool_name) + + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[pool_name] + yield from pool.release_resource(r_id) + + @asyncio.coroutine + def read_virtual_resource(self, event_id, resource_type): + ### Check if event_id exists + if event_id not in self._resource_table: + self._log.error("Received resource-read-request with unknown Event-id :%s", event_id) + raise ResMgrUnknownEventId("Received resource-read-request with unknown Event-id :%s" %(event_id)) + + ## All-OK, lets proceed + r_id, cloud_account_name, pool_name = self._resource_table[event_id] + cloud_pool_table = self._get_cloud_pool_table(cloud_account_name) + pool = cloud_pool_table[pool_name] + info = yield from pool.read_resource_info(r_id) + return info \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py new file mode 100644 index 0000000..cdab39b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py @@ -0,0 +1,270 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
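+# The on_prepare callbacks registered in this module all follow one shape:
+# switch on the DTS query action, run the matching resource operation, and
+# respond with the result on the "/resource-info" xpath. Roughly (an
+# illustrative sketch; handle_create/handle_delete/handle_read stand in for
+# the allocate/release/read coroutines wired up below):
+#
+#     @asyncio.coroutine
+#     def dispatch_prepare(action, event_id, request_msg):
+#         handlers = {rwdts.QueryAction.CREATE: handle_create,
+#                     rwdts.QueryAction.DELETE: handle_delete,
+#                     rwdts.QueryAction.READ:   handle_read}
+#         if action not in handlers:
+#             raise ValueError("Unsupported action: %s" % action)
+#         return (yield from handlers[action](event_id, request_msg))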
+ + +# +# + +import asyncio +import sys + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +from gi.repository.RwTypes import RwStatus +import rift.tasklets + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class ResourceMgrEvent(object): + VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data" + VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data" + + def __init__(self, dts, log, loop, parent): + self._log = log + self._dts = dts + self._loop = loop + self._parent = parent + self._vdu_reg = None + self._link_reg = None + + self._vdu_reg_event = asyncio.Event(loop=self._loop) + self._link_reg_event = asyncio.Event(loop=self._loop) + + @asyncio.coroutine + def wait_ready(self, timeout=5): + self._log.debug("Waiting for all request registrations to become ready.") + yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()], + timeout=timeout, loop=self._loop) + + def create_record_dts(self, regh, xact, path, msg): + """ + Create a record in DTS with path and message + """ + self._log.debug("Creating Resource Record xact = %s, %s:%s", + xact, path, msg) + regh.create_element(path, msg) + + def delete_record_dts(self, regh, xact, path): + """ + Delete a VNFR record in DTS with path and message + """ + self._log.debug("Deleting Resource Record xact = %s, %s", + xact, path) + regh.delete_element(path) + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def onlink_event(dts, g_reg, xact, xact_event, scratch_data): + @asyncio.coroutine + def instantiate_realloc_vn(link): + """Re-populate the virtual link information after restart + + Arguments: + vlink + + """ + # wait for 3 seconds + yield from asyncio.sleep(3, loop=self._loop) + + response_info = yield from self._parent.reallocate_virtual_network(link.event_id, + link.cloud_account, + link.request_info, link.resource_info, + ) + if (xact_event == rwdts.MemberEvent.INSTALL): + link_cfg = self._link_reg.elements + for link in link_cfg: + self._loop.create_task(instantiate_realloc_vn(link)) + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def onvdu_event(dts, g_reg, xact, xact_event, scratch_data): + @asyncio.coroutine + def instantiate_realloc_vdu(vdu): + """Re-populate the VDU information after restart + + Arguments: + vdu + + """ + # wait for 3 seconds + yield from asyncio.sleep(3, loop=self._loop) + + response_info = yield from self._parent.reallocate_virtual_compute(vdu.event_id, + vdu.cloud_account, + vdu.request_info, vdu.resource_info, + ) + if (xact_event == rwdts.MemberEvent.INSTALL): + vdu_cfg = self._vdu_reg.elements + for vdu in vdu_cfg: + self._loop.create_task(instantiate_realloc_vdu(vdu)) + return rwdts.MemberRspCode.ACTION_OK + + def on_link_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Received link request commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_link_request_prepare(xact_info, action, ks_path, request_msg): + self._log.debug("Received virtual-link on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg) + + response_info = None + response_xpath = 
ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VirtualLinkEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = yield from self._parent.allocate_virtual_network(pathentry.key00.event_id, + request_msg.cloud_account, + request_msg.request_info) + request_msg.resource_info = response_info + self.create_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg) + elif action == rwdts.QueryAction.DELETE: + yield from self._parent.release_virtual_network(pathentry.key00.event_id) + self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema())) + elif action == rwdts.QueryAction.READ: + response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id) + else: + raise ValueError("Only read/create/delete actions available. Received action: %s" %(action)) + + self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + + def on_vdu_request_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Received vdu request commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + def monitor_vdu_state(response_xpath, pathentry): + self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath) + loop_cnt = 120 + while loop_cnt > 0: + self._log.debug("VDU state monitoring: Sleeping for 1 second ") + yield from asyncio.sleep(1, loop = self._loop) + try: + response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id) + except Exception as e: + self._log.info("VDU state monitoring: Received exception %s in VDU state monitoring for %s. Aborting monitoring", + str(e),response_xpath) + return + if response_info.resource_state == 'active' or response_info.resource_state == 'failed': + self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s", + response_info, response_xpath) + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + else: + loop_cnt -= 1 + ### End of while loop. This is only possible if VDU did not reach active state + self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. 
Aborting monitoring", + response_xpath) + response_info = RwResourceMgrYang.VDUEventData_ResourceInfo() + response_info.resource_state = 'failed' + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + + @asyncio.coroutine + def on_vdu_request_prepare(xact_info, action, ks_path, request_msg): + self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg) + + response_info = None + response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VDUEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = yield from self._parent.allocate_virtual_compute(pathentry.key00.event_id, + request_msg.cloud_account, + request_msg.request_info, + ) + if response_info.resource_state == 'pending': + asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry), + loop = self._loop) + request_msg.resource_info = response_info + self.create_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg) + elif action == rwdts.QueryAction.DELETE: + yield from self._parent.release_virtual_compute(pathentry.key00.event_id) + self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema())) + elif action == rwdts.QueryAction.READ: + response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id) + else: + raise ValueError("Only create/delete actions available. Received action: %s" %(action)) + + self._log.debug("Responding with VDUInfo at xpath %s: %s", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + + + @asyncio.coroutine + def on_request_ready(registration, status): + self._log.debug("Got request ready event (registration: %s) (status: %s)", + registration, status) + + if registration == self._link_reg: + self._link_reg_event.set() + elif registration == self._vdu_reg: + self._vdu_reg_event.set() + else: + self._log.error("Unknown registration ready event: %s", registration) + + link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,) + with self._dts.group_create(handler=link_handlers) as link_group: + self._log.debug("Registering for Link Resource Request using xpath: %s", + ResourceMgrEvent.VLINK_REQUEST_XPATH) + + self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_commit=on_link_request_commit, + on_prepare=on_link_request_prepare), + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.FILE_DATASTORE,) + + vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, ) + with self._dts.group_create(handler=vdu_handlers) as vdu_group: + + self._log.debug("Registering for VDU Resource Request using xpath: %s", + ResourceMgrEvent.VDU_REQUEST_XPATH) + + self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_commit=on_vdu_request_commit, + on_prepare=on_vdu_request_prepare), + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.FILE_DATASTORE,) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py new file mode 100755 index 0000000..bb64ba6 --- 
/dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py @@ -0,0 +1,234 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import logging +import sys + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +from gi.repository import ( + RwDts as rwdts, + RwYang, + RwResourceMgrYang, + RwLaunchpadYang, + RwcalYang, +) + +import rift.tasklets + +from . import rwresmgr_core as Core +from . import rwresmgr_config as Config +from . import rwresmgr_events as Event + + +class ResourceManager(object): + def __init__(self, log, log_hdl, loop, dts): + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._dts = dts + self.config_handler = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self) + self.event_handler = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self) + self.core = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self) + + @asyncio.coroutine + def register(self): + yield from self.config_handler.register() + yield from self.event_handler.register() + + def add_cloud_account_config(self, account): + self._log.debug("Received Cloud-Account add config event for account: %s", account.name) + self.core.add_cloud_account(account) + + def update_cloud_account_config(self, account): + self._log.debug("Received Cloud-Account update config event for account: %s", account.name) + self.core.update_cloud_account(account) + + def delete_cloud_account_config(self, account_name, dry_run=False): + self._log.debug("Received Cloud-Account delete event for account (dry_run: %s): %s", + dry_run, account_name) + self.core.delete_cloud_account(account_name, dry_run) + + def get_cloud_account_names(self): + cloud_account_names = self.core.get_cloud_account_names() + return cloud_account_names + + def pool_add(self, cloud_account_name, pool): + self._log.debug("Received Pool add event for cloud account %s pool: %s", + cloud_account_name, pool.name) + self.core.add_resource_pool(cloud_account_name, pool) + + def pool_modify(self, cloud_account_name, pool): + self._log.debug("Received Pool modify event for cloud account %s pool: %s", + cloud_account_name, pool.name) + self.core.modify_resource_pool(cloud_account_name, pool) + + def pool_delete(self, cloud_account_name, pool_name): + self._log.debug("Received Pool delete event for cloud account %s pool: %s", + cloud_account_name, pool_name) + self.core.delete_resource_pool(cloud_account_name, pool_name) + + def get_pool_list(self, cloud_account_name): + return self.core.get_resource_pool_list(cloud_account_name) + + def get_pool_info(self, cloud_account_name, pool_name): + self._log.debug("Received get-pool-info event for cloud account %s pool: %s", + cloud_account_name, pool_name) + return 
self.core.get_resource_pool_info(cloud_account_name, pool_name)
+
+    def lock_pool(self, cloud_account_name, pool_name):
+        self._log.debug("Received pool lock event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.lock_resource_pool(cloud_account_name, pool_name)
+
+    def unlock_pool(self, cloud_account_name, pool_name):
+        self._log.debug("Received pool unlock event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.unlock_resource_pool(cloud_account_name, pool_name)
+
+    @asyncio.coroutine
+    def allocate_virtual_network(self, event_id, cloud_account_name, request):
+        self._log.info("Received network resource allocation request with event-id: %s", event_id)
+        resource = yield from self.core.allocate_virtual_resource(event_id, cloud_account_name, request, 'network')
+        return resource
+
+    @asyncio.coroutine
+    def reallocate_virtual_network(self, event_id, cloud_account_name, request, resource):
+        self._log.info("Received network resource reallocation request with event-id: %s", event_id)
+        resource = yield from self.core.reallocate_virtual_resource(event_id, cloud_account_name, request, 'network', resource)
+        return resource
+
+    @asyncio.coroutine
+    def release_virtual_network(self, event_id):
+        self._log.info("Received network resource release request with event-id: %s", event_id)
+        yield from self.core.release_virtual_resource(event_id, 'network')
+
+    @asyncio.coroutine
+    def read_virtual_network_info(self, event_id):
+        self._log.info("Received network resource read request with event-id: %s", event_id)
+        info = yield from self.core.read_virtual_resource(event_id, 'network')
+        return info
+
+    @asyncio.coroutine
+    def allocate_virtual_compute(self, event_id, cloud_account_name, request):
+        self._log.info("Received compute resource allocation request "
+                       "(cloud account: %s) with event-id: %s",
+                       cloud_account_name, event_id)
+        resource = yield from self.core.allocate_virtual_resource(
+            event_id, cloud_account_name, request, 'compute',
+        )
+        return resource
+
+    @asyncio.coroutine
+    def reallocate_virtual_compute(self, event_id, cloud_account_name, request, resource):
+        self._log.info("Received compute resource reallocation request "
+                       "(cloud account: %s) with event-id: %s",
+                       cloud_account_name, event_id)
+        resource = yield from self.core.reallocate_virtual_resource(
+            event_id, cloud_account_name, request, 'compute', resource,
+        )
+        return resource
+
+    @asyncio.coroutine
+    def release_virtual_compute(self, event_id):
+        self._log.info("Received compute resource release request with event-id: %s", event_id)
+        yield from self.core.release_virtual_resource(event_id, 'compute')
+
+    @asyncio.coroutine
+    def read_virtual_compute_info(self, event_id):
+        self._log.info("Received compute resource read request with event-id: %s", event_id)
+        info = yield from self.core.read_virtual_resource(event_id, 'compute')
+        return info
+
+
+class ResMgrTasklet(rift.tasklets.Tasklet):
+    def __init__(self, *args, **kwargs):
+        super(ResMgrTasklet, self).__init__(*args, **kwargs)
+        self._dts = None
+        self._resource_manager = None
+
+    def start(self):
+        super(ResMgrTasklet, self).start()
+        self.log.info("Starting ResMgrTasklet")
+        self.log.setLevel(logging.DEBUG)
+
+        self.log.debug("Registering with dts")
+
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      RwResourceMgrYang.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def stop(self):
+        try:
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in RESMGR 
stop:", sys.exc_info()[0]) + raise + + def on_instance_started(self): + self.log.debug("Got instance started callback") + + @asyncio.coroutine + def init(self): + self._log.info("Initializing the Resource Manager tasklet") + self._resource_manager = ResourceManager(self.log, + self.log_hdl, + self.loop, + self._dts) + yield from self._resource_manager.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py new file mode 100755 index 0000000..506b433 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py @@ -0,0 +1,29 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwresmgrtasklet +class Tasklet(rift.tasklets.rwresmgrtasklet.ResMgrTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py new file mode 100755 index 0000000..9c494fa --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py @@ -0,0 +1,784 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+#
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+import random
+
+import xmlrunner
+
+import gi
+gi.require_version('CF', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwMain', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwResourceMgrYang as rmgryang
+from gi.repository import RwcalYang
+from gi.repository import RwCloudYang
+from gi.repository.RwTypes import RwStatus
+
+import rw_peas
+import rift.tasklets
+import rift.test.dts
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+openstack_info = {
+    'username'     : 'pluto',
+    'password'     : 'mypasswd',
+    'auth_url'     : 'http://10.66.4.14:5000/v3/',
+    'project_name' : 'demo',
+    'mgmt_network' : 'private',
+    'image_id'     : '5cece2b1-1a49-42c5-8029-833c56574652',
+    'vms'          : ['res-test-1', 'res-test-2'],
+    'networks'     : ['testnet1', 'testnet2']}
+
+
+def create_mock_resource_template():
+    ### Resources to be requested for 'mock'
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0'))
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1'))
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_cloudsim_resource_template():
+    ### Resources to be requested for 'cloudsim'
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id = "1"
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id = "1"
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
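+
+# The mock and cloudsim templates above differ only in the image ids they
+# request. A factored-out builder could reduce the repetition; this is an
+# illustrative sketch only and is not used by the tests below:
+def make_compute_request(image_id, vcpu_count, memory_mb, storage_gb):
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id = image_id
+    msg.vm_flavor.vcpu_count = vcpu_count
+    msg.vm_flavor.memory_mb = memory_mb
+    msg.vm_flavor.storage_gb = storage_gb
+    return msg
+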
+ msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1')) + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 20 + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + +def create_openstack_static_template(): + ### Resource to be reuqested for 'openstack_static' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 80 + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 4096 + msg.vm_flavor.storage_gb = 40 + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + msg.provider_network.physical_network = 'PHYSNET1' + msg.provider_network.overlay_type = 'VLAN' + msg.provider_network.segmentation_id = 17 + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + msg.provider_network.physical_network = 'PHYSNET1' + msg.provider_network.overlay_type = 'VLAN' + msg.provider_network.segmentation_id = 18 + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + +def create_openstack_dynamic_template(): + ### Resource to be reuqested for 'openstack_dynamic' + resource_requests = {'compute': {}, 'network': {}} + + ###### mycompute-0 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 2 + msg.vm_flavor.memory_mb = 4096 + msg.vm_flavor.storage_gb = 40 + msg.guest_epa.mempage_size = 'LARGE' + msg.guest_epa.cpu_pinning_policy = 'DEDICATED' + msg.allocate_public_address = True + + resource_requests['compute']['mycompute-0'] = msg + + ###### mycompute-1 + msg = rmgryang.VDUEventData_RequestInfo() + msg.image_id = openstack_info['image_id'] + msg.vm_flavor.vcpu_count = 4 + msg.vm_flavor.memory_mb = 8192 + msg.vm_flavor.storage_gb = 40 + msg.guest_epa.mempage_size = 'LARGE' + msg.guest_epa.cpu_pinning_policy = 'DEDICATED' + msg.allocate_public_address = True + + resource_requests['compute']['mycompute-1'] = msg + + ####### mynet-0 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + #msg.provider_network.overlay_type = 'VXLAN' + #msg.provider_network.segmentation_id = 71 + + resource_requests['network']['mynet-0'] = msg + + ####### mynet-1 + msg = rmgryang.VirtualLinkEventData_RequestInfo() + #msg.provider_network.overlay_type = 'VXLAN' + #msg.provider_network.segmentation_id = 73 + resource_requests['network']['mynet-1'] = msg + + return resource_requests + + + + +resource_requests = { + 'mock' : create_mock_resource_temaplate(), + 'openstack_static': create_openstack_static_template(), + 'openstack_dynamic': create_openstack_dynamic_template(), + 'cloudsim': create_cloudsim_resource_template(), +} + + +def get_cal_account(account_type): + """ + Creates an object for class RwcalYang.CloudAccount() + """ + account = RwcalYang.CloudAccount() + if account_type == 'mock': + account.name = 'mock_account' + 
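+        # A rough dict equivalent of the account built in this branch (field
+        # names assumed to mirror the yang model used in this function):
+        #   RwcalYang.CloudAccount.from_dict({
+        #       'name': 'mock_account',
+        #       'account_type': 'mock',
+        #       'mock': {'username': 'mock_user'}})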
account.account_type = "mock" + account.mock.username = "mock_user" + elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')): + account.name = 'openstack_cal' + account.account_type = 'openstack' + account.openstack.key = openstack_info['username'] + account.openstack.secret = openstack_info['password'] + account.openstack.auth_url = openstack_info['auth_url'] + account.openstack.tenant = openstack_info['project_name'] + account.openstack.mgmt_network = openstack_info['mgmt_network'] + + elif account_type == 'cloudsim': + account.name = 'cloudsim' + account.account_type = "cloudsim_proxy" + + return account + +def create_cal_plugin(account, log_hdl): + plugin_name = getattr(account, account.account_type).plugin_name + plugin = rw_peas.PeasPlugin(plugin_name, 'RwCal-1.0') + engine, info, extension = plugin() + rwcal = plugin.get_interface("Cloud") + try: + rc = rwcal.init(log_hdl) + assert rc == RwStatus.SUCCESS + except Exception as e: + raise + return rwcal + + +class RMMgrTestCase(rift.test.dts.AbstractDTSTest): + rwcal = None + rwcal_acct_info = None + + @classmethod + def configure_suite(cls, rwmain): + rm_dir = os.environ.get('RM_DIR') + cnt_mgr_dir = os.environ.get('CNTR_MGR_DIR') + cal_proxy_dir = os.environ.get('CAL_PROXY_DIR') + + cls.rwmain.add_tasklet(cal_proxy_dir, 'rwcalproxytasklet') + cls.rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet') + cls.rwmain.add_tasklet(cnt_mgr_dir, 'rwcntmgrtasklet') + + @classmethod + def configure_schema(cls): + return rmgryang.get_schema() + + @asyncio.coroutine + def wait_tasklets(self): + yield from asyncio.sleep(1, loop=self.loop) + + @classmethod + def configure_timeout(cls): + return 360 + + def get_cloud_account_msg(self, acct_type): + cloud_account = RwCloudYang.CloudAccount() + acct = get_cal_account(acct_type) + cloud_account.from_dict(acct.as_dict()) + cloud_account.name = acct.name + return cloud_account + + def get_compute_pool_msg(self, name, pool_type, cloud_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "compute" + if pool_type == "static": + pool.pool_type = 'static' + acct = get_cal_account(cloud_type) + rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx()) + rc, rsp = rwcal.get_vdu_list(acct) + assert rc == RwStatus.SUCCESS + + if cloud_type == 'openstack_static': + for vdu in rsp.vdu_info_list: + if vdu.name in openstack_info['vms']: + self.log.info("Adding the static compute resource: %s to compute pool", vdu.name) + r = pool.resources.add() + r.resource_id = vdu.vdu_id + else: + # 'mock', 'cloudsim' 'openstack_dynamic' etc + for vdu in rsp.vdu_info_list: + self.log.info("Adding the static compute resource: %s to compute pool", vdu.name) + r = pool.resources.add() + r.resource_id = vdu.vdu_id + else: + pool.pool_type = 'dynamic' + pool.max_size = 10 + return pool_config + + def get_network_pool_msg(self, name, pool_type, cloud_type): + pool_config = rmgryang.ResourcePools() + pool = pool_config.pools.add() + pool.name = name + pool.resource_type = "network" + if pool_type == "static": + pool.pool_type = 'static' + acct = get_cal_account(cloud_type) + rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx()) + rc, rsp = rwcal.get_virtual_link_list(acct) + assert rc == RwStatus.SUCCESS + if cloud_type == 'openstack_static': + for vlink in rsp.virtual_link_info_list: + if vlink.name in openstack_info['networks']: + self.log.info("Adding the static network resource: %s to network pool", vlink.name) + r = 
pool.resources.add() + r.resource_id = vlink.virtual_link_id + else: + # 'mock', 'cloudsim', 'openstack_dynamic' etc + for vlink in rsp.virtual_link_info_list: + self.log.info("Adding the static network resource: %s to network pool", vlink.name) + r = pool.resources.add() + r.resource_id = vlink.virtual_link_id + else: + pool.pool_type = 'dynamic' + pool.max_size = 4 + return pool_config + + + def get_network_reserve_msg(self, name, cloud_type, xpath): + event_id = str(uuid.uuid4()) + msg = rmgryang.VirtualLinkEventData() + msg.event_id = event_id + msg.request_info.name = name + attributes = ['physical_network', 'name', 'overlay_type', 'segmentation_id'] + + for attr in attributes: + if resource_requests[cloud_type]['network'][name].has_field('provider_network'): + if resource_requests[cloud_type]['network'][name].provider_network.has_field(attr): + setattr(msg.request_info.provider_network, attr, + getattr(resource_requests[cloud_type]['network'][name].provider_network ,attr)) + + return msg, xpath.format(event_id) + + def get_compute_reserve_msg(self, name, cloud_type, xpath, vlinks): + event_id = str(uuid.uuid4()) + msg = rmgryang.VDUEventData() + msg.event_id = event_id + msg.request_info.name = name + msg.request_info.image_id = resource_requests[cloud_type]['compute'][name].image_id + attributes = ['image_id', 'vcpu_count', 'memory_mb', 'storage_gb'] + + if resource_requests[cloud_type]['compute'][name].has_field('vm_flavor'): + for attr in attributes: + if resource_requests[cloud_type]['compute'][name].vm_flavor.has_field(attr): + setattr(msg.request_info.vm_flavor, + attr, + getattr(resource_requests[cloud_type]['compute'][name].vm_flavor , attr)) + + attributes = ['mempage_size', 'cpu_pinning_policy'] + + if resource_requests[cloud_type]['compute'][name].has_field('guest_epa'): + for attr in attributes: + if resource_requests[cloud_type]['compute'][name].guest_epa.has_field(attr): + setattr(msg.request_info.guest_epa, + attr, + getattr(resource_requests[cloud_type]['compute'][name].guest_epa , attr)) + + if resource_requests[cloud_type]['compute'][name].has_field('allocate_public_address'): + msg.request_info.allocate_public_address = resource_requests[cloud_type]['compute'][name].allocate_public_address + + cnt = 0 + for link in vlinks: + c1 = msg.request_info.connection_points.add() + c1.name = name+"-port-"+str(cnt) + cnt += 1 + c1.virtual_link_id = link + + self.log.info("Sending message :%s", msg) + return msg, xpath.format(event_id) + + @asyncio.coroutine + def configure_cloud_account(self, dts, acct_type): + account_xpath = "C,/rw-cloud:cloud/account" + msg = self.get_cloud_account_msg(acct_type) + self.log.info("Configuring cloud-account: %s",msg) + yield from dts.query_create(account_xpath, + rwdts.Flag.ADVISE, + msg) + + @asyncio.coroutine + def configure_compute_resource_pools(self, dts, resource_type, cloud_type): + pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools" + msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type) + self.log.info("Configuring compute-resource-pool: %s",msg) + yield from dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + + + @asyncio.coroutine + def configure_network_resource_pools(self, dts, resource_type, cloud_type): + pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools" + msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type) + self.log.info("Configuring network-resource-pool: %s",msg) + yield from 
dts.query_create(pool_xpath, + rwdts.Flag.ADVISE, + msg) + + @asyncio.coroutine + def verify_resource_pools_config(self, dts): + pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records" + self.log.debug("Verifying test_create_resource_pools results") + res_iter = yield from dts.query_read(pool_records_xpath,) + for result in res_iter: + response = yield from result + records = response.result.records + #self.assertEqual(len(records), 2) + #names = [i.name for i in records] + #self.assertTrue('virtual-compute' in names) + #self.assertTrue('virtual-network' in names) + for record in records: + self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Allocated Resources: %d, Free Resources: %d", + record.name, + record.resource_type, + record.pool_status, + record.total_resources, + record.allocated_resources, + record.free_resources) + + @asyncio.coroutine + def read_resource(self, dts, xpath): + self.log.debug("Reading data for XPATH:%s", xpath) + result = yield from dts.query_read(xpath, rwdts.Flag.MERGE) + msg = None + for r in result: + msg = yield from r + self.log.debug("Received data: %s", msg.result) + return msg.result + + @asyncio.coroutine + def reserve_network_resources(self, name, dts, cloud_type): + network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']" + msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath) + self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, 0, msg) + return xpath + + + @asyncio.coroutine + def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []): + compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']" + msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks) + self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg)) + yield from dts.query_create(xpath, 0, msg) + return xpath + + @asyncio.coroutine + def release_network_resources(self, dts, xpath): + self.log.debug("Initiating network resource release for : %s ", xpath) + yield from dts.query_delete(xpath, 0) + + @asyncio.coroutine + def release_compute_resources(self, dts, xpath): + self.log.debug("Initiating compute resource release for : %s ", xpath) + yield from dts.query_delete(xpath, 0) + + @unittest.skip("Skipping test_static_pool_resource_allocation") + def test_static_pool_resource_allocation(self): + self.log.debug("STARTING - test_static_pool_resource_allocation") + tinfo = self.new_tinfo('static_mock') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'mock' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + + yield from self.configure_network_resource_pools(dts, "static", cloud_type) + yield from self.configure_compute_resource_pools(dts, "static", cloud_type) + yield from self.verify_resource_pools_config(dts) + + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from 
self.verify_resource_pools_config(dts) + + for r in computes: + yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_static_pool_resource_allocation") + + @unittest.skip("Skipping test_dynamic_pool_resource_allocation") + def test_dynamic_pool_resource_allocation(self): + self.log.debug("STARTING - test_dynamic_pool_resource_allocation") + tinfo = self.new_tinfo('dynamic_mock') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'mock' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type) + yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type) + yield from self.verify_resource_pools_config(dts) + + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_dynamic_pool_resource_allocation") + + @unittest.skip("Skipping test_dynamic_pool_resource_allocation") + def test_dynamic_cloudsim_pool_resource_allocation(self): + self.log.debug("STARTING - test_dynamic_pool_resource_allocation") + tinfo = self.new_tinfo('dynamic_mock') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'cloudsim' + + yield from asyncio.sleep(120, loop=self.loop) + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type) + yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type) + yield from self.verify_resource_pools_config(dts) + + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + 
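+                # r is the (xpath, read-result) pair collected above; r[0] is
+                # the vdu-event xpath that the delete below operates on.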
yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_dynamic_pool_resource_allocation") + + @unittest.skip("Skipping test_static_pool_openstack_resource_allocation") + def test_static_pool_openstack_resource_allocation(self): + self.log.debug("STARTING - test_static_pool_openstack_resource_allocation") + tinfo = self.new_tinfo('static_openstack') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'openstack_static' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "static", cloud_type) + yield from self.configure_compute_resource_pools(dts, "static", cloud_type) + yield from self.verify_resource_pools_config(dts) + + self.log.debug("Creating virtual-network-resources in openstack") + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + self.log.debug("virtual-network-resources successfully created in openstack") + + self.log.debug("Creating virtual-network-compute in openstack") + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + yield from self.release_compute_resources(dts, r[0]) + + yield from self.release_network_resources(dts,networks[0][0]) + yield from self.verify_resource_pools_config(dts) + self.log.debug("Openstack static resource allocation completed") + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_static_pool_openstack_resource_allocation") + + #@unittest.skip("Skipping test_dynamic_pool_openstack_resource_allocation") + def test_dynamic_pool_openstack_resource_allocation(self): + self.log.debug("STARTING - test_dynamic_pool_openstack_resource_allocation") + tinfo = self.new_tinfo('dynamic_openstack') + dts = rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + cloud_type = 'openstack_dynamic' + yield from self.wait_tasklets() + yield from self.configure_cloud_account(dts, cloud_type) + yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type) + yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type) + yield from self.verify_resource_pools_config(dts) + + self.log.debug("Creating virtual-network-resources in openstack") + r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type) + r_info = yield from self.read_resource(dts,r_xpath) + networks.append((r_xpath, r_info.resource_info)) + self.log.debug("virtual-network-resources successfully 
created in openstack") + + self.log.debug("Creating virtual-network-compute in openstack") + for i in range(2): + r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i), + dts, + cloud_type, + [networks[0][1].virtual_link_id]) + r_info = yield from self.read_resource(dts,r_xpath) + computes.append((r_xpath, r_info)) + + yield from self.verify_resource_pools_config(dts) + for r in computes: + self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id) + #yield from self.release_compute_resources(dts, r[0]) + + self.log.debug("Releasing network resource with id: %s", r[1].resource_info.vdu_id) + #yield from self.release_network_resources(dts,networks[0][0]) + #yield from self.verify_resource_pools_config(dts) + self.log.debug("Openstack dynamic resource allocation completed") + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_dynamic_pool_openstack_resource_allocation") + + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + mc_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build') + launchpad_build_dir = os.path.join(mc_build_dir, 'rwlaunchpad') + cntr_mgr_build_dir = os.path.join(mc_build_dir, 'rwcntmgr') + + if 'MESSAGE_BROKER_DIR' not in os.environ: + os.environ['MESSAGE_BROKER_DIR'] = os.path.join(build_dir, 'rwmsg/plugins/rwmsgbroker-c') + + if 'ROUTER_DIR' not in os.environ: + os.environ['ROUTER_DIR'] = os.path.join(build_dir, 'rwdts/plugins/rwdtsrouter-c') + + if 'RM_DIR' not in os.environ: + os.environ['RM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwresmgrtasklet') + + if 'CAL_PROXY_DIR' not in os.environ: + os.environ['CAL_PROXY_DIR'] = os.path.join(build_dir, 'plugins/rwcalproxytasklet') + + if 'CNTR_MGR_DIR' not in os.environ: + os.environ['CNTR_MGR_DIR'] = os.path.join(cntr_mgr_build_dir, 'plugins/rwcntmgrtasklet') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt new file mode 100644 index 0000000..97aa0ca --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt @@ -0,0 +1,27 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwvnfmtasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
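+#
+# The file list below mirrors the rift/tasklets/<tasklet> source layout.
+# PYTHON3_ONLY is assumed to restrict the install to the python3
+# site-packages tree, which is what the workaround above depends on.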
+rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/mon_params.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py new file mode 100644 index 0000000..4bde5b3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwvnfmtasklet import VnfmTasklet +from . import mon_params diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py new file mode 100644 index 0000000..a6134d2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/mon_params.py @@ -0,0 +1,678 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
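+# This module polls the HTTP endpoints declared in a VNFD and extracts
+# monitoring values from the JSON responses.  A minimal sketch of the
+# extraction path defined below, assuming a response body of '{"users": 42}':
+#
+#     querier = JsonKeyValueQuerier(log, "users")
+#     value = querier.query('{"users": 42}')   # -> 42
+#     ValueConverter("INT").convert(value)     # -> 42 as int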
+
+
+#
+#
+
+import asyncio
+import collections
+import types
+
+import requests
+import requests.auth
+import tornado.escape
+
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+from gi.repository import (
+    RwDts as rwdts,
+    VnfrYang
+    )
+
+class MonitoringParamError(Exception):
+    """Monitoring Parameter error"""
+    pass
+
+
+class JsonPathValueQuerier(object):
+    def __init__(self, log, json_path):
+        self._log = log
+        self._json_path = json_path
+        self._json_path_expr = None
+
+        try:
+            import jsonpath_rw
+            self._json_path_expr = jsonpath_rw.parse(self._json_path)
+        except Exception as e:
+            self._log.error("Could not create json_path parser: %s", str(e))
+
+    def query(self, json_msg):
+        try:
+            json_dict = tornado.escape.json_decode(json_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into json"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        if self._json_path_expr is None:
+            raise MonitoringParamError(
+                "Parser not created. Unable to extract value from %s" % json_msg
+            )
+
+        try:
+            matches = self._json_path_expr.find(json_dict)
+            values = [m.value for m in matches]
+        except Exception as e:
+            raise MonitoringParamError(
+                "Failed to run find using json_path (%s) against json_msg: %s" %
+                (self._json_path, str(e))
+            )
+
+        if len(values) == 0:
+            raise MonitoringParamError(
+                "No values found from json_path (%s)" % self._json_path
+            )
+
+        if len(values) > 1:
+            self._log.debug("Got multiple values from json_path (%s). Only returning the first.",
+                            self._json_path)
+
+        return values[0]
+
+
+class ObjectPathValueQuerier(object):
+    def __init__(self, log, object_path):
+        self._log = log
+        self._object_path = object_path
+        self._object_path_expr = None
+
+    def query(self, object_msg):
+        try:
+            object_dict = tornado.escape.json_decode(object_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into object"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        import objectpath
+        try:
+            tree = objectpath.Tree(object_dict)
+        except Exception as e:
+            msg = "Could not create objectpath tree: %s" % str(e)
+            self._log.error(msg)
+            raise MonitoringParamError(msg)
+
+        try:
+            value = tree.execute(self._object_path)
+        except Exception as e:
+            raise MonitoringParamError(
+                "Failed to run execute object_path (%s) against object_msg: %s" %
+                (self._object_path, str(e))
+            )
+
+        if isinstance(value, types.GeneratorType):
+            try:
+                value = next(value)
+            except Exception as e:
+                raise MonitoringParamError(
+                    "Failed to get value from objectpath %s execute generator: %s" %
+                    (self._object_path, str(e))
+                )
+
+        if isinstance(value, (list, tuple)):
+            if len(value) == 0:
+                raise MonitoringParamError(
+                    "No values found from object_path (%s)" % self._object_path
+                )
+
+            elif len(value) > 1:
+                self._log.debug(
+                    "Got multiple values from object_path (%s). 
" + "Only returning the first.", self._object_path + ) + + # Only take the first element + value = value[0] + + return value + + +class JsonKeyValueQuerier(object): + def __init__(self, log, key): + self._log = log + self._key = key + + def query(self, json_msg): + try: + json_dict = tornado.escape.json_decode(json_msg) + except ValueError as e: + msg = "Failed to convert response into json" + self._log.warning(msg) + raise MonitoringParamError(e) + + if self._key not in json_dict: + msg = "Did not find '{}' key in response: {}".format( + self._key, json_dict + ) + self._log.warning(msg) + raise MonitoringParamError(msg) + + value = json_dict[self._key] + + return value + + +class ValueConverter(object): + def __init__(self, value_type): + self._value_type = value_type + + def _convert_int(self, value): + if isinstance(value, int): + return value + + try: + return int(value) + except (ValueError, TypeError) as e: + raise MonitoringParamError( + "Could not convert value into integer: %s", str(e) + ) + + def _convert_text(self, value): + if isinstance(value, str): + return value + + try: + return str(value) + except (ValueError, TypeError) as e: + raise MonitoringParamError( + "Could not convert value into string: %s", str(e) + ) + + def _convert_decimal(self, value): + if isinstance(value, float): + return value + + try: + return float(value) + except (ValueError, TypeError) as e: + raise MonitoringParamError( + "Could not convert value into string: %s", str(e) + ) + + def convert(self, value): + if self._value_type == "INT": + return self._convert_int(value) + elif self._value_type == "DECIMAL": + return self._convert_decimal(value) + elif self._value_type == "STRING": + return self._convert_text(value) + else: + raise MonitoringParamError("Unknown value type: %s", self._value_type) + + +class HTTPBasicAuth(object): + def __init__(self, username, password): + self.username = username + self.password = password + + +class HTTPEndpoint(object): + def __init__(self, log, loop, ip_address, ep_msg): + self._log = log + self._loop = loop + self._ip_address = ip_address + self._ep_msg = ep_msg + + # This is to suppress HTTPS related warning as we do not support + # certificate verification yet + requests.packages.urllib3.disable_warnings(InsecureRequestWarning) + self._session = requests.Session() + self._auth = None + self._headers = None + + @property + def poll_interval(self): + return self._ep_msg.polling_interval_secs + + @property + def ip_address(self): + return self._ip_address + + @property + def port(self): + return self._ep_msg.port + + @property + def protocol(self): + if self._ep_msg.has_field("https"): + if self._ep_msg.https is True: + return "https" + + return "http" + + @property + def path(self): + return self._ep_msg.path + + @property + def method(self): + if self._ep_msg.has_field("method"): + return self._ep_msg.method + return "GET" + + @property + def username(self): + if self._ep_msg.has_field("username"): + return self._ep_msg.username + + return None + + @property + def headers(self): + if self._headers is None: + headers = {} + for header in self._ep_msg.headers: + if header.has_field("key") and header.has_field("value"): + headers[header.key] = header.value + + self._headers = headers + + return self._headers + + @property + def password(self): + if self._ep_msg.has_field("password"): + return self._ep_msg.password + + return None + + @property + def auth(self): + if self._auth is None: + if self.username is not None and self.password is not None: + self._auth = 
requests.auth.HTTPBasicAuth( + self.username, + self.password, + ) + + return self._auth + + @property + def url(self): + url = "{protocol}://{ip_address}:{port}/{path}".format( + protocol=self.protocol, + ip_address=self.ip_address, + port=self.port, + path=self.path.lstrip("/"), + ) + + return url + + def _poll(self): + try: + resp = self._session.request( + self.method, self.url, timeout=10, auth=self.auth, + headers=self.headers, verify=False + ) + resp.raise_for_status() + except requests.exceptions.RequestException as e: + msg = "Got HTTP error when request monitoring method {} from url {}: {}".format( + self.method, + self.url, + str(e), + ) + self._log.warning(msg) + raise MonitoringParamError(msg) + + return resp.text + + @asyncio.coroutine + def poll(self): + try: + resp = yield from self._loop.run_in_executor( + None, + self._poll, + ) + + except Exception as e: + msg = "Caught exception when polling http endpoint: %s" % str(e) + self._log.warning(msg) + raise MonitoringParamError(msg) + + self._log.debug("Got response from http endpoint (%s): %s", + self.url, resp) + + return resp + + +class MonitoringParam(object): + def __init__(self, log, vnfr_mon_param_msg): + self._log = log + self._vnfr_mon_param_msg = vnfr_mon_param_msg + + self._current_value = None + + self._json_querier = self._create_json_querier() + self._value_converter = ValueConverter(self.value_type) + + def _create_json_querier(self): + if self.msg.json_query_method == "NAMEKEY": + return JsonKeyValueQuerier(self._log, self.msg.name) + elif self.msg.json_query_method == "JSONPATH": + if not self.msg.json_query_params.has_field("json_path"): + msg = "JSONPATH query_method requires json_query_params.json_path to be filled in %s" + self._log.error(msg, self.msg) + raise ValueError(msg) + return JsonPathValueQuerier(self._log, self.msg.json_query_params.json_path) + elif self.msg.json_query_method == "OBJECTPATH": + if not self.msg.json_query_params.has_field("object_path"): + msg = "OBJECTPATH query_method requires json_query_params.object_path to be filled in %s" + self._log.error(msg, self.msg) + raise ValueError(msg) + return ObjectPathValueQuerier(self._log, self.msg.json_query_params.object_path) + else: + msg = "Unknown JSON query method: %s" % self.json_query_method + self._log.error(msg) + raise ValueError(msg) + + @property + def current_value(self): + return self._current_value + + @property + def msg(self): + msg = self._vnfr_mon_param_msg + value_type = msg.value_type + + if self._current_value is None: + return msg + + if value_type == "INT": + msg.value_integer = self._current_value + + elif value_type == "DECIMAL": + msg.value_decimal = self._current_value + + elif value_type == "STRING": + msg.value_string = self._current_value + + else: + self._log.debug("Unknown value_type: %s", value_type) + + return msg + + @property + def path(self): + return self.msg.http_endpoint_ref + + @property + def value_type(self): + return self.msg.value_type + + @property + def json_query_method(self): + return self.msg.json_query_method + + @property + def json_path(self): + return self.msg.json_path_params.json_path + + @property + def name(self): + return self.msg.name + + def extract_value_from_response(self, response_msg): + if self._json_querier is None: + self._log.warning("json querier is not created. 
Cannot extract value from response.")
+            return
+
+        try:
+            value = self._json_querier.query(response_msg)
+            converted_value = self._value_converter.convert(value)
+        except MonitoringParamError as e:
+            self._log.warning("Failed to extract value from json response: %s", str(e))
+            return
+        else:
+            self._current_value = converted_value
+
+
+class EndpointMonParamsPoller(object):
+    REQUEST_TIMEOUT_SECS = 10
+
+    def __init__(self, log, loop, endpoint, mon_params):
+        self._log = log
+        self._loop = loop
+        self._endpoint = endpoint
+        self._mon_params = mon_params
+
+        self._poll_task = None
+
+    @property
+    def poll_interval(self):
+        return self._endpoint.poll_interval
+
+    def _apply_response_to_mon_params(self, response_msg):
+        for mon_param in self._mon_params:
+            mon_param.extract_value_from_response(response_msg)
+
+    @asyncio.coroutine
+    def _poll_loop(self):
+        self._log.debug("Starting http endpoint %s poll loop", self._endpoint.url)
+        while True:
+            try:
+                response = yield from self._endpoint.poll()
+                self._apply_response_to_mon_params(response)
+
+            except Exception as e:
+                msg = "Caught exception when polling http endpoint: %s" % str(e)
+                self._log.warning(msg)
+
+            yield from asyncio.sleep(self.poll_interval, loop=self._loop)
+
+    def start(self):
+        self._log.debug("Got start request for endpoint poller: %s",
+                        self._endpoint.url)
+        if self._poll_task is not None:
+            return
+
+        self._poll_task = self._loop.create_task(self._poll_loop())
+
+    def stop(self):
+        self._log.debug("Got stop request for endpoint poller: %s",
+                        self._endpoint.url)
+        if self._poll_task is None:
+            return
+
+        self._poll_task.cancel()
+
+        self._poll_task = None
+
+
+class VnfMonitoringParamsController(object):
+    def __init__(self, log, loop, vnfr_id, management_ip,
+                 http_endpoint_msgs, monitoring_param_msgs):
+        self._log = log
+        self._loop = loop
+        self._vnfr_id = vnfr_id
+        self._management_ip = management_ip
+        self._http_endpoint_msgs = http_endpoint_msgs
+        self._monitoring_param_msgs = monitoring_param_msgs
+
+        self._endpoints = self._create_endpoints()
+        self._mon_params = self._create_mon_params()
+
+        self._endpoint_mon_param_map = self._create_endpoint_mon_param_map(
+            self._endpoints, self._mon_params
+        )
+        self._endpoint_pollers = self._create_endpoint_pollers(self._endpoint_mon_param_map)
+
+    def _create_endpoints(self):
+        path_endpoint_map = {}
+        for ep_msg in self._http_endpoint_msgs:
+            endpoint = HTTPEndpoint(
+                self._log,
+                self._loop,
+                self._management_ip,
+                ep_msg,
+            )
+            path_endpoint_map[endpoint.path] = endpoint
+
+        return path_endpoint_map
+
+    def _create_mon_params(self):
+        mon_params = {}
+        for mp_msg in self._monitoring_param_msgs:
+            mon_params[mp_msg.id] = MonitoringParam(
+                self._log,
+                mp_msg,
+            )
+
+        return mon_params
+
+    def _create_endpoint_mon_param_map(self, endpoints, mon_params):
+        ep_mp_map = collections.defaultdict(list)
+        for mp in mon_params.values():
+            endpoint = endpoints[mp.path]
+            ep_mp_map[endpoint].append(mp)
+
+        return ep_mp_map
+
+    def _create_endpoint_pollers(self, ep_mp_map):
+        pollers = []
+        for endpoint, mon_params in ep_mp_map.items():
+            poller = EndpointMonParamsPoller(
+                self._log,
+                self._loop,
+                endpoint,
+                mon_params,
+            )
+
+            pollers.append(poller)
+
+        return pollers
+
+    @property
+    def msgs(self):
+        msgs = []
+        for mp in self.mon_params:
+            msgs.append(mp.msg)
+
+        return msgs
+
+    @property
+    def mon_params(self):
+        return list(self._mon_params.values())
+
+    @property
+    def endpoints(self):
+        return list(self._endpoints.values())
+
+    def start(self):
+        """ Start monitoring """
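+        # Note: poller start-up is idempotent. EndpointMonParamsPoller.start()
+        # returns early when its poll task already exists, so calling start()
+        # twice does not double-poll an endpoint.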
+ self._log.debug("Starting monitoring of VNF id: %s", self._vnfr_id) + for poller in self._endpoint_pollers: + poller.start() + + def stop(self): + """ Stop monitoring """ + self._log.debug("Stopping monitoring of VNF id: %s", self._vnfr_id) + for poller in self._endpoint_pollers: + poller.stop() + + +class VnfMonitorDtsHandler(object): + """ VNF monitoring class """ + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param" + + def __init__(self, dts, log, loop, vnfr): + self._dts = dts + self._log = log + self._loop = loop + self._vnfr = vnfr + self._group = None + self._regh = None + + mon_params = [] + for mon_param in self._vnfr.vnfd.msg.monitoring_param: + param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict( + mon_param.as_dict() + ) + mon_params.append(param) + + http_endpoints = [] + for endpoint in self._vnfr.vnfd.msg.http_endpoint: + endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict( + endpoint.as_dict() + ) + http_endpoints.append(endpoint) + + self._log.debug("Creating monitoring param controller") + self._log.debug(" - Endpoints: %s", http_endpoints) + self._log.debug(" - Monitoring Params: %s", mon_params) + + self._mon_param_controller = VnfMonitoringParamsController( + self._log, + self._loop, + self._vnfr.msg.id, + self._vnfr.msg.mgmt_interface.ip_address, + http_endpoints, + mon_params, + ) + + def start(self): + self._mon_param_controller.start() + + def stop(self): + self._mon_param_controller.stop() + + def xpath(self, id): + """ Monitoing params xpath """ + return (self._vnfr.xpath + + "/vnfr:monitoring-param[vnfr:id = '{}']".format(id)) + + @property + def msg(self): + """ The message with the monitoing params """ + return self._mon_param_controller.msgs + + def register(self): + """ Register with dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + if self._regh is None: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + if action == rwdts.QueryAction.READ: + for msg in self.msg: + xact_info.respond_xpath(rwdts.XactRspCode.MORE, + self.xpath(msg.id), + msg) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + with self._dts.group_create() as self._group: + self._regh = self._group.register(xpath=VnfMonitorDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER) + + def deregister(self): + """ de-register with dts """ + if self._regh is not None: + self._log.debug("Deregistering path %s, regh = %s", + VnfMonitorDtsHandler.XPATH, + self._regh) + self._regh.deregister() + self._regh=None + self._vnfr = None \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py new file mode 100755 index 0000000..d86a7b3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py @@ -0,0 +1,2396 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import asyncio
+import collections
+import enum
+import logging
+import uuid
+import time
+import os.path
+import sys
+import re
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwVnfmYang', '1.0')
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwVnfrYang,
+    RwVnfmYang,
+    RwVlrYang,
+    VnfrYang,
+    RwManifestYang,
+    RwBaseYang,
+    RwResourceMgrYang,
+    ProtobufC,
+    RwTypes,
+)
+
+import rift.tasklets
+
+from . import mon_params
+
+
+class VMResourceError(Exception):
+    """ VM resource Error"""
+    pass
+
+
+class VnfRecordError(Exception):
+    """ VNF record instantiation failed"""
+    pass
+
+
+class VduRecordError(Exception):
+    """ VDU record instantiation failed"""
+    pass
+
+
+class NotImplemented(Exception):
+    """Not implemented """
+    pass
+
+
+class VnfrRecordExistsError(Exception):
+    """VNFR record already exist with the same VNFR id"""
+    pass
+
+
+class InternalVirtualLinkRecordError(Exception):
+    """Internal virtual link record error"""
+    pass
+
+
+class VDUImageNotFound(Exception):
+    """VDU Image not found error"""
+    pass
+
+
+class VirtualDeploymentUnitRecordError(Exception):
+    """VDU Instantiation failed"""
+    pass
+
+
+class VMNotReadyError(Exception):
+    """ VM Not yet received from resource manager """
+    pass
+
+
+class VDURecordNotFound(Exception):
+    """ Could not find a VDU record """
+    pass
+
+
+class VirtualNetworkFunctionRecordDescNotFound(Exception):
+    """ Cannot find Virtual Network Function Record Descriptor """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorError(Exception):
+    """ Virtual Network Function Record Descriptor Error """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorNotFound(Exception):
+    """ Virtual Network Function Record Descriptor Not Found """
+    pass
+
+
+class VirtualNetworkFunctionRecordNotFound(Exception):
+    """ Virtual Network Function Record Not Found """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorRefCountExists(Exception):
+    """ Virtual Network Function Descriptor reference count exists """
+    pass
+
+
+class VnfrInstantiationFailed(Exception):
+    """ Virtual Network Function Instantiation failed"""
+    pass
+
+
+class VirtualNetworkFunctionRecordState(enum.Enum):
+    """ VNFR state """
+    INIT = 1
+    VL_INIT_PHASE = 2
+    VM_INIT_PHASE = 3
+    READY = 4
+    TERMINATE = 5
+    VL_TERMINATE_PHASE = 6
+    VDU_TERMINATE_PHASE = 7
+    TERMINATED = 8
+    FAILED = 10
+
+
+class VDURecordState(enum.Enum):
+    """VDU record state """
+    INIT = 1
+    INSTANTIATING = 2
+    RESOURCE_ALLOC_PENDING = 3
+    READY = 4
+    TERMINATING = 5
+    TERMINATED = 6
+    FAILED = 10
+
+
+class VcsComponent(object):
+    """ VCS Component within the VNF descriptor """
+    def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._component = component
+        self._cluster_name = cluster_name
+        self._vcs_handler = vcs_handler
+
self._mangled_name = mangled_name + + @staticmethod + def mangle_name(component_name, vnf_name, vnfd_id): + """ mangled component name """ + return vnf_name + ":" + component_name + ":" + vnfd_id + + @property + def name(self): + """ name of this component""" + return self._mangled_name + + @property + def path(self): + """ The path for this object """ + return("D,/rw-manifest:manifest" + + "/rw-manifest:operational-inventory" + + "/rw-manifest:component" + + "[rw-manifest:component-name = '{}']").format(self.name) + + @property + def instance_xpath(self): + """ The path for this object """ + return("D,/rw-base:vcs" + + "/instances" + + "/instance" + + "[instance-name = '{}']".format(self._cluster_name)) + + @property + def start_comp_xpath(self): + """ start component xpath """ + return (self.instance_xpath + + "/child-n[instance-name = 'START-REQ']") + + def get_start_comp_msg(self, ip_address): + """ start this component """ + start_msg = RwBaseYang.VcsInstance_Instance_ChildN() + start_msg.instance_name = 'START-REQ' + start_msg.component_name = self.name + start_msg.admin_command = "START" + start_msg.ip_address = ip_address + + return start_msg + + @property + def msg(self): + """ Returns the message for this vcs component""" + + vcs_comp_dict = self._component.as_dict() + + def mangle_comp_names(comp_dict): + """ mangle component name with VNF name, id""" + for key, val in comp_dict.items(): + if isinstance(val, dict): + comp_dict[key] = mangle_comp_names(val) + elif isinstance(val, list): + i = 0 + for ent in val: + if isinstance(ent, dict): + val[i] = mangle_comp_names(ent) + else: + val[i] = ent + i += 1 + elif key == "component_name": + comp_dict[key] = VcsComponent.mangle_name(val, + self._vnfd_name, + self._vnfd_id) + return comp_dict + + + mangled_dict = mangle_comp_names(vcs_comp_dict) + msg = RwManifestYang.OpInventory_Component.from_dict(mangled_dict) + return msg + + @asyncio.coroutine + def publish(self, xact): + """ Publishes the VCS component """ + self._log.debug("Publishing the VcsComponent %s, path = %s comp = %s", + self.name, self.path, self.msg) + yield from self._vcs_handler.publish(xact, self.path, self.msg) + + @asyncio.coroutine + def start(self, xact, parent, ip_addr=None): + """ Starts this VCS component """ + # ATTN RV - replace with block add + start_msg = self.get_start_comp_msg(ip_addr) + self._log.debug("starting component %s %s", + self.start_comp_xpath, start_msg) + yield from self._dts.query_create(self.start_comp_xpath, + 0, + start_msg) + self._log.debug("started component %s, %s", + self.start_comp_xpath, start_msg) + + +class VirtualDeploymentUnitRecord(object): + """ Virtual Deployment Unit Record """ + def __init__(self, dts, log, loop, vdud, vnfr, mgmt_intf, cloud_account_name, vdur_id=None): + self._dts = dts + self._log = log + self._loop = loop + self._vdud = vdud + self._vnfr = vnfr + self._mgmt_intf = mgmt_intf + self._cloud_account_name = cloud_account_name + + self._vdur_id = vdur_id or str(uuid.uuid4()) + self._int_intf = [] + self._ext_intf = [] + self._state = VDURecordState.INIT + self._request_id = str(uuid.uuid4()) + self._name = vnfr.name + "." 
+ vdud.name + + self._rm_regh = None + self._vm_resp = None + + def cp_ip_addr(self, cp_name): + """ Find ip address by connection point name """ + if self._vm_resp is not None: + for conn_point in self._vm_resp.connection_points: + if conn_point.name == cp_name: + return conn_point.ip_address + return "0.0.0.0" + + def cp_id(self, cp_name): + """ Find connection point id by connection point name """ + if self._vm_resp is not None: + for conn_point in self._vm_resp.connection_points: + if conn_point.name == cp_name: + return conn_point.connection_point_id + return '' + + + @property + def vdu_id(self): + return self._vdud.id + + @property + def vm_resp(self): + return self._vm_resp + + @property + def name(self): + """ Return this VDUR's name """ + return self._name + + @property + def cloud_account_name(self): + """ Cloud account this VDU should be created in """ + return self._cloud_account_name + + @property + def image_name(self): + """ name that should be used to lookup the image on the CMP """ + return os.path.basename(self._vdud.image) + + @property + def image_checksum(self): + """ name that should be used to lookup the image on the CMP """ + return self._vdud.image_checksum if self._vdud.has_field("image_checksum") else None + + @property + def management_ip(self): + if not self.active: + return None + return self._vm_resp.public_ip if self._vm_resp.has_field('public_ip') else self._vm_resp.management_ip + + @property + def vm_management_ip(self): + if not self.active: + return None + return self._vm_resp.management_ip + + @property + def operational_status(self): + """ Operational status of this VDU""" + op_stats_dict = {"INIT": "init", + "INSTANTIATING": "vm_init_phase", + "RESOURCE_ALLOC_PENDING": "vm_alloc_pending", + "READY": "running", + "FAILED": "failed", + "TERMINATING": "terminated", + "TERMINATED": "terminated", + } + return op_stats_dict[self._state.name] + + @property + def msg(self): + """ VDU message """ + vdu_fields = ["vm_flavor", + "guest_epa", + "vswitch_epa", + "hypervisor_epa", + "host_epa"] + vdu_copy_dict = {k: v for k, v in + self._vdud.as_dict().items() if k in vdu_fields} + vdur_dict = {"id": self._vdur_id, + "vdu_id_ref": self._vdud.id, + "operational_status": self.operational_status, + } + if self.vm_resp is not None: + vdur_dict.update({"vim_id": self.vm_resp.vdu_id, + "flavor_id": self.vm_resp.flavor_id, + "image_id": self.vm_resp.image_id, + }) + + if self.management_ip is not None: + vdur_dict["management_ip"] = self.management_ip + + if self.vm_management_ip is not None: + vdur_dict["vm_management_ip"] = self.vm_management_ip + + vdur_dict.update(vdu_copy_dict) + + icp_list = [] + ii_list = [] + for intf, cp, vlr in self._int_intf: + icp_list.append({"id": cp, + "type_yang": "VPORT", + "ip_address": self.cp_ip_addr(cp)}) + + ii_list.append({"name": intf.name, + "vdur_internal_connection_point_ref": cp, + "virtual_interface": {}}) + + vdur_dict["internal_connection_point"] = icp_list + vdur_dict["internal_interface"] = ii_list + + ei_list = [] + for intf, cp, vlr in self._ext_intf: + ei_list.append({"name": cp, + "vnfd_connection_point_ref": cp, + "virtual_interface": {}}) + self._vnfr.update_cp(cp, self.cp_ip_addr(cp),self.cp_id(cp)) + + vdur_dict["external_interface"] = ei_list + + return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict) + + @property + def resmgr_path(self): + """ path for resource-mgr""" + return ("D,/rw-resource-mgr:resource-mgmt" + + "/vdu-event" + + "/vdu-event-data[event-id='{}']".format(self._request_id)) 
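+    # The resmgr_path property above builds the per-request resource-mgr
+    # xpath, e.g. (event id elided):
+    #   D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='...']
+    # This is the same path family the resource-mgr unit test earlier in this
+    # patch reserves resources through.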
+ + @property + def vm_flavor_msg(self): + """ VM flavor message """ + flavor = self._vdud.vm_flavor.__class__() + flavor.copy_from(self._vdud.vm_flavor) + + return flavor + + def resmgr_msg(self, config=None): + vdu_fields = ["vm_flavor", + "guest_epa", + "vswitch_epa", + "hypervisor_epa", + "host_epa"] + + self._log.debug("Creating params based on VDUD: %s", self._vdud) + vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields} + + vm_create_msg_dict = { + "name": self.name, + "image_name": self.image_name, + } + + if self.image_checksum is not None: + vm_create_msg_dict["image_checksum"] = self.image_checksum + + vm_create_msg_dict["allocate_public_address"] = self._mgmt_intf + if self._vdud.has_field('mgmt_vpci'): + vm_create_msg_dict["mgmt_vpci"] = self._vdud.mgmt_vpci + + self._log.debug("VDUD: %s", self._vdud) + if config is not None: + vm_create_msg_dict['vdu_init'] = {'userdata': config} + + cp_list = [] + for intf, cp, vlr in self._ext_intf: + if (intf.virtual_interface.has_field('vpci') and + intf.virtual_interface.vpci is not None): + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang, + "vpci": intf.virtual_interface.vpci}) + else: + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang}) + + for intf, cp, vlr in self._int_intf: + if (intf.virtual_interface.has_field('vpci') and + intf.virtual_interface.vpci is not None): + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang, + "vpci": intf.virtual_interface.vpci}) + else: + cp_list.append({"name": cp, + "virtual_link_id": vlr.network_id, + "type_yang": intf.virtual_interface.type_yang}) + + vm_create_msg_dict["connection_points"] = cp_list + vm_create_msg_dict.update(vdu_copy_dict) + + msg = RwResourceMgrYang.VDUEventData() + msg.event_id = self._request_id + msg.cloud_account = self.cloud_account_name + msg.request_info.from_dict(vm_create_msg_dict) + return msg + + @asyncio.coroutine + def terminate(self, xact): + """ Delete resource in VIM """ + if self._state != VDURecordState.READY and self._state != VDURecordState.FAILED: + self._log.warning("VDU terminate in not ready state - Ignoring request") + return + + self._state = VDURecordState.TERMINATING + if self._vm_resp is not None: + try: + with self._dts.transaction() as new_xact: + yield from self.delete_resource(new_xact) + except Exception: + self._log.exception("Caught exception while deleting VDU %s", self.vdu_id) + + if self._rm_regh is not None: + self._log.debug("Deregistering resource manager registration handle") + self._rm_regh.deregister() + self._rm_regh = None + + self._state = VDURecordState.TERMINATED + + @asyncio.coroutine + def create_resource(self, xact, vnfr, config=None): + """ Request resource from ResourceMgr """ + def find_cp_by_name(cp_name): + """ Find a connection point by name """ + cp = None + self._log.debug("find_cp_by_name(%s) called", cp_name) + for ext_cp in vnfr._cprs: + self._log.debug("Checking ext cp (%s) called", ext_cp.name) + if ext_cp.name == cp_name: + cp = ext_cp + break + if cp is None: + self._log.debug("Failed to find cp %s in external connection points", + cp_name) + return cp + + def find_internal_vlr_by_cp_name(cp_name): + """ Find the VLR corresponding to the connection point name""" + cp = None + + self._log.debug("find_internal_vlr_by_cp_name(%s) called", + cp_name) + + for int_cp in 
self._vdud.internal_connection_point:
+                self._log.debug("Checking for int cp %s in internal connection points",
+                                int_cp.id)
+                if int_cp.id == cp_name:
+                    cp = int_cp
+                    break
+
+            if cp is None:
+                self._log.debug("Failed to find cp %s in internal connection points",
+                                cp_name)
+                msg = "Failed to find cp %s in internal connection points" % cp_name
+                raise VduRecordError(msg)
+
+            # return the VLR associated with the connection point
+            return vnfr.find_vlr_by_cp(cp_name)
+
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: create",
+                        self._request_id)
+
+        # Resolve the networks associated with the external interfaces
+        for ext_intf in self._vdud.external_interface:
+            self._log.debug("Resolving external interface name [%s], cp[%s]",
+                            ext_intf.name, ext_intf.vnfd_connection_point_ref)
+            cp = find_cp_by_name(ext_intf.vnfd_connection_point_ref)
+            if cp is None:
+                self._log.debug("Failed to find connection point - %s",
+                                ext_intf.vnfd_connection_point_ref)
+                continue
+            self._log.debug("Connection point name [%s], type[%s]",
+                            cp.name, cp.type_yang)
+
+            vlr = vnfr.ext_vlr_by_id(cp.vlr_ref)
+
+            etuple = (ext_intf, cp.name, vlr)
+            self._ext_intf.append(etuple)
+
+            self._log.debug("Created external interface tuple : %s", etuple)
+
+        # Resolve the networks associated with the internal interfaces
+        for intf in self._vdud.internal_interface:
+            cp_id = intf.vdu_internal_connection_point_ref
+            self._log.debug("Resolving internal interface name [%s], cp[%s]",
+                            intf.name, cp_id)
+
+            try:
+                vlr = find_internal_vlr_by_cp_name(cp_id)
+            except Exception as e:
+                self._log.debug("Failed to find cp %s in internal VLR list", cp_id)
+                msg = "Failed to find cp %s in internal VLR list, e = %s" % (cp_id, e)
+                raise VduRecordError(msg)
+
+            ituple = (intf, cp_id, vlr)
+            self._int_intf.append(ituple)
+
+            self._log.debug("Created internal interface tuple : %s", ituple)
+
+        resmgr_path = self.resmgr_path
+        resmgr_msg = self.resmgr_msg(config)
+
+        self._log.debug("Creating new VM request at: %s, params: %s", resmgr_path, resmgr_msg)
+        block.add_query_create(resmgr_path, resmgr_msg)
+
+        res_iter = yield from block.execute(flags=0, now=True)
+
+        resp = None
+
+        for i in res_iter:
+            r = yield from i
+            resp = r.result
+
+        if resp is None or not (resp.has_field('resource_info') and resp.resource_info.has_field('vdu_id')):
+            raise VMResourceError("Did not get a vm resource response (resp: %s)" % resp)
+        self._log.debug("Got vm request response: %s", resp.resource_info)
+        return resp.resource_info
+
+    @asyncio.coroutine
+    def delete_resource(self, xact):
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: delete",
+                        self._request_id)
+
+        block.add_query_delete(self.resmgr_path)
+
+        yield from block.execute(flags=0, now=True)
+
+    @asyncio.coroutine
+    def start_component(self):
+        """ Start the VCS component for this VDUR """
+        self._log.debug("Starting component %s for vdud %s vdur %s",
+                        self._vdud.vcs_component_ref,
+                        self._vdud,
+                        self._vdur_id)
+        yield from self._vnfr.start_component(self._vdud.vcs_component_ref,
+                                              self.vm_resp.management_ip)
+
+    @property
+    def active(self):
+        """ Is this VDU active """
+        return self._state is VDURecordState.READY
+
+    @asyncio.coroutine
+    def instantiation_failed(self):
+        """ VDU instantiation failed """
+        self._log.debug("VDU %s instantiation failed ", self._vdur_id)
+        self._state = VDURecordState.FAILED
+        yield from self._vnfr.instantiation_failed()
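+
+    # State-machine sketch, assembled from the handlers in this class (for
+    # orientation only): INIT -> INSTANTIATING -> RESOURCE_ALLOC_PENDING ->
+    # READY, with any failure moving the record to FAILED and terminate()
+    # driving READY/FAILED -> TERMINATING -> TERMINATED. operational_status
+    # above maps these states onto the yang strings (init, vm_init_phase,
+    # vm_alloc_pending, running, failed, terminated).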
+    @asyncio.coroutine
+    def vdu_is_active(self):
+        """ This VDU is active"""
+        if self.active:
+            self._log.warning("VDU %s was already marked as active", self._vdur_id)
+            return
+
+        self._log.debug("VDUR id %s in VNFR %s is active", self._vdur_id, self._vnfr.vnfr_id)
+
+        if self._vdud.vcs_component_ref is not None:
+            yield from self.start_component()
+
+        self._state = VDURecordState.READY
+
+        if self._vnfr.all_vdus_active():
+            self._log.debug("Inside vdu_is_active. VNFR is READY. Info: %s", self._vnfr)
+            yield from self._vnfr.is_ready()
+
+    @asyncio.coroutine
+    def instantiate(self, xact, vnfr, config=None):
+        """ Instantiate this VDU """
+        self._state = VDURecordState.INSTANTIATING
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ Prepare callback for resource-info updates on this VDUR """
+            self._log.debug("Received VDUR instantiate on_prepare (%s:%s:%s)",
+                            query_action,
+                            ks_path,
+                            msg)
+
+            if (query_action == rwdts.QueryAction.UPDATE or
+                    query_action == rwdts.QueryAction.CREATE):
+                self._vm_resp = msg
+
+                if msg.resource_state == "active":
+                    # Move this VDU to ready state
+                    yield from self.vdu_is_active()
+                elif msg.resource_state == "failed":
+                    yield from self.instantiation_failed()
+            elif query_action == rwdts.QueryAction.DELETE:
+                self._log.debug("DELETE action in on_prepare for VDUR instantiation, ignoring")
+            else:
+                raise NotImplementedError(
+                    "%s action on VirtualDeploymentUnitRecord not supported" %
+                    query_action)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        try:
+            vm_resp = yield from self.create_resource(xact, vnfr, config)
+            self._vm_resp = vm_resp
+
+            self._state = VDURecordState.RESOURCE_ALLOC_PENDING
+            self._log.debug("Requested VM from resource manager, response %s",
+                            vm_resp)
+            if vm_resp.resource_state == "active":
+                self._log.debug("Resourcemgr responded with an active vm resp %s",
+                                vm_resp)
+                yield from self.vdu_is_active()
+                self._state = VDURecordState.READY
+            elif (vm_resp.resource_state == "pending" or
+                  vm_resp.resource_state == "inactive"):
+                self._log.debug("Resourcemgr responded with a pending vm resp %s",
+                                vm_resp)
+                handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+                self._rm_regh = yield from self._dts.register(self.resmgr_path + '/resource-info',
+                                                              flags=rwdts.Flag.SUBSCRIBER,
+                                                              handler=handler)
+            else:
+                self._log.debug("Resourcemgr responded with an error vm resp %s",
+                                vm_resp)
+                raise VirtualDeploymentUnitRecordError(
+                    "Failed VDUR instantiation %s " % vm_resp)
+
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            self._log.exception(e)
+            self._log.error("Instantiation of VDU record failed: %s", str(e))
+            self._state = VDURecordState.FAILED
+            yield from self.instantiation_failed()
+
+
+class VlRecordState(enum.Enum):
+    """ VL Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class InternalVirtualLinkRecord(object):
+    """ Internal Virtual Link record """
+    def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._ivld_msg = ivld_msg
+        self._vnfr_name = vnfr_name
+        self._cloud_account_name = cloud_account_name
+
+        self._vlr_req = self.create_vlr()
+        self._vlr = None
+        self._state = VlRecordState.INIT
+
+    @property
+    def vlr_id(self):
+        """ VLR id for this internal VL """
+        return self._vlr_req.id
+
+    @property
+    def name(self):
+        """ Name of this VL """
+        return self._vnfr_name + "."
+ self._ivld_msg.name + + @property + def network_id(self): + """ Find VLR by id """ + return self._vlr.network_id if self._vlr else None + + def vlr_path(self): + """ VLR path for this VLR instance""" + return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id) + + def create_vlr(self): + """ Create the VLR record which will be instantiated """ + + vld_fields = ["short_name", + "vendor", + "description", + "version", + "type_yang", + "provider_network"] + + vld_copy_dict = {k: v for k, v in self._ivld_msg.as_dict().items() if k in vld_fields} + + vlr_dict = {"id": str(uuid.uuid4()), + "name": self.name, + "cloud_account": self._cloud_account_name, + } + vlr_dict.update(vld_copy_dict) + + vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict) + return vlr + + @asyncio.coroutine + def instantiate(self, xact, restart_mode=False): + """ Instantiate VL """ + + @asyncio.coroutine + def instantiate_vlr(): + """ Instantiate VLR""" + self._log.debug("Create VL with xpath %s and vlr %s", + self.vlr_path(), self._vlr_req) + + with self._dts.transaction(flags=0) as xact: + block = xact.block_create() + block.add_query_create(xpath=self.vlr_path(), msg=self._vlr_req) + self._log.debug("Executing VL create path:%s msg:%s", + self.vlr_path(), self._vlr_req) + + res_iter = None + try: + res_iter = yield from block.execute() + except Exception: + self._state = VlRecordState.FAILED + self._log.exception("Caught exception while instantial VL") + raise + + for ent in res_iter: + res = yield from ent + self._vlr = res.result + + if self._vlr.operational_status == 'failed': + self._log.debug("VL creation failed for vlr id %s", self._vlr.id) + self._state = VlRecordState.FAILED + raise VnfrInstantiationFailed("instantiation due to VL failure %s" % (self._vlr.id)) + + self._log.info("Created VL with xpath %s and vlr %s", + self.vlr_path(), self._vlr) + + @asyncio.coroutine + def get_vlr(): + """ Get the network id """ + res_iter = yield from self._dts.query_read(self.vlr_path(), rwdts.Flag.MERGE) + vlr = None + for ent in res_iter: + res = yield from ent + vlr = res.result + + if vlr is None: + err = "Failed to get VLR for path %s" % self.vlr_path() + self._log.warn(err) + raise InternalVirtualLinkRecordError(err) + return vlr + + self._state = VlRecordState.INSTANTIATION_PENDING + + if restart_mode: + vl = yield from get_vlr() + if vl is None: + yield from instantiate_vlr() + else: + yield from instantiate_vlr() + + self._state = VlRecordState.ACTIVE + + def vlr_in_vns(self): + """ Is there a VLR record in VNS """ + if (self._state == VlRecordState.ACTIVE or + self._state == VlRecordState.INSTANTIATION_PENDING or + self._state == VlRecordState.FAILED): + return True + + return False + + @asyncio.coroutine + def terminate(self, xact): + """Terminate this VL """ + if not self.vlr_in_vns(): + self._log.debug("Ignoring terminate request for id %s in state %s", + self.vlr_id, self._state) + return + + self._log.debug("Terminating VL with path %s", self.vlr_path()) + self._state = VlRecordState.TERMINATE_PENDING + block = xact.block_create() + block.add_query_delete(self.vlr_path()) + yield from block.execute(flags=0, now=True) + self._state = VlRecordState.TERMINATED + self._log.debug("Terminated VL with path %s", self.vlr_path()) + + +class VirtualNetworkFunctionRecord(object): + """ Virtual Network Function Record """ + def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg): + self._dts = dts + self._log = log + self._loop = loop + self._cluster_name = cluster_name + 
self._vnfr_id = vnfr_msg.id + self._vnfd_id = vnfr_msg.vnfd_ref + self._vnfm = vnfm + self._vcs_handler = vcs_handler + self._vnfr = vnfr_msg + + self._vnfd = None + self._state = VirtualNetworkFunctionRecordState.INIT + self._ext_vlrs = {} # The list of external virtual links + self._vlrs = [] # The list of internal virtual links + self._vdus = [] # The list of vdu + self._vlr_by_cp = {} + self._cprs = [] + self._inventory = {} + self._create_time = int(time.time()) + self._vnf_mon = None + self._config_status = vnfr_msg.config_status + + def _get_vdur_from_vdu_id(self, vdu_id): + self._log.debug("Finding vdur for vdu_id %s", vdu_id) + self._log.debug("Searching through vdus: %s", self._vdus) + for vdu in self._vdus: + self._log.debug("vdu_id: %s", vdu.vdu_id) + if vdu.vdu_id == vdu_id: + return vdu + + raise VDURecordNotFound("Could not find vdu record from id: %s", vdu_id) + + @property + def operational_status(self): + """ Operational status of this VNFR """ + op_status_map = {"INIT": "init", + "VL_INIT_PHASE": "vl_init_phase", + "VM_INIT_PHASE": "vm_init_phase", + "READY": "running", + "TERMINATE": "terminate", + "VL_TERMINATE_PHASE": "vl_terminate_phase", + "VDU_TERMINATE_PHASE": "vm_terminate_phase", + "TERMINATED": "terminated", + "FAILED": "failed", } + return op_status_map[self._state.name] + + @property + def vnfd_xpath(self): + """ VNFD xpath associated with this VNFR """ + return("C,/vnfd:vnfd-catalog/" + "vnfd:vnfd[vnfd:id = '{}']".format(self._vnfd_id)) + + @property + def vnfd(self): + """ VNFD for this VNFR """ + return self._vnfd + + @property + def vnf_name(self): + """ VNFD name associated with this VNFR """ + return self.vnfd.name + + @property + def name(self): + """ Name of this VNF in the record """ + return self._vnfr.name + + @property + def cloud_account_name(self): + """ Name of the cloud account this VNFR is instantiated in """ + return self._vnfr.cloud_account + + @property + def vnfd_id(self): + """ VNFD Id associated with this VNFR """ + return self.vnfd.id + + @property + def vnfr_id(self): + """ VNFR Id associated with this VNFR """ + return self._vnfr_id + + @property + def member_vnf_index(self): + """ Member VNF index associated with this VNFR """ + return self._vnfr.member_vnf_index_ref + + @property + def config_status(self): + """ Config agent status for this VNFR """ + return self._config_status + + def component_by_name(self, component_name): + """ Find a component by name in the inventory list""" + mangled_name = VcsComponent.mangle_name(component_name, + self.vnf_name, + self.vnfd_id) + return self._inventory[mangled_name] + + @asyncio.coroutine + def start_component(self, component_name, ip_addr): + """ Start a component in the VNFR by name """ + comp = self.component_by_name(component_name) + yield from comp.start(None, None, ip_addr) + + def cp_ip_addr(self, cp_name): + """ Get ip address for connection point """ + self._log.debug("cp_ip_addr()") + for cp in self._cprs: + if cp.name == cp_name and cp.ip_address is not None: + return cp.ip_address + return "0.0.0.0" + + def mgmt_intf_info(self): + """ Get Management interface info for this VNFR """ + mgmt_intf_desc = self.vnfd.msg.mgmt_interface + self._log.debug("Find mgmt interface info for vnfr id %s, mgmt_intf %s", + self._vnfr_id, mgmt_intf_desc) + ip_addr = None + if mgmt_intf_desc.has_field("cp"): + ip_addr = self.cp_ip_addr(mgmt_intf_desc.cp) + elif mgmt_intf_desc.has_field("vdu_id"): + try: + vdur = self._get_vdur_from_vdu_id(mgmt_intf_desc.vdu_id) + ip_addr = vdur.management_ip + 
except VDURecordNotFound: + ip_addr = None + else: + ip_addr = mgmt_intf_desc.ip_address + port = mgmt_intf_desc.port + + self._log.debug("Found mgmt interface for vnfr id %s, %s:%s", + self._vnfr_id, ip_addr, port) + + return ip_addr, port + + @property + def msg(self): + """ Message associated with this VNFR """ + vnfd_fields = ["short_name", "vendor", "description", "version"] + vnfd_copy_dict = {k: v for k, v in self.vnfd.msg.as_dict().items() if k in vnfd_fields} + + mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface() + ip_address, port = self.mgmt_intf_info() + + if ip_address is not None: + mgmt_intf.ip_address = ip_address + if port is not None: + mgmt_intf.port = port + + vnfr_dict = {"id": self._vnfr_id, + "name": self.name, + "member_vnf_index_ref": self.member_vnf_index, + "vnfd_ref": self.vnfd_id, + "operational_status": self.operational_status, + "cloud_account": self.cloud_account_name, + "config_status" : self._config_status + } + + vnfr_dict.update(vnfd_copy_dict) + + vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict) + vnfr_msg.mgmt_interface = mgmt_intf + + # Add all the VLRs to VNFR + for vlr in self._vlrs: + ivlr = vnfr_msg.internal_vlr.add() + ivlr.vlr_ref = vlr.vlr_id + + # Add all the VDURs to VDUR + if self._vdus is not None: + for vdu in self._vdus: + vdur = vnfr_msg.vdur.add() + vdur.from_dict(vdu.msg.as_dict()) + + if self.vnfd.msg.mgmt_interface.has_field('dashboard_params'): + vnfr_msg.dashboard_url = self.dashboard_url + + for cpr in self._cprs: + new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict()) + vnfr_msg.connection_point.append(new_cp) + + if self._vnf_mon is not None: + for monp in self._vnf_mon.msg: + vnfr_msg.monitoring_param.append( + VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict())) + + if self._vnfr.vnf_configuration is not None: + vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict()) + if (ip_address is not None and + vnfr_msg.vnf_configuration.config_access.mgmt_ip_address is None): + vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address + + return vnfr_msg + + @property + def dashboard_url(self): + ip, cfg_port = self.mgmt_intf_info() + protocol = 'http' + http_port = 80 + if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('https'): + if self.vnfd.msg.mgmt_interface.dashboard_params.https is True: + protocol = 'https' + http_port = 443 + if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('port'): + http_port = self.vnfd.msg.mgmt_interface.dashboard_params.port + + + url = "{protocol}://{ip_address}:{port}/{path}".format( + protocol=protocol, + ip_address=ip, + port=http_port, + path=self.vnfd.msg.mgmt_interface.dashboard_params.path.lstrip("/"), + ) + + return url + + @property + def xpath(self): + """ path for this VNFR """ + return("D,/vnfr:vnfr-catalog" + "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id)) + + @asyncio.coroutine + def publish(self, xact): + """ publish this VNFR """ + vnfr = self.msg + self._log.debug("Publishing VNFR path = [%s], record = [%s]", + self.xpath, self.msg) + vnfr.create_time = self._create_time + yield from self._vnfm.publish_vnfr(xact, self.xpath, self.msg) + self._log.debug("Published VNFR path = [%s], record = [%s]", + self.xpath, self.msg) + + @asyncio.coroutine + def create_vls(self): + """ Publish The VLs associated with this VNF """ + self._log.debug("Publishing Internal Virtual Links for vnfd id: %s", + self.vnfd_id) + for ivld_msg in 
self.vnfd.msg.internal_vld: + self._log.debug("Creating internal vld:" + " %s, int_cp_ref = %s", + ivld_msg, ivld_msg.internal_connection_point_ref + ) + vlr = InternalVirtualLinkRecord(dts=self._dts, + log=self._log, + loop=self._loop, + ivld_msg=ivld_msg, + vnfr_name=self.name, + cloud_account_name=self.cloud_account_name + ) + self._vlrs.append(vlr) + + for int_cp in ivld_msg.internal_connection_point_ref: + if int_cp in self._vlr_by_cp: + msg = ("Connection point %s already " + " bound %s" % (int_cp, self._vlr_by_cp[int_cp])) + raise InternalVirtualLinkRecordError(msg) + self._log.debug("Setting vlr %s to internal cp = %s", + vlr, int_cp) + self._vlr_by_cp[int_cp] = vlr + + @asyncio.coroutine + def instantiate_vls(self, xact, restart_mode=False): + """ Instantiate the VLs associated with this VNF """ + self._log.debug("Instantiating Internal Virtual Links for vnfd id: %s", + self.vnfd_id) + + for vlr in self._vlrs: + self._log.debug("Instantiating VLR %s", vlr) + yield from vlr.instantiate(xact, restart_mode) + + def find_vlr_by_cp(self, cp_name): + """ Find the VLR associated with the cp name """ + return self._vlr_by_cp[cp_name] + + def create_vdus(self, vnfr, restart_mode=False): + """ Create the VDUs associated with this VNF """ + + def get_vdur_id(vdud): + """Get the corresponding VDUR's id for the VDUD. This is useful in + case of a restart. + + In restart mode we check for exiting VDUR's ID and use them, if + available. This way we don't end up creating duplicate VDURs + """ + vdur_id = None + + if restart_mode and vdud is not None: + try: + vdur = [vdur.id for vdur in vnfr._vnfr.vdur if vdur.vdu_id_ref == vdud.id] + vdur_id = vdur[0] + except IndexError: + self._log.error("Unable to find a VDUR for VDUD {}", vdud) + + return vdur_id + + self._log.info("Creating VDU's for vnfd id: %s", self.vnfd_id) + for vdu in self.vnfd.msg.vdu: + self._log.debug("Creating vdu: %s", vdu) + vdur_id = get_vdur_id(vdu) + + vdur = VirtualDeploymentUnitRecord( + dts=self._dts, + log=self._log, + loop=self._loop, + vdud=vdu, + vnfr=vnfr, + mgmt_intf=self.has_mgmt_interface(vdu), + cloud_account_name=self.cloud_account_name, + vdur_id=vdur_id + ) + + self._vdus.append(vdur) + + @asyncio.coroutine + def instantiate_vdus(self, xact, vnfr): + """ Instantiate the VDUs associated with this VNF """ + self._log.debug("Instantiating VDU's for vnfd id %s: %s", self.vnfd_id, self._vdus) + + lookup = {vdu.vdu_id:vdu for vdu in self._vdus} + + # Identify any dependencies among the VDUs + dependencies = collections.defaultdict(list) + vdu_id_pattern = re.compile(r"\{\{ vdu\[([^]]+)\]\S* \}\}") + + for vdu in self._vdus: + if vdu._vdud.cloud_init is not None: + for vdu_id in vdu_id_pattern.findall(vdu._vdud.cloud_init): + if vdu_id != vdu.vdu_id: + # This means that vdu.vdu_id depends upon vdu_id, + # i.e. vdu_id must be instantiated before + # vdu.vdu_id. 
+                        dependencies[vdu.vdu_id].append(lookup[vdu_id])
+
+        # Define the terminal states of VDU instantiation
+        terminal = (
+            VDURecordState.READY,
+            VDURecordState.TERMINATED,
+            VDURecordState.FAILED,
+            )
+
+        datastore = VdurDatastore()
+        processed = set()
+
+        @asyncio.coroutine
+        def instantiate_monitor(vdu):
+            """Monitor the state of the VDU during instantiation
+
+            Arguments:
+                vdu - a VirtualDeploymentUnitRecord
+
+            """
+            # wait for the VDUR to enter a terminal state
+            while vdu._state not in terminal:
+                yield from asyncio.sleep(1, loop=self._loop)
+
+            # update the datastore
+            datastore.update(vdu)
+
+            # add the VDU to the set of processed VDUs
+            processed.add(vdu.vdu_id)
+
+        @asyncio.coroutine
+        def instantiate(vdu):
+            """Instantiate the specified VDU
+
+            Arguments:
+                vdu - a VirtualDeploymentUnitRecord
+
+            Raises:
+                if the VDU, or any of the VDUs this VDU depends upon, are
+                terminated or fail to instantiate properly, a
+                VirtualDeploymentUnitRecordError is raised.
+
+            """
+            for dependency in dependencies[vdu.vdu_id]:
+                self._log.debug("{}: waiting for {}".format(vdu.vdu_id, dependency.vdu_id))
+
+                while dependency.vdu_id not in processed:
+                    yield from asyncio.sleep(1, loop=self._loop)
+
+                if not dependency.active:
+                    raise VirtualDeploymentUnitRecordError()
+
+            self._log.debug('instantiating {}'.format(vdu.vdu_id))
+
+            # Populate the datastore with the current values of the VDU
+            datastore.add(vdu)
+
+            # Substitute any variables contained in the cloud config script
+            config = str(vdu._vdud.cloud_init)
+
+            parts = re.split(r"\{\{ ([^\}]+) \}\}", config)
+            if len(parts) > 1:
+
+                # Extract the variable names
+                variables = list()
+                for variable in parts[1::2]:
+                    variables.append(variable.lstrip('{{').rstrip('}}').strip())
+
+                # Iterate over the variables and substitute values from the
+                # datastore.
+                for variable in variables:
+
+                    # Handle a reference to a VDU by ID
+                    if variable.startswith('vdu['):
+                        value = datastore.get(variable)
+                        if value is None:
+                            msg = "Unable to find a substitute for {} in {} cloud-init script"
+                            raise ValueError(msg.format(variable, vdu.vdu_id))
+
+                        config = config.replace("{{ %s }}" % variable, value)
+                        continue
+
+                    # Handle a reference to the current VDU
+                    if variable.startswith('vdu'):
+                        value = datastore.get('vdu[{}]'.format(vdu.vdu_id) + variable[3:])
+                        config = config.replace("{{ %s }}" % variable, value)
+                        continue
+
+                    # Handle unrecognized variables
+                    msg = 'unrecognized cloud-config variable: {}'
+                    raise ValueError(msg.format(variable))
+
+            # Instantiate the VDU
+            with self._dts.transaction() as xact:
+                self._log.debug("Instantiating vdu: %s", vdu)
+                yield from vdu.instantiate(xact, vnfr, config=config)
+                if self._state == VirtualNetworkFunctionRecordState.FAILED:
+                    self._log.error("Instantiation of VNF %s failed while instantiating vdu %s",
+                                    self.vnfr_id, vdu)
+
+        # First create a set of tasks to monitor the state of the VDUs and
+        # report when they have entered a terminal state
+        for vdu in self._vdus:
+            self._loop.create_task(instantiate_monitor(vdu))
+
+        for vdu in self._vdus:
+            self._loop.create_task(instantiate(vdu))
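+
+    # Illustrative cloud-init fragment (VDU id and script contents assumed,
+    # not part of the original submission) showing what instantiate() above
+    # substitutes:
+    #
+    #   runcmd:
+    #     - echo "peer address {{ vdu[lb].mgmt.ip }}" >> /tmp/peers
+    #     - echo "local vdu {{ vdu.name }}" >> /tmp/peers
+    #
+    # "vdu[lb].mgmt.ip" is resolved through the VdurDatastore once the VDU
+    # with id "lb" reaches a terminal state; "vdu.name" is first rewritten
+    # to "vdu[<this vdu id>].name" and then resolved the same way.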
Returning True") + return True + + @asyncio.coroutine + def instantiation_failed(self): + """ VNFR instantiation failed """ + self._log.debug("VNFR %s instantiation failed ", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.FAILED) + + # Update the VNFR with the changed status + with self._dts.transaction(flags=0) as xact: + yield from self.publish(xact) + + @asyncio.coroutine + def is_ready(self): + """ This VNF is ready""" + self._log.debug("VNFR id %s is ready", self.vnfr_id) + + if self._state != VirtualNetworkFunctionRecordState.FAILED: + self.set_state(VirtualNetworkFunctionRecordState.READY) + + # Start the the VNFR monitor + self._vnf_mon = mon_params.VnfMonitorDtsHandler(self._dts, self._log, self._loop, self) + self._vnf_mon.register() + self._vnf_mon.start() + else: + self._log.debug("VNFR id %s ignoring state change", self.vnfr_id) + + # Update the VNFR with the changed status + with self._dts.transaction(flags=0) as xact: + yield from self.publish(xact) + + def update_cp(self, cp_name, ip_address, cp_id): + """Updated the connection point with ip address""" + for cp in self._cprs: + if cp.name == cp_name: + self._log.debug("Setting ip address and id for cp %s, cpr %s with ip %s id %s", + cp_name, cp, ip_address,cp_id) + cp.ip_address = ip_address + cp.connection_point_id = cp_id + return + + err = "No connection point %s found in VNFR id %s" % (cp.name, self._vnfr_id) + self._log.debug(err) + raise VirtualDeploymentUnitRecordError(err) + + def set_state(self, state): + """ Set state for this VNFR""" + self._state = state + + @asyncio.coroutine + def instantiate(self, xact, restart_mode=False): + """ instantiate this VNF """ + self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE) + + @asyncio.coroutine + def fetch_vlrs(): + """ Fetch VLRs """ + # Iterate over all the connection points in VNFR and fetch the + # associated VLRs + + def cpr_from_cp(cp): + """ Creates a record level connection point from the desciptor cp""" + cp_fields = ["name", "image", "vm-flavor"] + cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields} + cpr_dict = {} + cpr_dict.update(cp_copy_dict) + return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict) + + self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s", + self._vnfr_id, self._vnfr.connection_point) + + for cp in self._vnfr.connection_point: + cpr = cpr_from_cp(cp) + self._cprs.append(cpr) + self._log.debug("Adding Connection point record %s ", cp) + + vlr_path = self.vlr_xpath(cp.vlr_ref) + self._log.debug("Fetching VLR with path = %s", vlr_path) + res_iter = yield from self._dts.query_read(self.vlr_xpath(cp.vlr_ref), + rwdts.Flag.MERGE) + for i in res_iter: + r = yield from i + d = r.result + self._ext_vlrs[cp.vlr_ref] = d + cpr.vlr_ref = cp.vlr_ref + self._log.debug("Fetched VLR [%s] with path = [%s]", d, vlr_path) + + # Fetch the VNFD associated with the VNFR + self._log.debug("VNFR-ID %s: Fetching vnfds", self._vnfr_id) + self._vnfd = yield from self._vnfm.get_vnfd_ref(self._vnfd_id) + self._log.debug("VNFR-ID %s: Fetched vnfd:%s", self._vnfr_id, self._vnfd) + + assert self.vnfd is not None + + # Fetch External VLRs + self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id) + yield from fetch_vlrs() + + # Publish inventory + self._log.debug("VNFR-ID %s: Publishing Inventory", self._vnfr_id) + yield from self.publish_inventory(xact) + + # Publish inventory + self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id) + yield from self.create_vls() + + # publish the VNFR 
+ self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id) + yield from self.publish(xact) + + # instantiate VLs + self._log.debug("VNFR-ID %s: Instantiate VLs", self._vnfr_id) + try: + yield from self.instantiate_vls(xact, restart_mode) + except Exception: + self._log.exception("VL instantiation failed") + yield from self.instantiation_failed() + return + + self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE) + + # instantiate VDUs + self._log.debug("VNFR-ID %s: Create VDUs", self._vnfr_id) + self.create_vdus(self, restart_mode) + + # publish the VNFR + self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id) + yield from self.publish(xact) + + # instantiate VDUs + # ToDo: Check if this should be prevented during restart + self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id) + _ = self._loop.create_task(self.instantiate_vdus(xact, self)) + + # publish the VNFR + self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id) + yield from self.publish(xact) + + self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id) + + @asyncio.coroutine + def terminate(self, xact): + """ Terminate this virtual network function """ + + self._log.debug("Terminatng VNF id %s", self.vnfr_id) + + self.set_state(VirtualNetworkFunctionRecordState.TERMINATE) + + # stop monitoring + if self._vnf_mon is not None: + self._vnf_mon.stop() + self._vnf_mon.deregister() + self._vnf_mon = None + + @asyncio.coroutine + def terminate_vls(): + """ Terminate VLs in this VNF """ + for vl in self._vlrs: + yield from vl.terminate(xact) + + @asyncio.coroutine + def terminate_vdus(): + """ Terminate VDUS in this VNF """ + for vdu in self._vdus: + yield from vdu.terminate(xact) + + self._log.debug("Terminatng VLs in VNF id %s", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.VL_TERMINATE_PHASE) + yield from terminate_vls() + + self._log.debug("Terminatng VDUs in VNF id %s", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.VDU_TERMINATE_PHASE) + yield from terminate_vdus() + + self._log.debug("Terminated VNF id %s", self.vnfr_id) + self.set_state(VirtualNetworkFunctionRecordState.TERMINATED) + + +class VnfdDtsHandler(object): + """ DTS handler for VNFD config changes """ + XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd" + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._vnfm = vnfm + self._regh = None + + @asyncio.coroutine + def regh(self): + """ DTS registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for VNFD configuration""" + + def on_apply(dts, acg, xact, action, scratch): + """Apply the configuration""" + self._log.debug("Got VNFM VNFD apply (xact: %s) (action: %s)(scr: %s)", + xact, action, scratch) + # Create/Update a VNFD record + for cfg in self._regh.get_xact_elements(xact): + # Only interested in those VNFD cfgs whose ID was received in prepare callback + if cfg.id in acg.scratch['vnfds']: + self._vnfm.update_vnfd(cfg) + + del acg._scratch['vnfds'][:] + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ on prepare callback """ + self._log.debug("Got on prepare for VNFD (path: %s) (action: %s)", + ks_path.to_xpath(RwVnfmYang.get_schema()), msg) + # RIFT-10161 + try: + fref = ProtobufC.FieldReference.alloc() + fref.goto_whole_message(msg.to_pbcm()) + + # Handle deletes in prepare_callback, but adds/updates in apply_callback + if fref.is_field_deleted(): + # Delete an VNFD record + self._log.debug("Deleting VNFD with id %s", 
msg.id) + if self._vnfm.vnfd_in_use(msg.id): + self._log.debug("Cannot delete VNFD in use - %s", msg) + err = "Cannot delete a VNFD in use - %s" % msg + raise VirtualNetworkFunctionDescriptorRefCountExists(err) + # Delete a VNFD record + yield from self._vnfm.delete_vnfd(msg.id) + else: + # Handle actual adds/updates in apply_callback, + # just check if VNFD in use in prepare_callback + if self._vnfm.vnfd_in_use(msg.id): + self._log.debug("Cannot modify an VNFD in use - %s", msg) + err = "Cannot modify an VNFD in use - %s" % msg + raise VirtualNetworkFunctionDescriptorRefCountExists(err) + + # Add this VNFD to scratch to create/update in apply callback + acg._scratch['vnfds'].append(msg.id) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + except Exception: + self._log.exception("VNFD delete/modification failed:%s", msg.id) + acg._acg.prepare_complete_fail(xact_info=xact_info._xact_info, + rs=RwTypes.RwStatus.FAILURE, + errstr="Cannot modify VNFD in use") + + self._log.debug( + "Registering for VNFD config using xpath: %s", + VnfdDtsHandler.XPATH, + ) + acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply) + with self._dts.appconf_group_create(handler=acg_hdl) as acg: + # Need a list in scratch to store VNFDs to create/update later + acg._scratch['vnfds'] = list() + self._regh = acg.register( + xpath=VnfdDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare) + + +class VcsComponentDtsHandler(object): + """ Vcs Component DTS handler """ + XPATH = ("D,/rw-manifest:manifest" + + "/rw-manifest:operational-inventory" + + "/rw-manifest:component") + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._regh = None + self._vnfm = vnfm + + @property + def regh(self): + """ DTS registration handle """ + return self._regh + + @asyncio.coroutine + def register(self): + """ Registers VCS component dts publisher registration""" + self._log.debug("VCS Comp publisher DTS handler registering path %s", + VcsComponentDtsHandler.XPATH) + + hdl = rift.tasklets.DTS.RegistrationHandler() + handlers = rift.tasklets.Group.Handler() + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register(xpath=VcsComponentDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.FILE_DATASTORE),) + + @asyncio.coroutine + def publish(self, xact, path, msg): + """ Publishes the VCS component """ + self._log.debug("Publishing the VcsComponent xact = %s, %s:%s", + xact, path, msg) + self.regh.create_element(path, msg) + self._log.debug("Published the VcsComponent to %s xact = %s, %s:%s", + VcsComponentDtsHandler.XPATH, xact, path, msg) + + +class VnfrDtsHandler(object): + """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS""" + XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr" + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._vnfm = vnfm + + self._regh = None + + @property + def regh(self): + """ Return registration handle""" + return self._regh + + @property + def vnfm(self): + """ Return VNF manager instance """ + return self._vnfm + + @asyncio.coroutine + def register(self): + """ Register for vnfr create/update/delete/read requests from dts """ + def on_commit(xact_info): + """ The transaction has been committed """ + self._log.debug("Got vnfr commit (xact_info: %s)", xact_info) + return rwdts.MemberRspCode.ACTION_OK + + def on_abort(*args): + """ Abort callback """ + 
self._log.debug("VNF transaction got aborted") + + @asyncio.coroutine + def on_event(dts, g_reg, xact, xact_event, scratch_data): + + @asyncio.coroutine + def instantiate_realloc_vnfr(vnfr): + """Re-populate the vnfm after restart + + Arguments: + vlink + + """ + + with self._dts.transaction(flags=0) as xact: + yield from vnfr.instantiate(xact, restart_mode=True) + + if xact_event == rwdts.MemberEvent.INSTALL: + curr_cfg = self.regh.elements + for cfg in curr_cfg: + vnfr = self.vnfm.create_vnfr(cfg) + self._loop.create_task(instantiate_realloc_vnfr(vnfr)) + + self._log.debug("Got on_event in vnfm") + + return rwdts.MemberRspCode.ACTION_OK + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + self._log.debug( + "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + + if action == rwdts.QueryAction.CREATE: + if not msg.has_field("vnfd_ref"): + err = "Vnfd reference not provided" + self._log.error(err) + raise VnfRecordError(err) + + vnfr = self.vnfm.create_vnfr(msg) + try: + # RIFT-9105: Unable to add a READ query under an existing transaction + # xact = xact_info.xact + with self._dts.transaction(flags=0) as xact: + yield from vnfr.instantiate(xact) + except Exception as e: + self._log.exception(e) + self._log.error("Error while instantiating vnfr:%s", vnfr.vnfr_id) + vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED) + with self._dts.transaction(flags=0) as xact: + yield from vnfr.publish(xact) + elif action == rwdts.QueryAction.DELETE: + schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vnfr = self._vnfm.get_vnfr(path_entry.key00.id) + + if vnfr is None: + self._log.debug("VNFR id %s not found for delete", path_entry.key00.id) + raise VirtualNetworkFunctionRecordNotFound( + "VNFR id %s", path_entry.key00.id) + + try: + yield from vnfr.terminate(xact_info.xact) + # Unref the VNFD + vnfr.vnfd.unref() + yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr) + except Exception as e: + self._log.exception(e) + self._log.error("Caught exception while deleting vnfr %s", + path_entry.key00.id) + + elif action == rwdts.QueryAction.UPDATE: + schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vnfr = self._vnfm.get_vnfr(path_entry.key00.id) + + if vnfr is None: + self._log.debug("VNFR id %s not found for update", path_entry.key00.id) + raise VirtualNetworkFunctionRecordNotFound( + "VNFR id %s", path_entry.key00.id) + + self._log.debug("VNFR {} update config status {} (current {})". 
+ format(vnfr.name, msg.config_status, vnfr.config_status)) + if vnfr.config_status != msg.config_status: + # Update the config status and publish + vnfr._config_status = msg.config_status + with self._dts.transaction(flags=0) as xact: + yield from vnfr.publish(xact) + + else: + raise NotImplementedError( + "%s action on VirtualNetworkFunctionRecord not supported", + action) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for VNFR using xpath: %s", + VnfrDtsHandler.XPATH,) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit, + on_prepare=on_prepare,) + handlers = rift.tasklets.Group.Handler(on_event=on_event,) + with self._dts.group_create(handler=handlers) as group: + self._regh = group.register(xpath=VnfrDtsHandler.XPATH, + handler=hdl, + flags=(rwdts.Flag.PUBLISHER | + rwdts.Flag.NO_PREP_READ | + rwdts.Flag.CACHE | + rwdts.Flag.FILE_DATASTORE),) + + @asyncio.coroutine + def create(self, xact, path, msg): + """ + Create a VNFR record in DTS with path and message + """ + self._log.debug("Creating VNFR xact = %s, %s:%s", + xact, path, msg) + + self.regh.create_element(path, msg) + self._log.debug("Created VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def update(self, xact, path, msg): + """ + Update a VNFR record in DTS with path and message + """ + self._log.debug("Updating VNFR xact = %s, %s:%s", + xact, path, msg) + self.regh.update_element(path, msg) + self._log.debug("Updated VNFR xact = %s, %s:%s", + xact, path, msg) + + @asyncio.coroutine + def delete(self, xact, path): + """ + Delete a VNFR record in DTS with path and message + """ + self._log.debug("Deleting VNFR xact = %s, %s", xact, path) + self.regh.delete_element(path) + self._log.debug("Deleted VNFR xact = %s, %s", xact, path) + + +class VirtualNetworkFunctionDescriptor(object): + """ + Virtual Network Function descriptor class + """ + + def __init__(self, dts, log, loop, vnfm, vnfd): + self._dts = dts + self._log = log + self._loop = loop + + self._vnfm = vnfm + self._vnfd = vnfd + self._ref_count = 0 + + @property + def ref_count(self): + """ Returns the reference count associated with + this Virtual Network Function Descriptor""" + return self._ref_count + + @property + def id(self): + """ Returns vnfd id """ + return self._vnfd.id + + @property + def name(self): + """ Returns vnfd name """ + return self._vnfd.name + + def in_use(self): + """ Returns whether vnfd is in use or not """ + return True if self._ref_count > 0 else False + + def ref(self): + """ Take a reference on this object """ + self._ref_count += 1 + return self._ref_count + + def unref(self): + """ Release reference on this object """ + if self.ref_count < 1: + msg = ("Unref on a VNFD object - vnfd id %s, ref_count = %s" % + (self.id, self._ref_count)) + self._log.critical(msg) + raise VnfRecordError(msg) + self._log.debug("Releasing ref on VNFD %s - curr ref_count:%s", + self.id, self.ref_count) + self._ref_count -= 1 + return self._ref_count + + @property + def msg(self): + """ Return the message associated with this NetworkServiceDescriptor""" + return self._vnfd + + @staticmethod + def path_for_id(vnfd_id): + """ Return path for the passed vnfd_id""" + return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id) + + def path(self): + """ Return the path associated with this NetworkServiceDescriptor""" + return VirtualNetworkFunctionDescriptor.path_for_id(self.id) + + def update(self, vnfd): + """ Update the Virtual Network Function Descriptor """ + if self.in_use(): + 
self._log.error("Cannot update descriptor %s in use", self.id) + raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot update descriptor in use %s" % self.id) + self._vnfd = vnfd + + def delete(self): + """ Delete the Virtual Network Function Descriptor """ + if self.in_use(): + self._log.error("Cannot delete descriptor %s in use", self.id) + raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot delete descriptor in use %s" % self.id) + self._vnfm.delete_vnfd(self.id) + + +class VnfdRefCountDtsHandler(object): + """ The VNFD Ref Count DTS handler """ + XPATH = "D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" + + def __init__(self, dts, log, loop, vnfm): + self._dts = dts + self._log = log + self._loop = loop + self._vnfm = vnfm + + self._regh = None + + @property + def regh(self): + """ Return registration handle """ + return self._regh + + @property + def vnfm(self): + """ Return the NS manager instance """ + return self._vnfm + + @asyncio.coroutine + def register(self): + """ Register for VNFD ref count read from dts """ + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare callback from dts """ + xpath = ks_path.to_xpath(RwVnfrYang.get_schema()) + self._log.debug( + "Got VNFD ref count get xact_info: %s, action: %s): %s:%s", + xact_info, action, xpath, msg + ) + + if action == rwdts.QueryAction.READ: + schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema() + path_entry = schema.keyspec_to_entry(ks_path) + vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref) + for xpath, msg in vnfd_list: + self._log.debug("Responding to ref count query path:%s, msg:%s", + xpath, msg) + xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE, + xpath=xpath, + msg=msg) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + raise VnfRecordError("Not supported operation %s" % action) + + hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,) + with self._dts.group_create() as group: + self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH, + handler=hdl, + flags=rwdts.Flag.PUBLISHER, + ) + + +class VdurDatastore(object): + """ + This VdurDatastore is intended to expose select information about a VDUR + such that it can be referenced in a cloud config file. The data that is + exposed does not necessarily follow the structure of the data in the yang + model. This is intentional. The data that are exposed are intended to be + agnostic of the yang model so that changes in the model do not necessarily + require changes to the interface provided to the user. It also means that + the user does not need to be familiar with the RIFT.ware yang models. + """ + + def __init__(self): + """Create an instance of VdurDatastore""" + self._vdur_data = dict() + self._pattern = re.compile("vdu\[([^]]+)\]\.(.+)") + + def add(self, vdur): + """Add a new VDUR to the datastore + + Arguments: + vdur - a VirtualDeploymentUnitRecord instance + + Raises: + A ValueError is raised if the VDUR is (1) None or (2) already in + the datastore. 
+
+        """
+        if vdur.vdu_id is None:
+            raise ValueError('VDURs are required to have an ID')
+
+        if vdur.vdu_id in self._vdur_data:
+            raise ValueError('cannot add a VDUR more than once')
+
+        self._vdur_data[vdur.vdu_id] = dict()
+
+        def set_if_not_none(key, attr):
+            if attr is not None:
+                self._vdur_data[vdur.vdu_id][key] = attr
+
+        set_if_not_none('name', vdur._vdud.name)
+        set_if_not_none('mgmt.ip', vdur.vm_management_ip)
+
+    def update(self, vdur):
+        """Update the VDUR information in the datastore
+
+        Arguments:
+            vdur - a GI representation of a VDUR
+
+        Raises:
+            A ValueError is raised if the VDUR is (1) None or (2) not in
+            the datastore.
+
+        """
+        if vdur.vdu_id is None:
+            raise ValueError('VDURs are required to have an ID')
+
+        if vdur.vdu_id not in self._vdur_data:
+            raise ValueError('VDUR is not recognized')
+
+        def set_or_delete(key, attr):
+            if attr is None:
+                if key in self._vdur_data[vdur.vdu_id]:
+                    del self._vdur_data[vdur.vdu_id][key]
+
+            else:
+                self._vdur_data[vdur.vdu_id][key] = attr
+
+        set_or_delete('name', vdur._vdud.name)
+        set_or_delete('mgmt.ip', vdur.vm_management_ip)
+
+    def remove(self, vdur_id):
+        """Remove all of the data associated with specified VDUR
+
+        Arguments:
+            vdur_id - the identifier of a VDUR in the datastore
+
+        Raises:
+            A ValueError is raised if the VDUR is not contained in the
+            datastore.
+
+        """
+        if vdur_id not in self._vdur_data:
+            raise ValueError('VDUR is not recognized')
+
+        del self._vdur_data[vdur_id]
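+
+    # Illustrative usage (ids and values assumed, not from the original
+    # code):
+    #
+    #   store = VdurDatastore()
+    #   store.add(vdur)                  # suppose vdur.vdu_id == 'abc'
+    #   store.get('vdu[abc].name')       # -> the VDU's name
+    #   store.get('vdu[abc].mgmt.ip')    # -> e.g. '10.0.0.5', or None
+    #   store.get('vdu[abc].unknown')    # -> None (attribute not exposed)
+    #   store.get('not-an-expression')   # raises ValueError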
+    def get(self, expr):
+        """Retrieve VDUR information from the datastore
+
+        An expression should be of the form,
+
+            vdu[<id>].<attr>
+
+        where <id> is the VDUR ID (an unquoted UUID), and <attr> is the name
+        of the exposed attribute that the user wishes to retrieve.
+
+        If the requested data is not available, None is returned.
+
+        Arguments:
+            expr - a string that specifies the data to return
+
+        Raises:
+            A ValueError is raised if the provided expression cannot be
+            parsed.
+
+        Returns:
+            The requested data or None
+
+        """
+        result = self._pattern.match(expr)
+        if result is None:
+            raise ValueError('data expression not recognized ({})'.format(expr))
+
+        vdur_id, key = result.groups()
+
+        if vdur_id not in self._vdur_data:
+            return None
+
+        return self._vdur_data[vdur_id].get(key, None)
+
+
+class VnfManager(object):
+    """ The virtual network function manager class """
+    def __init__(self, dts, log, loop, cluster_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cluster_name = cluster_name
+
+        self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
+        self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+
+        self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
+                              self._vnfr_handler,
+                              self._vcs_handler,
+                              VnfdRefCountDtsHandler(dts, log, loop, self)]
+        self._vnfrs = {}
+        self._vnfds = {}
+
+    @property
+    def vnfr_handler(self):
+        """ VNFR dts handler """
+        return self._vnfr_handler
+
+    @property
+    def vcs_handler(self):
+        """ VCS dts handler """
+        return self._vcs_handler
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers """
+        for hdl in self._dts_handlers:
+            yield from hdl.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """ Run this VNFM instance """
+        self._log.debug("Run VNFManager - registering static DTS handlers")
+        yield from self.register()
+
+    def get_vnfr(self, vnfr_id):
+        """ get VNFR by vnfr id """
+
+        if vnfr_id not in self._vnfrs:
+            raise VnfRecordError("VNFR id %s not found" % vnfr_id)
+
+        return self._vnfrs[vnfr_id]
+
+    def create_vnfr(self, vnfr):
+        """ Create a VNFR instance """
+        if vnfr.id in self._vnfrs:
+            msg = "Vnfr id %s already exists" % vnfr.id
+            self._log.error(msg)
+            raise VnfRecordError(msg)
+
+        self._log.info("Create VirtualNetworkFunctionRecord %s from vnfd_id: %s",
+                       vnfr.id,
+                       vnfr.vnfd_ref)
+
+        self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord(
+            self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr
+            )
+        return self._vnfrs[vnfr.id]
+
+    @asyncio.coroutine
+    def delete_vnfr(self, xact, vnfr):
+        """ Delete a VNFR instance """
+        if vnfr.vnfr_id in self._vnfrs:
+            self._log.debug("Deleting VNFR id %s", vnfr.vnfr_id)
+            yield from self._vnfr_handler.delete(xact, vnfr.xpath)
+            del self._vnfrs[vnfr.vnfr_id]
+
+    @asyncio.coroutine
+    def fetch_vnfd(self, vnfd_id):
+        """ Fetch the VNFD with the given vnfd id """
+        vnfd_path = VirtualNetworkFunctionDescriptor.path_for_id(vnfd_id)
+        self._log.debug("Fetch vnfd with path %s", vnfd_path)
+        vnfd = None
+
+        res_iter = yield from self._dts.query_read(vnfd_path, rwdts.Flag.MERGE)
+
+        for ent in res_iter:
+            res = yield from ent
+            vnfd = res.result
+
+        if vnfd is None:
+            err = "Failed to get Vnfd %s" % vnfd_id
+            self._log.error(err)
+            raise VnfRecordError(err)
+
+        self._log.debug("Fetched vnfd for path %s, vnfd - %s", vnfd_path, vnfd)
+
+        return vnfd
+
+    @asyncio.coroutine
+    def get_vnfd_ref(self, vnfd_id):
+        """ Get Virtual Network Function descriptor for the passed vnfd_id"""
+        vnfd = yield from self.get_vnfd(vnfd_id)
+        vnfd.ref()
+        return vnfd
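+
+    # Reference-counting sketch, drawn from this file: a VNFR instantiation
+    # obtains its descriptor via get_vnfd_ref(), which bumps the ref count;
+    # VnfrDtsHandler's delete path calls vnfr.vnfd.unref() before
+    # delete_vnfr(); and update_vnfd()/delete_vnfd() below refuse to modify
+    # or delete a descriptor that is still in use.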
self._log.error("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id)) + raise VirtualNetworkFunctionDescriptorError("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id)) + + if vnfd.id not in self._vnfds: + self.create_vnfd(vnfd) + + return self._vnfds[vnfd_id] + + def vnfd_in_use(self, vnfd_id): + """ Is this VNFD in use """ + self._log.debug("Is this VNFD in use - msg:%s", vnfd_id) + if vnfd_id in self._vnfds: + return self._vnfds[vnfd_id].in_use() + return False + + @asyncio.coroutine + def publish_vnfr(self, xact, path, msg): + """ Publish a VNFR """ + self._log.debug("publish_vnfr called with path %s, msg %s", + path, msg) + yield from self.vnfr_handler.update(xact, path, msg) + + def create_vnfd(self, vnfd): + """ Create a virtual network function descriptor """ + self._log.debug("Create virtual networkfunction descriptor - %s", vnfd) + if vnfd.id in self._vnfds: + self._log.error("Cannot create VNFD %s -VNFD id already exists", vnfd) + raise VirtualNetworkFunctionDescriptorError("VNFD already exists-%s", vnfd.id) + + self._vnfds[vnfd.id] = VirtualNetworkFunctionDescriptor(self._dts, + self._log, + self._loop, + self, + vnfd) + return self._vnfds[vnfd.id] + + def update_vnfd(self, vnfd): + """ update the Virtual Network Function descriptor """ + self._log.debug("Update virtual network function descriptor - %s", vnfd) + + # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511 + for ivld in vnfd.internal_vld: + ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref)) + + if vnfd.id not in self._vnfds: + self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id) + self.create_vnfd(vnfd) + else: + self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd) + self._vnfds[vnfd.id].update(vnfd) + + @asyncio.coroutine + def delete_vnfd(self, vnfd_id): + """ Delete the Virtual Network Function descriptor with the passed id """ + self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id) + if vnfd_id not in self._vnfds: + self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id) + raise VirtualNetworkFunctionDescriptorNotFound("Cannot find %s", vnfd_id) + + if vnfd_id not in self._vnfds: + self._log.debug("Cannot delete VNFD id %s reference exists %s", + vnfd_id, + self._vnfds[vnfd_id].ref_count) + raise VirtualNetworkFunctionDescriptorRefCountExists( + "Cannot delete :%s, ref_count:%s", + vnfd_id, + self._vnfds[vnfd_id].ref_count) + + del self._vnfds[vnfd_id] + + def vnfd_refcount_xpath(self, vnfd_id): + """ xpath for ref count entry """ + return (VnfdRefCountDtsHandler.XPATH + + "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id) + + @asyncio.coroutine + def get_vnfd_refcount(self, vnfd_id): + """ Get the vnfd_list from this VNFM""" + vnfd_list = [] + if vnfd_id is None or vnfd_id == "": + for vnfd in self._vnfds.values(): + vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount() + vnfd_msg.vnfd_id_ref = vnfd.id + vnfd_msg.instance_ref_count = vnfd.ref_count + vnfd_list.append((self.vnfd_refcount_xpath(vnfd.id), vnfd_msg)) + elif vnfd_id in self._vnfds: + vnfd_msg.vnfd_id_ref = self._vnfds[vnfd_id].id + vnfd_msg.instance_ref_count = self._vnfds[vnfd_id].ref_count + vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg)) + + return vnfd_list + + +class VnfmTasklet(rift.tasklets.Tasklet): + """ VNF Manager tasklet class """ + def __init__(self, *args, **kwargs): + super(VnfmTasklet, self).__init__(*args, **kwargs) + self._dts = None + self._vnfm = None + + def start(self): 
+ try: + super(VnfmTasklet, self).start() + self.log.info("Starting VnfmTasklet") + + self.log.setLevel(logging.DEBUG) + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwVnfmYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + except Exception: + print("Caught Exception in VNFM start:", sys.exc_info()[0]) + raise + + def on_instance_started(self): + """ Task insance started callback """ + self.log.debug("Got instance started callback") + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in VNFM stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + """ Task init callback """ + try: + vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name() + assert vm_parent_name is not None + self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name) + yield from self._vnfm.run() + except Exception: + print("Caught Exception in VNFM init:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def run(self): + """ Task run callback """ + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py new file mode 100755 index 0000000..0857818 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
+ +import rift.tasklets.rwvnfmtasklet + +class Tasklet(rift.tasklets.rwvnfmtasklet.VnfmTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py new file mode 100755 index 0000000..159971a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvnfm/test/mon_params_test.py @@ -0,0 +1,514 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import asyncio +import base64 +import logging +import os +import sys +import tornado.escape +import tornado.platform.asyncio +import tornado.testing +import tornado.web +import unittest +import xmlrunner + +import rift.tasklets.rwvnfmtasklet.mon_params as mon_params + + +from gi.repository import VnfrYang + +logging.basicConfig(format='TEST %(message)s', level=logging.DEBUG) +logger = logging.getLogger("mon_params_test.py") + + +class AsyncioTornadoTest(tornado.testing.AsyncHTTPTestCase): + def setUp(self): + self._loop = asyncio.get_event_loop() + super().setUp() + + def get_new_ioloop(self): + return tornado.platform.asyncio.AsyncIOMainLoop() + + +class MonParamsPingStatsTest(AsyncioTornadoTest): + ping_path = r"/api/v1/ping/stats" + ping_response = { + 'ping-request-tx-count': 5, + 'ping-response-rx-count': 10 + } + + mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam() + mon_param_msg.from_dict({ + 'id': '1', + 'name': 'ping-request-tx-count', + 'json_query_method': "NAMEKEY", + 'http_endpoint_ref': ping_path, + 'value_type': "INT", + 'description': 'no of ping requests', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }) + + endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint() + endpoint_msg.from_dict({ + 'path': ping_path, + 'polling_interval_secs': 1, + 'username': 'admin', + 'password': 'password', + 'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}], + }) + + def create_endpoint(self, endpoint_msg): + self.mon_port = self.get_http_port() + endpoint = mon_params.HTTPEndpoint( + logger, + self._loop, + "127.0.0.1", + self.endpoint_msg, + ) + # For each creation, update the descriptor as well + endpoint_msg.port = self.mon_port + + return endpoint + + def create_mon_param(self): + return mon_params.MonitoringParam(logger, self.mon_param_msg) + + def get_app(self): + class PingStatsHandler(tornado.web.RequestHandler): + def get(this): + test_header = this.request.headers.get('TEST_KEY') + if test_header is None or test_header != 'TEST_VALUE': + this.set_status(401) + this.finish() + return None + + auth_header = this.request.headers.get('Authorization') + if auth_header is None or not auth_header.startswith('Basic '): + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + auth_header = auth_header.encode('ascii') + auth_decoded = 
base64.decodestring(auth_header[6:]).decode('ascii') + login, password = auth_decoded.split(':', 2) + login = login.encode('ascii') + password = password.encode('ascii') + is_auth = (login == b"admin" and password == b"password") + + if not is_auth: + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + this.write(self.ping_response) + + return tornado.web.Application([ + (self.ping_path, PingStatsHandler), + ]) + + def test_value_convert(self): + float_con = mon_params.ValueConverter("DECIMAL") + int_con = mon_params.ValueConverter("INT") + text_con = mon_params.ValueConverter("STRING") + + a = float_con.convert("1.23") + self.assertEqual(a, 1.23) + + a = float_con.convert(1) + self.assertEqual(a, float(1)) + + t = text_con.convert(1.23) + self.assertEqual(t, "1.23") + + t = text_con.convert("asdf") + self.assertEqual(t, "asdf") + + i = int_con.convert(1.23) + self.assertEqual(i, 1) + + def test_json_key_value_querier(self): + kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count") + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_json_path_value_querier(self): + kv_querier = mon_params.JsonPathValueQuerier(logger, '$.ping-request-tx-count') + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_object_path_value_querier(self): + kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.*['ping-request-tx-count']") + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_endpoint(self): + @asyncio.coroutine + def run_test(): + endpoint = self.create_endpoint(self.endpoint_msg) + resp = yield from endpoint.poll() + resp_json = tornado.escape.json_decode(resp) + self.assertEqual(resp_json["ping-request-tx-count"], 5) + self.assertEqual(resp_json["ping-response-rx-count"], 10) + + self._loop.run_until_complete( + asyncio.wait_for(run_test(), 10, loop=self._loop) + ) + + def test_mon_param(self): + a = self.create_mon_param() + a.extract_value_from_response(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(a.current_value, 5) + self.assertEqual(a.msg.value_integer, 5) + + def test_endpoint_poller(self): + endpoint = self.create_endpoint(self.endpoint_msg) + mon_param = self.create_mon_param() + poller = mon_params.EndpointMonParamsPoller( + logger, self._loop, endpoint, [mon_param], + ) + poller.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + self.assertEqual(mon_param.current_value, 5) + + poller.stop() + + def test_params_controller(self): + new_port = self.get_http_port() + # Update port after new port is initialized + self.endpoint_msg.port = new_port + ctrl = mon_params.VnfMonitoringParamsController( + logger, self._loop, "1", "127.0.0.1", + [self.endpoint_msg], [self.mon_param_msg], + ) + ctrl.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + + ctrl.stop() + + self.assertEqual(1, len(ctrl.mon_params)) + mon_param = ctrl.mon_params[0] + self.assertEqual(mon_param.current_value, 5) + + +class AsyncioTornadoHttpsTest(tornado.testing.AsyncHTTPSTestCase): + def setUp(self): + self._loop = asyncio.get_event_loop() + super().setUp() + + def get_new_ioloop(self): + return tornado.platform.asyncio.AsyncIOMainLoop() + + +class MonParamsPingStatsHttpsTest(AsyncioTornadoHttpsTest): + ping_path = r"/api/v1/ping/stats" + 
ping_response = { + 'ping-request-tx-count': 5, + 'ping-response-rx-count': 10 + } + + mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam() + mon_param_msg.from_dict({ + 'id': '1', + 'name': 'ping-request-tx-count', + 'json_query_method': "NAMEKEY", + 'http_endpoint_ref': ping_path, + 'value_type': "INT", + 'description': 'no of ping requests', + 'group_tag': 'Group1', + 'widget_type': 'COUNTER', + 'units': 'packets' + }) + + endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint() + endpoint_msg.from_dict({ + 'path': ping_path, + 'https': 'true', + 'polling_interval_secs': 1, + 'username': 'admin', + 'password': 'password', + 'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}], + }) + + def create_endpoint(self, endpoint_msg): + self.mon_port = self.get_http_port() + endpoint = mon_params.HTTPEndpoint( + logger, + self._loop, + "127.0.0.1", + self.endpoint_msg, + ) + # For each creation, update the descriptor as well + endpoint_msg.port = self.mon_port + + return endpoint + + def create_mon_param(self): + return mon_params.MonitoringParam(logger, self.mon_param_msg) + + def get_app(self): + class PingStatsHandler(tornado.web.RequestHandler): + def get(this): + test_header = this.request.headers.get('TEST_KEY') + if test_header is None or test_header != 'TEST_VALUE': + this.set_status(401) + this.finish() + return None + + auth_header = this.request.headers.get('Authorization') + if auth_header is None or not auth_header.startswith('Basic '): + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + auth_header = auth_header.encode('ascii') + auth_decoded = base64.decodestring(auth_header[6:]).decode('ascii') + login, password = auth_decoded.split(':', 2) + login = login.encode('ascii') + password = password.encode('ascii') + is_auth = (login == b"admin" and password == b"password") + + if not is_auth: + this.set_status(401) + this.set_header('WWW-Authenticate', 'Basic realm=Restricted') + this._transforms = [] + this.finish() + return None + + this.write(self.ping_response) + + return tornado.web.Application([ + (self.ping_path, PingStatsHandler), + ]) + + def test_value_convert(self): + float_con = mon_params.ValueConverter("DECIMAL") + int_con = mon_params.ValueConverter("INT") + text_con = mon_params.ValueConverter("STRING") + + a = float_con.convert("1.23") + self.assertEqual(a, 1.23) + + a = float_con.convert(1) + self.assertEqual(a, float(1)) + + t = text_con.convert(1.23) + self.assertEqual(t, "1.23") + + t = text_con.convert("asdf") + self.assertEqual(t, "asdf") + + i = int_con.convert(1.23) + self.assertEqual(i, 1) + + def test_json_key_value_querier(self): + kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count") + value = kv_querier.query(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(value, 5) + + def test_endpoint(self): + @asyncio.coroutine + def run_test(): + endpoint = self.create_endpoint(self.endpoint_msg) + resp = yield from endpoint.poll() + resp_json = tornado.escape.json_decode(resp) + self.assertEqual(resp_json["ping-request-tx-count"], 5) + self.assertEqual(resp_json["ping-response-rx-count"], 10) + + self._loop.run_until_complete( + asyncio.wait_for(run_test(), 10, loop=self._loop) + ) + + def test_mon_param(self): + a = self.create_mon_param() + a.extract_value_from_response(tornado.escape.json_encode(self.ping_response)) + self.assertEqual(a.current_value, 5) + self.assertEqual(a.msg.value_integer, 
5) + + def test_endpoint_poller(self): + endpoint = self.create_endpoint(self.endpoint_msg) + mon_param = self.create_mon_param() + poller = mon_params.EndpointMonParamsPoller( + logger, self._loop, endpoint, [mon_param], + ) + poller.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + self.assertEqual(mon_param.current_value, 5) + + poller.stop() + + def test_params_controller(self): + new_port = self.get_http_port() + # Update port after new port is initialized + self.endpoint_msg.port = new_port + ctrl = mon_params.VnfMonitoringParamsController( + logger, self._loop, "1", "127.0.0.1", + [self.endpoint_msg], [self.mon_param_msg], + ) + ctrl.start() + + self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop)) + + ctrl.stop() + + self.assertEqual(1, len(ctrl.mon_params)) + mon_param = ctrl.mon_params[0] + self.assertEqual(mon_param.current_value, 5) + + +class VRouterStatsTest(unittest.TestCase): + system_response = { + "system": { + "cpu": [ + { + "usage": 2.35, + "cpu": "all" + }, + { + "usage": 5.35, + "cpu": "1" + } + ] + } + } + + def test_object_path_value_querier(self): + kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.system.cpu[@.cpu is 'all'].usage") + value = kv_querier.query(tornado.escape.json_encode(self.system_response)) + self.assertEqual(value, 2.35) + + +class TrafsinkStatsTest(unittest.TestCase): + system_response = { + "rw-vnf-base-opdata:port-state": [ + { + "ip": [ + { + "address": "12.0.0.3/24" + } + ], + "rw-trafgen-data:trafgen-info": { + "src_l4_port": 1234, + "dst_l4_port": 5678, + "dst_ip_address": "192.168.1.1", + "tx_state": "Off", + "dst_mac_address": "00:00:00:00:00:00", + "tx_mode": "single-template", + "packet-count": 0, + "tx-cycles": 5478, + "tx_burst": 16, + "src_ip_address": "192.168.0.1", + "pkt_size": 64, + "src_mac_address": "fa:16:3e:07:b1:52", + "descr-string": "", + "tx_rate": 100 + }, + "counters": { + "input-errors": 0, + "output-bytes": 748, + "input-pause-xoff-pkts": 0, + "input-badcrc-pkts": 0, + "input-bytes": 62, + "rx-rate-mbps": 9576, + "output-pause-xoff-pkts": 0, + "input-missed-pkts": 0, + "input-packets": 1, + "output-errors": 0, + "tx-rate-mbps": 0, + "input-pause-xon-pkts": 0, + "output-pause-xon-pkts": 0, + "tx-rate-pps": 0, + "input-mcast-pkts": 0, + "rx-rate-pps": 0, + "output-packets": 6, + "input-nombuf-pkts": 0 + }, + "info": { + "numa-socket": 0, + "transmit-queues": 1, + "privatename": "eth_uio:pci=0000:00:04.0", + "duplex": "full-duplex", + "virtual-fabric": "No", + "link-state": "up", + "rte-port-id": 0, + "fastpath-instance": 1, + "id": 0, + "app-name": "rw_trafgen", + "speed": 10000, + "receive-queues": 1, + "descr-string": "", + "mac": "fa:16:3e:07:b1:52" + }, + "portname": "trafsink_vnfd/cp0", + "queues": { + "rx-queue": [ + { + "packets": 1, + "bytes-MB": 0, + "qid": 0, + "rate-mbps": 0, + "rate-pps": 0 + } + ], + "tx-queue": [ + { + "bytes-MB": 0, + "packets": 6, + "rate-pps": 0, + "errors": 0, + "qid": 0, + "rate-mbps": 0 + } + ] + } + } + ] + } + + def test_object_path_value_querier(self): + kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafsink_vnfd/cp0'].counters.'rx-rate-mbps'") + value = kv_querier.query(tornado.escape.json_encode(self.system_response)) + self.assertEqual(value, 9576) + + +def main(argv=sys.argv[1:]): + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter). 
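+    #
+    # The XML runner below writes its report into the directory named by the
+    # RIFT_MODULE_TEST environment variable, so a typical invocation (the
+    # output path here is only illustrative) looks like:
+    #
+    #   RIFT_MODULE_TEST=/tmp/test-results python3 mon_params_test.py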
+ unittest.main( + argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + ) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt new file mode 100644 index 0000000..55a29e9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/CMakeLists.txt @@ -0,0 +1,39 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +cmake_minimum_required(VERSION 2.8) + +include(rift_plugin) + +set(TASKLET_NAME rwvnstasklet) + +set(subdirs yang vala) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/vlmgr/__init__.py + rift/vlmgr/rwvlmgr.py + rift/topmgr/__init__.py + rift/topmgr/rwtopmgr.py + rift/topmgr/rwtopdatastore.py + rift/topmgr/core.py + rift/topmgr/mock.py + rift/topmgr/sdnsim.py + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py new file mode 100644 index 0000000..35c44ef --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from .rwvnstasklet import VnsTasklet
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
new file mode 100755
index 0000000..af2638e
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
@@ -0,0 +1,356 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import asyncio
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwVnsYang', '1.0')
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+    RwVnsYang,
+    RwDts as rwdts,
+    RwTypes,
+    ProtobufC,
+)
+
+import rift.tasklets
+
+from rift.vlmgr import (
+    VlrDtsHandler,
+    VldDtsHandler,
+    VirtualLinkRecord,
+)
+
+from rift.topmgr import (
+    NwtopStaticDtsHandler,
+    NwtopDiscoveryDtsHandler,
+    NwtopDataStore,
+    SdnAccountMgr,
+)
+
+
+class SdnInterfaceError(Exception):
+    """ SDN interface creation error """
+    pass
+
+
+class SdnPluginError(Exception):
+    """ SDN plugin creation error """
+    pass
+
+
+class VlRecordError(Exception):
+    """ VLR record creation error """
+    pass
+
+
+class VlRecordNotFound(Exception):
+    """ VLR record not found """
+    pass
+
+
+class SdnAccountExistsError(Exception):
+    pass
+
+
+class SDNAccountDtsHandler(object):
+    XPATH = "C,/rw-sdn:sdn-account"
+
+    def __init__(self, dts, log, parent):
+        self._dts = dts
+        self._log = log
+        self._parent = parent
+
+        self._sdn_account = {}
+
+    def _set_sdn_account(self, account):
+        self._log.info("Setting sdn account: {}".format(account))
+        if account.name in self._sdn_account:
+            self._log.error("SDN Account with name %s already exists. Ignoring config", account.name)
+            return
+        self._sdn_account[account.name] = account
+        self._parent._acctmgr.set_sdn_account(account)
+
+    def _del_sdn_account(self, account_name):
+        self._log.info("Deleting sdn account: {}".format(account_name))
+        del self._sdn_account[account_name]
+
+        self._parent._acctmgr.del_sdn_account(account_name)
+
+    @asyncio.coroutine
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle. Skipping apply config")
+                return RwTypes.RwStatus.SUCCESS
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg):
+            """ Prepare callback from DTS for SDN Account config """
+
+            self._log.info("SDN Cloud account config received: %s", msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete the sdn account record
+                self._del_sdn_account(msg.name)
+            else:
+                if msg.name in self._sdn_account:
+                    msg = "Cannot update an SDN account that was already set."
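+                    # In-place edits of an existing account are rejected; the
+                    # account has to be deleted and configured again to change it.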
+ self._log.error(msg) + xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, + SDNAccountDtsHandler.XPATH, + msg) + raise SdnAccountExistsError(msg) + + # Set the sdn account record + self._set_sdn_account(msg) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + + self._log.debug("Registering for Sdn Account config using xpath: %s", + SDNAccountDtsHandler.XPATH, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + + with self._dts.appconf_group_create(acg_handler) as acg: + acg.register( + xpath=SDNAccountDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class VnsManager(object): + """ The Virtual Network Service Manager """ + def __init__(self, dts, log, log_hdl, loop): + self._dts = dts + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._vlr_handler = VlrDtsHandler(dts, log, loop, self) + self._vld_handler = VldDtsHandler(dts, log, loop, self) + self._sdn_handler = SDNAccountDtsHandler(dts,log,self) + self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop) + self._nwtopdata_store = NwtopDataStore(log) + self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store) + self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store) + + self._log.info("type %s", type(self._log)) + + self._vlrs = {} + + @asyncio.coroutine + def register_vlr_handler(self): + """ Register vlr DTS handler """ + self._log.debug("Registering DTS VLR handler") + yield from self._vlr_handler.register() + + @asyncio.coroutine + def register_vld_handler(self): + """ Register vlr DTS handler """ + self._log.debug("Registering DTS VLD handler") + yield from self._vld_handler.register() + + @asyncio.coroutine + def register_sdn_handler(self): + """ Register vlr DTS handler """ + self._log.debug("Registering SDN Account config handler") + yield from self._sdn_handler.register() + + @asyncio.coroutine + def register_nwtopstatic_handler(self): + """ Register static NW topology DTS handler """ + self._log.debug("Registering static DTS NW topology handler") + yield from self._nwtopstatic_handler.register() + + @asyncio.coroutine + def register_nwtopdiscovery_handler(self): + """ Register discovery-based NW topology DTS handler """ + self._log.debug("Registering discovery-based DTS NW topology handler") + yield from self._nwtopdiscovery_handler.register() + + @asyncio.coroutine + def register(self): + """ Register all static DTS handlers""" + yield from self.register_sdn_handler() + yield from self.register_vlr_handler() + yield from self.register_vld_handler() + yield from self.register_nwtopstatic_handler() + # Not used for now + yield from self.register_nwtopdiscovery_handler() + + def create_vlr(self, msg): + """ Create VLR """ + if msg.id in self._vlrs: + err = "Vlr id %s already exists" % msg.id + self._log.error(err) + raise VlRecordError(err) + + self._log.info("Creating VirtualLinkRecord %s", msg.id) + self._vlrs[msg.id] = VirtualLinkRecord(self._dts, + self._log, + self._loop, + self, + msg, + msg.res_id + ) + return self._vlrs[msg.id] + + def get_vlr(self, vlr_id): + """ Get VLR by vlr id """ + return self._vlrs[vlr_id] + + @asyncio.coroutine + def delete_vlr(self, vlr_id, xact): + """ Delete VLR with the passed id""" + if vlr_id not in self._vlrs: + err = "Delete Failed - Vlr id %s not found" % vlr_id + self._log.error(err) + raise VlRecordNotFound(err) + + self._log.info("Deleting virtual link id %s", 
vlr_id) + yield from self._vlrs[vlr_id].terminate(xact) + del self._vlrs[vlr_id] + self._log.info("Deleted virtual link id %s", vlr_id) + + def find_vlr_by_vld_id(self, vld_id): + """ Find a VLR matching the VLD Id """ + for vlr in self._vlrs.values(): + if vlr.vld_id == vld_id: + return vlr + return None + + @asyncio.coroutine + def run(self): + """ Run this VNSM instance """ + self._log.debug("Run VNSManager - registering static DTS handlers") + yield from self.register() + + def vld_in_use(self, vld_id): + """ Is this VLD in use """ + return False + + @asyncio.coroutine + def publish_vlr(self, xact, path, msg): + """ Publish a VLR """ + self._log.debug("Publish vlr called with path %s, msg %s", + path, msg) + yield from self._vlr_handler.update(xact, path, msg) + + @asyncio.coroutine + def unpublish_vlr(self, xact, path): + """ Publish a VLR """ + self._log.debug("Unpublish vlr called with path %s", path) + yield from self._vlr_handler.delete(xact, path) + + +class VnsTasklet(rift.tasklets.Tasklet): + """ The VNS tasklet class """ + def __init__(self, *args, **kwargs): + super(VnsTasklet, self).__init__(*args, **kwargs) + + #self.add_log_stderr_handler() + self._dts = None + self._vlr_handler = None + + self._vnsm = None + # A mapping of instantiated vlr_id's to VirtualLinkRecord objects + self._vlrs = {} + + def start(self): + super(VnsTasklet, self).start() + self.log.info("Starting VnsTasklet") + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS(self.tasklet_info, + RwVnsYang.get_schema(), + self.loop, + self.on_dts_state_change) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + def on_instance_started(self): + """ The task instance started callback""" + self.log.debug("Got instance started callback") + + def stop(self): + try: + self._dts.deinit() + except Exception: + print("Caught Exception in VNS stop:", sys.exc_info()[0]) + raise + + @asyncio.coroutine + def init(self): + """ task init callback""" + self._vnsm = VnsManager(dts=self._dts, + log=self.log, + log_hdl=self.log_hdl, + loop=self.loop) + yield from self._vnsm.run() + + # NSM needs to detect VLD deletion that has active VLR + # self._vld_handler = VldDescriptorConfigDtsHandler( + # self._dts, self.log, self.loop, self._vlrs, + # ) + # yield from self._vld_handler.register() + + @asyncio.coroutine + def run(self): + """ tasklet run callback """ + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py new file mode 100644 index 0000000..6ab8066 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py @@ -0,0 +1,38 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#
+# Author(s): Ravi Chamarty
+# Creation Date: 10/28/2015
+#
+
+from .rwtopmgr import (
+    NwtopDiscoveryDtsHandler,
+    NwtopStaticDtsHandler,
+    SdnAccountMgr,
+)
+
+from .rwtopdatastore import (
+    NwtopDataStore,
+)
+
+try:
+    from .sdnsim import SdnSim
+    from .core import Topology
+    from .mock import Mock
+
+except ImportError as e:
+    print("Error: Unable to load sdn implementation: %s" % str(e))
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
new file mode 100644
index 0000000..597bb4a
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
@@ -0,0 +1,50 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import functools
+
+
+def unsupported(f):
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        # No exceptions module ships with this package, so unsupported
+        # operations are signalled with the builtin NotImplementedError.
+        msg = '{} not supported'.format(f.__name__)
+        raise NotImplementedError(msg)
+
+    return impl
+
+
+class Topology(object):
+    """
+    Topology defines a base class for sdn driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Returns the discovered network associated with the specified account.
+
+        @param account - a SDN account
+
+        @return a discovered network
+        """
+        pass
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
new file mode 100644
index 0000000..395b866
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
@@ -0,0 +1,51 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import mock
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+from . import core
+
+import logging
+
+logger = logging.getLogger('rwsdn.mock')
+
+class Mock(core.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        super(Mock, self).__init__()
+
+        # The MagicMock stands in for a real SDN backend; the default
+        # topology is only recorded as a call on the mock, not built.
+        self._mock = mock.MagicMock()
+        self._mock.create_default_topology()
+
+    def get_network_list(self, account):
+        """
+        Returns the discovered network
+
+        @param account - a SDN account
+
+        """
+        logger.debug("Not yet implemented")
+        return None
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
new file mode 100644
index 0000000..b952108
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
@@ -0,0 +1,187 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import gi
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    IetfNetworkYang,
+    IetfNetworkTopologyYang,
+    IetfL2TopologyYang,
+    RwTopologyYang,
+    RwTypes
+)
+import logging
+from gi.repository.RwTypes import RwStatus
+
+
+class NwtopDataStore(object):
+    """ Common datastore for discovered and static topologies """
+    def __init__(self, log):
+        self._networks = {}
+        self._log = log
+
+    """ Deep copy utility for topology class """
+    def rwtop_copy_object(self, obj):
+        dup = obj.__class__()
+        dup.copy_from(obj)
+        return dup
+
+    """ Utility for updating L2 topology attributes """
+    def _update_l2_attr(self, current_elem, new_elem, new_l2_attr, attr_field_name):
+        if not getattr(current_elem, attr_field_name):
+            self._log.debug("Creating L2 attributes..%s", attr_field_name)
+            setattr(current_elem, attr_field_name, new_l2_attr)
+            return
+
+        for l2_attr_field in new_l2_attr.fields:
+            l2_elem_attr_value = getattr(new_l2_attr, l2_attr_field)
+            if l2_elem_attr_value:
+                self._log.debug("Updating L2 attributes..%s", l2_attr_field)
+                setattr(getattr(current_elem, attr_field_name), l2_attr_field, getattr(new_l2_attr, l2_attr_field))
+
+    """ Utility for updating termination point attributes """
+    def _update_termination_point(self, current_node, new_node, new_tp):
+        current_tp = next((x for x in current_node.termination_point if x.tp_id == new_tp.tp_id), None)
+        if current_tp is None:
+            self._log.debug("Creating termination point..%s", new_tp)
+            # Add tp to current node
+            new_tp_dup = self.rwtop_copy_object(new_tp)
+            current_node.termination_point.append(new_tp_dup)
+            return
+        # Update current tp
+        for tp_field in new_tp.fields:
+            tp_field_value = getattr(new_tp, tp_field)
+            if tp_field_value:
+                self._log.debug("Updating termination point..%s", tp_field)
+                if (tp_field == 'tp_id'):
+                    # Don't change key
+                    pass
+                elif (tp_field == 'l2_termination_point_attributes'):
+                    self._update_l2_attr(current_tp, new_tp, tp_field_value, tp_field)
+                elif (tp_field == 'supporting_termination_point'):
+                    self._log.debug(tp_field)
+                else:
+                    self._log.info("Updating termination point..Not 
implemented %s", tp_field) + #raise NotImplementedError + + """ Utility for updating link attributes """ + def _update_link(self, current_nw, new_nw, new_link): + current_link = next((x for x in current_nw.link if x.link_id == new_link.link_id), None) + if current_link is None: + # Add link to current nw + self._log.info("Creating link..%s", new_link ) + new_link_dup = self.rwtop_copy_object(new_link) + current_nw.link.append(new_link_dup) + return + # Update current link + for link_field in new_link.fields: + link_field_value = getattr(new_link, link_field) + if link_field_value: + self._log.info("Updating link..%s", link_field) + if (link_field == 'link_id'): + # Don't change key + pass + elif (link_field == 'source'): + if getattr(link_field_value, 'source_node') is not None: + current_link.source.source_node = getattr(link_field_value, 'source_node') + if getattr(link_field_value, 'source_tp') is not None: + current_link.source.source_tp = getattr(link_field_value, 'source_tp') + elif (link_field == 'destination'): + if getattr(link_field_value, 'dest_node') is not None: + current_link.destination.dest_node = link_field_value.dest_node + if getattr(link_field_value, 'dest_tp') is not None: + current_link.destination.dest_tp = link_field_value.dest_tp + elif (link_field == 'l2_link_attributes'): + self._update_l2_attr(current_link, new_link, link_field_value, link_field) + elif (link_field == 'supporting_link'): + self._log.debug(link_field) + else: + self._log.info("Update link..Not implemented %s", link_field) + #raise NotImplementedError + + + """ Utility for updating node attributes """ + def _update_node(self, current_nw, new_nw, new_node): + current_node = next((x for x in current_nw.node if x.node_id == new_node.node_id), None) + if current_node is None: + # Add node to current nw + self._log.debug("Creating node..%s", new_node) + new_node_dup = self.rwtop_copy_object(new_node) + current_nw.node.append(new_node_dup) + return + # Update current node + for node_field in new_node.fields: + node_field_value = getattr(new_node, node_field) + if node_field_value: + self._log.debug("Updating node..%s", node_field) + if (node_field == 'node_id'): + # Don't change key + pass + elif (node_field == 'l2_node_attributes'): + self._update_l2_attr(current_node, new_node, node_field_value, node_field) + elif (node_field == 'termination_point'): + for tp in new_node.termination_point: + self._update_termination_point(current_node, new_node, tp) + elif (node_field == 'supporting-node'): + self._log.debug(node_field) + else: + self._log.info("Update node..Not implemented %s", node_field) + #raise NotImplementedError + + + """ API for retrieving internal network """ + def get_network(self, network_id): + if (network_id not in self._networks): + return None + return self._networks[network_id] + + """ API for creating internal network """ + def create_network(self, key, nw): + self._networks[key] = self.rwtop_copy_object(nw) + + """ API for updating internal network """ + def update_network(self, key, new_nw): + if key not in self._networks: + self._log.debug("Creating network..New_nw %s", new_nw) + self._networks[key] = self.rwtop_copy_object(new_nw) + return + # Iterating thru changed fields + for nw_field in new_nw.fields: + nw_field_value = getattr(new_nw, nw_field) + self._log.debug("Update nw..nw_field %s", nw_field) + if nw_field_value: + if (nw_field == 'node'): + for node in new_nw.node: + self._update_node(self._networks[key], new_nw, node) + elif (nw_field == 'network_id'): + # Don't change 
key + pass + elif (nw_field == 'link'): + for link in new_nw.link: + self._update_link(self._networks[key], new_nw, link) + elif (nw_field == 'network_types'): + self._networks[key].network_types.l2_network = self._networks[key].network_types.l2_network.new() + elif (nw_field == 'l2_network_attributes'): + self._update_l2_attr(self._networks[key], new_nw, nw_field_value, nw_field) + else: + self._log.info("Update nw..Not implemented %s", nw_field) + #raise NotImplementedError + + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py new file mode 100755 index 0000000..bf78174 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py @@ -0,0 +1,253 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio + +import gi +gi.require_version('RwDts', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + RwDts as rwdts, + IetfNetworkYang, + IetfNetworkTopologyYang, + IetfL2TopologyYang, + RwTopologyYang, + RwsdnYang, + RwTypes +) + +from gi.repository.RwTypes import RwStatus +import rw_peas +import rift.tasklets + +class SdnGetPluginError(Exception): + """ Error while fetching SDN plugin """ + pass + + +class SdnGetInterfaceError(Exception): + """ Error while fetching SDN interface""" + pass + + +class SdnAccountMgr(object): + """ Implements the interface to backend plugins to fetch topology """ + def __init__(self, log, log_hdl, loop): + self._account = {} + self._log = log + self._log_hdl = log_hdl + self._loop = loop + self._sdn = {} + + self._regh = None + + def set_sdn_account(self,account): + if (account.name in self._account): + self._log.error("SDN Account is already set") + else: + sdn_account = RwsdnYang.SDNAccount() + sdn_account.from_dict(account.as_dict()) + sdn_account.name = account.name + self._account[account.name] = sdn_account + self._log.debug("Account set is %s , %s",type(self._account), self._account) + + def del_sdn_account(self, name): + self._log.debug("Account deleted is %s , %s", type(self._account), name) + del self._account[name] + + def get_sdn_account(self, name): + """ + Creates an object for class RwsdnYang.SdnAccount() + """ + if (name in self._account): + return self._account[name] + else: + self._log.error("ERROR : SDN account is not configured") + + + def get_sdn_plugin(self,name): + """ + Loads rw.sdn plugin via libpeas + """ + if (name in self._sdn): + return self._sdn[name] + account = self.get_sdn_account(name) + plugin_name = getattr(account, account.account_type).plugin_name + self._log.info("SDN plugin being created") + plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0') + engine, info, extension = plugin() + + self._sdn[name] = plugin.get_interface("Topology") + try: + rc = self._sdn[name].init(self._log_hdl) + assert rc == RwStatus.SUCCESS + except: + 
self._log.error("ERROR:SDN plugin instantiation failed ") + else: + self._log.info("SDN plugin successfully instantiated") + return self._sdn[name] + + +class NwtopDiscoveryDtsHandler(object): + """ Handles DTS interactions for the Discovered Topology registration """ + DISC_XPATH = "D,/nd:network" + + def __init__(self, dts, log, loop, acctmgr, nwdatastore): + self._dts = dts + self._log = log + self._loop = loop + self._acctmgr = acctmgr + self._nwdatastore = nwdatastore + + self._regh = None + + @property + def regh(self): + """ The registration handle associated with this Handler""" + return self._regh + + @asyncio.coroutine + def register(self): + """ Register for the Discovered Topology path """ + + @asyncio.coroutine + def on_ready(regh, status): + """ On_ready for Discovered Topology registration """ + self._log.debug("PUB reg ready for Discovered Topology handler regn_hdl(%s) status %s", + regh, status) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + """ prepare for Discovered Topology registration""" + self._log.debug( + "Got topology on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, msg + ) + + if action == rwdts.QueryAction.READ: + + for name in self._acctmgr._account: + _sdnacct = self._acctmgr.get_sdn_account(name) + if (_sdnacct is None): + raise SdnGetPluginError + + _sdnplugin = self._acctmgr.get_sdn_plugin(name) + if (_sdnplugin is None): + raise SdnGetInterfaceError + + rc, nwtop = _sdnplugin.get_network_list(_sdnacct) + #assert rc == RwStatus.SUCCESS + if rc != RwStatus.SUCCESS: + self._log.error("Fetching get network list for SDN Account %s failed", name) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + self._log.debug("Topology: Retrieved network attributes ") + for nw in nwtop.network: + # Add SDN account name + nw.rw_network_attributes.sdn_account_name = name + nw.network_id = name + ':' + nw.network_id + self._log.debug("...Network id %s", nw.network_id) + nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id) + xact_info.respond_xpath(rwdts.XactRspCode.MORE, + nw_xpath, nw) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + return + else: + err = "%s action on discovered Topology not supported" % action + raise NotImplementedError(err) + + self._log.debug("Registering for discovered topology using xpath %s", NwtopDiscoveryDtsHandler.DISC_XPATH) + + handler = rift.tasklets.DTS.RegistrationHandler( + on_ready=on_ready, + on_prepare=on_prepare, + ) + + yield from self._dts.register( + NwtopDiscoveryDtsHandler.DISC_XPATH, + flags=rwdts.Flag.PUBLISHER, + handler=handler + ) + + +class NwtopStaticDtsHandler(object): + """ Handles DTS interactions for the Static Topology registration """ + STATIC_XPATH = "C,/nd:network" + + def __init__(self, dts, log, loop, acctmgr, nwdatastore): + self._dts = dts + self._log = log + self._loop = loop + self._acctmgr = acctmgr + + self._regh = None + self.pending = {} + self._nwdatastore = nwdatastore + + @property + def regh(self): + """ The registration handle associated with this Handler""" + return self._regh + + + @asyncio.coroutine + def register(self): + """ Register for the Static Topology path """ + + @asyncio.coroutine + def prepare_nw_cfg(dts, acg, xact, xact_info, ksp, msg): + """Prepare for application configuration. 
Stash the pending + configuration object for subsequent transaction phases""" + self._log.debug("Prepare Network config received network id %s, msg %s", + msg.network_id, msg) + self.pending[xact.id] = msg + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + def apply_nw_config(dts, acg, xact, action, scratch): + """Apply the pending configuration object""" + if action == rwdts.AppconfAction.INSTALL and xact.id is None: + self._log.debug("No xact handle. Skipping apply config") + return + + if xact.id not in self.pending: + raise KeyError("No stashed configuration found with transaction id [{}]".format(xact.id)) + + try: + if action == rwdts.AppconfAction.INSTALL: + self._nwdatastore.create_network(self.pending[xact.id].network_id, self.pending[xact.id]) + elif action == rwdts.AppconfAction.RECONCILE: + self._nwdatastore.update_network(self.pending[xact.id].network_id, self.pending[xact.id]) + except: + raise + + self._log.debug("Create network config done") + return RwTypes.RwStatus.SUCCESS + + self._log.debug("Registering for static topology using xpath %s", NwtopStaticDtsHandler.STATIC_XPATH) + handler=rift.tasklets.AppConfGroup.Handler( + on_apply=apply_nw_config) + + with self._dts.appconf_group_create(handler=handler) as acg: + acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH, + flags = rwdts.Flag.SUBSCRIBER, + on_prepare=prepare_nw_cfg) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py new file mode 100644 index 0000000..03e9c2f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py @@ -0,0 +1,62 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +from . 
import core +import logging + +import xml.etree.ElementTree as etree +import json +from gi.repository import RwTopologyYang as RwTl + +import gi +gi.require_version('RwYang', '1.0') +from gi.repository import RwYang + + +logger = logging.getLogger(__name__) + + +class SdnSim(core.Topology): + def __init__(self): + super(SdnSim, self).__init__() + + def get_network_list(self, account): + """ + Returns the discovered network + + @param account - a SDN account + + """ + topology_source = "/net/boson/home1/rchamart/work/topology/l2_top.xml" + logger.info("Reading topology file: %s", topology_source) + tree = etree.parse(topology_source) + root = tree.getroot() + xmlstr = etree.tostring(root, encoding="unicode") + + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + nwtop = RwTl.YangData_IetfNetwork() + # The top level topology object does not have XML conversion + # Hence going one level down + l2nw1 = nwtop.network.add() + l2nw1.from_xml_v2(model, xmlstr) + + logger.debug("Returning topology data imported from XML file") + + return nwtop \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py new file mode 100644 index 0000000..e40a495 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py @@ -0,0 +1,27 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# Author(s): Ravi Chamarty +# Creation Date: 9/2/2015 +# + +from .rwvlmgr import ( + VirtualLinkRecordState, + VirtualLinkRecord, + VlrDtsHandler, + VldDtsHandler, +) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py new file mode 100755 index 0000000..9b8c72d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py @@ -0,0 +1,468 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
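+
+# State machine implemented by VirtualLinkRecord in this module; the
+# operational_status property maps these states onto the published record:
+#
+#   INIT -> INSTANTIATING -> RESOURCE_ALLOC_PENDING -> READY -> TERMINATING -> TERMINATED
+#   (a failure during resource allocation moves the record to FAILED instead)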
+ + +# +# + +import asyncio +import enum +import uuid +import time + +import gi +gi.require_version('RwVlrYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +from gi.repository import ( + RwVlrYang, + VldYang, + RwDts as rwdts, + RwResourceMgrYang, +) +import rift.tasklets + + +class NetworkResourceError(Exception): + """ Network Resource Error """ + pass + + +class VlrRecordExistsError(Exception): + """ VLR record already exists""" + pass + + +class VlRecordError(Exception): + """ VLR record error """ + pass + + +class VirtualLinkRecordState(enum.Enum): + """ Virtual Link record state """ + INIT = 1 + INSTANTIATING = 2 + RESOURCE_ALLOC_PENDING = 3 + READY = 4 + TERMINATING = 5 + TERMINATED = 6 + FAILED = 10 + + +class VirtualLinkRecord(object): + """ + Virtual Link Record object + """ + def __init__(self, dts, log, loop, vnsm, vlr_msg, req_id=None): + self._dts = dts + self._log = log + self._loop = loop + self._vnsm = vnsm + self._vlr_msg = vlr_msg + + self._network_id = None + self._network_pool = None + self._create_time = int(time.time()) + if req_id == None: + self._request_id = str(uuid.uuid4()) + else: + self._request_id = req_id + + self._state = VirtualLinkRecordState.INIT + + @property + def vld_xpath(self): + """ VLD xpath associated with this VLR record """ + return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id) + + @property + def vld_id(self): + """ VLD id associated with this VLR record """ + return self._vlr_msg.vld_ref + + @property + def vlr_id(self): + """ VLR id associated with this VLR record """ + return self._vlr_msg.id + + @property + def xpath(self): + """ path for this VLR """ + return("D,/vlr:vlr-catalog" + "/vlr:vlr[vlr:id='{}']".format(self.vlr_id)) + + @property + def name(self): + """ Name of this VLR """ + return self._vlr_msg.name + + @property + def cloud_account_name(self): + """ Cloud Account to instantiate the virtual link on """ + return self._vlr_msg.cloud_account + + @property + def resmgr_path(self): + """ path for resource-mgr""" + return ("D,/rw-resource-mgr:resource-mgmt" + + "/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id)) + + @property + def operational_status(self): + """ Operational status of this VLR""" + op_stats_dict = {"INIT": "init", + "INSTANTIATING": "vl_alloc_pending", + "RESOURCE_ALLOC_PENDING": "vl_alloc_pending", + "READY": "running", + "FAILED": "failed", + "TERMINATING": "vl_terminate_pending", + "TERMINATED": "terminated"} + + return op_stats_dict[self._state.name] + + @property + def msg(self): + """ VLR message for this VLR """ + msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr() + msg.copy_from(self._vlr_msg) + + if self._network_id is not None: + msg.network_id = self._network_id + + if self._network_pool is not None: + msg.network_pool = self._network_pool + + msg.operational_status = self.operational_status + msg.res_id = self._request_id + + return msg + + @property + def resmgr_msg(self): + """ VLR message for this VLR """ + msg = RwResourceMgrYang.VirtualLinkEventData() + msg.event_id = self._request_id + msg.cloud_account = self.cloud_account_name + msg.request_info.name = self.name + msg.request_info.provider_network.from_dict( + self._vlr_msg.provider_network.as_dict() + ) + return msg + + @asyncio.coroutine + def create_network(self, xact): + """ Create network for this VL """ + self._log.debug("Creating network req-id: %s", self._request_id) + return (yield from self.request_network(xact, "create")) + + @asyncio.coroutine + def 
delete_network(self, xact): + """ Delete network for this VL """ + self._log.debug("Deleting network - req-id: %s", self._request_id) + return (yield from self.request_network(xact, "delete")) + + @asyncio.coroutine + def read_network(self, xact): + """ Read network for this VL """ + self._log.debug("Reading network - req-id: %s", self._request_id) + return (yield from self.request_network(xact, "read")) + + @asyncio.coroutine + def request_network(self, xact, action): + """Request creation/deletion network for this VL """ + + block = xact.block_create() + + if action == "create": + self._log.debug("Creating network path:%s, msg:%s", + self.resmgr_path, self.resmgr_msg) + block.add_query_create(self.resmgr_path, self.resmgr_msg) + elif action == "delete": + self._log.debug("Deleting network path:%s", self.resmgr_path) + if self.resmgr_msg.request_info.name != "multisite": + block.add_query_delete(self.resmgr_path) + elif action == "read": + self._log.debug("Reading network path:%s", self.resmgr_path) + block.add_query_read(self.resmgr_path) + else: + raise VlRecordError("Invalid action %s received" % action) + + res_iter = yield from block.execute(flags=rwdts.Flag.TRACE, now=True) + + resp = None + + if action == "create" or action == "read": + for i in res_iter: + r = yield from i + resp = r.result + + if resp is None or not (resp.has_field('resource_info') and + resp.resource_info.has_field('virtual_link_id')): + raise NetworkResourceError("Did not get a network resource response (resp: %s)", + resp) + + self._log.debug("Got network request response: %s", resp) + + return resp + + @asyncio.coroutine + def instantiate(self, xact, restart=0): + """ Instantiate this VL """ + self._state = VirtualLinkRecordState.INSTANTIATING + + self._log.debug("Instantiating VLR path = [%s]", self.xpath) + + try: + self._state = VirtualLinkRecordState.RESOURCE_ALLOC_PENDING + + if restart == 0: + network_resp = yield from self.create_network(xact) + else: + network_resp = yield from self.read_network(xact) + if network_resp == None: + network_resp = yield from self.create_network(xact) + + # Note network_resp.virtual_link_id is CAL assigned network_id. 
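+            # The same response also carries resource_info.pool_name; both the
+            # CAL network id and the pool are recorded on the record below.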
+
+            self._network_id = network_resp.resource_info.virtual_link_id
+            self._network_pool = network_resp.resource_info.pool_name
+
+            self._state = VirtualLinkRecordState.READY
+
+            yield from self.publish(xact)
+
+        except Exception as e:
+            self._log.error("Instantiation of VLR record failed: %s", str(e))
+            self._state = VirtualLinkRecordState.FAILED
+            yield from self.publish(xact)
+
+    @asyncio.coroutine
+    def publish(self, xact):
+        """ Publish this VLR """
+        vlr = self.msg
+        self._log.debug("Publishing VLR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+        vlr.create_time = self._create_time
+        yield from self._vnsm.publish_vlr(xact, self.xpath, self.msg)
+        self._log.debug("Published VLR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """ Terminate this VL """
+        if self._state not in [VirtualLinkRecordState.READY, VirtualLinkRecordState.FAILED]:
+            self._log.error("Ignoring terminate request for VL %s in state %s",
+                            self.vlr_id, self._state)
+            return
+
+        if self._state == VirtualLinkRecordState.READY:
+            self._log.debug("Terminating VL with id %s", self.vlr_id)
+            self._state = VirtualLinkRecordState.TERMINATING
+            try:
+                yield from self.delete_network(xact)
+            except Exception:
+                self._log.exception("Caught exception while deleting VL %s", self.vlr_id)
+            self._log.debug("Terminated VL with id %s", self.vlr_id)
+
+        yield from self.unpublish(xact)
+        self._state = VirtualLinkRecordState.TERMINATED
+
+    @asyncio.coroutine
+    def unpublish(self, xact):
+        """ Unpublish this VLR """
+        self._log.debug("UnPublishing VLR id %s", self.vlr_id)
+        yield from self._vnsm.unpublish_vlr(xact, self.xpath)
+        self._log.debug("UnPublished VLR id %s", self.vlr_id)
+
+
+class VlrDtsHandler(object):
+    """ Handles DTS interactions for the VLR registration """
+    XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
+
+    def __init__(self, dts, log, loop, vnsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnsm = vnsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ The registration handle associated with this handler """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for the VLR path """
+        def on_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Got vlr commit (xact_info: %s)", xact_info)
+
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_event(dts, g_reg, xact, xact_event, scratch_data):
+            @asyncio.coroutine
+            def instantiate_realloc_vlr(vlr):
+                """Re-populate the virtual link information after restart
+
+                Arguments:
+                    vlr - the virtual link record to re-instantiate
+
+                """
+
+                with self._dts.transaction(flags=0) as xact:
+                    yield from vlr.instantiate(xact, 1)
+
+            if (xact_event == rwdts.MemberEvent.INSTALL):
+                curr_cfg = self.regh.elements
+                for cfg in curr_cfg:
+                    vlr = self._vnsm.create_vlr(cfg)
+                    self._loop.create_task(instantiate_realloc_vlr(vlr))
+
+            self._log.debug("Got on_event")
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ Prepare callback for the VLR registration """
+            self._log.debug(
+                "Got vlr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+
+            if action == rwdts.QueryAction.CREATE:
+                vlr = self._vnsm.create_vlr(msg)
+                with self._dts.transaction(flags=0) as xact:
+                    yield from vlr.instantiate(xact)
+                self._log.debug("Responding to VL create request path:%s, msg:%s",
+                                vlr.xpath, vlr.msg)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath=vlr.xpath, msg=vlr.msg)
+                return
+            elif action == rwdts.QueryAction.DELETE:
+                # Delete a VLR record
+                schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                self._log.debug("Terminating VLR id %s", path_entry.key00.id)
+                yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact)
+            else:
+                err = "%s action on VirtualLinkRecord not supported" % action
+                raise NotImplementedError(err)
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+            return
+
+        self._log.debug("Registering for VLR using xpath: %s",
+                        VlrDtsHandler.XPATH)
+
+        reg_handle = rift.tasklets.DTS.RegistrationHandler(
+            on_commit=on_commit,
+            on_prepare=on_prepare,
+            )
+        handlers = rift.tasklets.Group.Handler(on_event=on_event,)
+        with self._dts.group_create(handler=handlers) as group:
+            self._regh = group.register(
+                xpath=VlrDtsHandler.XPATH,
+                handler=reg_handle,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.FILE_DATASTORE,
+                )
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VLR record in DTS with the given path and message
+        """
+        self._log.debug("Creating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VLR record in DTS with the given path and message
+        """
+        self._log.debug("Updating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VLR record in DTS at the given path
+        """
+        self._log.debug("Deleting VLR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VLR xact = %s, %s", xact, path)
+
+
+class VldDtsHandler(object):
+    """ DTS handler for the VLD registration """
+    XPATH = "C,/vld:vld-catalog/vld:vld"
+
+    def __init__(self, dts, log, loop, vnsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnsm = vnsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ The registration handle associated with this handler """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register the VLD path """
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ Prepare callback on the VLD path """
+            self._log.debug(
+                "Got on prepare for VLD update (ks_path: %s) (msg: %s)",
+                ks_path.to_xpath(VldYang.get_schema()), msg)
+
+            schema = VldYang.YangData_Vld_VldCatalog_Vld.schema()
+            path_entry = schema.keyspec_to_entry(ks_path)
+            vld_id = path_entry.key00.id
+
+            disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE]
+            if query_action not in disabled_actions:
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+                return
+
+            vlr = self._vnsm.find_vlr_by_vld_id(vld_id)
+            if vlr is None:
+                self._log.debug(
+                    "Did not find an existing VLR record for vld %s. "
+                    "Permitting %s vld action", vld_id, query_action)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+                return
+
+            raise VlrRecordExistsError(
+                "VLR record(s) exist. "
+                "Cannot perform %s action on VLD."
% query_action) + + handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare) + + yield from self._dts.register( + VldDtsHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + handler=handler + ) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py new file mode 100755 index 0000000..be6058b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/rwvnstasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. + +import rift.tasklets.rwvnstasklet + +class Tasklet(rift.tasklets.rwvnstasklet.VnsTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py new file mode 100644 index 0000000..b79d310 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py @@ -0,0 +1,333 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
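+
+# This script builds the provider (OVS bridge) layer of the stacked
+# topology on top of the L2 host network from create_stackedl2topology.py.
+# A minimal sketch of the intended flow (mirroring __main__ below, and
+# assuming the RwTopologyYang bindings imported here are available):
+#
+#   nwtop = RwTl.YangData_IetfNetwork()            # empty ietf-network tree
+#   l2top = MyL2Topology(nwtop, logger)            # underlay host topology
+#   l2top.setup_all()
+#   provtop = MyProvTopology(nwtop, l2top, logger) # this module's overlay
+#   provtop.setup_all()                            # nodes, tps, then links
+#   xml_str = nwtop.to_xml_v2(model)               # serialize for inspection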
+
+
+#
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+
+class MyNwNotFound(Exception):
+    pass
+
+class MyNodeNotFound(Exception):
+    pass
+
+class MyTpNotFound(Exception):
+    pass
+
+class MyProvNetwork(object):
+    def __init__(self, nwtop, l2top, log):
+        self.next_mac = 11
+        self.log = log
+        self.provnet1 = nwtop.network.add()
+        self.provnet1.network_id = "ProviderNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+
+        # L2 Network type augmentation
+        self.provnet1.network_types.l2_network = self.provnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.provnet1.l2_network_attributes.name = "Rift LAB SFC-Demo Provider Network"
+        ul_net = self.provnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1")
+            self.l2netid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.provnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = node.node_id + ":" + tp_name
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.provnet1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None):
+        logging.debug("Creating node %s", node_name)
+        node = self.provnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node is not None):
+            logging.debug("   Adding support node %s", sup_node.node_id)
+            ul_node = node.supporting_node.add()
+            ul_node.network_ref = self.l2netid
+            ul_node.node_ref = sup_node.node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, vlan = False):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:4f:9c:ab:dd:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        if vlan:
+            tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vlan"
+        else:
+            tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("   Adding support termination point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1 = self.provnet1.link.add()
+
lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description) + lnk1.source.source_node = node1.node_id + lnk1.source.source_tp = tp1.tp_id + lnk1.destination.dest_node = node2.node_id + lnk1.destination.dest_tp = tp2.tp_id + # L2 link augmentation + lnk1.l2_link_attributes.name = link_name1 + #lnk1.l2_link_attributes.rate = 1000000000.00 + + lnk2= self.provnet1.link.add() + lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description) + lnk2.source.source_node = node2.node_id + lnk2.source.source_tp = tp2.tp_id + lnk2.destination.dest_node = node1.node_id + lnk2.destination.dest_tp = tp1.tp_id + # L2 link augmentation + lnk2.l2_link_attributes.name = link_name2 + #lnk2.l2_link_attributes.rate = 1000000000.00 + return lnk1, lnk2 + +class MyProvTopology(MyProvNetwork): + def __init__(self, nwtop, l2top, log): + super(MyProvTopology, self).__init__(nwtop, l2top, log) + + def find_nw_id(self, nw_name): + return self.get_nw_id(nw_name) + + def find_node(self, node_name): + return self.get_node(node_name) + + def find_tp(self, node, tp_name): + return self.get_tp(node, tp_name) + + def find_link(self, link_name): + return self.get_link(link_name) + + def setup_nodes(self): + logging.debug("Setting up nodes") + self.pseudo_mgmt_node = self.create_node("Pseudo_mgmt_node", "Pseudo node for VM mgmt network LAN") + self.pseudo_dp_node = self.create_node("Pseudo_DP_node", "Pseudo node for DP network LAN") + + self.g118_node = self.l2top.find_node("Grunt118") + if (self.g118_node is None): + raise MyNodeNotFound() + self.g44_node = self.l2top.find_node("Grunt44") + if (self.g44_node is None): + raise MyNodeNotFound() + self.g120_node = self.l2top.find_node("Grunt120") + if (self.g120_node is None): + raise MyNodeNotFound() + + self.g118_br_int = self.create_node("G118_Br_Int","OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node = self.g118_node) + self.g118_br_eth1 = self.create_node("G118_Br_Eth1","OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node = self.g118_node) + # eth2 on g118 is being used in PCI passthrough mode + + self.g44_br_int = self.create_node("G44_Br_Int","OVS Integration bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + self.g44_br_eth1 = self.create_node("G44_Br_Eth1","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + self.g44_br_eth2 = self.create_node("G44_Br_Eth2","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + self.g44_br_eth3 = self.create_node("G44_Br_Eth3","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node) + + self.g120_br_int = self.create_node("G120_Br_Int","OVS Integration bridge on Grunt120", mgmt_ip_addr = "10.66.4.120", sup_node = self.g120_node) + self.g120_br_eth1 = self.create_node("G120_Br_Eth1","OVS Integration bridge on Grunt120", mgmt_ip_addr = "10.66.4.120", sup_node = self.g120_node) + # eth2 on g120 is being used in PCI passthrough mode + + def setup_tps(self): + logging.debug("Setting up termination points") + self.g118_e1 = self.l2top.find_tp(self.g118_node, "eth1") + if (self.g118_e1 is None): + raise MyTpNotFound() + self.g44_e1 = self.l2top.find_tp(self.g44_node, 
"eth1") + if (self.g44_e1 is None): + raise MyTpNotFound() + self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2") + if (self.g44_e2 is None): + raise MyTpNotFound() + self.g44_e3 = self.l2top.find_tp(self.g44_node, "eth3") + if (self.g44_e3 is None): + raise MyTpNotFound() + self.g120_e1 = self.l2top.find_tp(self.g120_node, "eth1") + if (self.g44_e3 is None): + raise MyTpNotFound() + + self.g118_br_int_eth1 = self.create_tp(self.g118_br_int, "int-br-eth1") + self.g118_br_int_tap1 = self.create_tp(self.g118_br_int, "tap1") + + self.g118_br_eth1_phyeth1 = self.create_tp(self.g118_br_eth1, "phyeth1") + self.g118_br_eth1_eth1 = self.create_tp(self.g118_br_eth1, "eth1", sup_node=self.g118_node, sup_tp=self.g118_e1, vlan=True) + + self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth1") + self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu1") + self.g44_br_int_eth2 = self.create_tp(self.g44_br_int, "int-br-eth2") + self.g44_br_int_vhu2 = self.create_tp(self.g44_br_int, "vhu2") + self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth3") + self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu3") + + self.g44_br_eth1_phyeth1 = self.create_tp(self.g44_br_eth1, "phyeth1") + self.g44_br_eth1_dpdk0 = self.create_tp(self.g44_br_eth1, "dpdk0", sup_node=self.g44_node, sup_tp=self.g44_e1, vlan=True) + + self.g44_br_eth2_phyeth1 = self.create_tp(self.g44_br_eth2, "phyeth2") + self.g44_br_eth2_dpdk1 = self.create_tp(self.g44_br_eth2, "dpdk1", sup_node=self.g44_node, sup_tp=self.g44_e2) + + self.g44_br_eth3_phyeth1 = self.create_tp(self.g44_br_eth3, "phyeth3") + self.g44_br_eth3_dpdk2 = self.create_tp(self.g44_br_eth3, "dpdk2", sup_node=self.g44_node, sup_tp=self.g44_e3) + + self.g120_br_int_eth1 = self.create_tp(self.g120_br_int, "int-br-eth1") + self.g120_br_int_tap1 = self.create_tp(self.g120_br_int, "tap1") + + self.g120_br_eth1_phyeth1 = self.create_tp(self.g120_br_eth1, "phyeth1") + self.g120_br_eth1_eth1 = self.create_tp(self.g120_br_eth1, "eth1", sup_node=self.g120_node, sup_tp=self.g120_e1, vlan=True) + + self.pmn_eth1 = self.create_tp(self.pseudo_mgmt_node, "eth1") + self.pmn_eth2 = self.create_tp(self.pseudo_mgmt_node, "eth2") + self.pmn_eth3 = self.create_tp(self.pseudo_mgmt_node, "eth3") + + def setup_links(self): + # Add links to provnet1 network + # These links are unidirectional and point-to-point + logging.debug("Setting up links") + # Bidir Links for OVS bridges + self.create_bidir_link(self.g118_br_eth1, self.g118_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth1, "Link_g118_be1_pmn_e1", "Link_pmn_e1_g118_be1") + self.create_bidir_link(self.g44_br_eth1, self.g44_br_eth1_dpdk0, self.pseudo_mgmt_node, self.pmn_eth2, "Link_g44_be1_pmn_d0", "Link_pmn_e2_g44_d0") + self.create_bidir_link(self.g120_br_eth1, self.g120_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth3, "Link_g120_be1_pmn_e3", "Link_pmn_e3_g120_be1") + # Data path links cannot be represented here since PCI pass through is beingused on G118 and G44 + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + self.setup_links() + +def adjust_xml_file(infile, outfile, begin_marker, end_marker): + buffer = [] + in_block = False + max_interesting_line_toread = 1 + interesting_line = 0 + with open(infile) as inf: + with open(outfile, 'w') as outf: + for line in inf: + if begin_marker in line: + in_block = True + # Go down + if end_marker in line: + assert in_block is True + print("End of gathering line...", line) + buffer.append(line) # gather lines + interesting_line = 
max_interesting_line_toread + in_block = False + continue + if interesting_line: + print("Interesting line printing ...", line) + outf.write(line) + interesting_line -= 1 + if interesting_line == 0: # output gathered lines + for lbuf in buffer: + outf.write(lbuf) + buffer = [] # empty buffer + print("\n\n") + continue + + if in_block: + print("Gathering line...", line) + buffer.append(line) # gather lines + else: + outf.write(line) + + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger('Provider Network Topology') + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logger.info('Creating an instance of Provider Network Topology') + + nwtop = RwTl.YangData_IetfNetwork() + + # Setup L2 topology + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + # Setup Provider network topology + provtop = MyProvTopology(nwtop, l2top, logger) + provtop.setup_all() + + print ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_provtop.xml" + xml_formatted_file = "/tmp/stacked_provtop2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + print ("Converting to JSON ") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_provtop.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_provtop.json > /tmp/stacked_provtop2.json", shell=True) + json_formatted_file = "/tmp/stacked_provtop2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py new file mode 100644 index 0000000..1a86847 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py @@ -0,0 +1,278 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
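+
+# This script adds the SFC (service function chaining) layer on top of the
+# L2, provider, and VM topologies. Each stacked layer records its underlay
+# through supporting_network references; a minimal sketch (mirroring
+# MySfcNetwork.__init__ below):
+#
+#   ul_net = sfcnet1.supporting_network.add()
+#   ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")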
+
+
+#
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvNetwork
+from create_stackedProvNettopology import MyProvTopology
+from create_stackedVMNettopology import MyVMNetwork
+from create_stackedVMNettopology import MyVMTopology
+
+
+class MyNwNotFound(Exception):
+    pass
+
+class MyNodeNotFound(Exception):
+    pass
+
+class MyTpNotFound(Exception):
+    pass
+
+class MySfcNetwork(object):
+    def __init__(self, nwtop, l2top, provtop, vmtop, log):
+        self.next_mac = 81
+        self.log = log
+        self.sfcnet1 = nwtop.network.add()
+        self.sfcnet1.network_id = "SfcNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+        self.provtop = provtop
+        self.vmtop = vmtop
+
+        # L2 Network type augmentation
+        self.sfcnet1.network_types.l2_network = self.sfcnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.sfcnet1.l2_network_attributes.name = "Rift LAB SFC-Demo SFC Network"
+        try:
+            self.l2netid = l2top.find_nw_id("L2HostNetwork-1")
+        except TypeError:
+            raise MyNwNotFound()
+        ul_net = self.sfcnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")
+            self.provnetid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+        ul_net = self.sfcnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = vmtop.find_nw_id("VmNetwork-1")
+            self.vmnetid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.sfcnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        # tp ids are built as "<node_id>:<tp_name>" in create_tp() below
+        _tp_id = node.node_id + ":" + tp_name
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.sfcnet1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None, nw_ref = None):
+        logging.debug("Creating node %s", node_name)
+        node = self.sfcnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node is not None):
+            logging.debug("   Adding support node %s", sup_node.node_id)
+            ul_node = node.supporting_node.add()
+            if (nw_ref is not None):
+                ul_node.network_ref = nw_ref
+            else:
+                ul_node.network_ref = self.l2netid
+            ul_node.node_ref = sup_node.node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        #tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:dd:" + str(self.next_mac)
+        #self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vxlan"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("   Adding support termination point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            if (nw_ref is not None):
+                ul_tp.network_ref = nw_ref
+            else:
+                ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_link(self, node1, tp1, node2, tp2, link_name1, link_name2 = None):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1 = self.sfcnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        lnk1.l2_link_attributes.rate = 1000000000.00
+
+        # Create the reverse link only if a second link name is provided
+        if (link_name2 is not None):
+            lnk2 = self.sfcnet1.link.add()
+            lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+            lnk2.source.source_node = node2.node_id
+            lnk2.source.source_tp = tp2.tp_id
+            lnk2.destination.dest_node = node1.node_id
+            lnk2.destination.dest_tp = tp1.tp_id
+            # L2 link augmentation
+            lnk2.l2_link_attributes.name = link_name2
+            lnk2.l2_link_attributes.rate = 1000000000.00
+
+
+class MySfcTopology(MySfcNetwork):
+    def __init__(self, nwtop, l2top, provtop, vmtop, log):
+        super(MySfcTopology, self).__init__(nwtop, l2top, provtop, vmtop, log)
+
+    def find_nw_id(self, nw_name):
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        logging.debug("Setting up nodes")
+
+        self.tg_node = self.vmtop.find_node("Trafgen_VM")
+        if (self.tg_node is None):
+            raise MyNodeNotFound()
+        self.lb_node = self.vmtop.find_node("LB_VM")
+        if (self.lb_node is None):
+            raise MyNodeNotFound()
+
+        self.g44_br_int_node = self.provtop.find_node("G44_Br_Int")
+        if (self.g44_br_int_node is None):
+            raise MyNodeNotFound()
+
+        self.sf1 = self.create_node("SF1", "SF on LB VM", sup_node = self.lb_node, nw_ref = self.vmnetid)
+        self.sfc1 = self.create_node("SFC1", "SF classifier on Trafgen VM", sup_node = self.tg_node, nw_ref = self.vmnetid)
+        self.sff1 = self.create_node("SFF1", "SF forwarder on Grunt44 OVS integration bridge", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_br_int_node, nw_ref = self.provnetid)
+
+    def setup_tps(self):
+        logging.debug("Setting up termination points")
+        # Find L2 hosts
+        #self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        #if (self.g44_e2 is None):
+        #    raise MyTpNotFound()
+
+        self.sfc1_vxlannsh1 = self.create_tp(self.sfc1, "vxlannsh1")
+        self.sf1_vxlannsh1 = self.create_tp(self.sf1, "vxlannsh1")
+        self.sff1_vxlannsh1 = self.create_tp(self.sff1, "vxlannsh1")
+
+
+    def setup_links(self):
+        # Add links to sfcnet1 network
+        # These links are unidirectional and point-to-point
+        logging.debug("Setting up links")
+        # Bidir Links for OVS bridges
+
self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sff1, self.sff1_vxlannsh1, "Link_sfc1_sff1") + self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sf1, self.sf1_vxlannsh1, "Link_sff1_sf1", "Link_sf1_sff1") + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + #self.setup_links() + + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger('SFC Network Topology') + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logger.info('Creating an instance of SFC Network Topology') + + nwtop = RwTl.YangData_IetfNetwork() + + # Setup L2 topology + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + # Setup Provider network topology + provtop = MyProvTopology(nwtop, l2top, logger) + provtop.setup_all() + + # Setup VM network topology + vmtop = MyVMTopology(nwtop, l2top, provtop, logger) + vmtop.setup_all() + + # Setup SFC network topology + sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, logger) + sfctop.setup_all() + + print ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_sfctop.xml" + xml_formatted_file = "/tmp/stacked_sfctop2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + print ("Converting to JSON ") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_sfctop.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_sfctop.json > /tmp/stacked_sfctop2.json", shell=True) + json_formatted_file = "/tmp/stacked_sfctop2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vxlan\"/\"vxlan\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py new file mode 100644 index 0000000..719fcf8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py @@ -0,0 +1,333 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
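+
+# This script builds the VM layer of the stacked topology. Unlike the other
+# builders, a VM node may be supported by nodes in more than one underlay
+# network, so create_node() here takes a list of (node, network_ref) pairs;
+# a minimal sketch (mirroring setup_nodes() below):
+#
+#   sup_node_list = [[g118_node, l2netid], [g44_br_int_node, provnetid]]
+#   tg_vm = vmnet.create_node("Trafgen_VM", "Trafgen VM on Grunt118",
+#                             mgmt_ip_addr="10.0.118.3",
+#                             sup_node_list=sup_node_list)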
+
+
+#
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvNetwork
+from create_stackedProvNettopology import MyProvTopology
+
+class MyNwNotFound(Exception):
+    pass
+
+class MyNodeNotFound(Exception):
+    pass
+
+class MyTpNotFound(Exception):
+    pass
+
+class MyVMNetwork(object):
+    def __init__(self, nwtop, l2top, provtop, log):
+        self.next_mac = 41
+        self.log = log
+        self.vmnet1 = nwtop.network.add()
+        self.vmnet1.network_id = "VmNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+        self.provtop = provtop
+
+        # L2 Network type augmentation
+        self.vmnet1.network_types.l2_network = self.vmnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.vmnet1.l2_network_attributes.name = "Rift LAB SFC-Demo VM Network"
+        ul_net = self.vmnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1")
+            self.l2netid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+        ul_net = self.vmnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")
+            self.provnetid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.vmnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        # tp ids are built as "<node_id>:<tp_name>" in create_tp() below
+        _tp_id = node.node_id + ":" + tp_name
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.vmnet1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr=None, sup_node_list=None):
+        logging.debug("Creating node %s", node_name)
+        node = self.vmnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node_list is not None):
+            for sup_node in sup_node_list:
+                logging.debug("   Adding support node %s", sup_node[0].node_id)
+                ul_node = node.supporting_node.add()
+                # The second element of each pair is the network ref
+                if (sup_node[1] is not None):
+                    ul_node.network_ref = sup_node[1]
+                else:
+                    ul_node.network_ref = self.l2netid
+                ul_node.node_ref = sup_node[0].node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:cc:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("   Adding support termination point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            if (nw_ref is not None):
+                ul_tp.network_ref = nw_ref
+            else:
+                ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1 = self.vmnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2 = self.vmnet1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
+class MyVMTopology(MyVMNetwork):
+    def __init__(self, nwtop, l2top, provtop, log):
+        super(MyVMTopology, self).__init__(nwtop, l2top, provtop, log)
+
+    def find_nw_id(self, nw_name):
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        logging.debug("Setting up nodes")
+
+        self.g118_node = self.l2top.find_node("Grunt118")
+        if (self.g118_node is None):
+            raise MyNodeNotFound()
+        self.g44_node = self.l2top.find_node("Grunt44")
+        if (self.g44_node is None):
+            raise MyNodeNotFound()
+        self.g120_node = self.l2top.find_node("Grunt120")
+        if (self.g120_node is None):
+            raise MyNodeNotFound()
+
+        self.g44_br_int_node = self.provtop.find_node("G44_Br_Int")
+        if (self.g44_br_int_node is None):
+            raise MyNodeNotFound()
+
+        self.pseudo_vm = self.create_node("Pseudo_VM", "Pseudo VM to manage eth0 LAN")
+        sup_node_list = [[self.g118_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.tg_vm = self.create_node("Trafgen_VM", "Trafgen VM on Grunt118", mgmt_ip_addr="10.0.118.3", sup_node_list = sup_node_list)
+        sup_node_list = [[self.g44_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.lb_vm = self.create_node("LB_VM", "LB VM on Grunt44", mgmt_ip_addr="10.0.118.35", sup_node_list = sup_node_list)
+        sup_node_list = [[self.g120_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.ts_vm = self.create_node("Trafsink_VM", "Trafsink VM on Grunt120", mgmt_ip_addr="10.0.118.4", sup_node_list = sup_node_list)
+
+    def setup_tps(self):
+        logging.debug("Setting up termination points")
+        # Find L2 hosts
+        self.g118_e2 = self.l2top.find_tp(self.g118_node, "eth2")
+        if (self.g118_e2 is None):
+            raise MyTpNotFound()
+        self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        if (self.g44_e2 is None):
+            raise MyTpNotFound()
+        # Find OVS tps
+        self.g44_br_int_vhu2 =
self.provtop.find_tp(self.g44_br_int_node, "vhu2") + if (self.g44_br_int_vhu2 is None): + raise MyTpNotFound() + self.g44_br_int_vhu3 = self.provtop.find_tp(self.g44_br_int_node, "vhu3") + if (self.g44_br_int_vhu3 is None): + raise MyTpNotFound() + + self.pvm_eth1 = self.create_tp(self.pseudo_vm, "eth1") + self.pvm_eth2 = self.create_tp(self.pseudo_vm, "eth2") + self.pvm_eth3 = self.create_tp(self.pseudo_vm, "eth3") + + self.tg_vm_eth0 = self.create_tp(self.tg_vm, "eth0") + self.tg_vm_trafgen11 = self.create_tp(self.tg_vm, "trafgen11", sup_node=self.g118_node, sup_tp=self.g118_e2) + + self.lb_vm_eth0 = self.create_tp(self.lb_vm, "eth0") + self.lb_vm_lb21 = self.create_tp(self.lb_vm, "load_balancer21", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu2, nw_ref=self.provnetid) + self.lb_vm_lb22 = self.create_tp(self.lb_vm, "load_balancer22", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu3, nw_ref=self.provnetid) + + self.ts_vm_eth0 = self.create_tp(self.ts_vm, "eth0") + self.ts_vm_trafsink31 = self.create_tp(self.ts_vm, "trafsink31", sup_node=self.g44_node, sup_tp=self.g44_e2) + + + def setup_links(self): + # Add links to vmnet1 network + # These links are unidirectional and point-to-point + logging.debug("Setting up links") + # Bidir Links for OVS bridges + self.create_bidir_link(self.tg_vm, self.tg_vm_trafgen11, self.lb_vm, self.lb_vm_lb21, "Link_tg_t11_lb_lb21", "Link_lb_lb21_tg_t11") + self.create_bidir_link(self.ts_vm, self.ts_vm_trafsink31, self.lb_vm, self.lb_vm_lb22, "Link_ts_t31_lb_lb22", "Link_lb_lb22_tg_t31") + + self.create_bidir_link(self.pseudo_vm, self.pvm_eth1, self.tg_vm, self.tg_vm_eth0, "Link_pvm_e1_tgv_e0", "Link_tgv_e0_pvm_e1") + self.create_bidir_link(self.pseudo_vm, self.pvm_eth2, self.lb_vm, self.lb_vm_eth0, "Link_pvm_e2_lbv_e0", "Link_lbv_e0_pvm_e2") + self.create_bidir_link(self.pseudo_vm, self.pvm_eth3, self.ts_vm, self.ts_vm_eth0, "Link_pvm_e3_tsv_e0", "Link_tsv_e0_pvm_e3") + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + self.setup_links() + +def adjust_xml_file(infile, outfile, begin_marker, end_marker): + buffer = [] + in_block = False + max_interesting_line_toread = 1 + interesting_line = 0 + with open(infile) as inf: + with open(outfile, 'w') as outf: + for line in inf: + if begin_marker in line: + in_block = True + # Go down + if end_marker in line: + assert in_block is True + print("End of gathering line...", line) + buffer.append(line) # gather lines + interesting_line = max_interesting_line_toread + in_block = False + continue + if interesting_line: + print("Interesting line printing ...", line) + outf.write(line) + interesting_line -= 1 + if interesting_line == 0: # output gathered lines + for lbuf in buffer: + outf.write(lbuf) + buffer = [] # empty buffer + print("\n\n") + continue + + if in_block: + print("Gathering line...", line) + buffer.append(line) # gather lines + else: + outf.write(line) + + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger('VM Network Topology') + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logger.info('Creating an instance of VM Network Topology') + + nwtop = RwTl.YangData_IetfNetwork() + + # Setup L2 topology + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + # Setup Provider network topology + provtop = MyProvTopology(nwtop, l2top, logger) + provtop.setup_all() + + # Setup VM network topology + vmtop = MyVMTopology(nwtop, l2top, provtop, 
logger) + vmtop.setup_all() + + print ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_vmtop.xml" + xml_formatted_file = "/tmp/stacked_vmtop2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + print ("Converting to JSON ") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_vmtop.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_vmtop.json > /tmp/stacked_vmtop2.json", shell=True) + json_formatted_file = "/tmp/stacked_vmtop2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) + status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py new file mode 100644 index 0000000..433bb9a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py @@ -0,0 +1,262 @@ +#!/bin/python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
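+
+# This script builds the base L2 host topology that the provider, VM, and
+# SFC builders stack on; they import MyL2Network/MyL2Topology from here and
+# resolve underlay objects through the find_* helpers, e.g. (sketch):
+#
+#   l2top = MyL2Topology(nwtop, logger)
+#   l2top.setup_all()
+#   g118 = l2top.find_node("Grunt118")    # returns None if absent
+#   e1 = l2top.find_tp(g118, "eth1")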
+
+
+#
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+
+class MyL2Network(object):
+    def __init__(self, nwtop, log):
+        self.next_mac = 11
+        self.log = log
+        self.nwtop = nwtop
+        self.l2net1 = nwtop.network.add()
+        self.l2net1.network_id = "L2HostNetwork-1"
+
+        # L2 Network type augmentation
+        self.l2net1.network_types.l2_network = self.l2net1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.l2net1.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network"
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_nw(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.l2net1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = node.node_id + "_" + tp_name
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.l2net1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, mgmt_ip_addr, description):
+        logging.debug("Creating node %s", node_name)
+        node = self.l2net1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        return node
+
+    def create_tp(self, node, cfg_tp):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}_{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1 = self.l2net1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2 = self.l2net1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
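+# Usage sketch for MyL2Network (mirrors MyL2Topology.setup_* below): create
+# the nodes first, then their termination points, then bidirectional links,
+# since each later step references objects from the earlier ones:
+#
+#   g118 = net.create_node("Grunt118", "10.66.4.118", "Host with OVS and PCI")
+#   hms = net.create_node("HostMgmtSwitch", "10.66.4.98", "Switch for host eth0")
+#   g118_e0 = net.create_tp(g118, "eth0")
+#   hms_e1 = net.create_tp(hms, "eth1")
+#   net.create_bidir_link(g118, g118_e0, hms, hms_e1,
+#                         "Link_g118_e0_hms_e1", "Link_hms_e1_g118_e0")
+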
+class MyL2Topology(MyL2Network): + def __init__(self, nwtop, log): + super(MyL2Topology, self).__init__(nwtop, log) + + def find_nw_id(self, nw_name): + return self.get_nw_id(nw_name) + + def find_nw(self, nw_name): + return self.get_nw(nw_name) + + def find_node(self, node_name): + return self.get_node(node_name) + + def find_tp(self, node, tp_name): + return self.get_tp(node, tp_name) + + def find_link(self, link_name): + return self.get_link(link_name) + + def setup_nodes(self): + self.g118 = self.create_node("Grunt118","10.66.4.118", "Host with OVS and PCI") + self.g44 = self.create_node("Grunt44","10.66.4.44", "Host with OVS-DPDK") + self.g120 = self.create_node("Grunt120","10.66.4.120", "Host with OVS and PCI") + self.hms = self.create_node("HostMgmtSwitch","10.66.4.98", "Switch for host eth0") + self.vms = self.create_node("VMMgmtSwitch","10.66.4.55", "Switch for VMs eth0") + self.ads = self.create_node("AristaDPSwitch","10.66.4.90", "10 Gbps Switch") + + def setup_tps(self): + self.g118_e0 = self.create_tp(self.g118, "eth0") + self.g118_e1 = self.create_tp(self.g118, "eth1") + self.g118_e2 = self.create_tp(self.g118, "eth2") + + self.g44_e0 = self.create_tp(self.g44, "eth0") + self.g44_e1 = self.create_tp(self.g44, "eth1") + self.g44_e2 = self.create_tp(self.g44, "eth2") + self.g44_e3 = self.create_tp(self.g44, "eth3") + + self.g120_e0 = self.create_tp(self.g120, "eth0") + self.g120_e1 = self.create_tp(self.g120, "eth1") + self.g120_e2 = self.create_tp(self.g120, "eth2") + + self.hms_e1 = self.create_tp(self.hms, "eth1") + self.hms_e2 = self.create_tp(self.hms, "eth2") + self.hms_e3 = self.create_tp(self.hms, "eth3") + + self.vms_e1 = self.create_tp(self.vms, "eth1") + self.vms_e2 = self.create_tp(self.vms, "eth2") + self.vms_e3 = self.create_tp(self.vms, "eth3") + + self.ads_57 = self.create_tp(self.ads, "Card_5:Port_7") + self.ads_58 = self.create_tp(self.ads, "Card_8:Port_8") + self.ads_47 = self.create_tp(self.ads, "Card_4:Port_7") + self.ads_48 = self.create_tp(self.ads, "Card_4:Port_8") + + def setup_links(self): + # Add links to l2net1 network + # These links are unidirectional and point-to-point + # Bidir Links for Grunt118 + self.create_bidir_link(self.g118, self.g118_e0, self.hms, self.hms_e1, "Link_g118_e0_hms_e1", "Link_hms_e1_g118_e0") + self.create_bidir_link(self.g118, self.g118_e1, self.vms, self.vms_e1, "Link_g118_e1_vms_e1", "Link_vms_e1_g118_e1") + self.create_bidir_link(self.g118, self.g118_e2, self.ads, self.ads_57, "Link_g118_e2_ads_47", "Link_ads_47_g118_e2") + # Bidir Links for Grunt44 + self.create_bidir_link(self.g44, self.g44_e0, self.hms, self.hms_e2, "Link_g44_e0_hms_e1", "Link_hms_e1_g44_e0") + self.create_bidir_link(self.g44, self.g44_e1, self.vms, self.vms_e2, "Link_g44_e1_vms_e1", "Link_vms_e1_g44_e1") + self.create_bidir_link(self.g44, self.g44_e2, self.ads, self.ads_47, "Link_g44_e2_ads_47", "Link_ads_47_g44_e2") + self.create_bidir_link(self.g44, self.g44_e3, self.ads, self.ads_48, "Link_g44_e3_ads_48", "Link_ads_48_g44_e3") + # Bidir Links for Grunt120 + self.create_bidir_link(self.g120, self.g120_e0, self.hms, self.hms_e3, "Link_g120_e0_hms_e1", "Link_hms_e1_g120_e0") + self.create_bidir_link(self.g120, self.g120_e1, self.vms, self.vms_e3, "Link_g120_e1_vms_e1", "Link_vms_e1_g120_e1") + self.create_bidir_link(self.g120, self.g120_e2, self.ads, self.ads_58, "Link_g120_e2_ads_58", "Link_ads_58_g120_e2") + + def setup_all(self): + self.setup_nodes() + self.setup_tps() + self.setup_links() + +def adjust_xml_file(infile, outfile, begin_marker, 
end_marker): + buffer = [] + in_block = False + max_interesting_line_toread = 1 + interesting_line = 0 + with open(infile) as inf: + with open(outfile, 'w') as outf: + for line in inf: + if begin_marker in line: + in_block = True + # Go down + if end_marker in line: + assert in_block is True + print("End of gathering line...", line) + buffer.append(line) # gather lines + interesting_line = max_interesting_line_toread + in_block = False + continue + if interesting_line: + print("Interesting line printing ...", line) + outf.write(line) + interesting_line -= 1 + if interesting_line == 0: # output gathered lines + for lbuf in buffer: + outf.write(lbuf) + buffer = [] # empty buffer + print("\n\n") + continue + + if in_block: + print("Gathering line...", line) + buffer.append(line) # gather lines + else: + outf.write(line) + +if __name__ == "__main__": + model = RwYang.Model.create_libncx() + model.load_schema_ypbc(RwTl.get_schema()) + # create logger + logger = logging.getLogger(__file__) + logger.setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG) + + logging.info('Creating an instance of L2 Host Topology') + nwtop = RwTl.YangData_IetfNetwork() + + l2top = MyL2Topology(nwtop, logger) + l2top.setup_all() + + logging.info ("Converting to XML") + # Convert l2nw network to XML + xml_str = nwtop.to_xml_v2(model) + tree = etree.XML(xml_str) + xml_file = "/tmp/stacked_top.xml" + xml_formatted_file = "/tmp/stacked_top2.xml" + with open(xml_file, "w") as f: + f.write(xml_str) + status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True) + status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True) + + logging.info ("Converting to JSON") + # Convert set of topologies to JSON + json_str = nwtop.to_json(model) + with open("/tmp/stacked_top.json", "w") as f: + f.write(json_str) + status = subprocess.call("python -m json.tool /tmp/stacked_top.json > /tmp/stacked_top2.json", shell=True) + json_formatted_file = "/tmp/stacked_top2.json" + status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py new file mode 100644 index 0000000..d7bf609 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py @@ -0,0 +1,99 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
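+
+# This unit test loads the mock SDN plugin through libpeas (rw_peas) and
+# exercises its get_network_list() interface. A minimal sketch of the
+# load-and-call pattern used below (assuming the rw_peas/rwlogger
+# environment of this tree):
+#
+#   plugin = rw_peas.PeasPlugin('rwsdn_mock', 'RwSdn-1.0')
+#   sdn = plugin.get_interface("Topology")
+#   sdn.init(rwlogger.RwLog.Ctx.new("SDN-Log"))
+#   rc, nwtop = sdn.get_network_list(account)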
+
+
+#
+#
+
+import datetime
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+from gi.repository import RwsdnYang
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import RwcalYang
+from gi.repository import IetfNetworkYang
+from gi.repository.RwTypes import RwStatus
+
+
+logger = logging.getLogger('mock')
+
+def get_sdn_account():
+    """
+    Creates an object of class RwsdnYang.SDNAccount()
+    """
+    account = RwsdnYang.SDNAccount()
+    account.account_type = "mock"
+    account.mock.username = "rift"
+    account.mock.plugin_name = "rwsdn_mock"
+    return account
+
+def get_sdn_plugin():
+    """
+    Loads rw.sdn plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwsdn_mock', 'RwSdn-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+    sdn = plugin.get_interface("Topology")
+    try:
+        rc = sdn.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR: SDN plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Mock SDN plugin successfully instantiated")
+    return sdn
+
+
+
+class SdnMockTest(unittest.TestCase):
+    def setUp(self):
+        """
+        Initialize test plugins
+        """
+        self._acct = get_sdn_account()
+        logger.info("Mock-SDN-Test: setUp")
+        self.sdn = get_sdn_plugin()
+        logger.info("Mock-SDN-Test: setUpEND")
+
+    def tearDown(self):
+        logger.info("Mock-SDN-Test: Done with tests")
+
+    def test_get_network_list(self):
+        """
+        Retrieve the network list from the mock plugin and verify it
+        """
+        rc, nwtop = self.sdn.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.debug("SDN-Mock-Test: Retrieved network attributes ")
+        for nw in nwtop.network:
+            logger.debug("...Network id %s", nw.network_id)
+            logger.debug("...Network name %s", nw.l2_network_attributes.name)
+            print(nw)
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py
new file mode 100644
index 0000000..be58aae
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py
@@ -0,0 +1,97 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
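+
+# Same structure as test_sdn_mock.py; only the account type and plugin
+# name differ, e.g. (sketch):
+#
+#   account.account_type = "sdnsim"
+#   account.sdnsim.plugin_name = "rwsdn_sim"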
+
+
+#
+#
+
+import datetime
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import RwsdnYang
+from gi.repository import IetfNetworkYang
+from gi.repository.RwTypes import RwStatus
+from gi.repository import RwSdn
+
+
+logger = logging.getLogger('sdnsim')
+
+def get_sdn_account():
+    """
+    Creates an object of class RwsdnYang.SDNAccount()
+    """
+    account = RwsdnYang.SDNAccount()
+    account.account_type = "sdnsim"
+    account.sdnsim.username = "rift"
+    account.sdnsim.plugin_name = "rwsdn_sim"
+    return account
+
+def get_sdn_plugin():
+    """
+    Loads rw.sdn plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwsdn_sim', 'RwSdn-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+    sdn = plugin.get_interface("Topology")
+    try:
+        rc = sdn.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR: SDN sim plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("SDN sim plugin successfully instantiated")
+    return sdn
+
+
+
+class SdnSimTest(unittest.TestCase):
+    def setUp(self):
+        """
+        Initialize test plugins
+        """
+        self._acct = get_sdn_account()
+        logger.info("SDN-Sim-Test: setUp")
+        self.sdn = get_sdn_plugin()
+        logger.info("SDN-Sim-Test: setUpEND")
+
+    def tearDown(self):
+        logger.info("SDN-Sim-Test: Done with tests")
+
+    def test_get_network_list(self):
+        """
+        Retrieve the network list from the sdnsim plugin and verify it
+        """
+        rc, nwtop = self.sdn.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.debug("SDN-Sim-Test: Retrieved network attributes ")
+        for nw in nwtop.network:
+            logger.debug("...Network id %s", nw.network_id)
+            logger.debug("...Network name %s", nw.l2_network_attributes.name)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py
new file mode 100644
index 0000000..f9529c4
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py
@@ -0,0 +1,732 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
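+
+# These tests exercise the NwtopDataStore CRUD API against topologies built
+# by the create_stacked* helpers; the basic pattern (mirroring setUp and
+# the tests below):
+#
+#   store = NwtopDataStore(logger)
+#   store.create_network("L2HostNetwork-1", l2net1)
+#   nw = store.get_network("L2HostNetwork-1")
+#   store.update_network("L2HostNetwork-1", updated_l2net)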
+
+
+#
+#
+
+import datetime
+import logging
+import unittest
+
+import rwlogger
+
+# from gi.repository import IetfNetworkYang
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+# from gi.repository.RwTypes import RwStatus
+
+from create_stackedl2topology import MyL2Topology
+
+from rift.topmgr import (
+    NwtopDataStore,
+)
+logger = logging.getLogger('sdntop')
+
+NUM_NWS = 1
+NUM_NODES_L2_NW = 6
+NUM_TPS_L2_NW = 20
+NUM_LINKS = 20
+
+class SdnTopStoreNetworkTest(unittest.TestCase):
+    def setUp(self):
+        """
+        Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+
+        # Add test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-2"
+        logger.info("SdnTopStoreNetworkTest: setUp")
+
+    def tearDown(self):
+        self.l2net1 = None
+        self.new_l2net = None
+        logger.info("SdnTopStoreNetworkTest: Done with tests")
+
+    def test_create_network(self):
+        """
+        Test: Create first L2 network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreNetworkTest: Create network")
+        # Get test data
+        # Created during the setup phase
+        assert self.l2net1 is not None
+        # Use data store APIs
+        # Network already stored
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        logger.debug("...Network id %s", nw.network_id)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        logger.debug("...Network name %s", nw.l2_network_attributes.name)
+        for node in nw.node:
+            logger.debug("...Node id %s", node.node_id)
+            num_nodes += 1
+            for tp in node.termination_point:
+                logger.debug("...Tp id %s", tp.tp_id)
+                num_tps += 1
+        self.assertEqual(num_nodes, NUM_NODES_L2_NW)
+        self.assertEqual(num_tps, NUM_TPS_L2_NW)
+
+
+    def test_add_network(self):
+        """
+        Test: Add another network, Check network id
+        """
+        logger.debug("SdnTopStoreNetworkTest: Add network")
+        # Use data store APIs
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+
+    def test_add_networktype(self):
+        """
+        Test: Add another network, Check network type
+        """
+        logger.debug("SdnTopStoreTest: Add network type")
+        # Use data store APIs
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+        # Add new test data
+        self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new()
+        logger.debug("Adding update l2net..%s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw.network_types.l2_network)
+
+    def test_add_networkl2name(self):
+        """
+        Test: Add another network, Check L2 network name
+        """
+        logger.debug("SdnTopStoreTest: Add L2 network name")
+        # Use data store APIs
+        self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new()
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+        # Add new test data
+        self.new_l2net.l2_network_attributes.name = "L2networkName"
+        logger.debug("Adding update l2net..%s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertEqual(nw.l2_network_attributes.name, "L2networkName")
+
+
+class SdnTopStoreNetworkNodeTest(unittest.TestCase):
+    def setUp(self):
+        """
+        Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        # Get test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+        self.node2 = self.new_l2net.node.add()
+        self.node2.node_id = "TempNode2"
+        logger.info("SdnTopStoreTest: setUp NetworkNodetest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with NetworkNodetest")
+
+
+    def test_add_network_node(self):
+        """
+        Test: Add a node to an existing network
+        Test all parameters
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Add network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+
+    #@unittest.skip("Skipping")
+    def test_update_network_node(self):
+        """
+        Test: Update a node in an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name2")
+
+    #@unittest.skip("Skipping")
+    def test_update_network_node_l2attr1(self):
+        """
+        Test: Update a node in an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name3"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+
+        # Add test data
+        self.node2.l2_node_attributes.name = "Nice Name4"
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        logger.debug("Node %s", nw.node[NUM_NODES_L2_NW])
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name4")
+
+    def test_update_network_node_l2attr2(self):
+        """
+        Test: Update a node in an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name3"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+
+        # Add test data
+        self.node2.l2_node_attributes.management_address.append("10.0.0.1")
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 1)
+
+        # Add test data
+        self.node2.l2_node_attributes.management_address.append("10.0.0.2")
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 2)
+
+
+class SdnTopStoreNetworkNodeTpTest(unittest.TestCase):
+    def setUp(self):
+        """
+        Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop =
RwTl.YangData_IetfNetwork() + + self.l2top = MyL2Topology(self.test_nwtop, logger) + self.l2top.setup_all() + + # Get initial test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + # Create initial nw + self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1) + # Get test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + assert self.l2net1 is not None + self.new_l2net = RwTl.YangData_IetfNetwork_Network() + self.new_l2net.network_id = "L2HostNetwork-1" + self.node2 = self.new_l2net.node.add() + self.node2.node_id = "TempNode2" + self.tp1 = self.node2.termination_point.add() + self.tp1.tp_id = "TempTp1" + logger.info("SdnTopStoreTest: setUp NetworkNodeTptest") + + def tearDown(self): + logger.info("SdnTopStoreTest: Done with NetworkNodeTptest") + + self.new_l2net = None + self.node2 = None + self.tp1 = None + + def test_add_network_node_tp(self): + """ + Test: Add a node to existing network + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + + def test_update_network_node_tp(self): + """ + Test: Update a tp to existing network, add all tp elements + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + self.tp1.tp_id = "TempTp1" + self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc" + self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296 + self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:01" + self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01") + + def test_update_network_node_tp2(self): + """ + Test: Update a tp to existing network, change tp elements + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + self.tp1.tp_id = "TempTp1" + self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc" + self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296 + self.tp1.l2_termination_point_attributes.mac_address = 
"00:1e:67:98:28:01" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01") + + # Change frame size + self.tp1.l2_termination_point_attributes.maximum_frame_size = 1396 + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01") + + # Change MAC address + self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:02" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02") + + # Add encapsulation type + self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + 
self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02") + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.eth_encapsulation, "l2t:ethernet") + + def test_update_extra_network_node_tp2(self): + """ + Test: Update a tp to existing network, change tp elements + """ + num_nodes = 0 + num_tps = 0 + logger.debug("SdnTopStoreTest: Update network ") + self.tp2 = self.node2.termination_point.add() + self.tp2.tp_id = "TempTp2" + # Use data store APIs + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[1].tp_id, "TempTp2") + + + +class SdnTopStoreNetworkLinkTest(unittest.TestCase): + def setUp(self): + """ + Initialize Top data store + """ + self._nwtopdata_store = NwtopDataStore(logger) + self.test_nwtop = RwTl.YangData_IetfNetwork() + + self.l2top = MyL2Topology(self.test_nwtop, logger) + self.l2top.setup_all() + + # Get initial test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + # Create initial nw + self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1) + # Get test data + self.l2net1 = self.l2top.find_nw("L2HostNetwork-1") + assert self.l2net1 is not None + self.new_l2net = RwTl.YangData_IetfNetwork_Network() + self.new_l2net.network_id = "L2HostNetwork-1" + + self.src_node = self.new_l2net.node.add() + self.src_node.node_id = "TempNode1" + self.tp1 = self.src_node.termination_point.add() + self.tp1.tp_id = "TempTp1" + + self.dest_node = self.new_l2net.node.add() + self.dest_node.node_id = "TempNode2" + self.tp2 = self.dest_node.termination_point.add() + self.tp2.tp_id = "TempTp2" + logger.info("SdnTopStoreTest: setUp NetworkLinkTest") + + def tearDown(self): + logger.info("SdnTopStoreTest: Done with NetworkLinkTest") + + self.new_l2net = None + self.src_node = None + self.tp1 = None + self.dest_node = None + self.tp2 = None + + def test_add_network_link(self): + """ + Test: Add a link to existing network + """ + logger.info("SdnTopStoreTest: Update network link") + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data created + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + 
self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(len(nw.link), NUM_LINKS ) + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + # Use data store APIs + logger.info("SdnTopStoreTest: Update network link - Part 2") + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + # Verify data created + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(nw.link[NUM_LINKS].source.source_node, self.src_node.node_id) + self.assertEqual(nw.link[NUM_LINKS].source.source_tp, self.tp1.tp_id) + self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.dest_node.node_id) + self.assertEqual(nw.link[NUM_LINKS].destination.dest_tp, self.tp2.tp_id) + self.assertEqual(len(nw.link), NUM_LINKS + 1) + + def test_add_extra_network_link(self): + """ + Test: Add a link to existing network + """ + logger.info("SdnTopStoreTest: Update extra network link") + # Create initial state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify initial state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + + # Add extra link (reverse) + self.link2 = self.new_l2net.link.add() + self.link2.link_id = "Link2" + self.link2.source.source_node = self.dest_node.node_id + self.link2.source.source_tp = self.tp2.tp_id + self.link2.destination.dest_node = self.src_node.node_id + self.link2.destination.dest_tp = self.tp1.tp_id + # Use data store APIs + logger.info("SdnTopStoreTest: Update extra network link - Part 2") + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + # Verify data created + self.assertEqual(nw.link[NUM_LINKS+1].link_id, "Link2") + self.assertEqual(len(nw.link), NUM_LINKS + 2) + self.assertEqual(nw.link[NUM_LINKS+1].source.source_node, self.dest_node.node_id) + self.assertEqual(nw.link[NUM_LINKS+1].source.source_tp, self.tp2.tp_id) + self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_node, self.src_node.node_id) + 
self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_tp, self.tp1.tp_id) + + def test_add_network_link_l2attr(self): + """ + Test: Check L2 link attributes + """ + logger.info("SdnTopStoreTest: Add network link L2 attributes") + # Create test state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self.link1.l2_link_attributes.name = "Link L2 name" + self.link1.l2_link_attributes.rate = 10000 + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify data state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name") + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000) + + def test_change_network_link_l2attr(self): + """ + Test: Change L2 link attributes + """ + logger.info("SdnTopStoreTest: Change network link L2 attributes") + # Create initial state + self.link1 = self.new_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.source.source_node = self.src_node.node_id + self.link1.source.source_tp = self.tp1.tp_id + self.link1.destination.dest_node = self.dest_node.node_id + self.link1.destination.dest_tp = self.tp2.tp_id + self.link1.l2_link_attributes.name = "Link L2 name" + self.link1.l2_link_attributes.rate = 10000 + self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net) + # Verify initial state + nw = self._nwtopdata_store.get_network("L2HostNetwork-1") + self.assertIsNotNone(nw) + self.assertEqual(nw.network_id, "L2HostNetwork-1") + self.assertEqual(len(self._nwtopdata_store._networks), 1) + self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2) + self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1") + self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1) + self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2") + self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2") + self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1") + self.assertEqual(len(nw.link), NUM_LINKS + 1) + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name") + self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000) + + # Create initial state + self.test_l2net = RwTl.YangData_IetfNetwork_Network() + self.test_l2net.network_id = "L2HostNetwork-1" + self.link1 = self.test_l2net.link.add() + self.link1.link_id = "Link1" + self.link1.l2_link_attributes.name = "Link L2 updated name" + self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net) + # Verify test state + nw = 
self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 updated name")
+
+    def test_change_network_link_dest_tp(self):
+        """
+        Test: Change the network link destination TP
+        """
+        logger.info("SdnTopStoreTest: Change network link dest-tp")
+        # Create initial state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+        # Create test state
+        self.test_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.test_l2net.network_id = "L2HostNetwork-1"
+        self.link1 = self.test_l2net.link.add()
+        self.link1.link_id = "Link1"
+        # Changing dest node params
+        self.link1.destination.dest_node = self.src_node.node_id
+        self.link1.destination.dest_tp = self.tp1.tp_id
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net)
+        # Verify test state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.src_node.node_id)
+
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py
new file mode 100755
index 0000000..8c19072
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3

+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
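+# Overview: this module test boots the rwvnstasklet under DTS and then pushes
+# topology configuration through DTS queries. A minimal sketch of the
+# configuration step used by every configure_* coroutine below (names as used
+# in this file):
+#
+#   nwtop = RwTl.YangData_IetfNetwork()
+#   MyL2Topology(nwtop, self.log).setup_all()
+#   yield from dts.query_create("C,/nd:network", rwdts.Flag.ADVISE, nwtop)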
+ + +# +# + + +import asyncio +import logging +import os +import sys +import types +import unittest +import uuid +import random + +import xmlrunner + +import gi +gi.require_version('CF', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwMain', '1.0') +gi.require_version('RwManifestYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwTypes', '1.0') +import gi.repository.CF as cf +import gi.repository.RwDts as rwdts +import gi.repository.RwMain as rwmain +import gi.repository.RwManifestYang as rwmanifest +import gi.repository.IetfL2TopologyYang as l2Tl +import gi.repository.RwTopologyYang as RwTl +import gi.repository.RwLaunchpadYang as launchpadyang +from gi.repository import RwsdnYang +from gi.repository.RwTypes import RwStatus + +from create_stackedl2topology import MyL2Topology +from create_stackedProvNettopology import MyProvTopology +from create_stackedVMNettopology import MyVMTopology +from create_stackedSfctopology import MySfcTopology + +import rw_peas +import rift.tasklets +import rift.test.dts + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class TopMgrTestCase(rift.test.dts.AbstractDTSTest): + + @classmethod + def configure_suite(cls, rwmain): + vns_mgr_dir = os.environ.get('VNS_MGR_DIR') + + cls.rwmain.add_tasklet(vns_mgr_dir, 'rwvnstasklet') + + @classmethod + def configure_schema(cls): + return RwTl.get_schema() + + @asyncio.coroutine + def wait_tasklets(self): + yield from asyncio.sleep(1, loop=self.loop) + + @classmethod + def configure_timeout(cls): + return 360 + + + @asyncio.coroutine + def configure_l2_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + nw_xpath = "C,/nd:network" + self.log.info("Configuring l2 network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + @asyncio.coroutine + def configure_prov_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + + provtop = MyProvTopology(nwtop, l2top, self.log) + provtop.setup_all() + nw_xpath = "C,/nd:network" + self.log.info("Configuring provider network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + @asyncio.coroutine + def configure_vm_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + + provtop = MyProvTopology(nwtop, l2top, self.log) + provtop.setup_all() + + vmtop = MyVMTopology(nwtop, l2top, provtop, self.log) + vmtop.setup_all() + nw_xpath = "C,/nd:network" + self.log.info("Configuring VM network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + @asyncio.coroutine + def configure_sfc_network(self, dts): + nwtop = RwTl.YangData_IetfNetwork() + l2top = MyL2Topology(nwtop, self.log) + l2top.setup_all() + + provtop = MyProvTopology(nwtop, l2top, self.log) + provtop.setup_all() + + vmtop = MyVMTopology(nwtop, l2top, provtop, self.log) + vmtop.setup_all() + + sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, self.log) + sfctop.setup_all() + + nw_xpath = "C,/nd:network" + self.log.info("Configuring SFC network: %s",nwtop) + yield from dts.query_create(nw_xpath, + rwdts.Flag.ADVISE, + nwtop) + + + #@unittest.skip("Skipping test_network_config") + def test_network_config(self): + self.log.debug("STARTING - test_network_config") + tinfo = self.new_tinfo('static_network') + dts = 
rift.tasklets.DTS(tinfo, self.schema, self.loop) + + @asyncio.coroutine + def run_test(): + networks = [] + computes = [] + + yield from asyncio.sleep(120, loop=self.loop) + yield from self.configure_l2_network(dts) + yield from self.configure_prov_network(dts) + yield from self.configure_vm_network(dts) + yield from self.configure_sfc_network(dts) + + future = asyncio.ensure_future(run_test(), loop=self.loop) + self.run_until(future.done) + if future.exception() is not None: + self.log.error("Caught exception during test") + raise future.exception() + + self.log.debug("DONE - test_network_config") + +def main(): + top_dir = __file__[:__file__.find('/modules/core/')] + build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build') + mc_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build') + launchpad_build_dir = os.path.join(mc_build_dir, 'rwlaunchpad') + + if 'VNS_MGR_DIR' not in os.environ: + os.environ['VNS_MGR_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwvns') + + if 'MESSAGE_BROKER_DIR' not in os.environ: + os.environ['MESSAGE_BROKER_DIR'] = os.path.join(build_dir, 'rwmsg/plugins/rwmsgbroker-c') + + if 'ROUTER_DIR' not in os.environ: + os.environ['ROUTER_DIR'] = os.path.join(build_dir, 'rwdts/plugins/rwdtsrouter-c') + + runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"]) + unittest.main(testRunner=runner) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt new file mode 100644 index 0000000..3e17eb5 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt @@ -0,0 +1,59 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Creation Date: 10/28/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwsdn) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwSdn-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0 + rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdn_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0 + ietf_network_yang-1.0 ietf_network_topology_yang-1.0 + ietf_l2_topology_yang-1.0 rw_topology_yang-1.0 + rw_log-1.0 + VAPI_DIRS + ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang + ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/ + GIR_PATHS + ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang + ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/ + GENERATE_HEADER_FILE ${VALA_NAME}.h + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + DEPENDS rwcal_yang rwsdn_yang mano_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX . 
+ ) + + +set(subdirs + rwsdn_mock + rwsdn_sim + rwsdn_odl + rwsdn-python + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt new file mode 100644 index 0000000..261e82f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwsdn-plugin rwsdn-plugin.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py new file mode 100644 index 0000000..d984362 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py @@ -0,0 +1,96 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import logging + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes) + +import rw_status +import rwlogger + +import rift.cal +import rift.sdn + +logger = logging.getLogger('rwsdn') + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + + }) + + +class TopologyPlugin(GObject.Object, RwSdn.Topology): + def __init__(self): + GObject.Object.__init__(self) + self._impl = None + + @rwstatus + def do_init(self, rwlog_ctx): + providers = { + "sdnsim": rift.sdn.SdnSim, + "mock": rift.sdn.Mock, + } + + logger.addHandler( + rwlogger.RwLogger( + category="rwsdn", + log_hdl=rwlog_ctx, + ) + ) + + self._impl = {} + for name, impl in providers.items(): + try: + self._impl[name] = impl() + + except Exception: + msg = "unable to load SDN implementation for {}" + logger.exception(msg.format(name)) + + @rwstatus + def do_get_network_list(self, account, network_top): + obj = self._impl[account.account_type] + return obj.get_network_list(account, network_top) + +def main(): + @rwstatus + def blah(): + raise IndexError() + + a = blah() + assert(a == RwTypes.RwStatus.NOTFOUND) + + @rwstatus({IndexError: RwTypes.RwStatus.NOTCONNECTED}) + def blah2(): + """Some function""" + raise IndexError() + + a = blah2() + assert(a == RwTypes.RwStatus.NOTCONNECTED) + assert(blah2.__doc__ == "Some function") + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala new file mode 100644 index 0000000..a79f5a7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala @@ -0,0 +1,79 @@ +namespace RwSdn { + + public interface Topology: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Configuring related APIs + */ + /* TODO */ + + /* + 
* Network related APIs + */ + public abstract RwTypes.RwStatus get_network_list( + Rwsdn.SDNAccount account, + out RwTopology.YangData_IetfNetwork network_topology); + + /* + * VNFFG Chain related APIs + */ + public abstract RwTypes.RwStatus create_vnffg_chain( + Rwsdn.SDNAccount account, + Rwsdn.VNFFGChain vnffg_chain, + out string vnffg_id); + + /* + * VNFFG Chain Terminate related APIs + */ + public abstract RwTypes.RwStatus terminate_vnffg_chain( + Rwsdn.SDNAccount account, + string vnffg_id); + + + /* + * Network related APIs + */ + public abstract RwTypes.RwStatus get_vnffg_rendered_paths( + Rwsdn.SDNAccount account, + out Rwsdn.VNFFGRenderedPaths rendered_paths); + + /* + * Classifier related APIs + */ + public abstract RwTypes.RwStatus create_vnffg_classifier( + Rwsdn.SDNAccount account, + Rwsdn.VNFFGClassifier vnffg_classifier, + out string vnffg_classifier_id); + + /* + * Classifier related APIs + */ + public abstract RwTypes.RwStatus terminate_vnffg_classifier( + Rwsdn.SDNAccount account, + string vnffg_classifier_id); + + + + /* + * Node Related APIs + */ + /* TODO */ + + /* + * Termination-point Related APIs + */ + /* TODO */ + + /* + * Link Related APIs + */ + /* TODO */ + + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt new file mode 100644 index 0000000..1588ddf --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwsdn_mock rwsdn_mock.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py new file mode 100644 index 0000000..833ccc4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py @@ -0,0 +1,174 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
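+# Overview: the mock plugin serves a canned topology instead of talking to a
+# real controller; DataStore below builds that topology once at init time. A
+# minimal sketch of how the plugin is driven (see test_sdn_mock.py for the
+# actual usage through libpeas; return shapes are as wrapped by @rwstatus):
+#
+#   plugin = MockPlugin()
+#   plugin.do_init(rwlog_ctx)                    # builds the default topology
+#   rc, topo = plugin.do_get_network_list(acct)  # returns the canned topology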
+
+
+#
+#
+
+import collections
+import logging
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwTopologyYang as RwTl,
+    RwsdnYang
+    )
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwsdn.mock')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+GRUNT118 = {"name": "grunt118", "ip_addr": "10.66.4.118", "tps": ["eth0"]}
+GRUNT44 = {"name": "grunt44", "ip_addr": "10.66.4.44", "tps": ["eth0"]}
+AS1 = {"name":"AristaSw1", "ip_addr": "10.66.4.54", "tps": ["Ethernet8/7","Ethernet8/8"]}
+NW_NODES = [GRUNT118, GRUNT44, AS1]
+NW_BIDIR_LINKS = [{"src" : ("grunt118","eth0"), "dest" : ("AristaSw1","Ethernet8/7")},
+                  {"src" : ("grunt44","eth0"), "dest" : ("AristaSw1","Ethernet8/8")}]
+
+
+class DataStore(object):
+    def __init__(self):
+        self.topology = None
+        self.nw = None
+        self.next_mac = 11
+
+    def create_link(self, cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp):
+        lnk = self.nw.link.add()
+        lnk.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp)
+        lnk.source.source_node = cfg_src_node
+        lnk.source.source_tp = cfg_src_tp
+        lnk.destination.dest_node = cfg_dest_node
+        lnk.destination.dest_tp = cfg_dest_tp
+        # L2 link augmentation
+        lnk.l2_link_attributes.name = cfg_src_tp + cfg_dest_tp
+        lnk.l2_link_attributes.rate = 1000000000.00
+
+    def create_tp(self, node, cfg_tp):
+        tp = node.termination_point.add()
+        tp.tp_id = ("urn:Rift:Lab:{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.tp_state = "in_use"
+        tp.l2_termination_point_attributes.eth_encapsulation = "ethernet"
+
+    def create_node(self, cfg_node):
+        node = self.nw.node.add()
+        node.node_id = cfg_node['name']
+        # L2 Node augmentation
+        node.l2_node_attributes.name = cfg_node['name']
+        node.l2_node_attributes.description = "Host with OVS-DPDK"
+        node.l2_node_attributes.management_address.append(cfg_node['ip_addr'])
+        for cfg_tp in cfg_node['tps']:
+            self.create_tp(node, cfg_tp)
+
+    def create_default_topology(self):
+        logger.debug('Creating default topology: ')
+
+        self.topology = RwTl.YangData_IetfNetwork()
+        self.nw = self.topology.network.add()
+        self.nw.network_id = "L2HostTopology-Def1"
+        self.nw.server_provided = 'true'
+
+        # L2 Network type augmentation
+        self.nw.network_types.l2_network = self.nw.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.nw.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network"
+
+        for cfg_node in NW_NODES:
+            self.create_node(cfg_node)
+
+        for cfg_link in NW_BIDIR_LINKS:
+            self.create_link(cfg_link['src'][0], cfg_link['src'][1], cfg_link['dest'][0], cfg_link['dest'][1])
+            # Reverse direction of the bidirectional link: swap the endpoints
+            # whole, keeping each (node, tp) pair intact
+            self.create_link(cfg_link['dest'][0], cfg_link['dest'][1], cfg_link['src'][0], cfg_link['src'][1])
+
+        return self.topology
+
+
+class Resources(object):
+    def __init__(self):
+        self.networks = dict()
+
+
+class MockPlugin(GObject.Object, RwSdn.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.resources = collections.defaultdict(Resources)
+        self.datastore = None
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rwsdn.mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+        account = RwsdnYang.SDNAccount()
+        account.name = 'mock'
+        account.account_type = 'mock'
+        account.mock.username = 'rift'
+
+        self.datastore = DataStore()
+        self.topology = self.datastore.create_default_topology()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered networks
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Get network list: ')
+
+        if (self.topology):
+            logger.debug('Returning network list: ')
+            return self.topology
+
+        logger.debug('Returning empty network list: ')
+        return None
+
+
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt
new file mode 100644
index 0000000..ffa8dec
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt
@@ -0,0 +1,8 @@
+
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn_odl rwsdn_odl.py)
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py
new file mode 100644
index 0000000..31e1402
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py
@@ -0,0 +1,943 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
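+# Overview: this plugin discovers topology from OpenDaylight over RESTCONF.
+# A minimal sketch of the retrieval done in SdnOdl.get_network_list below
+# (basic-auth credentials come from the SDN account's odl section):
+#
+#   url = '{}/restconf/operational/network-topology:network-topology'.format(account.odl.url)
+#   r = requests.get(url, auth=(account.odl.username, account.odl.password))
+#   r.raise_for_status()
+#   nw_topo = r.json()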
+ + +# +# + +import logging + +import requests + +import json +import re + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwsdnYang', '1.0') +gi.require_version('RwSdn', '1.0') +gi.require_version('RwTopologyYang','1.0') + +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes, + RwsdnYang, + RwTopologyYang as RwTl, + ) + +import rw_status +import rwlogger + + +logger = logging.getLogger('rwsdn.sdnodl') +logger.setLevel(logging.DEBUG) + + +sff_rest_based = True + +class UnknownAccountError(Exception): + pass + + +class MissingFileError(Exception): + pass + + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + UnknownAccountError: RwTypes.RwStatus.NOTFOUND, + MissingFileError: RwTypes.RwStatus.NOTFOUND, + }) + + +class SdnOdlPlugin(GObject.Object, RwSdn.Topology): + + def __init__(self): + GObject.Object.__init__(self) + self.sdnodl = SdnOdl() + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler( + rwlogger.RwLogger( + category="sdnodl", + log_hdl=rwlog_ctx, + ) + ) + + @rwstatus(ret_on_failure=[None]) + def do_get_network_list(self, account): + """ + Returns the list of discovered networks + + @param account - a SDN account + + """ + logger.debug('Received Get network list: ') + nwtop = self.sdnodl.get_network_list( account) + logger.debug('Done with get network list: %s', type(nwtop)) + return nwtop + + @rwstatus(ret_on_failure=[""]) + def do_create_vnffg_chain(self, account,vnffg_chain): + """ + Creates Service Function chain in ODL + + @param account - a SDN account + + """ + logger.debug('Received Create VNFFG chain ') + vnffg_id = self.sdnodl.create_sfc( account,vnffg_chain) + logger.debug('Done with create VNFFG chain with name : %s', vnffg_id) + return vnffg_id + + @rwstatus + def do_terminate_vnffg_chain(self, account,vnffg_id): + """ + Terminate Service Function chain in ODL + + @param account - a SDN account + + """ + logger.debug('Received terminate VNFFG chain for id %s ', vnffg_id) + # TODO: Currently all the RSP, SFPs , SFFs and SFs are deleted + # Need to handle deletion of specific RSP, SFFs, SFs etc + self.sdnodl.terminate_all_sfc(account) + logger.debug('Done with terminate VNFFG chain with name : %s', vnffg_id) + + @rwstatus(ret_on_failure=[None]) + def do_get_vnffg_rendered_paths(self, account): + """ + Get ODL Rendered Service Path List (SFC) + + @param account - a SDN account + """ + vnffg_list = self.sdnodl.get_rsp_list(account) + return vnffg_list + + @rwstatus(ret_on_failure=[None]) + def do_create_vnffg_classifier(self, account, vnffg_classifier): + """ + Add VNFFG Classifier + + @param account - a SDN account + """ + classifier_name = self.sdnodl.create_sfc_classifier(account,vnffg_classifier) + return classifier_name + + @rwstatus(ret_on_failure=[None]) + def do_terminate_vnffg_classifier(self, account, vnffg_classifier_name): + """ + Add VNFFG Classifier + + @param account - a SDN account + """ + self.sdnodl.terminate_sfc_classifier(account,vnffg_classifier_name) + + +class Sff(object): + """ + Create SFF object to hold SFF related details + """ + + def __init__(self,sff_br_uid, sff_br_name , sff_ip, sff_br_ip): + import socket + self.name = socket.getfqdn(sff_ip) + self.br_uid = sff_br_uid + self.ip = sff_ip + self.br_ip = sff_br_ip + self.br_name = sff_br_name + self.sff_port = 6633 + self.sff_rest_port = 6000 + self.sf_dp_list = list() + + def 
add_sf_dp_to_sff(self,sf_dp): + self.sf_dp_list.append(sf_dp) + + def __repr__(self): + return 'Name:{},Bridge Name:{}, IP: {}, SF List: {}'.format(self.br_uid,self.br_name, self.ip, self.sf_dp_list) + +class SfDpLocator(object): + """ + Create Service Function Data Plane Locator related Object to hold details related to each DP Locator endpoint + """ + def __init__(self,sfdp_id,vnfr_name,vm_id): + self.name = sfdp_id + self.port_id = sfdp_id + self.vnfr_name = vnfr_name + self.vm_id = vm_id + self.sff_name = None + + def _update_sff_name(self,sff_name): + self.sff_name = sff_name + + def _update_vnf_params(self,service_function_type,address, port,transport_type): + self.service_function_type = 'service-function-type:{}'.format(service_function_type) + self.address = address + self.port = port + self.transport_type = "service-locator:{}".format(transport_type) + + def __repr__(self): + return 'Name:{},Port id:{}, VNFR ID: {}, VM ID: {}, SFF Name: {}'.format(self.name,self.port_id, self.vnfr_name, self.vm_id,self.sff_name) + +class SdnOdl(object): + """ + SDN ODL Class to support REST based API calls + """ + + @property + def _network_topology_path(self): + return 'restconf/operational/network-topology:network-topology' + + @property + def _node_inventory_path(self): + return 'restconf/operational/opendaylight-inventory:nodes' + + def _network_topology_rest_url(self,account): + return '{}/{}'.format(account.odl.url,self._network_topology_path) + + def _node_inventory_rest_url(self,account): + return '{}/{}'.format(account.odl.url,self._node_inventory_path) + + def _get_rest_url(self,account, rest_path): + return '{}/{}'.format(account.odl.url,rest_path) + + + def _get_peer_termination_point(self,node_inv,tp_id): + for node in node_inv['nodes']['node']: + if "node-connector" in node and len(node['node-connector']) > 0: + for nodec in node['node-connector']: + if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id): + return(node['id'], nodec['id']) + return (None,None) + + def _get_termination_point_mac_address(self,node_inv,tp_id): + for node in node_inv['nodes']['node']: + if "node-connector" in node and len(node['node-connector']) > 0: + for nodec in node['node-connector']: + if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id): + return nodec.get("flow-node-inventory:hardware-address") + + def _add_host(self,ntwk,node,term_point,vmid,node_inv): + for ntwk_node in ntwk.node: + if ntwk_node.node_id == vmid: + break + else: + ntwk_node = ntwk.node.add() + if "ovsdb:bridge-name" in node: + ntwk_node.rw_node_attributes.ovs_bridge_name = node["ovsdb:bridge-name"] + ntwk_node.node_id = vmid + intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id'] + if intf_id: + ntwk_node_tp = ntwk_node.termination_point.add() + ntwk_node_tp.tp_id = intf_id[0]['external-id-value'] + att_mac = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'attached-mac'] + if att_mac: + ntwk_node_tp.l2_termination_point_attributes.mac_address = att_mac[0]['external-id-value'] + peer_node,peer_node_tp = self._get_peer_termination_point(node_inv,term_point['tp-id']) + if peer_node and peer_node_tp: + nw_lnk = ntwk.link.add() + nw_lnk.source.source_tp = ntwk_node_tp.tp_id + nw_lnk.source.source_node = ntwk_node.node_id + nw_lnk.destination.dest_tp = term_point['tp-id'] + nw_lnk.destination.dest_node = node['node-id'] + nw_lnk.link_id = peer_node_tp + '-' + 'source' + + 
nw_lnk = ntwk.link.add() + nw_lnk.source.source_tp = term_point['tp-id'] + nw_lnk.source.source_node = node['node-id'] + nw_lnk.destination.dest_tp = ntwk_node_tp.tp_id + nw_lnk.destination.dest_node = ntwk_node.node_id + nw_lnk.link_id = peer_node_tp + '-' + 'dest' + + def _get_address_from_node_inventory(self,node_inv,node_id): + for node in node_inv['nodes']['node']: + if node['id'] == node_id: + return node["flow-node-inventory:ip-address"] + return None + + def _fill_network_list(self,nw_topo,node_inventory): + """ + Fill Topology related information + """ + nwtop = RwTl.YangData_IetfNetwork() + + for topo in nw_topo['network-topology']['topology']: + if ('node' in topo and len(topo['node']) > 0): + ntwk = nwtop.network.add() + ntwk.network_id = topo['topology-id'] + ntwk.server_provided = True + for node in topo['node']: + if ('termination-point' in node and len(node['termination-point']) > 0): + ntwk_node = ntwk.node.add() + ntwk_node.node_id = node['node-id'] + addr = self._get_address_from_node_inventory(node_inventory,ntwk_node.node_id) + if addr: + ntwk_node.l2_node_attributes.management_address.append(addr) + for term_point in node['termination-point']: + ntwk_node_tp = ntwk_node.termination_point.add() + ntwk_node_tp.tp_id = term_point['tp-id'] + mac_address = self._get_termination_point_mac_address(node_inventory,term_point['tp-id']) + if mac_address: + ntwk_node_tp.l2_termination_point_attributes.mac_address = mac_address + if 'ovsdb:interface-external-ids' in term_point: + vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id'] + if vm_id: + vmid = vm_id[0]['external-id-value'] + self._add_host(ntwk,node,term_point,vmid,node_inventory) + if ('link' in topo and len(topo['link']) > 0): + for link in topo['link']: + nw_link = ntwk.link.add() + if 'destination' in link: + nw_link.destination.dest_tp = link['destination'].get('dest-tp') + nw_link.destination.dest_node = link['destination'].get('dest-node') + if 'source' in link: + nw_link.source.source_node = link['source'].get('source-node') + nw_link.source.source_tp = link['source'].get('source-tp') + nw_link.link_id = link.get('link-id') + return nwtop + + + def get_network_list(self, account): + """ + Get the networks details from ODL + """ + url = self._network_topology_rest_url(account) + r=requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + nw_topo = r.json() + + url = self._node_inventory_rest_url(account) + r = requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + node_inventory = r.json() + return self._fill_network_list(nw_topo,node_inventory) + + @property + def _service_functions_path(self): + return 'restconf/config/service-function:service-functions' + + @property + def _service_function_path(self): + return 'restconf/config/service-function:service-functions/service-function/{}' + + @property + def _service_function_forwarders_path(self): + return 'restconf/config/service-function-forwarder:service-function-forwarders' + + @property + def _service_function_forwarder_path(self): + return 'restconf/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/{}' + + @property + def _service_function_chains_path(self): + return 'restconf/config/service-function-chain:service-function-chains' + + @property + def _service_function_chain_path(self): + return 'restconf/config/service-function-chain:service-function-chains/service-function-chain/{}' + + + @property + 
def _sfps_path(self): + return 'restconf/config/service-function-path:service-function-paths' + + @property + def _sfp_path(self): + return 'restconf/config/service-function-path:service-function-paths/service-function-path/{}' + + + @property + def _create_rsp_path(self): + return 'restconf/operations/rendered-service-path:create-rendered-path' + + @property + def _delete_rsp_path(self): + return 'restconf/operations/rendered-service-path:delete-rendered-path' + + + @property + def _get_rsp_paths(self): + return 'restconf/operational/rendered-service-path:rendered-service-paths' + + @property + def _get_rsp_path(self): + return 'restconf/operational/rendered-service-path:rendered-service-paths/rendered-service-path/{}' + + @property + def _access_list_path(self): + return 'restconf/config/ietf-access-control-list:access-lists/acl/{}' + + @property + def _service_function_classifier_path(self): + return 'restconf/config/service-function-classifier:service-function-classifiers/service-function-classifier/{}' + + @property + def _access_lists_path(self): + return 'restconf/config/ietf-access-control-list:access-lists' + + @property + def _service_function_classifiers_path(self): + return 'restconf/config/service-function-classifier:service-function-classifiers' + + + def _create_sf(self,account,vnffg_chain,sf_dp_list): + "Create SF" + sf_json = {} + + for vnf in vnffg_chain.vnf_chain_path: + for vnfr in vnf.vnfr_ids: + sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name)) + print(sf_url) + r=requests.get(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + # If the SF is not found; create new SF + if r.status_code == 200: + logger.info("SF with name %s is already present in ODL. 
Skipping update", vnfr.vnfr_name)
+                continue
+            elif r.status_code != 404:
+                r.raise_for_status()
+
+            sf_dict = {}
+            sf_dict['name'] = vnfr.vnfr_name
+            sf_dict['nsh-aware'] = vnf.nsh_aware
+            sf_dict['type'] = 'service-function-type:{}'.format(vnf.service_function_type)
+            sf_dict['ip-mgmt-address'] = vnfr.mgmt_address
+            sf_dict['rest-uri'] = 'http://{}:{}'.format(vnfr.mgmt_address, vnfr.mgmt_port)
+
+            sf_dict['sf-data-plane-locator'] = list()
+            for vdu in vnfr.vdu_list:
+                sf_dp = {}
+                if vdu.port_id in sf_dp_list.keys():
+                    sf_dp['name'] = vdu.name
+                    sf_dp['ip'] = vdu.address
+                    sf_dp['port'] = vdu.port
+                    sf_dp['transport'] = "service-locator:{}".format(vnf.transport_type)
+                    sff_name = sf_dp_list[vdu.port_id].sff_name
+                    if sff_name is None:
+                        logger.error("SFF not found for port %s in SF %s", vdu.port_id, vnfr.vnfr_name)
+                    sf_dp['service-function-forwarder'] = sff_name
+                    sf_dict['sf-data-plane-locator'].append(sf_dp)
+                else:
+                    logger.error("Port %s not found in SF DP list",vdu.port_id)
+
+            sf_json['service-function'] = sf_dict
+            sf_data = json.dumps(sf_json)
+            sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name))
+            print(sf_url)
+            print(sf_data)
+            r=requests.put(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sf_data)
+            r.raise_for_status()
+
+
+    def _create_sff(self,account,vnffg_chain,sff):
+        "Create SFF"
+        sff_json = {}
+        sff_dict = {}
+
+        sff_url = self._get_rest_url(account,self._service_function_forwarder_path.format(sff.name))
+        print(sff_url)
+        r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        # If the SFF is not found, create a new SFF
+        if r.status_code == 200:
+            logger.info("SFF with name %s is already present in ODL. Skipping full update", sff.name)
+            sff_dict = r.json()
+            sff_updated = False
+            for sf_dp in sff.sf_dp_list:
+                for sff_sf in sff_dict['service-function-forwarder'][0]['service-function-dictionary']:
+                    if sf_dp.vnfr_name == sff_sf['name']:
+                        logger.info("SF with name %s is already found in SFF %s SF Dictionary. Skipping update",sf_dp.vnfr_name,sff.name)
+                        break
+                else:
+                    logger.info("SF with name %s is not found in SFF %s SF Dictionary",sf_dp.vnfr_name, sff.name)
+                    sff_updated = True
+                    sff_sf_dict = {}
+                    sff_sf_dp_loc = {}
+                    sff_sf_dict['name'] = sf_dp.vnfr_name
+
+                    # Below two lines are enabled only for ODL Beryllium
+                    sff_sf_dp_loc['sff-dpl-name'] = sff.name
+                    sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name
+
+                    sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc
+                    sff_dict['service-function-forwarder'][0]['service-function-dictionary'].append(sff_sf_dict)
+            if sff_updated is True:
+                sff_data = json.dumps(sff_dict)
+                print(sff_data)
+                r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data)
+                r.raise_for_status()
+            return
+        elif r.status_code != 404:
+            r.raise_for_status()
+
+        sff_name = sff.name
+        sff_ip = sff.ip
+        sff_br_ip = sff.br_ip
+        sff_port = sff.sff_port
+        sff_bridge_name = ''
+        sff_rest_port = sff.sff_rest_port
+        sff_ovs_op = {}
+        if sff_rest_based is False:
+            sff_bridge_name = sff.br_name
+            sff_ovs_op = {"key": "flow",
+                          "nshc1": "flow",
+                          "nsp": "flow",
+                          "remote-ip": "flow",
+                          "dst-port": sff_port,
+                          "nshc3": "flow",
+                          "nshc2": "flow",
+                          "nshc4": "flow",
+                          "nsi": "flow"}
+
+
+        sff_dict['name'] = sff_name
+        sff_dict['service-node'] = ''
+        sff_dict['ip-mgmt-address'] = sff_ip
+        if sff_rest_based:
+            sff_dict['rest-uri'] = 'http://{}:{}'.format(sff_ip, sff_rest_port)
+        else:
+            sff_dict['service-function-forwarder-ovs:ovs-bridge'] = {"bridge-name": sff_bridge_name}
+        sff_dict['service-function-dictionary'] = list()
+        for sf_dp in sff.sf_dp_list:
+            sff_sf_dict = {}
+            sff_sf_dp_loc = {}
+            sff_sf_dict['name'] = sf_dp.vnfr_name
+
+            # Below set of lines are required for ODL Lithium
+            #sff_sf_dict['type'] = sf_dp.service_function_type
+            #sff_sf_dp_loc['ip'] = sf_dp.address
+            #sff_sf_dp_loc['port'] = sf_dp.port
+            #sff_sf_dp_loc['transport'] = sf_dp.transport_type
+            #sff_sf_dp_loc['service-function-forwarder-ovs:ovs-bridge'] = {}
+
+            # Below two lines are enabled only for ODL Beryllium
+            sff_sf_dp_loc['sff-dpl-name'] = sff_name
+            sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name
+
+            sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc
+            sff_dict['service-function-dictionary'].append(sff_sf_dict)
+
+        sff_dict['sff-data-plane-locator'] = list()
+        sff_dp = {}
+        dp_loc = {}
+        sff_dp['name'] = sff_name
+        dp_loc['ip'] = sff_br_ip
+        dp_loc['port'] = sff_port
+        dp_loc['transport'] = 'service-locator:vxlan-gpe'
+        sff_dp['data-plane-locator'] = dp_loc
+        if sff_rest_based is False:
+            sff_dp['service-function-forwarder-ovs:ovs-options'] = sff_ovs_op
+            sff_dp["service-function-forwarder-ovs:ovs-bridge"] = {'bridge-name':sff_bridge_name}
+        sff_dict['sff-data-plane-locator'].append(sff_dp)
+
+        sff_json['service-function-forwarder'] = sff_dict
+        sff_data = json.dumps(sff_json)
+        print(sff_data)
+        r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data)
+        r.raise_for_status()
+
+    def _create_sfc(self,account,vnffg_chain):
+        "Create SFC"
+        sfc_json = {}
+        sfc_dict = {}
+        sfc_dict['name'] = vnffg_chain.name
+        sfc_dict['sfc-service-function'] = list()
+        vnf_chain_list = sorted(vnffg_chain.vnf_chain_path, key = lambda x: x.order)
+        for vnf in vnf_chain_list:
+            sfc_sf_dict = {}
+            sfc_sf_dict['name'] = vnf.service_function_type
+            sfc_sf_dict['type'] = 'service-function-type:{}'.format(vnf.service_function_type)
+            sfc_sf_dict['order'] = vnf.order
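+            # Note that each chain entry references a service-function *type*
+            # rather than a specific SF instance; ODL binds concrete instances
+            # of each type when the rendered service path is created.
+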
sfc_dict['sfc-service-function'].append(sfc_sf_dict) + sfc_json['service-function-chain'] = sfc_dict + sfc_data = json.dumps(sfc_json) + sfc_url = self._get_rest_url(account,self._service_function_chain_path.format(vnffg_chain.name)) + print(sfc_url) + print(sfc_data) + r=requests.put(sfc_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfc_data) + r.raise_for_status() + + def _create_sfp(self,account,vnffg_chain, sym_chain=False): + "Create SFP" + sfp_json = {} + sfp_dict = {} + sfp_dict['name'] = vnffg_chain.name + sfp_dict['service-chain-name'] = vnffg_chain.name + sfp_dict['symmetric'] = sym_chain + + sfp_json['service-function-path'] = sfp_dict + sfp_data = json.dumps(sfp_json) + sfp_url = self._get_rest_url(account,self._sfp_path.format(vnffg_chain.name)) + print(sfp_url) + print(sfp_data) + r=requests.put(sfp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfp_data) + r.raise_for_status() + + def _create_rsp(self,account,vnffg_chain_name, sym_chain=True): + "Create RSP" + rsp_json = {} + rsp_input = {} + rsp_json['input'] = {} + rsp_input['name'] = vnffg_chain_name + rsp_input['parent-service-function-path'] = vnffg_chain_name + rsp_input['symmetric'] = sym_chain + + rsp_json['input'] = rsp_input + rsp_data = json.dumps(rsp_json) + self._rsp_data = rsp_json + rsp_url = self._get_rest_url(account,self._create_rsp_path) + print(rsp_url) + print(rsp_data) + r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data) + r.raise_for_status() + print(r.json()) + output_json = r.json() + return output_json['output']['name'] + + def _get_sff_list_for_chain(self, account,sf_dp_list): + """ + Get List of all SFF that needs to be created based on VNFs included in VNFFG chain. 
+ """ + + sff_list = {} + if sf_dp_list is None: + logger.error("VM List for vnffg chain is empty while trying to get SFF list") + url = self._network_topology_rest_url(account) + r=requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + nw_topo = r.json() + + for topo in nw_topo['network-topology']['topology']: + if ('node' in topo and len(topo['node']) > 0): + for node in topo['node']: + if ('termination-point' in node and len(node['termination-point']) > 0): + for term_point in node['termination-point']: + if 'ovsdb:interface-external-ids' in term_point: + vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id'] + if len(vm_id) == 0: + continue + vmid = vm_id[0]['external-id-value'] + intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id'] + if len(intf_id) == 0: + continue + intfid = intf_id[0]['external-id-value'] + if intfid not in sf_dp_list.keys(): + continue + if sf_dp_list[intfid].vm_id != vmid: + logger.error("Intf ID %s is not present in VM %s", intfid, vmid) + continue + + if 'ovsdb:managed-by' in node: + rr=re.search('network-topology:node-id=\'([-\w\:\/]*)\'',node['ovsdb:managed-by']) + node_id = rr.group(1) + ovsdb_node = [node for node in topo['node'] if node['node-id'] == node_id] + if ovsdb_node: + if 'ovsdb:connection-info' in ovsdb_node[0]: + sff_ip = ovsdb_node[0]['ovsdb:connection-info']['local-ip'] + sff_br_name = node['ovsdb:bridge-name'] + sff_br_uuid = node['ovsdb:bridge-uuid'] + sff_br_ip = sff_ip + + if 'ovsdb:openvswitch-other-configs' in ovsdb_node[0]: + for other_key in ovsdb_node[0]['ovsdb:openvswitch-other-configs']: + if other_key['other-config-key'] == 'local_ip': + local_ip_str = other_key['other-config-value'] + sff_br_ip = local_ip_str.split(',')[0] + break + + if sff_br_uuid in sff_list: + sff_list[sff_br_uuid].add_sf_dp_to_sff(sf_dp_list[intfid]) + sf_dp_list[intfid]._update_sff_name(sff_list[sff_br_uuid].name) + else: + sff_list[sff_br_uuid] = Sff(sff_br_uuid,sff_br_name, sff_ip,sff_br_ip) + sff_list[sff_br_uuid].add_sf_dp_to_sff(sf_dp_list[intfid]) + sf_dp_list[intfid]._update_sff_name(sff_list[sff_br_uuid].name) + return sff_list + + + def _get_sf_dp_list_for_chain(self,account,vnffg_chain): + """ + Get list of all Service Function Data Plane Locators present in VNFFG + useful for easy reference while creating SF and SFF + """ + sfdp_list = {} + for vnf in vnffg_chain.vnf_chain_path: + for vnfr in vnf.vnfr_ids: + for vdu in vnfr.vdu_list: + sfdp = SfDpLocator(vdu.port_id,vnfr.vnfr_name, vdu.vm_id) + sfdp._update_vnf_params(vnf.service_function_type, vdu.address, vdu.port, vnf.transport_type) + sfdp_list[vdu.port_id] = sfdp + return sfdp_list + + def create_sfc(self, account, vnffg_chain): + "Create SFC chain" + + sff_list = {} + sf_dp_list = {} + sf_dp_list = self._get_sf_dp_list_for_chain(account,vnffg_chain) + + # Get the list of all SFFs required for vnffg chain + sff_list = self._get_sff_list_for_chain(account,sf_dp_list) + + #for name,sff in sff_list.items(): + # print(name, sff) + + #Create all the SF in VNFFG chain + self._create_sf(account,vnffg_chain,sf_dp_list) + + for _,sff in sff_list.items(): + self._create_sff(account,vnffg_chain,sff) + + self._create_sfc(account,vnffg_chain) + + self._create_sfp(account,vnffg_chain) + + ## Update to SFF could have deleted some RSP; so get list of SFP and + ## check RSP exists for same and create any as necessary + #rsp_name = self._create_rsp(account,vnffg_chain) 
+ #return rsp_name + self._create_all_rsps(account) + return vnffg_chain.name + + def _create_all_rsps(self,account): + """ + Create all the RSPs for SFP found + """ + sfps_url = self._get_rest_url(account,self._sfps_path) + r=requests.get(sfps_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + r.raise_for_status() + sfps_json = r.json() + if 'service-function-path' in sfps_json['service-function-paths']: + for sfp in sfps_json['service-function-paths']['service-function-path']: + rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfp['name'])) + r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}) + if r.status_code == 404: + # Create the RSP + logger.info("Creating RSP for Service Path with name %s",sfp['name']) + self._create_rsp(account,sfp['name']) + + def delete_all_sf(self, account): + "Delete all the SFs" + sf_url = self._get_rest_url(account,self._service_functions_path) + print(sf_url) + r=requests.delete(sf_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + + def delete_all_sff(self, account): + "Delete all the SFFs" + sff_url = self._get_rest_url(account,self._service_function_forwarders_path) + print(sff_url) + r=requests.delete(sff_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def delete_all_sfc(self, account): + "Delete all the SFCs" + sfc_url = self._get_rest_url(account,self._service_function_chains_path) + print(sfc_url) + r=requests.delete(sfc_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def delete_all_sfp(self, account): + "Delete all the SFPs" + sfp_url = self._get_rest_url(account,self._sfps_path) + print(sfp_url) + r=requests.delete(sfp_url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + + def delete_all_rsp(self, account): + "Delete all the RSP" + #rsp_list = self.get_rsp_list(account) + url = self._get_rest_url(account,self._get_rsp_paths) + print(url) + r = requests.get(url,auth=(account.odl.username,account.odl.password)) + r.raise_for_status() + print(r.json()) + rsp_list = r.json() + + #for vnffg in rsp_list.vnffg_rendered_path: + for sfc_rsp in rsp_list['rendered-service-paths']['rendered-service-path']: + rsp_json = {} + rsp_input = {} + rsp_json['input'] = {} + rsp_input['name'] = sfc_rsp['name'] + + rsp_json['input'] = rsp_input + rsp_data = json.dumps(rsp_json) + self._rsp_data = rsp_json + rsp_url = self._get_rest_url(account,self._delete_rsp_path) + print(rsp_url) + print(rsp_data) + + r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data) + r.raise_for_status() + print(r.json()) + #output_json = r.json() + #return output_json['output']['name'] + + def terminate_all_sfc(self, account): + "Terminate SFC chain" + self.delete_all_rsp(account) + self.delete_all_sfp(account) + self.delete_all_sfc(account) + self.delete_all_sff(account) + self.delete_all_sf(account) + + + def _fill_rsp_list(self,sfc_rsp_list,sff_list): + vnffg_rsps = RwsdnYang.VNFFGRenderedPaths() + for sfc_rsp in sfc_rsp_list['rendered-service-paths']['rendered-service-path']: + rsp = vnffg_rsps.vnffg_rendered_path.add() + rsp.name = sfc_rsp['name'] + rsp.path_id = sfc_rsp['path-id'] + for sfc_rsp_hop in sfc_rsp['rendered-service-path-hop']: + rsp_hop = rsp.rendered_path_hop.add() + rsp_hop.hop_number = sfc_rsp_hop['hop-number'] + rsp_hop.service_index = 
sfc_rsp_hop['service-index']
+                rsp_hop.vnfr_name = sfc_rsp_hop['service-function-name']
+                rsp_hop.service_function_forwarder.name = sfc_rsp_hop['service-function-forwarder']
+                for sff in sff_list['service-function-forwarders']['service-function-forwarder']:
+                    if sff['name'] == rsp_hop.service_function_forwarder.name:
+                        rsp_hop.service_function_forwarder.ip_address = sff['sff-data-plane-locator'][0]['data-plane-locator']['ip']
+                        rsp_hop.service_function_forwarder.port = sff['sff-data-plane-locator'][0]['data-plane-locator']['port']
+                        break
+        return vnffg_rsps
+
+
+    def get_rsp_list(self,account):
+        "Get RSP list"
+
+        sff_url = self._get_rest_url(account,self._service_function_forwarders_path)
+        print(sff_url)
+        r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        r.raise_for_status()
+        sff_list = r.json()
+
+        url = self._get_rest_url(account,self._get_rsp_paths)
+        print(url)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        print(r.json())
+        return self._fill_rsp_list(r.json(),sff_list)
+
+    def create_sfc_classifier(self, account, sfc_classifiers):
+        "Create SFC Classifiers"
+        self._add_acl_rules(account, sfc_classifiers)
+        self._create_sf_classifier(account, sfc_classifiers)
+        return sfc_classifiers.name
+
+    def terminate_sfc_classifier(self, account, sfc_classifier_name):
+        "Terminate SFC Classifiers"
+        self._terminate_sf_classifier(account, sfc_classifier_name)
+        self._del_acl_rules(account, sfc_classifier_name)
+
+    def _del_acl_rules(self,account,sfc_classifier_name):
+        " Delete the ACL rules for the classifier"
+        acl_url = self._get_rest_url(account,self._access_lists_path)
+        print(acl_url)
+        r=requests.delete(acl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def _terminate_sf_classifier(self,account,sfc_classifier_name):
+        " Terminate SF classifiers"
+        sfcl_url = self._get_rest_url(account,self._service_function_classifiers_path)
+        print(sfcl_url)
+        r=requests.delete(sfcl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def _create_sf_classifier(self,account,sfc_classifiers):
+        " Create SF classifiers"
+        sf_classifier_json = {}
+        sf_classifier_dict = {}
+        sf_classifier_dict['name'] = sfc_classifiers.name
+        sf_classifier_dict['access-list'] = sfc_classifiers.name
+        sf_classifier_dict['scl-service-function-forwarder'] = list()
+        scl_sff = {}
+        scl_sff_name = ''
+
+        if sfc_classifiers.has_field('port_id') and sfc_classifiers.has_field('vm_id'):
+            sf_dp = SfDpLocator(sfc_classifiers.port_id,'', sfc_classifiers.vm_id)
+            sf_dp_list= {}
+            sf_dp_list[sfc_classifiers.port_id] = sf_dp
+            self._get_sff_list_for_chain(account,sf_dp_list)
+
+            if sf_dp.sff_name is None:
+                logger.error("SFF not found for port %s, VM: %s",sfc_classifiers.port_id,sfc_classifiers.vm_id)
+            else:
+                logger.info("SFF with name %s found for port %s, VM: %s",sf_dp.sff_name, sfc_classifiers.port_id,sfc_classifiers.vm_id)
+                scl_sff_name = sf_dp.sff_name
+        else:
+            rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfc_classifiers.rsp_name))
+            r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+            if r.status_code == 200:
+                rsp_data = r.json()
+                if 'rendered-service-path' in rsp_data and len(rsp_data['rendered-service-path'][0]['rendered-service-path-hop']) > 0:
+                    scl_sff_name = rsp_data['rendered-service-path'][0]['rendered-service-path-hop'][0]['service-function-forwarder']
+
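+        # At this point scl_sff_name names the classifier's attachment SFF,
+        # resolved either from the port/VM topology lookup above or from the
+        # first hop of the rendered service path.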
+        logger.debug("SFF for classifier %s found is %s",sfc_classifiers.name, scl_sff_name)
+        scl_sff['name'] = scl_sff_name
+        #scl_sff['interface'] = sff_intf_name
+        sf_classifier_dict['scl-service-function-forwarder'].append(scl_sff)
+
+        sf_classifier_json['service-function-classifier'] = sf_classifier_dict
+
+        sfcl_data = json.dumps(sf_classifier_json)
+        sfcl_url = self._get_rest_url(account,self._service_function_classifier_path.format(sfc_classifiers.name))
+        print(sfcl_url)
+        print(sfcl_data)
+        r=requests.put(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfcl_data)
+        r.raise_for_status()
+
+    def _add_acl_rules(self, account,sfc_classifiers):
+        "Create ACL rules"
+        access_list_json = {}
+        access_list_dict = {}
+        acl_entry_list = list()
+        acl_list_dict = {}
+        for acl_rule in sfc_classifiers.match_attributes:
+            acl_entry = {}
+            acl_entry['rule-name'] = acl_rule.name
+            acl_entry['actions'] = {}
+            #acl_entry['actions']['netvirt-sfc-acl:rsp-name'] = sfc_classifiers.rsp_name
+            acl_entry['actions']['service-function-acl:rendered-service-path'] = sfc_classifiers.rsp_name
+
+            matches = {}
+            for field, value in acl_rule.as_dict().items():
+                if field == 'ip_proto':
+                    matches['protocol'] = value
+                elif field == 'source_ip_address':
+                    matches['source-ipv4-network'] = value
+                elif field == 'destination_ip_address':
+                    matches['destination-ipv4-network'] = value
+                elif field == 'source_port':
+                    matches['source-port-range'] = {'lower-port':value, 'upper-port':value}
+                elif field == 'destination_port':
+                    matches['destination-port-range'] = {'lower-port':value, 'upper-port':value}
+            acl_entry['matches'] = matches
+            acl_entry_list.append(acl_entry)
+        acl_list_dict['ace'] = acl_entry_list
+        access_list_dict['acl-name'] = sfc_classifiers.name
+        access_list_dict['access-list-entries'] = acl_list_dict
+        access_list_json['acl'] = access_list_dict
+
+        acl_data = json.dumps(access_list_json)
+        acl_url = self._get_rest_url(account,self._access_list_path.format(sfc_classifiers.name))
+        print(acl_url)
+        print(acl_data)
+        r=requests.put(acl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=acl_data)
+        r.raise_for_status()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt
new file mode 100644
index 0000000..726abde
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt
@@ -0,0 +1,8 @@
+
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn_sim rwsdn_sim.py)
diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py
new file mode 100644
index 0000000..74ed66e
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py
@@ -0,0 +1,95 @@
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import collections +import itertools +import logging +import os +import uuid +import time + +import ipaddress + +import gi +gi.require_version('RwTypes', '1.0') +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwSdn', '1.0') +from gi.repository import ( + GObject, + RwSdn, # Vala package + RwTypes, + RwsdnYang, + #IetfL2TopologyYang as l2Tl, + RwTopologyYang as RwTl, + ) + +import rw_status +import rwlogger + +from rift.topmgr.sdnsim import SdnSim + + +logger = logging.getLogger('rwsdn.sdnsim') + + +class UnknownAccountError(Exception): + pass + + +class MissingFileError(Exception): + pass + + +rwstatus = rw_status.rwstatus_from_exc_map({ + IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + UnknownAccountError: RwTypes.RwStatus.NOTFOUND, + MissingFileError: RwTypes.RwStatus.NOTFOUND, + }) + + +class SdnSimPlugin(GObject.Object, RwSdn.Topology): + + def __init__(self): + GObject.Object.__init__(self) + self.sdnsim = SdnSim() + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler( + rwlogger.RwLogger( + category="sdnsim", + log_hdl=rwlog_ctx, + ) + ) + + @rwstatus(ret_on_failure=[None]) + def do_get_network_list(self, account): + """ + Returns the list of discovered networks + + @param account - a SDN account + + """ + logger.debug('Get network list: ') + nwtop = self.sdnsim.get_network_list( account) + logger.debug('Done with get network list: %s', type(nwtop)) + return nwtop \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt new file mode 100644 index 0000000..0183de2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# + +## +# Parse the yang files +## + +include(rift_yang) +include(rift_cmdargs) + +set(source_yang_files rwsdn.yang) + +rift_add_yang_target( + TARGET rwsdn_yang + YANG_FILES ${source_yang_files} + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwschema_yang_gen + rwyang + rwlog + rwlog-mgmt_yang_gen +) + diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang new file mode 100644 index 0000000..9733eb6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.tailf.yang @@ -0,0 +1,17 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rwsdn-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rwsdn-annotation"; + prefix "rwsdn-ann"; + + import rwsdn { + prefix rwsdn; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang new file mode 100644 index 0000000..6994633 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang @@ -0,0 +1,303 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rwsdn +{ + namespace "http://riftio.com/ns/riftware-1.0/rwsdn"; + prefix "rwsdn"; + + import rw-base { + prefix rwbase; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-log { + prefix "rwlog"; + } + + import mano-types { + prefix "manotypes"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import ietf-yang-types { + prefix "yang"; + } + + + revision 2014-12-30 { + description + "Initial revision."; + reference + "RIFT RWSDN cloud data"; + } + + typedef sdn-account-type { + description "SDN account type"; + type enumeration { + enum odl; + enum mock; + enum sdnsim; + } + } + + grouping sdn-provider-auth { + leaf account-type { + type sdn-account-type; + } + + choice provider-specific-info { + container odl { + leaf username { + type string { + length "1..255"; + } + } + + leaf password { + type string { + length "1..32"; + } + } + + leaf url { + type string { + length "1..255"; + } + } + leaf plugin-name { + type string; + default "rwsdn_odl"; + } + } + container mock { + leaf username { + type string; + } + leaf plugin-name { + type string; + default "rwsdn_mock"; + } + } + + container sdnsim { + leaf username { + type string; + } + leaf plugin-name { + type string; + default "rwsdn_sim"; + } + } + } + } + + container sdn-accounts { + list sdn-account-list { + rwpb:msg-new SDNAccount; + key "name"; + + leaf name { + type string; + } + + uses sdn-provider-auth; + } + } + + container vnffgs { + list vnffg-chain { + key "name"; + rwpb:msg-new VNFFGChain; + + leaf name { + type string; + } + + list vnf-chain-path { + key "order"; + leaf order { + type uint32; + description " Order of the VNF in VNFFG chain"; + } + leaf service-function-type { + type string; + } + leaf nsh-aware { + type boolean; + } + leaf transport-type { + type string; + } + list vnfr-ids { + key "vnfr-id"; + leaf vnfr-id { + type yang:uuid; + } + leaf vnfr-name { + type string; + } + leaf mgmt-address { + type inet:ip-address; + } + leaf mgmt-port { + type inet:port-number; + } + list vdu-list { + key "vm-id port-id"; + leaf port-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf vm-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf name { + type string; + } + leaf address { + type inet:ip-address; + } + leaf port { + type inet:port-number; + } + } + } + } + } + } 
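+  /*
+   * Illustrative instance of the vnffg-chain list defined in this model.
+   * This is a hypothetical sketch only: every name, ID and address below
+   * is invented for the example.
+   *
+   *   vnffgs {
+   *     vnffg-chain "fw-chain" {
+   *       vnf-chain-path 1 {
+   *         service-function-type "firewall";
+   *         nsh-aware true;
+   *         transport-type "vxlan-gpe";
+   *         vnfr-ids "0f6e6f0c-0000-0000-0000-000000000001" {
+   *           vnfr-name "fw-vnfr-1";
+   *           mgmt-address 10.66.202.5;
+   *           mgmt-port 8888;
+   *           vdu-list "vm-1" "port-1" {
+   *             address 31.31.31.5;
+   *             port 4790;
+   *           }
+   *         }
+   *       }
+   *     }
+   *   }
+   */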
+ + container vnffg-rendered-paths { + rwpb:msg-new VNFFGRenderedPaths; + list vnffg-rendered-path { + key "name"; + rwpb:msg-new VNFFGRenderedPath; + config false; + leaf name { + type string; + } + leaf path-id { + description + "Unique Identifier for the service path"; + type uint32; + } + list rendered-path-hop { + key "hop-number"; + leaf hop-number { + type uint8; + } + leaf service-index { + description + "Location within the service path"; + type uint8; + } + leaf vnfr-name { + type string; + } + container service-function-forwarder { + leaf name { + description + "Service Function Forwarder name"; + type string; + } + leaf ip-address { + description + "Service Function Forwarder Data Plane IP address"; + type inet:ip-address; + } + leaf port { + description + "Service Function Forwarder Data Plane port"; + type inet:port-number; + } + } + } + } + } + + + container vnffg-classifiers { + list vnffg-classifier { + key "name"; + rwpb:msg-new VNFFGClassifier; + + leaf name { + type string; + } + leaf rsp-name { + type string; + } + leaf port-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + leaf vm-id { + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + } + list match-attributes { + description + "List of match attributes."; + key "name"; + leaf name { + description + "Name for the Access list"; + type string; + } + + leaf ip-proto { + description + "IP Protocol."; + type uint8; + } + + leaf source-ip-address { + description + "Source IP address."; + type inet:ip-prefix; + } + + leaf destination-ip-address { + description + "Destination IP address."; + type inet:ip-prefix; + } + + leaf source-port { + description + "Source port number."; + type inet:port-number; + } + + leaf destination-port { + description + "Destination port number."; + type inet:port-number; + } + } //match-attributes + } + } + +} + +/* vim: set ts=2:sw=2: */ diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt new file mode 100644 index 0000000..06917d0 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/CMakeLists.txt @@ -0,0 +1,13 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +set(subdirs + rwve_vnfm_em + rwve_vnfm_vnf + rwos_ma_nfvo + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt new file mode 100644 index 0000000..5355ead --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt @@ -0,0 +1,52 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwos_ma_nfvo) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwOsMaNfvo-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0 + rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0 + + #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + GENERATE_HEADER_FILE ${VALA_NAME}.h + + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + #DEPENDS rwcal_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX . 
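+  # Ships the generated header, shared object, VAPI, GIR and typelib together
+  # so both C consumers and GObject-Introspection bindings can use the
+  # interface.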
+ ) + + +set(subdirs + rwos_ma_nfvo_rest + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala new file mode 100644 index 0000000..63e4601 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala @@ -0,0 +1,16 @@ +namespace RwOsMaNfvo { + + public interface Orchestrator: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Notify the EM of lifecycle event + */ + public abstract RwTypes.RwStatus ns_lifecycle_event(); + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt new file mode 100644 index 0000000..bf9c897 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwos_ma_nfvo_rest rwos_ma_nfvo_rest.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py new file mode 100644 index 0000000..dd48b8b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py @@ -0,0 +1,53 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
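+# Skeleton plugin for the Os-Ma-Nfvo reference point: do_init below only
+# attaches an RwLogger handler, and the ns_lifecycle_event operation declared
+# by the RwOsMaNfvo.Orchestrator interface is not implemented here yet.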
+ + +# +# + +import re +import logging +import rw_status +import rwlogger +import subprocess, os + +import gi +gi.require_version('RwOsMaNfvo', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + GObject, + RwOsMaNfvo, + RwTypes) + +logger = logging.getLogger('rwos-ma-nfvo-rest') + + +rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}) + +class RwOsMaNfvoRestPlugin(GObject.Object, RwOsMaNfvo.Orchestrator): + """This class implements the Ve-Vnfm VALA methods.""" + + def __init__(self): + GObject.Object.__init__(self) + + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler(rwlogger.RwLogger(category="rwos-ma-nfvo-rest", + log_hdl=rwlog_ctx,)) + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt new file mode 100644 index 0000000..517f480 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt @@ -0,0 +1,52 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Anil Gunturu +# Creation Date: 10/31/2015 +# + +## +# Allow specific compiler warnings +## +rift_allow_compiler_warning(unused-but-set-variable) + +set(VALA_NAME rwve_vnfm_em) +set(VALA_FILES ${VALA_NAME}.vala) +set(VALA_VERSION 1.0) +set(VALA_RELEASE 1) +set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION}) +set(VALA_TYPELIB_PREFIX RwVeVnfmEm-${VALA_VERSION}) + +rift_add_vala( + ${VALA_LONG_NAME} + VALA_FILES ${VALA_FILES} + VALA_PACKAGES + rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0 + rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0 + + #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang + GENERATE_HEADER_FILE ${VALA_NAME}.h + + GENERATE_SO_FILE lib${VALA_LONG_NAME}.so + GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi + GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir + GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib + #DEPENDS rwcal_yang rwlog_gi rwschema_yang + ) + +rift_install_vala_artifacts( + HEADER_FILES ${VALA_NAME}.h + SO_FILES lib${VALA_LONG_NAME}.so + VAPI_FILES ${VALA_LONG_NAME}.vapi + GIR_FILES ${VALA_TYPELIB_PREFIX}.gir + TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib + COMPONENT ${PKG_LONG_NAME} + DEST_PREFIX . 
+ ) + + +set(subdirs + rwve_vnfm_em_rest + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala new file mode 100644 index 0000000..3da25f9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala @@ -0,0 +1,16 @@ +namespace RwVeVnfmEm { + + public interface ElementManager: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Notify the EM of lifecycle event + */ + public abstract RwTypes.RwStatus vnf_lifecycle_event(); + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt new file mode 100644 index 0000000..58f5d7f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwve_vnfm_em_rest rwve_vnfm_em_rest.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py new file mode 100644 index 0000000..50704a6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py @@ -0,0 +1,56 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
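+# Skeleton plugin for the Ve-Vnfm (element manager) reference point: do_init
+# wires the Python logger into the RIFT logging subsystem, and
+# do_vnf_lifecycle_event is currently a no-op placeholder.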
+
+
+#
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess, os
+
+import gi
+gi.require_version('RwVeVnfmEm', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwVeVnfmEm,
+    RwTypes)
+
+logger = logging.getLogger('rw_ve_vnfm_em.rest')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwVeVnfmEmRestPlugin(GObject.Object, RwVeVnfmEm.ElementManager):
+    """This class implements the Ve-Vnfm VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(category="rwve-vnfm-em-rest",
+                                                log_hdl=rwlog_ctx,))
+
+    @rwstatus
+    def do_vnf_lifecycle_event(self):
+        pass
+
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt
new file mode 100644
index 0000000..0b0082b
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt
@@ -0,0 +1,52 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+#
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwve_vnfm_vnf)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwVeVnfmVnf-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+ ) + + +set(subdirs + rwve_vnfm_vnf_rest + ) +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala new file mode 100644 index 0000000..6b5e84e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala @@ -0,0 +1,16 @@ +namespace RwVeVnfmVnf { + + public interface Vnf: GLib.Object { + /* + * Init routine + */ + public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx); + + /* + * Notify the EM of lifecycle event + */ + public abstract RwTypes.RwStatus get_monitoring_param(); + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt new file mode 100644 index 0000000..2d1ca9e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt @@ -0,0 +1,8 @@ + +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# + +include(rift_plugin) + +rift_install_python_plugin(rwve_vnfm_vnf_rest rwve_vnfm_vnf_rest.py) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py new file mode 100644 index 0000000..ea56ad7 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py @@ -0,0 +1,56 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
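+# Skeleton plugin for the VNF side of the Ve-Vnfm reference point: apart from
+# the logger setup in do_init, do_get_monitoring_param is a no-op stub.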
+ + +# +# + +import re +import logging +import rw_status +import rwlogger +import subprocess, os + +import gi +gi.require_version('RwVeVnfmVnf', '1.0') +gi.require_version('RwTypes', '1.0') +from gi.repository import ( + GObject, + RwVeVnfmVnf, + RwTypes) + +logger = logging.getLogger('rwve-vnfm-vnf-rest') + + +rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND, + KeyError: RwTypes.RwStatus.NOTFOUND, + NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}) + +class RwVeVnfmVnfRestPlugin(GObject.Object, RwVeVnfmVnf.Vnf): + """This class implements the Ve-Vnfm VALA methods.""" + + def __init__(self): + GObject.Object.__init__(self) + + @rwstatus + def do_init(self, rwlog_ctx): + if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers): + logger.addHandler(rwlogger.RwLogger(category="rwve-vnfm-vnf-rest", + log_hdl=rwlog_ctx,)) + + @rwstatus + def do_get_monitoring_param(self): + pass + \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt b/modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..ba7166b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/CMakeLists.txt @@ -0,0 +1,36 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tom Seidenberg +# Creation Date: 2014/04/08 +# + +set(source_yang_files + rw-iwp.yang + rw-launchpad-log.yang + rw-launchpad.yang + rw-monitor.yang + rw-nsm.yang + rw-resource-mgr.yang + rw-vnfm.yang + rw-vns.yang + ) +## +# Yang targets +## +rift_add_yang_target( + TARGET rwlaunchpad_yang + YANG_FILES ${source_yang_files} + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + mano_yang_gen + rwcloud_yang_gen + rw_conman_yang_gen + rwconfig_agent_yang_gen + DEPENDS + mano_yang + rwcloud_yang + rw_conman_yang + rwconfig_agent_yang +) + diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/Makefile b/modules/core/mano/rwlaunchpad/plugins/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang new file mode 100644 index 0000000..df8df03 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.tailf.yang @@ -0,0 +1,30 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-iwp-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-iwp-annotation"; + prefix "rw-iwp-ann"; + + import rw-iwp + { + prefix rw-iwp; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:vm-pool/rw-iwp:resources/rw-iwp:is_reserved" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-iwp:resource-mgr/rw-iwp:pools/rw-iwp:network-pool/rw-iwp:resources/rw-iwp:is_reserved" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang new file mode 100755 index 0000000..2ed608d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-iwp.yang @@ -0,0 +1,184 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-iwp.yang + * @author Austin Cormier + * @date 2015/09/21 + * @brief Intelligent Workload Placement Yang + */ + +module rw-iwp +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-iwp"; + prefix "rw-iwp"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-09-21 { + description + "Initial revision."; + } + + container resource-mgr { + rwpb:msg-new ResourceManagerConfig; + container mission-control { + leaf mgmt-ip { + type string; + } + } + + container mgmt-domain { + leaf name { + description "The mgmt domain name this launchpad is associated with."; + type string; + } + } + + container pools { + rwpb:msg-new ResourcePools; + description "Pools configured for this mgmt domain."; + + list vm-pool { + rwpb:msg-new VmResourcePool; + key "name"; + leaf name { + type string; + } + list resources { + rwpb:msg-new VmPoolResource; + key "vm-id"; + + leaf vm-id { + rwpb:field-string-max 64; + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + default false; + config false; + } + } + } + + list network-pool { + rwpb:msg-new NetworkResourcePool; + key "name"; + leaf name { + type string; + } + list resources { + rwpb:msg-new NetworkPoolResource; + key "network-id"; + + leaf network-id { + rwpb:field-string-max 64; + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + default false; + config false; + } + } + } + } + + container network-request { + config false; + + list requests { + rwpb:msg-new NetworkRequest; + key "request-id"; + leaf request-id { + description "Identifier for the Network Request"; + type yang:uuid; + } + container network-response { + rwpb:msg-new NetworkResponse; + leaf network-id { + description "Allocated network id"; + type string; + } + leaf network-pool { + 
description "Pool that network resource was allocated from"; + type string; + } + } + } + } + + container vm-request { + config false; + + list requests { + rwpb:msg-new VMRequest; + key "request-id"; + leaf request-id { + description "Identifier for the VM Request"; + type yang:uuid; + } + + uses manotypes:vm-flavor; + uses manotypes:guest-epa; + uses manotypes:vswitch-epa; + uses manotypes:hypervisor-epa; + uses manotypes:host-epa; + + leaf image { + description "File/URL path to the software image"; + type string; + } + + container vm-response { + rwpb:msg-new VMResponse; + leaf vm-id { + description "Allocated VM id"; + type string; + } + leaf vm-ip { + description "Management IP Address of the VM"; + type string; + } + leaf vm-pool { + description "Pool that vm resource was allocated from"; + type string; + } + } + } + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang new file mode 100755 index 0000000..2fd7197 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad-log.yang @@ -0,0 +1,47 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + +/**0 + * @file rw-launchpad-log.yang + * @author Rift.IO + * @date 03/02/2015 + * @brief RiftWare Log Event Definitions for rw-launchpad logging + */ + +module rw-launchpad-log +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad-log"; + prefix "rwlaunchpadlog"; + + import rw-base { + prefix rwbase; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-notify-ext { + prefix "rwnotify"; + } + + import rw-log { + prefix "rwlog"; + } + + revision 2014-12-30 { + description + "Initial revision."; + reference + "RIFT Launchpad Logging"; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang new file mode 100644 index 0000000..74350e6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang @@ -0,0 +1,25 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-launchpad-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad-annotation"; + prefix "rw-launchpad-ann"; + + import rw-launchpad { + prefix rw-launchpad; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-launchpad:datacenters" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang new file mode 100755 index 0000000..efc48db --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-launchpad.yang @@ -0,0 +1,131 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + + + +/** + * @file rw-launchpad.yang + * @author Joshua Downer + * @date 2015/09/14 + * @brief Launchpad Yang + */ + +module rw-launchpad +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad"; + prefix "rw-launchpad"; + + import ietf-yang-types { + prefix "yang"; + } + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import vnfd { + prefix "vnfd"; + } + + import vld { + prefix "vld"; + } + + import nsd { + prefix "nsd"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import rw-nsr { + prefix "rw-nsr"; + } + + 
import rw-conman { + prefix "rw-conman"; + } + + import rw-config-agent { + prefix "rw-config-agent"; + } + + + revision 2015-09-14 { + description + "Initial revision."; + } + + container datacenters { + description "OpenMano data centers"; + + rwpb:msg-new DataCenters; + config false; + + list cloud-accounts { + description + "A list of OpenMano cloud accounts that have data centers associated + with them"; + + rwpb:msg-new CloudAccount; + key "name"; + + leaf name { + description "The name of the cloud account"; + type leafref { + path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name"; + } + } + + list datacenters { + rwpb:msg-new DataCenter; + leaf uuid { + description "The UUID of the data center"; + type yang:uuid; + } + + leaf name { + description "The name of the data center"; + type string; + } + } + } + } + + container launchpad-config { + leaf operational-mode { + description + "The mode in which this launchpad is running + STANDALONE : This launchpad was started in the standalone mode. + MC_MANAGED : This lauchpad is managed by mission control. + "; + type enumeration { + enum STANDALONE { + value 1; + } + enum MC_MANAGED { + value 2; + } + } + default STANDALONE; + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang new file mode 100644 index 0000000..7c68c50 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.tailf.yang @@ -0,0 +1,21 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-monitor-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-monitor-annotation"; + prefix "rw-monitor-ann"; + + import rw-monitor { + prefix rw-monitor; + } + + import tailf-common { + prefix tailf; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang new file mode 100755 index 0000000..bea73a6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-monitor.yang @@ -0,0 +1,62 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-monitor.yang + * @author Joshua Downer + * @date 2015/10/30 + * @brief NFVI Monitor + */ + +module rw-monitor +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-monitor"; + prefix "rw-monitor"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import rw-nsr { + prefix "rw-nsr"; + } + + import rwcal { + prefix "rwcal"; + } + + import vnfr { + prefix "vnfr"; + } + + import nsr { + prefix "nsr"; + } + + import ietf-yang-types { + prefix "yang"; + } + + revision 2015-10-30 { + description + "Initial revision."; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang new file mode 100755 index 0000000..fe06c3e --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-nsm.yang @@ -0,0 +1,121 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-nsm.yang + * @author Rajesh Velandy + * @date 2015/10/07 + * @brief NSM yang + */ + +module rw-nsm +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-nsm"; + prefix "rw-nsm"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import ietf-inet-types { + prefix "inet"; + } + + import rw-nsd { + prefix 
"rw-nsd"; + } + import nsd { + prefix "nsd"; + } + import rw-nsr { + prefix "rw-nsr"; + } + import vld { + prefix "vld"; + } + import rw-vlr { + prefix "rw-vlr"; + } + import rw-vns { + prefix "rw-vns"; + } + import rw-vnfd { + prefix "rw-vnfd"; + } + import vnfd { + prefix "vnfd"; + } + import rw-vnfr { + prefix "rw-vnfr"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-launchpad { + prefix "rw-launchpad"; + } + + import rw-cloud { + prefix "rw-cloud"; + } + + import rw-sdn { + prefix "rw-sdn"; + } + + import rw-config-agent { + prefix "rw-config-agent"; + } + + revision 2015-10-07 { + description + "Initial revision."; + } + + grouping cm-endpoint { + leaf cm-ip-address { + type inet:ip-address; + description "IP Address"; + default "127.0.0.1"; + } + leaf cm-port { + type inet:port-number; + description "Port Number"; + default 2022; + } + leaf cm-username { + description "RO endpoint username"; + type string; + default "admin"; + } + leaf cm-password { + description "RO endpoint password"; + type string; + default "admin"; + } + } + + container ro-config { + description "Resource Orchestrator endpoint ip address"; + rwpb:msg-new "roConfig"; + rwcli:new-mode "ro-config"; + + container cm-endpoint { + description "Service Orchestrator endpoint ip address"; + rwpb:msg-new "SoEndpoint"; + uses cm-endpoint; + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang new file mode 100644 index 0000000..339bed4 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang @@ -0,0 +1,30 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-resource-mgr-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr-annotation"; + prefix "rw-resource-mgr-ann"; + + import rw-resource-mgr + { + prefix rw-resource-mgr; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-resource-mgr:resource-pool-records" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-resource-mgr:resource-mgmt" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang new file mode 100755 index 0000000..7bc65b8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-resource-mgr.yang @@ -0,0 +1,293 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +module rw-resource-mgr +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr"; + prefix "rw-resource-mgr"; + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-cloud { + prefix "rwcloud"; + } + + import rwcal { + prefix "rwcal"; + } + + import ietf-yang-types { + prefix "yang"; + } + + import mano-types { + prefix "manotypes"; + } + + revision 2015-10-16 { + description + "Initial revision."; + } + + grouping resource-pool-info { + leaf name { + description "Name of the resource pool"; + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + //mandatory true; + } + + leaf resource-type { + description "Type of resource"; + type enumeration { + enum compute; + enum network; + } + } + + leaf pool-type { + description "Type of pool"; + type enumeration { + enum static; + enum dynamic; + } + default "static"; + } + + leaf max-size { + description "Maximum size to 
which a dynamic resource pool can grow"; + type uint32; + } + + } + + container resource-mgr-config { + description "Data model for configuration of resource-mgr"; + rwpb:msg-new ResourceManagerConfig; + config true; + + container management-domain { + leaf name { + description "The management domain name this launchpad is associated with."; + rwpb:field-inline "true"; + rwpb:field-string-max 64; + type string; + //mandatory true; + } + } + + container resource-pools { + description "Resource Pool configuration"; + rwpb:msg-new ResourcePools; + list cloud-account { + key "name"; + leaf name { + description + "Resource pool for the configured cloud account"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + } + } + } + + grouping resource-state { + leaf resource-state { + type enumeration { + enum inactive; + enum active; + enum pending; + enum failed; + } + } + } + + container resource-mgmt { + description "Resource management "; + config false; + + container vdu-event { + description "Events for VDU Management"; + rwpb:msg-new VDUEvent; + + list vdu-event-data { + rwpb:msg-new VDUEventData; + key "event-id"; + + leaf event-id { + description "Identifier associated with the VDU transaction"; + type yang:uuid; + } + + leaf cloud-account { + description "The cloud account to use for this resource request"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + container request-info { + description "Information about required resource"; + + uses rwcal:vdu-create-params; + } + + container resource-info { + description "Information about allocated resource"; + leaf pool-name { + type string; + } + uses resource-state; + uses rwcal:vdu-info-params; + } + } + } + + container vlink-event { + description "Events for Virtual Link management"; + rwpb:msg-new VirtualLinkEvent; + + list vlink-event-data { + rwpb:msg-new VirtualLinkEventData; + + key "event-id"; + + leaf event-id { + description "Identifier associated with the Virtual Link transaction"; + type yang:uuid; + } + + leaf cloud-account { + description "The cloud account to use for this resource request"; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + container request-info { + description "Information about required resource"; + + uses rwcal:virtual-link-create-params; + } + + container resource-info { + leaf pool-name { + type string; + } + uses resource-state; + uses rwcal:virtual-link-info-params; + } + } + } + } + + + container resource-pool-records { + description "Resource Pool Records"; + rwpb:msg-new ResourcePoolRecords; + config false; + + list cloud-account { + key "name"; + leaf name { + description + "The configured cloud account's pool records."; + type leafref { + path "/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + list records { + rwpb:msg-new ResourceRecordInfo; + key "name"; + uses resource-pool-info; + + leaf pool-status { + type enumeration { + enum unknown; + enum locked; + enum unlocked; + } + } + + leaf total-resources { + type uint32; + } + + leaf free-resources { + type uint32; + } + + leaf allocated-resources { + type uint32; + } + } + } + } + + + container resource-mgr-data{ + description "Resource Manager operational data"; + config false; + + container pool-record { + description "Resource Pool record"; + + list cloud { + key "name"; + max-elements 16; + rwpb:msg-new "ResmgrCloudPoolRecords"; + leaf name { + description + "The configured cloud account's pool records."; + type leafref { + path 
"/rwcloud:cloud/rwcloud:account/rwcloud:name"; + } + } + + list records { + key "name"; + uses resource-pool-info; + + list free-vdu-list { + key vdu-id; + uses rwcal:vdu-info-params; + } + + list in-use-vdu-list { + key vdu-id; + uses rwcal:vdu-info-params; + } + + list free-vlink-list { + key virtual-link-id; + uses rwcal:virtual-link-info-params; + } + + list in-use-vlink-list { + key virtual-link-id; + uses rwcal:virtual-link-info-params; + } + } + } + } + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang new file mode 100755 index 0000000..e254b26 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vnfm.yang @@ -0,0 +1,66 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-vnfm.yang + * @author Rajesh Velandy + * @date 2015/10/07 + * @brief VNFM yang + */ + +module rw-vnfm +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vnfm"; + prefix "rw-vnfm"; + + import vld { + prefix "vld"; + } + + import vlr { + prefix "vlr"; + } + + import rw-vlr { + prefix "rw-vlr"; + } + + import rw-vns { + prefix "rw-vns"; + } + + import rw-vnfd { + prefix "rw-vnfd"; + } + + import rw-vnfr { + prefix "rw-vnfr"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rw-manifest { + prefix "rw-manifest"; + } + + import rw-resource-mgr { + prefix "rw-resource-mgr"; + } + + import rw-launchpad { + prefix "rw-launchpad"; + } + + revision 2015-10-07 { + description + "Initial revision."; + } +} diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang new file mode 100644 index 0000000..722a185 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.tailf.yang @@ -0,0 +1,51 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-vns-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vns"; + prefix "rw-vns-ann"; + + import tailf-common { + prefix tailf; + } + + import rw-base { + prefix rwbase; + } + + import ietf-network { + prefix nd; + } + + import ietf-network-topology { + prefix nt; + } + + import ietf-l2-topology { + prefix lt; + } + + tailf:annotate "/nd:network" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nt:link" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:node" { + tailf:callpoint base_show; + } + + tailf:annotate "/nd:network/nd:node/nt:termination-point" { + tailf:callpoint base_show; + } +} + + diff --git a/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang new file mode 100755 index 0000000..b8ecef8 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/plugins/yang/rw-vns.yang @@ -0,0 +1,88 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-vns.yang + * @author Austin Cormier + * @date 2015/10/06 + * @brief Virtual Network Service Yang + */ + +module rw-vns +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-vns"; + prefix "rw-vns"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import rwsdn { + prefix "rwsdn"; + } + + + import ietf-yang-types { + prefix "yang"; + } + + import rw-iwp { + prefix "rwiwp"; + } + + import rw-vlr { + prefix "rwvlr"; + } + + import vld { + prefix "vld"; + } + + import ietf-network 
{ + prefix "nw"; + } + + import ietf-network-topology { + prefix "nt"; + } + + import ietf-l2-topology { + prefix "l2t"; + } + + import rw-topology { + prefix "rw-topology"; + } + + import rw-resource-mgr { + prefix "rw-resource-mgr"; + } + + import rw-sdn { + prefix "rw-sdn"; + } + + revision 2015-10-05 { + description + "Initial revision."; + } +} diff --git a/modules/core/mano/rwlaunchpad/ra/CMakeLists.txt b/modules/core/mano/rwlaunchpad/ra/CMakeLists.txt new file mode 100644 index 0000000..81f567d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/CMakeLists.txt @@ -0,0 +1,51 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Paul Laidler +# Creation Date: 09/16/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS + pingpong_longevity_systest + pingpong_vnf_systest + pingpong_records_systest + pingpong_vnf_reload_systest + pingpong_lp_standalone_systest + DESTINATION usr/rift/systemtest/pingpong_vnf + COMPONENT ${PKG_LONG_NAME}) + +install( + PROGRAMS + launchpad_longevity_systest + DESTINATION usr/rift/systemtest/launchpad + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + pytest/test_launchpad_longevity.py + pytest/test_startstop.py + DESTINATION usr/rift/systemtest/pytest/mission_control/launchpad + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + pytest/conftest.py + pytest/test_pingpong_longevity.py + pytest/test_pingpong_vnf.py + pytest/test_records.py + DESTINATION usr/rift/systemtest/pytest/mission_control/pingpong_vnf + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + racfg/pingpong_vnf_systest_cloudsim.racfg + racfg/pingpong_vnf_systest_openstack.racfg + racfg/pingpong_records_systest_openstack.racfg + racfg/pingpong_vnf_reload_systest_openstack.racfg + racfg/pingpong_lp_standalone_systest_openstack.racfg + DESTINATION usr/rift/systemtest/pingpong_vnf + COMPONENT ${PKG_LONG_NAME}) + diff --git a/modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest b/modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest new file mode 100755 index 0000000..de00966 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/launchpad_longevity_systest @@ -0,0 +1,44 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -x -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/launchpad/test_launchpad_longevity.py" + +test_prefix="launchpad_longevity_systest" +test_cmd="" +repeat=10 +repeat_keyword="longevity" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? 
+ echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest new file mode 100755 index 0000000..687b62c --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_longevity_systest @@ -0,0 +1,31 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -x -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_longevity.py" + +test_prefix="pingpong_longevity_systest" +test_cmd="" +repeat_keyword="longevity" +repeat=10 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest new file mode 100755 index 0000000..8ebf68b --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_lp_standalone_systest @@ -0,0 +1,32 @@ +#!/bin/bash +# +# +# Author(s): Varun Prasad +# Creation Date: 19-Feb-2016 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -v -p no:cacheprovider \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +test_prefix="pingpong_lp_standalone" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Force standalone launchpad +lp_standalone=true + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} + diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_records_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_records_systest new file mode 100755 index 0000000..36aa4f9 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_records_systest @@ -0,0 +1,29 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/09/15 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh +restconf=true + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -v -p no:cacheprovider \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +test_prefix="pingpong_records_systest" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up 
conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest new file mode 100755 index 0000000..954d387 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_reload_systest @@ -0,0 +1,33 @@ +#!/bin/bash +# +# +# Author(s): Varun Prasad +# Creation Date: 2016/01/04 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper +SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'not Teardown or test_stop_launchpad' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +REBOOT_SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or test_wait_for_pingpong_started or Teardown' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_records.py" + +test_prefix="pingpong_vnf_reload_systest" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest new file mode 100755 index 0000000..91635a1 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pingpong_vnf_systest @@ -0,0 +1,28 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/09/15 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +# Helper script for invoking the mission control system test using the systest_wrapper + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/pingpong_vnf/test_pingpong_vnf.py" + +test_prefix="pingpong_vnf_systest" +test_cmd="" + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +eval ${test_cmd} diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/conftest.py b/modules/core/mano/rwlaunchpad/ra/pytest/conftest.py new file mode 100644 index 0000000..0d3b2bc --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/conftest.py @@ -0,0 +1,151 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
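+# A minimal usage sketch (hypothetical test, not part of this test suite):
+# pytest resolves the session fixtures defined below as a chain -- a test
+# only names the fixture it needs and mgmt_session, launchpad_session and
+# the per-module proxies are built on demand. The xpath here is an
+# assumption modeled on the "/rw-cloud:cloud/rw-cloud:account" leafrefs
+# used elsewhere in this submission.
+#
+#     def test_cloud_account_configured(launchpad_session, cloud_account_name):
+#         proxy = launchpad_session.proxy(RwMcYang)
+#         account = proxy.get("/cloud/account[name='%s']" % cloud_account_name)
+#         assert account is not None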
+
+
+#
+#
+
+import pytest
+import os
+
+import gi
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.vcs.vcs
+
+gi.require_version('RwMcYang', '1.0')
+from gi.repository import RwMcYang
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    # Raise a PackageError; the package-file fixtures below call this helper
+    # when an expected package archive is missing from the install area.
+    raise PackageError("unable to find required package file")
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account_name(request):
+    '''fixture which returns the name used to identify the cloud account'''
+    return 'cloud-0'
+
+@pytest.fixture(autouse=True)
+def mc_only(request, standalone_launchpad):
+    """Fixture to skip any tests that need to be run only when a MC is used,
+    and not in lp standalone mode.
+
+    Arguments:
+        request - pytest request fixture
+        standalone_launchpad - indicates if the launchpad is running standalone
+    """
+    if request.node.get_marker('mc_only'):
+        if standalone_launchpad:
+            pytest.skip('Test marked skip for launchpad standalone mode')
+
+
+@pytest.fixture(scope='session')
+def launchpad_session(mgmt_session, mgmt_domain_name, session_type, standalone_launchpad, use_https):
+    '''Fixture containing a rift.auto.session connected to the launchpad
+
+    Arguments:
+        mgmt_session - session connected to the mission control instance
+                       (or launchpad in the case of a standalone session)
+        mgmt_domain_name - name of the mgmt_domain being used
+        session_type - Restconf or Netconf
+        standalone_launchpad - indicates if the launchpad is running standalone
+    '''
+    if standalone_launchpad:
+        return mgmt_session
+
+    mc_proxy = mgmt_session.proxy(RwMcYang)
+    launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name)
+
+    if session_type == 'netconf':
+        launchpad_session = rift.auto.session.NetconfSession(host=launchpad_host)
+    elif session_type == 'restconf':
+        launchpad_session = rift.auto.session.RestconfSession(
+            host=launchpad_host,
+            use_https=use_https)
+
+    launchpad_session.connect()
+    rift.vcs.vcs.wait_until_system_started(launchpad_session)
+
+    return launchpad_session
+
+
+@pytest.fixture(scope='session')
+def ping_pong_install_dir():
+    '''Fixture containing the location of ping_pong installation
+    '''
+    install_dir = os.path.join(
+        os.environ["RIFT_ROOT"],
+        "images"
+        )
+    return install_dir
+
+@pytest.fixture(scope='session')
+def ping_vnfd_package_file(ping_pong_install_dir):
+    '''Fixture containing the location of the ping vnfd package
+
+    Arguments:
+        ping_pong_install_dir - location of ping_pong installation
+    '''
+    ping_pkg_file = os.path.join(
+        ping_pong_install_dir,
+        "ping_vnfd_with_image.tar.gz",
+        )
+    if not os.path.exists(ping_pkg_file):
+        raise_package_error()
+
+    return ping_pkg_file
+
+
+@pytest.fixture(scope='session')
+def pong_vnfd_package_file(ping_pong_install_dir):
+    '''Fixture containing the location of the pong vnfd package
+
+    Arguments:
+        ping_pong_install_dir - location of ping_pong installation
+    '''
+    pong_pkg_file = os.path.join(
+        ping_pong_install_dir,
+        "pong_vnfd_with_image.tar.gz",
+        )
+    if not os.path.exists(pong_pkg_file):
+        raise_package_error()
+
+    return pong_pkg_file
+
+
+@pytest.fixture(scope='session')
+def ping_pong_nsd_package_file(ping_pong_install_dir):
+    '''Fixture containing the location of the ping_pong_nsd package
+
+    Arguments:
+        ping_pong_install_dir - location of ping_pong installation
+    '''
+    ping_pong_pkg_file = os.path.join(
+        ping_pong_install_dir,
+        "ping_pong_nsd.tar.gz",
+        )
+    if not os.path.exists(ping_pong_pkg_file):
+        raise_package_error()
+
+    return ping_pong_pkg_file
+
+
+# Setting scope to be module, so that we get a different UUID when
called +# by different files/modules. +@pytest.fixture(scope='module') +def ping_pong_records(): + '''Fixture containing a set of generated ping and pong descriptors + ''' + return ping_pong.generate_ping_pong_descriptors(pingcount=1) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py new file mode 100644 index 0000000..99d5db6 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_launchpad_longevity.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# +# Author(s): Paul Laidler +# Creation Date: 2016/01/04 +# + +import rift.vcs.vcs +import time +import gi +gi.require_version('RwMcYang', '1.0') +from gi.repository import RwMcYang + +def test_launchpad_longevity(mgmt_session, mgmt_domain_name): + time.sleep(60) + rift.vcs.vcs.wait_until_system_started(mgmt_session) + launchpad_state = mgmt_session.proxy(RwMcYang).get("/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name) + assert launchpad_state == 'started' \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py new file mode 100644 index 0000000..4711281 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_longevity.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
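+# The longevity checks in this module follow a read-and-assert pattern:
+# fetch an operational xpath through a proxy and require an expected state.
+# A generic polling variant of the same idea, shown only as an illustrative
+# sketch ('proxy.get' is the same accessor the test below uses):
+#
+#     def wait_for_value(proxy, xpath, expected, timeout=120, interval=5):
+#         deadline = time.time() + timeout
+#         while time.time() < deadline:
+#             if proxy.get(xpath) == expected:
+#                 return
+#             time.sleep(interval)
+#         raise AssertionError("%s did not reach %s" % (xpath, expected))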
+
+#
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import pytest
+import rift.auto.session
+import rift.vcs.vcs
+import time
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import (
+    RwMcYang,
+    RwNsrYang,
+)
+
+@pytest.fixture(scope='module')
+def launchpad_session(request, mgmt_session, mgmt_domain_name, session_type):
+    launchpad_host = mgmt_session.proxy(RwMcYang).get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name)
+
+    if session_type == 'netconf':
+        launchpad_session = rift.auto.session.NetconfSession(host=launchpad_host)
+    elif session_type == 'restconf':
+        launchpad_session = rift.auto.session.RestconfSession(host=launchpad_host)
+
+    launchpad_session.connect()
+    rift.vcs.vcs.wait_until_system_started(launchpad_session)
+    return launchpad_session
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(launchpad_session):
+    return launchpad_session.proxy(RwNsrYang)
+
+def test_launchpad_longevity(launchpad_session, mgmt_session, mgmt_domain_name, rwnsr_proxy):
+    time.sleep(60)
+
+    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
+    launchpad_state = mgmt_session.proxy(RwMcYang).get("/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name)
+    assert launchpad_state == 'started'
+
+    nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+    for nsr in nsr_opdata.nsr:
+        xpath = ("/ns-instance-opdata"
+                 "/nsr[ns-instance-config-ref='%s']"
+                 "/operational-status") % (nsr.ns_instance_config_ref)
+        operational_status = rwnsr_proxy.get(xpath)
+        assert operational_status == 'running'
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py
new file mode 100755
index 0000000..6f182ba
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf.py
@@ -0,0 +1,450 @@
+#!/usr/bin/env python
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
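+# The descriptor on-boarding below shells out to curl; the same request can
+# be made with the 'requests' library this module already imports. A sketch,
+# assuming the same port-4567 upload API that upload_descriptor() targets:
+#
+#     with open(descriptor_file, 'rb') as pkg:
+#         reply = requests.post(
+#             '%s://%s:4567/api/upload' % (scheme, host),
+#             files={'descriptor': pkg},
+#             cert=cert, verify=False)
+#         transaction_id = reply.json()['transaction_id']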
+ +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 11/03/2015 +@brief Launchpad System Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import shutil +import subprocess +import tempfile +import time +import uuid +import rift.auto.session + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +from gi.repository import ( + RwMcYang, + NsdYang, + RwNsrYang, + RwVnfrYang, + NsrYang, + VnfrYang, + VldYang, + RwVnfdYang, + RwLaunchpadYang, + RwBaseYang +) + +logging.basicConfig(level=logging.DEBUG) + + +@pytest.fixture(scope='module') +def launchpad_proxy(request, launchpad_session): + return launchpad_session.proxy(RwLaunchpadYang) + +@pytest.fixture(scope='module') +def vnfd_proxy(request, launchpad_session): + return launchpad_session.proxy(RwVnfdYang) + +@pytest.fixture(scope='module') +def rwvnfr_proxy(request, launchpad_session): + return launchpad_session.proxy(RwVnfrYang) + +@pytest.fixture(scope='module') +def vld_proxy(request, launchpad_session): + return launchpad_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, launchpad_session): + return launchpad_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, launchpad_session): + return launchpad_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, launchpad_session): + return launchpad_session.proxy(RwBaseYang) + + +def create_nsr(nsd_id, input_param_list, cloud_account_name): + """ + Create the NSR record object + + Arguments: + nsd_id - NSD id + input_param_list - list of input-parameter objects + + Return: + NSR object + """ + nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + + nsr.id = str(uuid.uuid4()) + nsr.name = "nsr_name" + nsr.short_name = "nsr_short_name" + nsr.description = "This is a description" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + nsr.input_parameter.extend(input_param_list) + nsr.cloud_account = cloud_account_name + + return nsr + + +def upload_descriptor( + logger, + descriptor_file, + scheme, + cert, + host="127.0.0.1", + endpoint="upload"): + curl_cmd = ('curl --cert {cert} --key {key} -F "descriptor=@{file}" -k ' + '{scheme}://{host}:4567/api/{endpoint}'.format( + cert=cert[0], + key=cert[1], + scheme=scheme, + endpoint=endpoint, + file=descriptor_file, + host=host, + )) + + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_onboard_transaction_finished( + logger, + transaction_id, + scheme, + cert, + timeout=600, + host="127.0.0.1", + endpoint="upload"): + logger.info("Waiting for onboard trans_id %s to complete", transaction_id) + uri = '%s://%s:4567/api/%s/%s/state' % (scheme, host, endpoint, transaction_id) + elapsed = 0 + start = time.time() + while elapsed < timeout: + reply = requests.get(uri, cert=cert, verify=False) + state = reply.json() + if state["status"] == "success": + break + + if state["status"] != "pending": + raise DescriptorOnboardError(state) + + time.sleep(1) + elapsed = time.time() - start + + 
if state["status"] != "success": + raise DescriptorOnboardError(state) + + logger.info("Descriptor onboard was successful") + + +def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger): + """ + Terminate the instance and check if the record is deleted. + + Asserts: + 1. NSR record is deleted from instance-config. + + """ + logger.debug("Terminating Ping Pong NSR") + + nsr_path = "/ns-instance-config" + nsr = rwnsr_proxy.get_config(nsr_path) + ping_pong = nsr.nsr[0] + + xpath = "/ns-instance-config/nsr[id='{}']".format(ping_pong.id) + rwnsr_proxy.delete_config(xpath) + + time.sleep(30) + nsr = rwnsr_proxy.get_config(xpath) + assert nsr is None + + # Termination tests + vnfr = "/vnfr-catalog/vnfr" + vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True) + assert vnfrs is None or len(vnfrs.vnfr) == 0 + + # nsr = "/ns-instance-opdata/nsr" + # nsrs = rwnsr_proxy.get(nsr, list_obj=True) + # assert len(nsrs.nsr) == 0 + + +@pytest.fixture(scope='module', params=["upload", "update"]) +def endpoint(request): + """A simple fixture, which in combination with the incremental marker, lets + the ENTIRE TestPingPongStart class to be run twice in order. + """ + return request.param + + +@pytest.mark.setup('pingpong') +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestPingPongStart(object): + """A brief overview of the steps performed. + 1. Generate & on-board new descriptors + 2. Start & stop the ping pong NSR + 3. Update the exiting descriptor files. + 4. Start the ping pong NSR. + """ + def generate_tar_files(self, tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd): + """Converts the descriptor to files and package them into zip files + that can be uploaded to LP instance. + + Args: + tmpdir (string): Full path where the zipped files should be + ping_vnfd (VirtualNetworkFunction): Ping VNFD data + pong_vnfd (VirtualNetworkFunction): Pong VNFD data + ping_pong_nsd (NetworkService): PingPong NSD data + + Returns: + Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd + """ + rift_build = os.environ['RIFT_BUILD'] + MANO_DIR = os.path.join( + rift_build, + "modules/core/mano/src/core_mano-build/examples/ping_pong_ns") + ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2") + pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2") + + """ grab cached copies of these files if not found. 
They may not exist + because our git submodule dependency mgmt + will not populate these because they live in .build, not .install + """ + if not os.path.exists(ping_img): + ping_img = os.path.join( + os.environ['RIFT_ROOT'], + 'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2') + pong_img = os.path.join( + os.environ['RIFT_ROOT'], + 'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2') + + for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]: + descriptor.write_to_file(output_format='xml', outdir=tmpdir.name) + + ping_img_path = os.path.join(tmpdir.name, "{}/images/".format(ping_vnfd.name)) + pong_img_path = os.path.join(tmpdir.name, "{}/images/".format(pong_vnfd.name)) + os.makedirs(ping_img_path) + os.makedirs(pong_img_path) + + shutil.copy(ping_img, ping_img_path) + shutil.copy(pong_img, pong_img_path) + + for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]: + subprocess.call([ + "sh", + "{}/bin/generate_descriptor_pkg.sh".format(os.environ['RIFT_ROOT']), + tmpdir.name, + dir_name]) + + return (os.path.join(tmpdir.name, "{}.tar.gz".format(ping_vnfd.name)), + os.path.join(tmpdir.name, "{}.tar.gz".format(pong_vnfd.name)), + os.path.join(tmpdir.name, "{}.tar.gz".format(ping_pong_nsd.name))) + + def onboard_descriptor(self, host, file_name, logger, endpoint, scheme, cert): + """On-board/update the descriptor. + + Args: + host (str): Launchpad IP + file_name (str): Full file path. + logger: Logger instance + endpoint (str): endpoint to be used for the upload operation. + + """ + logger.info("Onboarding package: %s", file_name) + trans_id = upload_descriptor( + logger, + file_name, + scheme, + cert, + host=host, + endpoint=endpoint) + wait_onboard_transaction_finished( + logger, + trans_id, + scheme, + cert, + host=host, + endpoint=endpoint) + + def test_onboard_descriptors( + self, + logger, + vnfd_proxy, + nsd_proxy, + launchpad_session, + scheme, + cert, + ping_pong_records, + endpoint): + """Generates & On-boards the descriptors. 
+ """ + ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records + + # if update_mode: + # for vnfd_record in [ping_vnfd, pong_vnfd]: + # vnfd_record.descriptor.vnfd[0].description += "_update" + # ping_pong_nsd.descriptor.nsd[0].description += "_update" + + tmpdir = tempfile.TemporaryDirectory() + + ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \ + self.generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd) + + for file_name in [ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file]: + self.onboard_descriptor( + launchpad_session.host, + file_name, + logger, + endpoint, + scheme, + cert) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should two vnfds" + assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name] + assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name] + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + assert nsds[0].name == "ping_pong_nsd" + + def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name, endpoint): + + def verify_input_parameters(running_config, config_param): + """ + Verify the configured parameter set against the running configuration + """ + for run_input_param in running_config.input_parameter: + if (run_input_param.xpath == config_param.xpath and + run_input_param.value == config_param.value): + return True + + assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} " + "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath, + config_param.value, + running_config.input_parameter)) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + input_parameters = [] + descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id + descr_value = "New NSD Description" + in_param_id = str(uuid.uuid4()) + + input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath=descr_xpath, + value=descr_value) + + input_parameters.append(input_param_1) + + nsr = create_nsr(nsd.id, input_parameters, cloud_account_name) + + logger.info("Instantiating the Network Service") + rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id)) + assert nsr_opdata is not None + + # Verify the input parameter configuration + running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id) + for input_param in input_parameters: + verify_input_parameters(running_config, input_param) + + def test_wait_for_pingpong_started(self, rwnsr_proxy, endpoint): + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + + assert len(nsrs) == 1 + current_nsr = nsrs[0] + + xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(current_nsr.ns_instance_config_ref) + rwnsr_proxy.wait_for(xpath, "running", timeout=120) + + def test_stop_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, endpoint): + """If the package is being on-boarded, not updated, then terminate the + current NSR instance, as we will be triggering another instance + after updating the descriptor files. 
+ """ + if endpoint == "upload": + terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger) + + +@pytest.mark.teardown('pingpong') +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestPingPongTeardown(object): + def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger): + """ + Terminate the instance and check if the record is deleted. + + Asserts: + 1. NSR record is deleted from instance-config. + + """ + logger.debug("Terminating Ping Pong NSR") + terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger) + + def test_delete_records(self, nsd_proxy, vnfd_proxy): + """Delete the NSD & VNFD records + + Asserts: + The records are deleted. + """ + nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True) + for nsd in nsds.nsd: + xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id) + nsd_proxy.delete_config(xpath) + + nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True) + assert nsds is None or len(nsds.nsd) == 0 + + vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True) + for vnfd_record in vnfds.vnfd: + xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id) + vnfd_proxy.delete_config(xpath) + + vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True) + assert vnfds is None or len(vnfds.vnfd) == 0 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py new file mode 100644 index 0000000..012a500 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_pingpong_vnf_static.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 11/03/2015 +@brief Launchpad System Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import rift.auto.session + +import gi +gi.require_version('RwMcYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +from gi.repository import RwMcYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwLaunchpadYang, RwBaseYang + +logging.basicConfig(level=logging.DEBUG) + + +@pytest.fixture(scope='module') +def mc_proxy(request, mgmt_session): + return mgmt_session.proxy(RwMcYang) + + +@pytest.fixture(scope='module') +def launchpad_session(request, mc_proxy, mgmt_domain_name, session_type): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + + if session_type == 'netconf': + launchpad_session = rift.auto.session.NetconfSession(host=launchpad_host) + elif session_type == 'restconf': + launchpad_session = rift.auto.session.RestconfSession(host=launchpad_host) + + launchpad_session.connect() + rift.vcs.vcs.wait_until_system_started(launchpad_session) + return launchpad_session + + +@pytest.fixture(scope='module') +def launchpad_proxy(request, launchpad_session): + return launchpad_session.proxy(RwLaunchpadYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, launchpad_session): + return launchpad_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, launchpad_session): + return launchpad_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, launchpad_session): + return launchpad_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, launchpad_session): + return launchpad_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, launchpad_session): + return launchpad_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, launchpad_session): + return launchpad_session.proxy(RwBaseYang) + + +def create_nsr_from_nsd_id(nsd_id): + nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "nsr_name" + nsr.short_name = "nsr_short_name" + nsr.description = "This is a description" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + nsr.cloud_account = "cloud_account_name" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_onboard_transaction_finished(logger, transaction_id, timeout=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", transaction_id) + uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id) + elapsed = 0 + start = time.time() + while elapsed < timeout: + reply = requests.get(uri) + state = reply.json() + if state["status"] == "success": + break + + 
if state["status"] != "pending": + raise DescriptorOnboardError(state) + + time.sleep(1) + elapsed = time.time() - start + + if state["status"] != "success": + raise DescriptorOnboardError(state) + + logger.info("Descriptor onboard was successful") + + + +@pytest.mark.setup('pingpong') +@pytest.mark.depends('launchpad') +@pytest.mark.incremental +class TestPingPongStart(object): + def test_configure_pools(self, mc_proxy, vm_pool_name, network_pool_name): + vm_pool = mc_proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in vm_pool.available] + + assert len(available_ids) >= 2 + + network_pool = mc_proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + available_ids = [network.id for network in network_pool.available] + assert len(available_ids) >= 3 + + vm_pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'assigned':[ + {'id':available_ids[0]}, + {'id':available_ids[1]}, + ]}) + + mc_proxy.merge_config( + "/vm-pool/pool[name='%s']" % vm_pool_name, + vm_pool_config) + + network_pool_config = RwMcYang.NetworkPool.from_dict({ + 'name':network_pool_name, + 'assigned':[ + {'id':available_ids[0]}, + {'id':available_ids[1]}, + {'id':available_ids[2]}, + ]}) + mc_proxy.merge_config( + "/network-pool/pool[name='%s']" % network_pool_name, + network_pool_config) + + def test_restart_launchpad(self, mc_proxy, mgmt_domain_name, launchpad_session, launchpad_scraper): + mc_proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = mc_proxy.rpc(stop_launchpad_input) + + mc_proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=60, + fail_on=['crashed']) + + start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name) + start_launchpad_output = mc_proxy.rpc(start_launchpad_input) + mc_proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=200, + fail_on=['crashed']) + + rift.vcs.vcs.wait_until_system_started(launchpad_session) + launchpad_scraper.reset() + + def test_onboard_ping_vnfd(self, logger, mc_proxy, mgmt_domain_name, vnfd_proxy, ping_vnfd_package_file): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + logger.info("Onboarding ping_vnfd package: %s", ping_vnfd_package_file) + trans_id = upload_descriptor(logger, ping_vnfd_package_file, launchpad_host) + wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "rw_ping_vnfd" + + def test_onboard_pong_vnfd(self, logger, mc_proxy, mgmt_domain_name, vnfd_proxy, pong_vnfd_package_file): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + logger.info("Onboarding pong_vnfd package: %s", pong_vnfd_package_file) + trans_id = upload_descriptor(logger, pong_vnfd_package_file, launchpad_host) + wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "rw_pong_vnfd" in [vnfds[0].name, vnfds[1].name] + + def 
test_onboard_ping_pong_nsd(self, logger, mc_proxy, mgmt_domain_name, nsd_proxy, ping_pong_nsd_package_file): + launchpad_host = mc_proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name) + logger.info("Onboarding ping_pong_nsd package: %s", ping_pong_nsd_package_file) + trans_id = upload_descriptor(logger, ping_pong_nsd_package_file, launchpad_host) + wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "rw_ping_pong_nsd" + + def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + rwnsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id + + logger.info("Waiting up to 120 seconds for ping and pong components to show " + "up in show vcs info") + + start_time = time.time() + while (time.time() - start_time) < 120: + vcs_info = base_proxy.get('/vcs/info') + components = vcs_info.components.component_info + + def find_component_by_name(name): + for component in components: + if name in component.component_name: + return component + + logger.warning("Did not find %s component name in show vcs info", + name) + + return None + + ping_vm_component = find_component_by_name( + "rw_ping_vnfd:rwping_vm" + ) + if ping_vm_component is None: + continue + + pong_vm_component = find_component_by_name( + "rw_pong_vnfd:rwpong_vm" + ) + if pong_vm_component is None: + continue + + ping_proc_component = find_component_by_name( + "rw_ping_vnfd:rwping_proc" + ) + if ping_proc_component is None: + continue + + pong_proc_component = find_component_by_name( + "rw_pong_vnfd:rwpong_proc" + ) + if pong_proc_component is None: + continue + + ping_tasklet_component = find_component_by_name( + "rw_ping_vnfd:rwping_tasklet" + ) + if ping_tasklet_component is None: + continue + + pong_tasklet_component = find_component_by_name( + "rw_pong_vnfd:rwpong_tasklet" + ) + if pong_tasklet_component is None: + continue + + logger.info("TEST SUCCESSFUL: All ping and pong components were found in show vcs info") + break + + else: + assert False, "Did not find all ping and pong component in time" \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_records.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_records.py new file mode 100755 index 0000000..266b1dd --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_records.py @@ -0,0 +1,357 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
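+# The record assertions in this module are driven by generator helpers that
+# pair each descriptor with its runtime record; the typical consumption
+# pattern (both helpers are defined below) is:
+#
+#     for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+#         assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
+#     for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+#         assert nsd.admin_status == 'ENABLED'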
+
+import collections
+import socket
+import subprocess
+import time
+
+import pytest
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import (
+        NsdYang,
+        RwConmanYang,
+        RwMcYang,
+        RwNsrYang,
+        VlrYang,
+        RwVlrYang,
+        RwVnfdYang,
+        RwVnfrYang,
+        VnfrYang
+        )
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+
+
+@pytest.fixture(scope='module')
+def proxy(request, launchpad_session):
+    return launchpad_session.proxy
+
+
+def yield_vnfd_vnfr_pairs(proxy, nsr=None):
+    """
+    Yields tuples of vnfd & vnfr entries.
+
+    Args:
+        proxy (callable): Launchpad proxy
+        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
+            are returned
+
+    Yields:
+        Tuple: VNFD and its corresponding VNFR entry
+    """
+    def get_vnfd(vnfd_id):
+        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
+        return proxy(RwVnfdYang).get(xpath)
+
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+    for vnfr in vnfrs.vnfr:
+
+        if nsr and vnfr.id not in nsr.constituent_vnfr_ref:
+            continue
+
+        vnfd = get_vnfd(vnfr.vnfd_ref)
+        yield vnfd, vnfr
+
+
+def yield_nsd_nsr_pairs(proxy):
+    """Yields tuples of NSD & NSR pairs
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSD and its corresponding NSR record
+    """
+    nsr = "/ns-instance-opdata/nsr"
+    nsrs = proxy(RwNsrYang).get(nsr, list_obj=True)
+    for nsr in nsrs.nsr:
+        nsd_path = "/ns-instance-config/nsr[id='{}']".format(
+            nsr.ns_instance_config_ref)
+        nsd = proxy(RwNsrYang).get_config(nsd_path)
+
+        yield nsd, nsr
+
+
+def assert_records(proxy):
+    """Verifies that the NSR & VNFR records were created
+    """
+    ns_tuple = list(yield_nsd_nsr_pairs(proxy))
+    assert len(ns_tuple) == 1
+
+    vnf_tuple = list(yield_vnfd_vnfr_pairs(proxy))
+    assert len(vnf_tuple) == 2
+
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.incremental
+class TestRecords(object):
+    def is_valid_ip(self, address):
+        """Verifies that the given string is a syntactically valid IPv4
+        address (reachability is verified separately, e.g. in test_cm_vnfr)
+
+        Args:
+            address (str): IP address
+
+        Returns:
+            boolean
+        """
+        try:
+            socket.inet_aton(address)
+        except socket.error:
+            return False
+        else:
+            return True
+
+    def test_records_present(self, proxy):
+        assert_records(proxy)
+
+    def test_vdu_record_params(self, proxy):
+        """
+        Asserts:
+        1. A valid management IP has been assigned to the VM
+        2. The VM flavor has been copied over to the VDUR
+        3. The external interface connection point references match
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
+
+            for vdud, vdur in zip(vnfd.vdu, vnfr.vdur):
+                assert vdud.vm_flavor == vdur.vm_flavor
+                assert self.is_valid_ip(vdur.management_ip) is True
+                assert vdud.external_interface[0].vnfd_connection_point_ref == \
+                    vdur.external_interface[0].vnfd_connection_point_ref
+
+    def test_external_vl(self, proxy):
+        """
+        Asserts:
+        1. Valid IP for the external connection point
+        2. A valid external network fabric
+        3. Connection point names are copied over
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            cp_des, cp_rec = vnfd.connection_point, vnfr.connection_point
+            assert cp_des[0].name == cp_rec[0].name
+            assert self.is_valid_ip(cp_rec[0].ip_address) is True
+
+            xpath = "/vlr-catalog/vlr[id='{}']/network-id".format(cp_rec[0].vlr_ref)
+            network_id = proxy(VlrYang).get(xpath)
+            assert len(network_id) > 0
+
+    def test_monitoring_params(self, proxy):
+        """
+        Asserts:
+        1. The value counter ticks (assertion currently disabled below)
+        2.
If the meta fields are copied over + """ + def mon_param_record(vnfr_id, mon_param_id): + return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format( + vnfr_id, mon_param_id) + + for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy): + for mon_des in (vnfd.monitoring_param): + mon_rec = mon_param_record(vnfr.id, mon_des.id) + mon_rec = proxy(VnfrYang).get(mon_rec) + + # Meta data check + fields = mon_des.as_dict().keys() + for field in fields: + assert getattr(mon_des, field) == getattr(mon_rec, field) + # Tick check + #assert mon_rec.value_integer > 0 + + def test_nsr_record(self, proxy): + """ + Currently we only test for the components of NSR tests. Ignoring the + operational-events records + + Asserts: + 1. The constituent components. + 2. Admin status of the corresponding NSD record. + """ + for nsd, nsr in yield_nsd_nsr_pairs(proxy): + # 1 n/w and 2 connection points + assert len(nsr.vlr) == 1 + assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2 + + assert len(nsr.constituent_vnfr_ref) == 2 + assert nsd.admin_status == 'ENABLED' + + def test_create_update_vnfd(self, proxy, ping_pong_records): + """ + Verify VNFD related operations + + Asserts: + If a VNFD record is created + """ + ping_vnfd, pong_vnfd, _ = ping_pong_records + vnfdproxy = proxy(RwVnfdYang) + + for vnfd_record in [ping_vnfd, pong_vnfd]: + xpath = "/vnfd-catalog/vnfd" + vnfdproxy.create_config(xpath, vnfd_record.vnfd) + + xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id) + vnfd = vnfdproxy.get(xpath) + assert vnfd.id == vnfd_record.id + + vnfdproxy.replace_config(xpath, vnfd_record.vnfd) + + def test_create_update_nsd(self, proxy, ping_pong_records): + """ + Verify NSD related operations + + Asserts: + If NSD record was created + """ + _, _, ping_pong_nsd = ping_pong_records + nsdproxy = proxy(NsdYang) + + xpath = "/nsd-catalog/nsd" + nsdproxy.create_config(xpath, ping_pong_nsd.descriptor) + + xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id) + nsd = nsdproxy.get(xpath) + assert nsd.id == ping_pong_nsd.id + + nsdproxy.replace_config(xpath, ping_pong_nsd.descriptor) + + def test_wait_for_pingpong_configured(self, proxy): + nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + + assert len(nsrs) == 1 + current_nsr = nsrs[0] + + xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref) + proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240) + + def test_cm_nsr(self, proxy): + """ + Asserts: + 1. The ID of the NSR in cm-state + 2. Name of the cm-nsr + 3. The vnfr component's count + 4. State of the cm-nsr + """ + for nsd, _ in yield_nsd_nsr_pairs(proxy): + con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsd.id) + con_data = proxy(RwConmanYang).get(con_nsr_xpath) + + assert con_data is not None, \ + "No Config data obtained for the nsd {}: {}".format( + nsd.name, nsd.id) + assert con_data.name == "ping_pong_nsd" + assert len(con_data.cm_vnfr) == 2 + + state_path = con_nsr_xpath + "/state" + proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120) + + def test_cm_vnfr(self, proxy): + """ + Asserts: + 1. The ID of Vnfr in cm-state + 2. Name of the vnfr + 3. State of the VNFR + 4. Checks for a reachable IP in mgmt_interface + 5. Basic checks for connection point and cfg_location. 
+ """ + def is_reachable(ip): + rc = subprocess.call(["ping", "-c1", ip]) + if rc == 0: + return True + return False + + nsd, _ = list(yield_nsd_nsr_pairs(proxy))[0] + con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsd.id) + + for _, vnfr in yield_vnfd_vnfr_pairs(proxy): + con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id) + con_data = proxy(RwConmanYang).get(con_vnfr_path) + + assert con_data is not None + + state_path = con_vnfr_path + "/state" + proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120) + + con_data = proxy(RwConmanYang).get(con_vnfr_path) + assert is_reachable(con_data.mgmt_interface.ip_address) is True + + assert len(con_data.connection_point) == 1 + connection_point = con_data.connection_point[0] + assert connection_point.name == vnfr.connection_point[0].name + assert connection_point.ip_address == vnfr.connection_point[0].ip_address + + assert con_data.cfg_location is not None + +@pytest.mark.depends('pingpong') +@pytest.mark.incremental +class TestNfviMetrics(object): + + def test_records_present(self, proxy): + assert_records(proxy) + + def test_nfvi_metrics(self, proxy): + """ + Verify the NFVI metrics + + Asserts: + 1. Computed metrics, such as memory, cpu, storage and ports, match + with the metrics in NSR record. The metrics are computed from the + descriptor records. + 2. Check if the 'utilization' field has a valid value (> 0) and matches + with the 'used' field, if available. + """ + for nsd, nsr in yield_nsd_nsr_pairs(proxy): + nfvi_metrics = nsr.nfvi_metrics + computed_metrics = collections.defaultdict(int) + + # Get the constituent VNF records. + for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy, nsr): + vdu = vnfd.vdu[0] + vm_spec = vdu.vm_flavor + computed_metrics['vm'] += 1 + computed_metrics['memory'] += vm_spec.memory_mb * (10**6) + computed_metrics['storage'] += vm_spec.storage_gb * (10**9) + computed_metrics['vcpu'] += vm_spec.vcpu_count + computed_metrics['external_ports'] += len(vnfd.connection_point) + computed_metrics['internal_ports'] += len(vdu.internal_connection_point) + + assert nfvi_metrics.vm.active_vm == computed_metrics['vm'] + + # Availability checks + for metric_name in computed_metrics: + metric_data = getattr(nfvi_metrics, metric_name) + total_available = getattr(metric_data, 'total', None) + + if total_available is not None: + assert computed_metrics[metric_name] == total_available + + # Utilization checks + for metric_name in ['memory', 'storage', 'vcpu']: + metric_data = getattr(nfvi_metrics, metric_name) + + utilization = metric_data.utilization + # assert utilization > 0 + + # If used field is available, check if it matches with utilization! + total = metric_data.total + used = getattr(metric_data, 'used', None) + if used is not None: + assert total > 0 + computed_utilization = round((used/total) * 100, 2) + assert abs(computed_utilization - utilization) <= 0.1 \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py b/modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py new file mode 100644 index 0000000..550bd47 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/pytest/test_startstop.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#
+#
+
+@file test_startstop.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 12/17/2015
+@brief System test of launchpad start and stop functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+from gi.repository import RwMcYang
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    '''fixture which returns a proxy to RwMcYang
+
+    Arguments:
+        request - pytest fixture request
+        mgmt_session - mgmt_session fixture - instance of a rift.auto.session class
+    '''
+    return mgmt_session.proxy(RwMcYang)
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestLaunchpadStartStop:
+
+    @pytest.mark.feature('mission-control')
+    def test_stop_launchpad(self, proxy, mgmt_domain_name):
+        '''Invoke stop launchpad RPC
+
+        Asserts:
+            Launchpad begins test in state 'started'
+            Launchpad finishes test in state 'stopped'
+        '''
+
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=10,
+            fail_on=['crashed'])
+        stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name)
+        stop_launchpad_output = proxy.rpc(stop_launchpad_input)
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'stopped',
+            timeout=120,
+            fail_on=['crashed'])
+
+    @pytest.mark.feature('mission-control')
+    def test_start_launchpad(self, proxy, mgmt_domain_name, launchpad_scraper):
+        '''Invoke start launchpad RPC
+
+        Asserts:
+            Launchpad begins test in state 'stopped'
+            Launchpad finishes test in state 'started'
+        '''
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'stopped',
+            timeout=10,
+            fail_on=['crashed'])
+        start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name)
+        start_launchpad_output = proxy.rpc(start_launchpad_input)
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=400,
+            fail_on=['crashed'])
+        launchpad_scraper.reset()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg
new file mode 100644
index 0000000..79abeb8
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_lp_standalone_systest_openstack.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_LP_STANDALONE_OPENSTACK",
+  "commandline":"./pingpong_lp_standalone_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo --lp-standalone ",
+  "test_description":"System test for ping and pong vnf with standalone Launchpad (Openstack)",
+  "run_as_root": false,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg
new file mode 100644
index 0000000..6ad25f6
--- 
/dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_RECORDS_OPENSTACK", + "commandline":"./pingpong_records_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for ping and pong vnf (Openstack)", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg new file mode 100644 index 0000000..ce97a13 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK", + "commandline":"./pingpong_vnf_reload_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for ping pong vnf reload(Openstack)", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2200, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg new file mode 100644 index 0000000..9dd3279 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_PINGPONG_VNF_CLOUDSIM", + "commandline":"./pingpong_vnf_systest", + "target_vm":"VM", + "test_description":"System test for ping and pong vnf", + "run_as_root": true, + "status":"working", + "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"], + "timelimit": 1800, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg new file mode 100644 index 0000000..2371c9d --- /dev/null +++ b/modules/core/mano/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_PINGPONG_VNF_OPENSTACK", + "commandline":"./pingpong_vnf_systest --cloud-type 'openstack' --cloud-host '10.66.4.115'", + "test_description":"System test for ping and pong vnf (Openstack)", + "run_as_root": false, + "status":"broken", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 2200, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/test/CMakeLists.txt b/modules/core/mano/rwlaunchpad/test/CMakeLists.txt new file mode 100644 index 0000000..64017c2 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/CMakeLists.txt @@ -0,0 +1,59 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Creation Date: 10/01/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS + launchpad.py + DESTINATION demos + COMPONENT ${PKG_LONG_NAME} + ) + +install( + FILES + racfg/launchpad_module_test.racfg + DESTINATION + usr/rift/systemtest/launchpad + COMPONENT ${PKG_LONG_NAME} + ) + +install( + FILES + pytest/lp_test.py + DESTINATION + usr/rift/systemtest/pytest/launchpad + COMPONENT ${PKG_LONG_NAME} + 
)
+
+install(
+  PROGRAMS
+    launchpad_module_test
+  DESTINATION
+    usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+rift_py3test(rwmano_utest
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/mano_ut.py
+  )
+
+#rift_py3test(rwmano_error_utest
+#  TEST_ARGS
+#  ${CMAKE_CURRENT_SOURCE_DIR}/mano_error_ut.py
+#  )
+
+rift_py3test(utest_rwmonitor
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwmonitor.py
+  )
+
+rift_py3test(utest_rwnsm
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwnsm.py
+  )
diff --git a/modules/core/mano/rwlaunchpad/test/juju_ut.py b/modules/core/mano/rwlaunchpad/test/juju_ut.py
new file mode 100755
index 0000000..17bff54
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/juju_ut.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+
+import xmlrunner
+
+from gi.repository import (
+        RwDts as rwdts,
+        RwLaunchpadYang as launchpadyang,
+        RwNsmYang as rwnsmyang,
+        RwCloudYang as rwcloudyang,
+        RwResourceMgrYang,
+        RwConfigAgentYang,
+        NsrYang
+        )
+import rift.tasklets
+import rift.test.dts
+
+import mano_ut
+
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class OpenManoNsmTestCase(mano_ut.ManoTestCase):
+    """
+    DTS GI interface unittests
+
+    Note: Each test uses a list of asyncio.Events for staging through the
+    test. These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any retry
+    mechanisms. For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries. Such
+    event lists should not be used in production software.
+ """ + + @classmethod + def configure_suite(cls, rwmain): + launchpad_build_dir = os.path.join( + cls.top_dir, + '.build/modules/core/mc/core_mc-build/rwlaunchpad' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwnsm'), + 'rwnsmtasklet' + ) + + cls.waited_for_tasklets = False + + @classmethod + def configure_schema(cls): + return rwnsmyang.get_schema() + + @classmethod + def configure_timeout(cls): + return 240 + + @asyncio.coroutine + def wait_tasklets(self): + if not OpenManoNsmTestCase.waited_for_tasklets: + OpenManoNsmTestCase.waited_for_tasklets = True + self._wait_event = asyncio.Event(loop=self.loop) + yield from asyncio.sleep(5, loop=self.loop) + self._wait_event.set() + + yield from self._wait_event.wait() + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + yield from self.ping_pong.publish_desciptors( + num_external_vlrs, + num_internal_vlrs, + num_ping_vms + ) + + def unpublish_descriptors(self): + self.ping_pong.unpublish_descriptors() + + @asyncio.coroutine + def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20): + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + if len(nsrs) == 0: + continue + self.assertEqual(1, len(nsrs)) + if nsrs[0].operational_status in ['running', 'failed']: + return + + self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status) + yield from asyncio.sleep(2, loop=self.loop) + + self.assertIn(nsrs[0].operational_status, ['running', 'failed']) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.tinfo = self.new_tinfo(self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = mano_ut.ManoQuerier(self.log, self.dts) + + # Add a task to wait for tasklets to come up + asyncio.ensure_future(self.wait_tasklets(), loop=self.loop) + + @asyncio.coroutine + def configure_cloud_account(self, cloud_name="cloud1"): + account = rwcloudyang.CloudAccount() + account.name = cloud_name + account.account_type = "mock" + account.mock.username = "mock_user" + + account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name) + self.log.info("Configuring cloud-account: %s", account) + yield from self.dts.query_create(account_xpath, + rwdts.Flag.ADVISE | rwdts.Flag.TRACE, + account) + + @asyncio.coroutine + def configure_config_agent(self): + account_xpath = "C,/rw-config-agent:config-agent/account[name='Juju1 config']" + + juju1 = RwConfigAgentYang.ConfigAgentAccount.from_dict({ + "name": "Juju1 config", + "account_type": "juju", + "juju": { + "ip_address": "1.1.1.1", + "port": 9000, + "user": "foo", + "secret": "1232" + } + }) + + cfg_agt = RwConfigAgentYang.ConfigAgent() + cfg_agt.account.append(juju1) + cfg_agt.as_dict() + + yield from self.dts.query_create( + account_xpath, + rwdts.Flag.ADVISE, + juju1, + ) + + + @asyncio.coroutine + def configure_config_primitive(self, nsr_id): + job_data = NsrYang.YangInput_Nsr_ExecNsConfigPrimitive.from_dict({ + "name": "Add Corporation", + "nsr_id_ref": nsr_id, + "vnf_list": [{ + "vnfr_id_ref": "10631555-757e-4924-96e6-41a0297a9406", + "member_vnf_index_ref": 1, + "vnf_primitive": [{ + "name": "create-update-user", + "parameter": [ + {"name" : "number", "value": "1234334"}, + {"name" : "password", "value": "1234334"}, + ] + }] + }] + + }) + yield 
from self.dts.query_rpc(
+            "/nsr:exec-ns-config-primitive",
+            0,
+            job_data,
+            )
+
+    @rift.test.dts.async_test
+    def test_ping_pong_nsm_instantiate(self):
+        yield from self.wait_tasklets()
+        yield from self.configure_cloud_account("mock_account")
+        yield from self.configure_config_agent()
+        yield from self.publish_desciptors(num_internal_vlrs=0)
+
+        nsr_id = yield from self.ping_pong.publish_nsr_config("mock_account")
+        yield from asyncio.sleep(10, loop=self.loop)
+
+        res_iter = yield from self.dts.query_read("D,/nsr:ns-instance-opdata/nsr:nsr")
+        for i in res_iter:
+            result = yield from i
+
+            print("**", result)
+        # yield from self.configure_config_primitive(nsr_id)
+        yield from asyncio.sleep(10, loop=self.loop)
+
+        # nsrs = yield from self.querier.get_nsr_opdatas()
+        # nsr = nsrs[0]
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    OpenManoNsmTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/test/launchpad.py b/modules/core/mano/rwlaunchpad/test/launchpad.py
new file mode 100755
index 0000000..5387e1f
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/launchpad.py
@@ -0,0 +1,406 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import logging
+import os
+import resource
+import socket
+import sys
+import subprocess
+import shlex
+import netifaces
+
+from rift.rwlib.util import certs
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+import rift.vcs
+import rift.vcs.core as core
+import rift.vcs.demo
+import rift.vcs.vms
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class NsmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a network services manager tasklet.
+    """
+
+    def __init__(self, name='network-services-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 ):
+        """
+        Creates a NsmTasklet object.
+
+        Arguments:
+            name - the name of the tasklet
+            uid  - a unique identifier
+        """
+        super(NsmTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
+    plugin_name = ClassProperty('rwnsmtasklet')
+
+
+class VnsTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network service tasklet.
+ """ + + def __init__(self, name='virtual-network-service', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a VnsTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(VnsTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet') + plugin_name = ClassProperty('rwvnstasklet') + + +class VnfmTasklet(rift.vcs.core.Tasklet): + """ + This class represents a virtual network function manager tasklet. + """ + + def __init__(self, name='virtual-network-function-manager', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a VnfmTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(VnfmTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet') + plugin_name = ClassProperty('rwvnfmtasklet') + + +class ResMgrTasklet(rift.vcs.core.Tasklet): + """ + This class represents a Resource Manager tasklet. + """ + + def __init__(self, name='Resource-Manager', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a ResMgrTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + """ + super(ResMgrTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet') + plugin_name = ClassProperty('rwresmgrtasklet') + + +class MonitorTasklet(rift.vcs.core.Tasklet): + """ + This class represents a tasklet that is used to monitor NFVI metrics. + """ + + def __init__(self, name='nfvi-metrics-monitor', uid=None, + config_ready=True, + recovery_action=core.RecoveryType.FAILCRITICAL.value, + ): + """ + Creates a MonitorTasklet object. + + Arguments: + name - the name of the tasklet + uid - a unique identifier + + """ + super(MonitorTasklet, self).__init__(name=name, uid=uid, + config_ready=config_ready, + recovery_action=recovery_action, + ) + + plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor') + plugin_name = ClassProperty('rwmonitor') + + +def get_ui_ssl_args(): + """Returns the SSL parameter string for launchpad UI processes""" + + try: + use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key() + except certs.BootstrapSslMissingException: + logger.error('No bootstrap certificates found. 
Disabling UI SSL')
+        use_ssl = False
+
+    # If we're not using SSL, no SSL arguments are necessary
+    if not use_ssl:
+        return ""
+
+    return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)
+
+
+class UIServer(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.MC.UI",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 ):
+        super(UIServer, self).__init__(
+                name=name,
+                exe="./usr/share/rw.ui/webapp/scripts/launch_ui.sh",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                )
+
+    @property
+    def args(self):
+        return get_ui_ssl_args()
+
+
+class ComposerUI(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.COMPOSER.UI",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 ):
+        super(ComposerUI, self).__init__(
+                name=name,
+                exe="./usr/share/composer/scripts/launch_composer.sh",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                )
+
+    @property
+    def args(self):
+        return get_ui_ssl_args()
+
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Configuration Manager tasklet.
+    """
+
+    def __init__(self, name='Configuration-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 ):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name - the name of the tasklet
+            uid  - a unique identifier
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid,
+                                                   config_ready=config_ready,
+                                                   recovery_action=recovery_action,
+                                                   )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+
+class Demo(rift.vcs.demo.Demo):
+    def __init__(self, with_cntr_mgr=False):
+
+        procs = [
+            rift.vcs.RiftCli(),
+            rift.vcs.uAgentTasklet(),
+            rift.vcs.DtsRouterTasklet(),
+            rift.vcs.MsgBrokerTasklet(),
+            rift.vcs.RestconfTasklet(),
+            rift.vcs.Watchdog(),
+            rift.vcs.RestPortForwardTasklet(),
+            rift.vcs.CalProxy(),
+            MonitorTasklet(),
+            NsmTasklet(),
+            #VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value,),
+            VnsTasklet(),
+            UIServer(),
+            ComposerUI(),
+            ConfigManagerTasklet(),
+            rift.vcs.Launchpad(),
+            ResMgrTasklet(),
+        ]
+
+        restart_procs = [
+            VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value,),
+        ]
+        if with_cntr_mgr:
+            procs.append(rift.vcs.ContainerManager())
+
+        super(Demo, self).__init__(
+            # Construct the system. This system consists of 1 cluster in 1
+            # colony. The master cluster houses CLI and management VMs
+            sysinfo = rift.vcs.SystemInfo(
+                colonies=[
+                    rift.vcs.Colony(
+                        name='master',
+                        clusters=[
+                            rift.vcs.VirtualMachine(
+                                name='vm-launchpad',
+                                ip='127.0.0.1',
+                                procs=procs,
+                                restart_procs=restart_procs,
+                            ),
+                        ]
+                    )
+                ]
+            ),
+
+            # Define the generic portmap.
+            port_map = {},
+
+            # Define a mapping from the placeholder logical names to the real
+            # port names for each of the different modes supported by this demo.
+            port_names = {
+                'ethsim': {
+                },
+                'pci': {
+                }
+            },
+
+            # Define the connectivity between logical port names.
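+            # (Left empty here, presumably because this demo runs every
+            # tasklet in the single vm-launchpad VM, so no inter-port
+            # connectivity needs to be declared.)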
+            port_groups = {},
+            )
+
+
+def clear_salt_keys():
+    # clear all the previously installed salt keys
+    logger.info("Removing all unconnected salt keys")
+    stdout = subprocess.check_output(
+            shlex.split('salt-run manage.down'),
+            universal_newlines=True,
+            )
+
+    down_minions = stdout.splitlines()
+
+    for line in down_minions:
+        salt_id = line.strip().replace("- ", "")
+        logger.info("Removing old unconnected salt id: %s", salt_id)
+        minion_keys_stdout = subprocess.check_output(
+                shlex.split('salt-key -f {}'.format(salt_id)),
+                universal_newlines=True)
+
+        minion_keys = minion_keys_stdout.splitlines()
+        for key_line in minion_keys:
+            if "Keys" in key_line:
+                continue
+
+            key_split = key_line.split(":")
+            if len(key_split) < 2:
+                continue
+
+            key = key_split[0]
+
+            # Delete the minion key
+            logger.info("Deleting minion %s key: %s", salt_id, key)
+            subprocess.check_call(shlex.split('salt-key -d {} -y'.format(key)))
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+
+    parser.add_argument(
+        '--with-cntr-mgr',
+        action='store_true',
+        help='Enable the container manager tasklet'
+        )
+
+    args = parser.parse_args(argv)
+
+    # Disable loading any kernel modules for the launchpad VM
+    # since it doesn't need it and it will fail within containers
+    os.environ["NO_KERNEL_MODS"] = "1"
+
+    if args.with_cntr_mgr:
+        # In order to reliably module test, the virbr0 bridge
+        # with IP 192.168.122.1 must exist before we start executing.
+        # This is because in expanded mode, we need to use a container
+        # accessible IP address for zookeeper clients.
+        rift.rwcal.cloudsim.net.virsh_initialize_default()
+        clear_salt_keys()
+
+    # Remove the persistent DTS recovery files
+    for f in os.listdir(os.environ["INSTALLDIR"]):
+        if f.endswith(".db"):
+            os.remove(os.path.join(os.environ["INSTALLDIR"], f))
+
+    # Load demo info and create the Demo object
+    demo = Demo(args.with_cntr_mgr)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
+              northbound_listing="cli_rwmc_schema_listing.txt")
+
+    confd_ip = socket.gethostbyname(socket.gethostname())
+    intf = netifaces.ifaddresses('eth0')
+    if intf and netifaces.AF_INET in intf and len(intf[netifaces.AF_INET]):
+        confd_ip = intf[netifaces.AF_INET][0]['addr']
+    rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/test/launchpad_module_test b/modules/core/mano/rwlaunchpad/test/launchpad_module_test
new file mode 100755
index 0000000..40b1e34
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/launchpad_module_test
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+#
+# Author(s): Austin Cormier
+# Creation Date: 2015/10/15
+#
+# Helper script for invoking the Launchpad module test using the systest_wrapper
+
+set -o nounset
+set -e
+
+THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
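+# Flow: systest_wrapper.sh boots the system via --system_cmd, polls --up_cmd
+# (wait_until_system_started.py) until the system reports ready, then runs
+# the py.test command passed as --test_cmd (see the invocation at the end of
+# this script).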
+PYTEST_DIR="${RIFT_INSTALL}/usr/rift/systemtest/pytest" +SYSTEM_TEST_UTIL_DIR="${RIFT_INSTALL}/usr/rift/systemtest/util" +DEMO_DIR=$RIFT_INSTALL/demos +DEMO_TEST_DIR=$DEMO_DIR/tests + +TEST_NAME="TC_LAUNCHPAD_MODTEST_0100" +SCRIPT_SYSTEM="${RIFT_INSTALL}/demos/launchpad.py" +SCRIPT_TEST="py.test -s -v ${PYTEST_DIR}/launchpad/lp_test.py" +RESULT_XML="launchpad_modtest.xml" +wait_system=1000 + +up_cmd="$SYSTEM_TEST_UTIL_DIR/wait_until_system_started.py \ + --max-wait $wait_system" + +system_args="\ + --mode ethsim \ + --ip-list=\"192.168.122.1\" \ + --with-cntr-mgr" + +test_args="\ + --junitprefix ${TEST_NAME} \ + --junitxml ${RIFT_MODULE_TEST}/${RESULT_XML}" + +echo "Executing Launchpad Module test" + +# We must be in the pytest install directory to correctly include +# conftest.py +cd ${PYTEST_DIR} + +${SYSTEM_TEST_UTIL_DIR}/systest_wrapper.sh --system_cmd "${SCRIPT_SYSTEM} ${system_args}" --up_cmd "${up_cmd}" --test_cmd "${SCRIPT_TEST} ${test_args}" diff --git a/modules/core/mano/rwlaunchpad/test/mano_error_ut.py b/modules/core/mano/rwlaunchpad/test/mano_error_ut.py new file mode 100755 index 0000000..616597f --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/mano_error_ut.py @@ -0,0 +1,904 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
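+
+# How these error tests inject failures: the ResourceMgrMock below hands out
+# pre-created mock event handlers for compute (VDU) and network (virtual
+# link) allocations. Each test pre-allocates exactly as many handlers as its
+# scenario should succeed in consuming; the first allocation request beyond
+# that raises OutOfResourceError, forcing a failure at a chosen point of NS
+# instantiation, which the test then asserts on.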
+ + +# +# + +import argparse +import asyncio +import logging +import os +import sys +import time +import unittest +import uuid + +import xmlrunner + +import gi.repository.RwDts as rwdts +import gi.repository.RwNsmYang as rwnsmyang +import gi.repository.RwResourceMgrYang as RwResourceMgrYang +import gi.repository.RwLaunchpadYang as launchpadyang +import rift.tasklets +import rift.test.dts + +import mano_ut + + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class OutOfResourceError(Exception): + pass + + +class ComputeResourceRequestMockEventHandler(object): + def __init__(self): + self._pool_name = "vm_pool" + self._vdu_id = str(uuid.uuid4()) + self._vdu_info = { + "vdu_id": self._vdu_id, + "state": "active", + "management_ip": "1.1.1.1", + "public_ip": "1.1.1.1", + "connection_points": [], + } + + self._resource_state = "active" + + self._event_id = None + self._request_info = None + + def allocate(self, event_id, request_info): + self._event_id = event_id + self._request_info = request_info + + self._vdu_info.update({ + "name": self._request_info.name, + "flavor_id": self._request_info.flavor_id, + "image_id": self._request_info.image_id, + }) + + for cp in request_info.connection_points: + info_cp = dict( + name=cp.name, + virtual_link_id=cp.virtual_link_id, + vdu_id=self._vdu_id, + state="active", + ip_address="1.2.3.4", + ) + info_cp = self._vdu_info["connection_points"].append(info_cp) + + @property + def event_id(self): + return self._event_id + + @property + def resource_state(self): + return self._resource_state + + def set_active(self): + self._resource_state = "active" + + def set_failed(self): + self._resource_state = "failed" + + def set_pending(self): + self._resource_state = "pending" + + @property + def response_msg(self): + resource_info = dict( + pool_name=self._pool_name, + resource_state=self.resource_state, + ) + resource_info.update(self._vdu_info) + + response = RwResourceMgrYang.VDUEventData.from_dict(dict( + event_id=self._event_id, + request_info=self._request_info.as_dict(), + resource_info=resource_info, + )) + + return response.resource_info + + +class NetworkResourceRequestMockEventHandler(object): + def __init__(self): + self._pool_name = "network_pool" + self._link_id = str(uuid.uuid4()) + self._link_info = { + "virtual_link_id": self._link_id, + "state": "active", + } + + self._resource_state = "active" + + self._event_id = None + self._request_info = None + + def allocate(self, event_id, request_info): + self._event_id = event_id + self._request_info = request_info + + self._link_info.update({ + "name": self._request_info.name, + "subnet": self._request_info.subnet, + }) + + @property + def event_id(self): + return self._event_id + + @property + def resource_state(self): + return self._resource_state + + def set_active(self): + self._resource_state = "active" + + def set_failed(self): + self._resource_state = "failed" + + def set_pending(self): + self._resource_state = "pending" + + @property + def response_msg(self): + resource_info = dict( + pool_name=self._pool_name, + resource_state=self.resource_state, + ) + resource_info.update(self._link_info) + + response = RwResourceMgrYang.VirtualLinkEventData.from_dict(dict( + event_id=self._event_id, + request_info=self._request_info.as_dict(), + resource_info=resource_info, + )) + + return response.resource_info + + +class ResourceMgrMock(object): + VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data" + VLINK_REQUEST_XPATH = 
"D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data" + + def __init__(self, dts, log, loop): + self._log = log + self._dts = dts + self._loop = loop + self._vdu_reg = None + self._link_reg = None + + self._vdu_reg_event = asyncio.Event(loop=self._loop) + self._link_reg_event = asyncio.Event(loop=self._loop) + + self._available_compute_handlers = [] + self._available_network_handlers = [] + + self._used_compute_handlers = {} + self._used_network_handlers = {} + + self._compute_allocate_requests = 0 + self._network_allocate_requests = 0 + + self._registered = False + + def _allocate_virtual_compute(self, event_id, request_info): + self._compute_allocate_requests += 1 + + if not self._available_compute_handlers: + raise OutOfResourceError("No more compute handlers") + + handler = self._available_compute_handlers.pop() + handler.allocate(event_id, request_info) + self._used_compute_handlers[event_id] = handler + + return handler.response_msg + + def _allocate_virtual_network(self, event_id, request_info): + self._network_allocate_requests += 1 + + if not self._available_network_handlers: + raise OutOfResourceError("No more network handlers") + + handler = self._available_network_handlers.pop() + handler.allocate(event_id, request_info) + self._used_network_handlers[event_id] = handler + + return handler.response_msg + + def _release_virtual_network(self, event_id): + del self._used_network_handlers[event_id] + + def _release_virtual_compute(self, event_id): + del self._used_compute_handlers[event_id] + + def _read_virtual_network(self, event_id): + return self._used_network_handlers[event_id].response_msg + + def _read_virtual_compute(self, event_id): + return self._used_compute_handlers[event_id].response_msg + + @asyncio.coroutine + def on_link_request_prepare(self, xact_info, action, ks_path, request_msg): + if not self._registered: + self._log.error("Got a prepare callback when not registered!") + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + self._log.debug("Received virtual-link on_prepare callback (self: %s, xact_info: %s, action: %s): %s", + self, xact_info, action, request_msg) + + response_info = None + response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VirtualLinkEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = self._allocate_virtual_network( + pathentry.key00.event_id, + request_msg.request_info, + ) + + elif action == rwdts.QueryAction.DELETE: + self._release_virtual_network(pathentry.key00.event_id) + + elif action == rwdts.QueryAction.READ: + response_info = self._read_virtual_network( + pathentry.key00.event_id + ) + else: + raise ValueError("Only read/create/delete actions available. 
Received action: %s" %(action)) + + self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + @asyncio.coroutine + def on_vdu_request_prepare(self, xact_info, action, ks_path, request_msg): + if not self._registered: + self._log.error("Got a prepare callback when not registered!") + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + @asyncio.coroutine + def monitor_vdu_state(response_xpath, pathentry): + self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath) + loop_cnt = 120 + while loop_cnt > 0: + self._log.debug("VDU state monitoring: Sleeping for 1 second ") + yield from asyncio.sleep(1, loop = self._loop) + try: + response_info = self._read_virtual_compute( + pathentry.key00.event_id + ) + except Exception as e: + self._log.error( + "VDU state monitoring: Received exception %s " + "in VDU state monitoring for %s. Aborting monitoring", + str(e), response_xpath + ) + raise + + if response_info.resource_state == 'active' or response_info.resource_state == 'failed': + self._log.info( + "VDU state monitoring: VDU reached terminal state." + "Publishing VDU info: %s at path: %s", + response_info, response_xpath + ) + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + else: + loop_cnt -= 1 + + ### End of while loop. This is only possible if VDU did not reach active state + self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. Aborting monitoring", + response_xpath) + response_info = RwResourceMgrYang.VDUEventData_ResourceInfo() + response_info.resource_state = 'failed' + yield from self._dts.query_update(response_xpath, + rwdts.Flag.ADVISE, + response_info) + return + + self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s", + xact_info, action, request_msg) + + response_info = None + response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info" + + schema = RwResourceMgrYang.VDUEventData().schema() + pathentry = schema.keyspec_to_entry(ks_path) + + if action == rwdts.QueryAction.CREATE: + response_info = self._allocate_virtual_compute( + pathentry.key00.event_id, + request_msg.request_info, + ) + if response_info.resource_state == 'pending': + asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry), + loop = self._loop) + + elif action == rwdts.QueryAction.DELETE: + self._release_virtual_compute( + pathentry.key00.event_id + ) + + elif action == rwdts.QueryAction.READ: + response_info = self._read_virtual_compute( + pathentry.key00.event_id + ) + else: + raise ValueError("Only create/delete actions available. 
Received action: %s" %(action)) + + self._log.debug("Responding with VDUInfo at xpath %s: %s", + response_xpath, response_info) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info) + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_request_ready(registration, status): + self._log.debug("Got request ready event (registration: %s) (status: %s)", + registration, status) + + if registration == self._link_reg: + self._link_reg_event.set() + elif registration == self._vdu_reg: + self._vdu_reg_event.set() + else: + self._log.error("Unknown registration ready event: %s", registration) + + + with self._dts.group_create() as group: + self._log.debug("Registering for Link Resource Request using xpath: %s", + ResourceMgrMock.VLINK_REQUEST_XPATH) + + self._link_reg = group.register( + xpath=ResourceMgrMock.VLINK_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_prepare=self.on_link_request_prepare), + flags=rwdts.Flag.PUBLISHER) + + self._log.debug("Registering for VDU Resource Request using xpath: %s", + ResourceMgrMock.VDU_REQUEST_XPATH) + + self._vdu_reg = group.register( + xpath=ResourceMgrMock.VDU_REQUEST_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready, + on_prepare=self.on_vdu_request_prepare), + flags=rwdts.Flag.PUBLISHER) + + self._registered = True + + def unregister(self): + self._link_reg.deregister() + self._vdu_reg.deregister() + self._registered = False + + @asyncio.coroutine + def wait_ready(self, timeout=5): + self._log.debug("Waiting for all request registrations to become ready.") + yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()], + timeout=timeout, loop=self._loop) + + def create_compute_mock_event_handler(self): + handler = ComputeResourceRequestMockEventHandler() + self._available_compute_handlers.append(handler) + + return handler + + def create_network_mock_event_handler(self): + handler = NetworkResourceRequestMockEventHandler() + self._available_network_handlers.append(handler) + + return handler + + @property + def num_compute_requests(self): + return self._compute_allocate_requests + + @property + def num_network_requests(self): + return self._network_allocate_requests + + @property + def num_allocated_compute_resources(self): + return len(self._used_compute_handlers) + + @property + def num_allocated_network_resources(self): + return len(self._used_network_handlers) + + +@unittest.skip('failing and needs rework') +class ManoErrorTestCase(rift.test.dts.AbstractDTSTest): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events for staging through the + test. These are required here because we are bring up each coroutine + ("tasklet") at the same time and are not implementing any re-try + mechanisms. For instance, this is used in numerous tests to make sure that + a publisher is up and ready before the subscriber sends queries. Such + event lists should not be used in production software. 
+ """ + + @classmethod + def configure_suite(cls, rwmain): + launchpad_build_dir = os.path.join( + cls.top_dir, + '.build/modules/core/mc/core_mc-build/rwlaunchpad' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwvns'), + 'rwvnstasklet' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwvnfm'), + 'rwvnfmtasklet' + ) + + rwmain.add_tasklet( + os.path.join(launchpad_build_dir, 'plugins/rwnsm'), + 'rwnsmtasklet' + ) + + cls.waited_for_tasklets = False + + @asyncio.coroutine + def register_mock_res_mgr(self): + self.res_mgr = ResourceMgrMock( + self.dts, + self.log, + self.loop, + ) + yield from self.res_mgr.register() + + self.log.info("Waiting for resource manager to be ready") + yield from self.res_mgr.wait_ready() + + def unregister_mock_res_mgr(self): + self.res_mgr.unregister() + + @classmethod + def configure_schema(cls): + return rwnsmyang.get_schema() + + @classmethod + def configure_timeout(cls): + return 240 + + @asyncio.coroutine + def wait_tasklets(self): + if not ManoErrorTestCase.waited_for_tasklets: + yield from asyncio.sleep(5, loop=self.loop) + ManoErrorTestCase.waited_for_tasklets = True + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + yield from self.ping_pong.publish_desciptors( + num_external_vlrs, + num_internal_vlrs, + num_ping_vms + ) + + def unpublish_descriptors(self): + self.ping_pong.unpublish_descriptors() + + @asyncio.coroutine + def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20): + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + self.assertEqual(1, len(nsrs)) + if nsrs[0].operational_status in ['running', 'failed']: + return + + self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status) + yield from asyncio.sleep(2, loop=self.loop) + + self.assertIn(nsrs[0].operational_status, ['running', 'failed']) + + def verify_number_compute_requests(self, num_requests): + self.assertEqual(num_requests, self.res_mgr.num_compute_requests) + + def verify_number_network_requests(self, num_requests): + self.assertEqual(num_requests, self.res_mgr.num_network_requests) + + def verify_number_allocated_compute(self, num_allocated): + self.assertEqual(num_allocated, self.res_mgr.num_allocated_compute_resources) + + def verify_number_allocated_network(self, num_allocated): + self.assertEqual(num_allocated, self.res_mgr.num_allocated_network_resources) + + def allocate_network_handlers(self, num_networks): + return [self.res_mgr.create_network_mock_event_handler() for _ in range(num_networks)] + + def allocate_compute_handlers(self, num_computes): + return [self.res_mgr.create_compute_mock_event_handler() for _ in range(num_computes)] + + @asyncio.coroutine + def create_mock_launchpad_tasklet(self): + yield from mano_ut.create_mock_launchpad_tasklet(self.log, self.dts) + + def configure_test(self, loop, test_id): + self.log.debug("STARTING - %s", self.id()) + self.tinfo = self.new_tinfo(self.id()) + self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop) + self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts) + self.querier = mano_ut.ManoQuerier(self.log, self.dts) + + # Add a task to wait for tasklets to come up + asyncio.ensure_future(self.wait_tasklets(), loop=self.loop) + + @rift.test.dts.async_test + def test_fail_first_nsm_vlr(self): + yield from self.publish_desciptors(num_external_vlrs=2) + yield from 
self.register_mock_res_mgr() + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(1) + yield from self.verify_num_nsr_vlrs(nsr_id, 2) + yield from self.verify_num_vnfrs(0) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "failed") + + self.verify_number_network_requests(1) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_second_nsm_vlr(self): + yield from self.publish_desciptors(num_external_vlrs=2) + yield from self.register_mock_res_mgr() + self.allocate_network_handlers(1) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(2) + yield from self.verify_num_nsr_vlrs(nsr_id, 2) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + yield from self.verify_vlr_state(nsr_vlrs[1], "failed") + + self.verify_number_network_requests(2) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(1) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_first_vlr(self): + yield from self.publish_desciptors(num_internal_vlrs=2) + yield from self.register_mock_res_mgr() + self.allocate_network_handlers(1) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(2) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "failed") + + self.verify_number_network_requests(2) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(1) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from 
self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_second_vlr(self): + yield from self.publish_desciptors(num_internal_vlrs=2) + yield from self.register_mock_res_mgr() + self.allocate_network_handlers(2) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(3) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from self.verify_vlr_state(vnf_vlrs[1], "failed") + + self.verify_number_network_requests(3) + self.verify_number_compute_requests(0) + self.verify_number_allocated_network(2) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield from self.verify_num_vlrs(0) + + self.verify_number_allocated_network(0) + self.verify_number_allocated_compute(0) + + self.unregister_mock_res_mgr() + self.unpublish_descriptors() + + @rift.test.dts.async_test + def test_fail_first_vnf_first_vdu(self): + yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2) + yield from self.register_mock_res_mgr() + yield from self.create_mock_launchpad_tasklet() + self.allocate_network_handlers(3) + + nsr_id = yield from self.ping_pong.create_nsr() + yield from self.wait_until_nsr_active_or_failed(nsr_id) + + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1) + yield from self.verify_nsr_state(nsr_id, "failed") + yield from self.verify_num_vlrs(3) + yield from self.verify_num_nsr_vlrs(nsr_id, 1) + + nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id) + yield from self.verify_vlr_state(nsr_vlrs[0], "running") + + yield from self.verify_num_nsr_vnfrs(nsr_id, 2) + + # Verify only a single vnfr was instantiated and is failed + yield from self.verify_num_vnfrs(1) + nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id) + yield from self.verify_vnf_state(nsr_vnfs[0], "failed") + + yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2) + vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0]) + yield from self.verify_vlr_state(vnf_vlrs[0], "running") + yield from self.verify_vlr_state(vnf_vlrs[1], "running") + + yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2) + vdus = yield from self.get_vnf_vdus(nsr_vnfs[0]) + self.verify_vdu_state(vdus[0], "failed") + + self.verify_number_network_requests(3) + self.verify_number_compute_requests(1) + self.verify_number_allocated_network(3) + self.verify_number_allocated_compute(0) + + yield from self.terminate_nsr(nsr_id) + + yield from self.verify_nsr_deleted(nsr_id) + yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0) + yield 
from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_second_vdu(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(3)
+        self.allocate_compute_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and that it failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(2)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_first_vdu(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(3)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and that it failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(1)
+        self.verify_number_allocated_network(3)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_second_vnf_second_vdu(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(5)
+        self.allocate_compute_handlers(3)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(5)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify both vnfrs were instantiated and that only the second one failed
+        yield from self.verify_num_vnfrs(2)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "running")
+        yield from self.verify_vnf_state(nsr_vnfs[1], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[1])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[1], 2)
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "running")
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[1])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "failed")
+
+        self.verify_number_network_requests(5)
+        self.verify_number_compute_requests(4)
+        self.verify_number_allocated_network(5)
+        self.verify_number_allocated_compute(3)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    args, _ = parser.parse_known_args()
+
+    ManoErrorTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/test/mano_ut.py b/modules/core/mano/rwlaunchpad/test/mano_ut.py
new file mode 100755
index 0000000..8ef373c
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/mano_ut.py
@@ -0,0 +1,814 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
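+
+# Usage sketch (illustrative only; not executed anywhere in this module).
+# It assumes an initialized DTS handle ``dts`` and a logger ``log`` inside a
+# coroutine; ManoQuerier and XPaths are defined later in this file:
+#
+#     querier = ManoQuerier(log, dts)
+#     # wildcard read: every NSR operational record in the catalog
+#     nsrs = yield from querier.get_nsr_opdatas()
+#     # keyed read: XPaths.nsr_opdata(nsr_id) appends the list-key predicate
+#     nsrs_for_id = yield from querier.get_nsr_opdatas(nsr_id)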
+ + +# +# + + +import asyncio +import os +import sys +import unittest +import uuid +import xmlrunner +import argparse +import logging + +import gi +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwDts', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwLaunchpadYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwcalYang', '1.0') + +from gi.repository import ( + NsrYang as nsryang, + RwCloudYang as rwcloudyang, + RwDts as rwdts, + RwLaunchpadYang as launchpadyang, + RwNsmYang as rwnsmyang, + RwNsrYang as rwnsryang, + RwResourceMgrYang as rmgryang, + RwcalYang as rwcalyang, +) + +from gi.repository.RwTypes import RwStatus +import rift.mano.examples.ping_pong_nsd as ping_pong_nsd +import rift.tasklets +import rift.test.dts +import rw_peas + + +openstack_info = { + 'username': 'pluto', + 'password': 'mypasswd', + 'auth_url': 'http://10.66.4.14:5000/v3/', + 'project_name': 'demo', + 'mgmt_network': 'private', + 'image_id': '03bafdd3-8faa-44d5-bb5d-571b1655232f', + 'vms': ['test1', 'test2'], + 'networks': ['testnet1', 'testnet2', 'testnet3'] + } + + +if sys.version_info < (3, 4, 4): + asyncio.ensure_future = asyncio.async + + +class XPaths(object): + @staticmethod + def nsd(k=None): + return ("C,/nsd:nsd-catalog/nsd:nsd" + + ("[nsd:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vld(k=None): + return ("C,/vld:vld-catalog/vld:vld" + + ("[vld:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vnfd(k=None): + return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" + + ("[vnfd:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vnfr(k=None): + return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" + + ("[vnfr:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def vlr(k=None): + return ("D,/vlr:vlr-catalog/vlr:vlr" + + ("[vlr:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def nsd_ref_count(k=None): + return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" + + ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else "")) + + @staticmethod + def vnfd_ref_count(k=None): + return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" + + ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else "")) + + @staticmethod + def nsr_config(k=None): + return ("C,/nsr:ns-instance-config/nsr:nsr" + + ("[nsr:id='{}']".format(k) if k is not None else "")) + + @staticmethod + def nsr_opdata(k=None): + return ("D,/nsr:ns-instance-opdata/nsr:nsr" + + ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else "")) + + +class ManoQuerier(object): + def __init__(self, log, dts): + self.log = log + self.dts = dts + + @asyncio.coroutine + def _read_query(self, xpath, do_trace=False): + flags = rwdts.Flag.MERGE + flags += rwdts.Flag.TRACE if do_trace else 0 + res_iter = yield from self.dts.query_read( + xpath, flags=flags + ) + + results = [] + for i in res_iter: + result = yield from i + if result is not None: + results.append(result.result) + + return results + + @asyncio.coroutine + def get_nsr_opdatas(self, nsr_id=None): + return (yield from self._read_query(XPaths.nsr_opdata(nsr_id), True)) + + @asyncio.coroutine + def get_nsr_configs(self, nsr_id=None): + return (yield from self._read_query(XPaths.nsr_config(nsr_id))) + + @asyncio.coroutine + def get_vnfrs(self, vnfr_id=None): + return (yield from self._read_query(XPaths.vnfr(vnfr_id))) + + @asyncio.coroutine + def get_vlrs(self, vlr_id=None): + return (yield from self._read_query(XPaths.vlr(vlr_id))) + + @asyncio.coroutine + 
def get_nsd_ref_counts(self, nsd_id=None): + return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id))) + + @asyncio.coroutine + def get_vnfd_ref_counts(self, vnfd_id=None): + return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id))) + + @asyncio.coroutine + def delete_nsr(self, nsr_id): + with self.dts.transaction() as xact: + yield from self.dts.query_delete( + XPaths.nsr_config(nsr_id), + rwdts.Flag.TRACE, + #rwdts.Flag.ADVISE, + ) + + @asyncio.coroutine + def delete_nsd(self, nsd_id): + nsd_xpath = XPaths.nsd(nsd_id) + self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_delete( + nsd_xpath, + rwdts.Flag.ADVISE, + ) + + @asyncio.coroutine + def delete_vnfd(self, vnfd_id): + vnfd_xpath = XPaths.vnfd(vnfd_id) + self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_delete( + vnfd_xpath, + rwdts.Flag.ADVISE, + ) + + @asyncio.coroutine + def update_nsd(self, nsd_id, nsd_msg): + nsd_xpath = XPaths.nsd(nsd_id) + self.log.debug("Attempting to update NSD with path = %s", nsd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_update( + nsd_xpath, + rwdts.Flag.ADVISE, + nsd_msg, + ) + + @asyncio.coroutine + def update_vnfd(self, vnfd_id, vnfd_msg): + vnfd_xpath = XPaths.vnfd(vnfd_id) + self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath) + with self.dts.transaction() as xact: + yield from self.dts.query_update( + vnfd_xpath, + rwdts.Flag.ADVISE, + vnfd_msg, + ) + + +class ManoTestCase(rift.test.dts.AbstractDTSTest): + @asyncio.coroutine + def verify_nsr_state(self, nsr_id, state): + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + self.assertEqual(1, len(nsrs)) + nsr = nsrs[0] + + self.log.debug("Got nsr = %s", nsr) + self.assertEqual(state, nsr.operational_status) + + @asyncio.coroutine + def verify_vlr_state(self, vlr_id, state): + vlrs = yield from self.querier.get_vlrs(vlr_id) + self.assertEqual(1, len(vlrs)) + vlr = vlrs[0] + + self.assertEqual(state, vlr.operational_status) + + def verify_vdu_state(self, vdu, state): + self.assertEqual(state, vdu.operational_status) + + @asyncio.coroutine + def verify_vnf_state(self, vnfr_id, state): + vnfrs = yield from self.querier.get_vnfrs(vnfr_id) + self.assertEqual(1, len(vnfrs)) + vnfr = vnfrs[0] + + self.assertEqual(state, vnfr.operational_status) + + @asyncio.coroutine + def terminate_nsr(self, nsr_id): + self.log.debug("Terminating nsr id: %s", nsr_id) + yield from self.querier.delete_nsr(nsr_id) + + @asyncio.coroutine + def verify_nsr_deleted(self, nsr_id): + nsr_opdatas = yield from self.querier.get_nsr_opdatas(nsr_id) + self.assertEqual(0, len(nsr_opdatas)) + + nsr_configs = yield from self.querier.get_nsr_configs(nsr_id) + self.assertEqual(0, len(nsr_configs)) + + @asyncio.coroutine + def verify_num_vlrs(self, num_vlrs): + vlrs = yield from self.querier.get_vlrs() + self.assertEqual(num_vlrs, len(vlrs)) + + @asyncio.coroutine + def get_nsr_vlrs(self, nsr_id): + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + return [v.vlr_ref for v in nsrs[0].vlr] + + @asyncio.coroutine + def get_nsr_vnfs(self, nsr_id): + nsrs = yield from self.querier.get_nsr_opdatas(nsr_id) + return nsrs[0].constituent_vnfr_ref + + @asyncio.coroutine + def get_vnf_vlrs(self, vnfr_id): + vnfrs = yield from self.querier.get_vnfrs(vnfr_id) + return [i.vlr_ref for i in vnfrs[0].internal_vlr] + + @asyncio.coroutine + def 
verify_num_nsr_vlrs(self, nsr_id, num_vlrs): + vlrs = yield from self.get_nsr_vlrs(nsr_id) + self.assertEqual(num_vlrs, len(vlrs)) + + @asyncio.coroutine + def verify_num_nsr_vnfrs(self, nsr_id, num_vnfs): + vnfs = yield from self.get_nsr_vnfs(nsr_id) + self.assertEqual(num_vnfs, len(vnfs)) + + @asyncio.coroutine + def verify_num_vnfr_vlrs(self, vnfr_id, num_vlrs): + vlrs = yield from self.get_vnf_vlrs(vnfr_id) + self.assertEqual(num_vlrs, len(vlrs)) + + @asyncio.coroutine + def get_vnf_vdus(self, vnfr_id): + vnfrs = yield from self.querier.get_vnfrs(vnfr_id) + return [i for i in vnfrs[0].vdur] + + @asyncio.coroutine + def verify_num_vnfr_vdus(self, vnfr_id, num_vdus): + vdus = yield from self.get_vnf_vdus(vnfr_id) + self.assertEqual(num_vdus, len(vdus)) + + @asyncio.coroutine + def verify_num_vnfrs(self, num_vnfrs): + vnfrs = yield from self.querier.get_vnfrs() + self.assertEqual(num_vnfrs, len(vnfrs)) + + @asyncio.coroutine + def verify_nsd_ref_count(self, nsd_id, num_ref): + nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id) + self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count) + + +class DescriptorPublisher(object): + def __init__(self, log, loop, dts): + self.log = log + self.loop = loop + self.dts = dts + + self._registrations = [] + + @asyncio.coroutine + def publish(self, w_path, path, desc): + ready_event = asyncio.Event(loop=self.loop) + + @asyncio.coroutine + def on_ready(regh, status): + self.log.debug("Create element: %s, obj-type:%s obj:%s", + path, type(desc), desc) + with self.dts.transaction() as xact: + regh.create_element(path, desc, xact.xact) + self.log.debug("Created element: %s, obj:%s", path, desc) + ready_event.set() + + handler = rift.tasklets.DTS.RegistrationHandler( + on_ready=on_ready + ) + + self.log.debug("Registering path: %s, obj:%s", w_path, desc) + reg = yield from self.dts.register( + w_path, + handler, + flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ + ) + self._registrations.append(reg) + self.log.debug("Registered path : %s", w_path) + yield from ready_event.wait() + + return reg + + def unpublish_all(self): + self.log.debug("Deregistering all published descriptors") + for reg in self._registrations: + reg.deregister() + + +class PingPongNsrConfigPublisher(object): + XPATH = "C,/nsr:ns-instance-config" + + def __init__(self, log, loop, dts, nsd_id, cloud_account_name): + self.dts = dts + self.log = log + self.loop = loop + self.ref = None + + self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig() + + nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "ns1.{}".format(nsr.id) + nsr.nsd_ref = nsd_id + nsr.cloud_account = cloud_account_name + + inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter() + inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(nsd_id) + inputs.value = "inigo montoya" + + nsr.input_parameter.append(inputs) + + self.nsr_config.nsr.append(nsr) + + @asyncio.coroutine + def register(self): + ready_event = asyncio.Event(loop=self.loop) + + @asyncio.coroutine + def on_ready(regh, status): + with self.dts.transaction() as xact: + regh.create_element( + PingPongNsrConfigPublisher.XPATH, + self.nsr_config, + xact=xact.xact, + ) + + ready_event.set() + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + if action == rwdts.QueryAction.READ: + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=PingPongNsrConfigPublisher.XPATH, + msg=self.nsr_config, + ) + elif action == rwdts.QueryAction.DELETE: + 
self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig() + self.reg.delete_element( + PingPongNsrConfigPublisher.XPATH, + ) + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + else: + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH) + self.reg = yield from self.dts.register( + PingPongNsrConfigPublisher.XPATH, + flags=rwdts.Flag.PUBLISHER, + handler=rift.tasklets.DTS.RegistrationHandler( + on_ready=on_ready, + on_prepare=on_prepare, + ), + ) + + yield from ready_event.wait() + + def deregister(self): + if self.reg is not None: + self.reg.deregister() + + +class PingPongDescriptorPublisher(object): + def __init__(self, log, loop, dts): + self.log = log + self.loop = loop + self.dts = dts + + self.querier = ManoQuerier(self.log, self.dts) + self.publisher = DescriptorPublisher(self.log, self.loop, self.dts) + self.nsr_config_publisher = None + + @property + def nsd_id(self): + return self.ping_pong_nsd.id + + @property + def ping_vnfd_id(self): + return self.ping_vnfd.id + + @property + def pong_vnfd_id(self): + return self.pong_vnfd.id + + @asyncio.coroutine + def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1): + self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \ + ping_pong_nsd.generate_ping_pong_descriptors( + pingcount=1, + external_vlr_count=num_external_vlrs, + internal_vlr_count=num_internal_vlrs, + num_vnf_vms=2, + ) + + # Publish ping_vnfd + xpath = XPaths.vnfd(self.ping_vnfd_id) + xpath_wild = XPaths.vnfd() + for obj in self.ping_vnfd.descriptor.vnfd: + self.log.debug("Publishing ping_vnfd path: %s - %s, type:%s, obj:%s", + xpath, xpath_wild, type(obj), obj) + yield from self.publisher.publish(xpath_wild, xpath, obj) + + # Publish pong_vnfd + xpath = XPaths.vnfd(self.pong_vnfd_id) + xpath_wild = XPaths.vnfd() + for obj in self.pong_vnfd.descriptor.vnfd: + self.log.debug("Publishing pong_vnfd path: %s, wild_path: %s, obj:%s", + xpath, xpath_wild, obj) + yield from self.publisher.publish(xpath_wild, xpath, obj) + + # Publish ping_pong_nsd + xpath = XPaths.nsd(self.nsd_id) + xpath_wild = XPaths.nsd() + for obj in self.ping_pong_nsd.descriptor.nsd: + self.log.debug("Publishing ping_pong nsd path: %s, wild_path: %s, obj:%s", + xpath, xpath_wild, obj) + yield from self.publisher.publish(xpath_wild, xpath, obj) + + self.log.debug("DONE - publish_desciptors") + + def unpublish_descriptors(self): + self.publisher.unpublish_all() + if self.nsr_config_publisher is not None: + self.nsr_config_publisher.deregister() + + @asyncio.coroutine + def publish_nsr_config(self, cloud_account_name): + self.nsr_config_publisher = PingPongNsrConfigPublisher( + self.log, + self.loop, + self.dts, + self.nsd_id, + cloud_account_name, + ) + + yield from self.nsr_config_publisher.register() + return self.nsr_config_publisher.nsr_config.nsr[0].id + + @asyncio.coroutine + def delete_nsd(self): + yield from self.querier.delete_nsd(self.ping_pong_nsd.id) + + @asyncio.coroutine + def delete_ping_vnfd(self): + yield from self.querier.delete_vnfd(self.ping_vnfd.id) + + @asyncio.coroutine + def update_nsd(self): + yield from self.querier.update_nsd( + self.ping_pong_nsd.id, + self.ping_pong_nsd.descriptor.nsd[0] + ) + + @asyncio.coroutine + def update_ping_vnfd(self): + yield from self.querier.update_vnfd( + self.ping_vnfd.id, + self.ping_vnfd.descriptor.vnfd[0] + ) + + +class VnsTestCase(rift.test.dts.AbstractDTSTest): + """ + DTS GI interface unittests + + Note: Each tests uses a list of asyncio.Events 
for staging through the
+    test. These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any retry
+    mechanisms. For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries. Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        vns_dir = os.environ.get('VNS_DIR')
+        vnfm_dir = os.environ.get('VNFM_DIR')
+        nsm_dir = os.environ.get('NSM_DIR')
+        rm_dir = os.environ.get('RM_DIR')
+
+        rwmain.add_tasklet(vns_dir, 'rwvnstasklet')
+        rwmain.add_tasklet(vnfm_dir, 'rwvnfmtasklet')
+        rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet')
+        rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    @staticmethod
+    def get_cal_account(account_type, account_name):
+        """
+        Creates a CloudAccount object for the requested account type.
+        """
+        account = rwcloudyang.CloudAccount()
+        if account_type == 'mock':
+            account.name = account_name
+            account.account_type = "mock"
+            account.mock.username = "mock_user"
+        elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')):
+            account.name = account_name
+            account.account_type = 'openstack'
+            account.openstack.key = openstack_info['username']
+            account.openstack.secret = openstack_info['password']
+            account.openstack.auth_url = openstack_info['auth_url']
+            account.openstack.tenant = openstack_info['project_name']
+            account.openstack.mgmt_network = openstack_info['mgmt_network']
+        return account
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+        account = self.get_cal_account(cloud_type, cloud_name)
+        account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+        self.log.info("Configuring cloud-account: %s", account)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.Flag.ADVISE | rwdts.Flag.TRACE,
+                                    account)
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        yield from asyncio.sleep(5, loop=self.loop)
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        # Create the per-test tasklet info before constructing the DTS handle
+        # (mirrors configure_test in the sibling test cases)
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
+        self.querier = ManoQuerier(self.log, self.dts)
+
+    def test_create_nsr_record(self):
+        @asyncio.coroutine
+        def verify_nsr_opdata(termination=False):
+            self.log.debug("Verifying nsr opdata path = %s", XPaths.nsr_opdata())
+
+            while True:
+                nsrs = yield from self.querier.get_nsr_opdatas()
+                if termination:
+                    self.assertEqual(0, len(nsrs))
+                    return
+
+                nsr = nsrs[0]
+                self.log.debug("Got nsr record %s", nsr)
+                if nsr.operational_status == 'running':
+                    self.log.debug("!!! Rcvd NSR with running status !!!")
+                    break
+
+                self.log.debug("Rcvd NSR with %s status", nsr.operational_status)
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def verify_nsr_config(termination=False):
+            self.log.debug("Verifying nsr config path = %s", XPaths.nsr_config())
+
+            nsr_configs = yield from self.querier.get_nsr_configs()
+            self.assertEqual(1, len(nsr_configs))
+
+            nsr_config = nsr_configs[0]
+            self.assertEqual(
+                "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+                nsr_config.input_parameter[0].xpath,
+            )
+
+        @asyncio.coroutine
+        def verify_vnfr_record(termination=False):
+            self.log.debug("Verifying vnfr record path = %s, Termination=%d",
+                           XPaths.vnfr(), termination)
+            if termination:
+                for i in range(5):
+                    vnfrs = yield from self.querier.get_vnfrs()
+                    if len(vnfrs) == 0:
+                        return True
+
+                    for vnfr in vnfrs:
+                        self.log.debug("VNFR still exists = %s", vnfr)
+
+                    # Give cleanup a moment to complete before re-checking
+                    yield from asyncio.sleep(2, loop=self.loop)
+
+                assert len(vnfrs) == 0
+
+            while True:
+                vnfrs = yield from self.querier.get_vnfrs()
+                if len(vnfrs) != 0 and termination is False:
+                    vnfr = vnfrs[0]
+                    self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
+                    if vnfr.operational_status == 'running':
+                        self.log.debug("!!! Rcvd VNFR with running status !!!")
+                        return True
+
+                    elif vnfr.operational_status == "failed":
+                        self.log.debug("!!! Rcvd VNFR with failed status !!!")
+                        return False
+
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def verify_vlr_record(termination=False):
+            vlr_xpath = XPaths.vlr()
+            self.log.debug("Verifying vlr record path = %s, termination: %s",
+                           vlr_xpath, termination)
+            res_iter = yield from self.dts.query_read(vlr_xpath)
+
+            for i in res_iter:
+                result = yield from i
+                if termination:
+                    self.assertIsNone(result)
+
+                self.log.debug("Got vlr record %s", result)
+
+        @asyncio.coroutine
+        def verify_nsd_ref_count(termination):
+            self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count())
+            res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count())
+
+            for i in res_iter:
+                result = yield from i
+                self.log.debug("Got nsd ref count record %s", result)
+
+        @asyncio.coroutine
+        def verify_vnfd_ref_count(termination):
+            self.log.debug("Verifying vnfd ref count= %s", XPaths.vnfd_ref_count())
+            res_iter = yield from self.dts.query_read(XPaths.vnfd_ref_count())
+
+            for i in res_iter:
+                result = yield from i
+                self.log.debug("Got vnfd ref count record %s", result)
+
+        @asyncio.coroutine
+        def verify_results(termination=False):
+            yield from verify_vnfr_record(termination)
+            yield from verify_vlr_record(termination)
+            yield from verify_nsr_opdata(termination)
+            yield from verify_nsr_config(termination)
+            yield from verify_nsd_ref_count(termination)
+            yield from verify_vnfd_ref_count(termination)
+
+        @asyncio.coroutine
+        def terminate_ns(nsr_id):
+            xpath = XPaths.nsr_config(nsr_id)
+            self.log.debug("Terminating network service with path %s", xpath)
+            yield from self.dts.query_delete(xpath, flags=rwdts.Flag.ADVISE)
+            self.log.debug("Terminated network service with path %s", xpath)
+
+        @asyncio.coroutine
+        def run_test():
+            yield from self.wait_tasklets()
+
+            cloud_type = "mock"
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
+
+            yield from self.ping_pong.publish_desciptors()
+
+            # Attempt updating a VNFD that is not in use
+            yield from self.ping_pong.update_ping_vnfd()
+
+            # Attempt updating an NSD that is not in use
+            yield from self.ping_pong.update_nsd()
+
+            # Attempt deleting a VNFD that is not in use
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            # Attempt deleting an NSD that is not in use
+            yield from self.ping_pong.delete_nsd()
+
+            yield from self.ping_pong.publish_desciptors()
+
+            # Create an ns-instance-config element and prompt the creation of
+            # an NSR.
+            nsr_id = yield from self.ping_pong.publish_nsr_config("mock_account")
+
+            yield from verify_results()
+
+            # Attempt deleting VNFD in use
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            # Attempt deleting NSD in use
+            yield from self.ping_pong.delete_nsd()
+
+            yield from terminate_ns(nsr_id)
+
+            yield from asyncio.sleep(2, loop=self.loop)
+            self.log.debug("Verifying termination results")
+            yield from verify_results(termination=True)
+            self.log.debug("Verified termination results")
+
+            self.log.debug("Attempting to delete VNFD for real")
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            self.log.debug("Attempting to delete NSD for real")
+            yield from self.ping_pong.delete_nsd()
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+
+def main():
+    top_dir = __file__[:__file__.find('/modules/core/')]
+    build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build')
+    launchpad_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build/rwlaunchpad')
+
+    if 'VNS_DIR' not in os.environ:
+        os.environ['VNS_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwvns')
+
+    if 'VNFM_DIR' not in os.environ:
+        os.environ['VNFM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwvnfm')
+
+    if 'NSM_DIR' not in os.environ:
+        os.environ['NSM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwnsm')
+
+    if 'RM_DIR' not in os.environ:
+        os.environ['RM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwresmgrtasklet')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    VnsTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py b/modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py
new file mode 100755
index 0000000..d6947f0
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/openmano_nsm_ut.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
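+
+# Flow sketch for the single test below (condensed from
+# test_ping_pong_nsm_instantiate; the cloud account name must match the one
+# created in configure_cloud_account):
+#
+#     yield from self.configure_cloud_account()
+#     yield from self.publish_desciptors(num_internal_vlrs=0)
+#     nsr_id = yield from self.ping_pong.publish_nsr_config("openmano_name")
+#     yield from self.wait_until_nsr_active_or_failed(nsr_id)
+#     yield from self.verify_nsr_state(nsr_id, "running")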
+
+
+#
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+
+import xmlrunner
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwLaunchpadYang as launchpadyang,
+    RwNsmYang as rwnsmyang,
+    RwCloudYang as rwcloudyang,
+    RwResourceMgrYang,
+    )
+import rift.tasklets
+import rift.test.dts
+
+import mano_ut
+
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class OpenManoNsmTestCase(mano_ut.ManoTestCase):
+    """
+    DTS GI interface unittests
+
+    Note: Each test uses a list of asyncio.Events for staging through the
+    test. These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any retry
+    mechanisms. For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries. Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        launchpad_build_dir = os.path.join(
+            cls.top_dir,
+            '.build/modules/core/mc/core_mc-build/rwlaunchpad'
+        )
+
+        rwmain.add_tasklet(
+            os.path.join(launchpad_build_dir, 'plugins/rwnsm'),
+            'rwnsmtasklet'
+        )
+
+        cls.waited_for_tasklets = False
+
+    @classmethod
+    def configure_schema(cls):
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        if not OpenManoNsmTestCase.waited_for_tasklets:
+            OpenManoNsmTestCase.waited_for_tasklets = True
+            self._wait_event = asyncio.Event(loop=self.loop)
+            yield from asyncio.sleep(5, loop=self.loop)
+            self._wait_event.set()
+
+        yield from self._wait_event.wait()
+
+    @asyncio.coroutine
+    def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+        yield from self.ping_pong.publish_desciptors(
+            num_external_vlrs,
+            num_internal_vlrs,
+            num_ping_vms
+        )
+
+    def unpublish_descriptors(self):
+        self.ping_pong.unpublish_descriptors()
+
+    @asyncio.coroutine
+    def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20):
+        start_time = time.time()
+        while (time.time() - start_time) < timeout_secs:
+            nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+            if len(nsrs) == 0:
+                # NSR opdata may not be published yet; back off before retrying
+                yield from asyncio.sleep(2, loop=self.loop)
+                continue
+            self.assertEqual(1, len(nsrs))
+            if nsrs[0].operational_status in ['running', 'failed']:
+                return
+
+            self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status)
+            yield from asyncio.sleep(2, loop=self.loop)
+
+        self.assertIn(nsrs[0].operational_status, ['running', 'failed'])
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts)
+        self.querier = mano_ut.ManoQuerier(self.log, self.dts)
+
+        # Add a task to wait for tasklets to come up
+        asyncio.ensure_future(self.wait_tasklets(), loop=self.loop)
+
+    @asyncio.coroutine
+    def configure_cloud_account(self):
+        account_xpath = "C,/rw-cloud:cloud-account"
+        account = rwcloudyang.CloudAccount()
+        account.name = "openmano_name"
+        account.account_type = "openmano"
+        account.openmano.host = "10.64.5.73"
+        account.openmano.port = 9090
+        account.openmano.tenant_id = "eecfd632-bef1-11e5-b5b8-0800273ab84b"
+        self.log.info("Configuring cloud-account: %s", account)
+        yield from self.dts.query_create(
+            account_xpath,
+            rwdts.Flag.ADVISE,
+            account,
+        )
+
+    @rift.test.dts.async_test
+    def test_ping_pong_nsm_instantiate(self):
+        yield from self.wait_tasklets()
+        yield from self.configure_cloud_account()
+        yield from self.publish_desciptors(num_internal_vlrs=0)
+
+        # publish_nsr_config() requires the cloud account name; use the
+        # account created in configure_cloud_account() above
+        nsr_id = yield from self.ping_pong.publish_nsr_config("openmano_name")
+
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "running")
+        yield from self.verify_num_vlrs(0)
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        yield from self.verify_num_vnfrs(2)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "running")
+        yield from self.verify_vnf_state(nsr_vnfs[1], "running")
+
+        yield from self.terminate_nsr(nsr_id)
+        yield from asyncio.sleep(2, loop=self.loop)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vnfrs(0)
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    OpenManoNsmTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]]+unittest_args)
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py
new file mode 100644
index 0000000..6f9fcbb
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_test.py
@@ -0,0 +1,308 @@
+#!/usr/bin/env python
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
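+
+# Onboarding in these tests goes through the launchpad upload REST API on
+# port 4567. A condensed sketch (upload_descriptor and
+# wait_unboard_transaction_finished are defined below; the package path is
+# illustrative):
+#
+#     trans_id = upload_descriptor(logger, "/path/to/kt_utm_vnfd.tar.gz")
+#     wait_unboard_transaction_finished(logger, trans_id)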
+ +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import gi + +gi.require_version('RwlogMgmtYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') + +from gi.repository import ( + NsdYang, + NsrYang, + RwBaseYang, + RwCloudYang, + RwIwpYang, + RwlogMgmtYang, + RwNsmYang, + RwNsrYang, + RwResourceMgrYang, + RwConmanYang, + RwVnfdYang, + VldYang, + ) + +logging.basicConfig(level=logging.DEBUG) + + +RW_KT_UTM_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/kt_utm" + ) + +RW_KT_UTM_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/utm_only" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + + +@pytest.fixture(scope='module') +def rwlog_mgmt_proxy(request, mgmt_session): + return mgmt_session.proxy(RwlogMgmtYang) + + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + + +@pytest.fixture(scope='session') +def kt_utm_vnfd_package_file(): + ktutm_pkg_file = os.path.join( + RW_KT_UTM_PKG_INSTALL_DIR, + "kt_utm_vnfd.tar.gz", + ) + if not os.path.exists(ktutm_pkg_file): + raise_package_error() + + return ktutm_pkg_file + +@pytest.fixture(scope='session') +def utm_only_nsd_package_file(): + ktutm_nsd_pkg_file = os.path.join( + RW_KT_UTM_NSD_PKG_INSTALL_DIR, + "utm_only_nsd.tar.gz", + ) + if not os.path.exists(ktutm_nsd_pkg_file): + raise_package_error() + + return ktutm_nsd_pkg_file + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), 
universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "UTM-only" + nsr.short_name = "UTM-only" + nsr.description = "1 VNFs with 5 VLs" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_logging(self, rwlog_mgmt_proxy): + logging = RwlogMgmtYang.Logging.from_dict({ + "console": { + "on": True, + "filter": { + "category": [{ + "name": "rw-generic", + "severity": "error" + }], + } + } + }) + rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging) + + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + # cloud_account.name = "cloudsim_proxy" + # cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "openstack" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.13:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file): + logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file) + trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "kt_utm_vnfd" + + def 
test_onboard_utm_only_nsd(self, logger, nsd_proxy, utm_only_nsd_package_file):
+        logger.info("Onboarding utm_only_nsd package: %s", utm_only_nsd_package_file)
+        trans_id = upload_descriptor(logger, utm_only_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+
+    def test_instantiate_utm_only_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
\ No newline at end of file
diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
new file mode 100644
index 0000000..d366e0f
--- /dev/null
+++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
@@ -0,0 +1,335 @@
+#!/usr/bin/env python
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#
+#
+
+@file lp_kt_utm_wims_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import gi
+
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwBaseYang,
+    RwCloudYang,
+    RwIwpYang,
+    RwlogMgmtYang,
+    RwNsmYang,
+    RwNsrYang,
+    RwResourceMgrYang,
+    RwConmanYang,
+    RwVnfdYang,
+    VldYang,
+    )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_KT_UTM_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_utm"
+    )
+
+RW_KT_WIMS_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_wims"
+    )
+
+RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/utm_wims"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return
mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + + +@pytest.fixture(scope='session') +def kt_utm_vnfd_package_file(): + ktutm_pkg_file = os.path.join( + RW_KT_UTM_PKG_INSTALL_DIR, + "kt_utm_vnfd.tar.gz", + ) + if not os.path.exists(ktutm_pkg_file): + raise_package_error() + + return ktutm_pkg_file + +@pytest.fixture(scope='session') +def kt_wims_vnfd_package_file(): + ktwims_pkg_file = os.path.join( + RW_KT_WIMS_PKG_INSTALL_DIR, + "kt_wims_vnfd.tar.gz", + ) + if not os.path.exists(ktwims_pkg_file): + raise_package_error() + + return ktwims_pkg_file + +@pytest.fixture(scope='session') +def utm_wims_nsd_package_file(): + ktutm_wims_nsd_pkg_file = os.path.join( + RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR, + "utm_wims_nsd.tar.gz", + ) + if not os.path.exists(ktutm_wims_nsd_pkg_file): + raise_package_error() + + return ktutm_wims_nsd_pkg_file + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "UTM-WIMS" + nsr.short_name = "UTM-WIMS" + nsr.description = "2 VNFs with 4 VLs" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_logging(self, rwlog_mgmt_proxy): + logging = RwlogMgmtYang.Logging.from_dict({ + "console": { + "on": True, + 
"filter": { + "category": [{ + "name": "rw-generic", + "severity": "error" + }], + } + } + }) + rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging) + + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + # cloud_account.name = "cloudsim_proxy" + # cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "openstack" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file): + logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file) + trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "kt_utm_vnfd" + + def test_onboard_ktwims_vnfd(self, logger, vnfd_proxy, kt_wims_vnfd_package_file): + logger.info("Onboarding kt_wims_vnfd package: %s", kt_wims_vnfd_package_file) + trans_id = upload_descriptor(logger, kt_wims_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should only be two vnfd" + assert "kt_wims_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_utm_wims_nsd(self, logger, nsd_proxy, utm_wims_nsd_package_file): + logger.info("Onboarding utm_wims_nsd package: %s", utm_wims_nsd_package_file) + trans_id = upload_descriptor(logger, utm_wims_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + + def test_instantiate_utm_wims_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff 
--git a/modules/core/mano/rwlaunchpad/test/pytest/lp_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_test.py new file mode 100644 index 0000000..3253dae --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_test.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid +import datetime + +import gi +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwlogMgmtYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwNsmYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') + +from gi.repository import ( + NsdYang, + NsrYang, + RwBaseYang, + RwCloudYang, + RwIwpYang, + RwlogMgmtYang, + RwNsmYang, + RwNsrYang, + RwResourceMgrYang, + RwConmanYang, + RwVnfdYang, + VldYang, + ) + +logging.basicConfig(level=logging.DEBUG) + + +RW_PING_PONG_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_ROOT"], + "images" + ) + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + + +@pytest.fixture(scope='module') +def rwlog_mgmt_proxy(request, mgmt_session): + return mgmt_session.proxy(RwlogMgmtYang) + + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + + +@pytest.fixture(scope='session') +def ping_vnfd_package_file(): + ping_pkg_file = os.path.join( + RW_PING_PONG_PKG_INSTALL_DIR, + 
"ping_vnfd_with_image.tar.gz", + ) + if not os.path.exists(ping_pkg_file): + raise_package_error() + + return ping_pkg_file + + +@pytest.fixture(scope='session') +def pong_vnfd_package_file(): + pong_pkg_file = os.path.join( + RW_PING_PONG_PKG_INSTALL_DIR, + "pong_vnfd_with_image.tar.gz", + ) + if not os.path.exists(pong_pkg_file): + raise_package_error() + + return pong_pkg_file + + +@pytest.fixture(scope='session') +def ping_pong_nsd_package_file(): + ping_pong_pkg_file = os.path.join( + RW_PING_PONG_PKG_INSTALL_DIR, + "ping_pong_nsd.tar.gz", + ) + if not os.path.exists(ping_pong_pkg_file): + raise_package_error() + + return ping_pong_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) + nsr.short_name = "nsr_short_name" + nsr.description = "This is a description" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + nsr.cloud_account = "openstack" + + param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter() + param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor' + param.value = "rift-o-matic" + + nsr.input_parameter.append(param) + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_logging(self, rwlog_mgmt_proxy): + logging = RwlogMgmtYang.Logging.from_dict({ + "console": { + "on": True, + "filter": { + "category": [{ + "name": "rw-generic", + "severity": "error" + }], + } + } + }) + rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging) + + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccount() + # cloud_account.name = "cloudsim_proxy" + # cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "openstack" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.96.4.2:5000/v3/' + cloud_account.openstack.tenant = 'mano1' + cloud_account.openstack.mgmt_network = 'private1' + + cloud_proxy.merge_config("/rw-cloud:cloud/account", cloud_account) + + def test_onboard_ping_vnfd(self, logger, vnfd_proxy, ping_vnfd_package_file): + logger.info("Onboarding ping_vnfd package: %s", ping_vnfd_package_file) + 
trans_id = upload_descriptor(logger, ping_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should only be a single vnfd" + vnfd = vnfds[0] + assert vnfd.name == "ping_vnfd" + + def test_onboard_pong_vnfd(self, logger, vnfd_proxy, pong_vnfd_package_file): + logger.info("Onboarding pong_vnfd package: %s", pong_vnfd_package_file) + trans_id = upload_descriptor(logger, pong_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ping_pong_nsd(self, logger, nsd_proxy, ping_pong_nsd_package_file): + logger.info("Onboarding ping_pong_nsd package: %s", ping_pong_nsd_package_file) + trans_id = upload_descriptor(logger, ping_pong_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "ping_pong_nsd" + + def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + rwnsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id + + # logger.info("Waiting up to 30 seconds for ping and pong components to show " + # "up in show tasklet info") + + # start_time = time.time() + # while (time.time() - start_time) < 30: + # vcs_info = base_proxy.get('/vcs/info') + # components = vcs_info.components.component_info + + # def find_component_by_name(name): + # for component in components: + # if name in component.component_name: + # return component + + # logger.warning("Did not find %s component name in show tasklet info", + # name) + + # return None + + # """ + # ping_cluster_component = find_component_by_name( + # "rw_ping_vnfd:rwping_cluster" + # ) + # if ping_cluster_component is None: + # continue + + # pong_cluster_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_cluster" + # ) + # if pong_cluster_component is None: + # continue + # """ + + # ping_vm_component = find_component_by_name( + # "rw_ping_vnfd:rwping_vm" + # ) + # if ping_vm_component is None: + # continue + + # pong_vm_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_vm" + # ) + # if pong_vm_component is None: + # continue + + # ping_proc_component = find_component_by_name( + # "rw_ping_vnfd:rwping_proc" + # ) + # if ping_proc_component is None: + # continue + + # pong_proc_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_proc" + # ) + # if pong_proc_component is None: + # continue + + # ping_tasklet_component = find_component_by_name( + # "rw_ping_vnfd:rwping_tasklet" + # ) + # if ping_tasklet_component is None: + # continue + + # pong_tasklet_component = find_component_by_name( + # "rw_pong_vnfd:rwpong_tasklet" + # ) + # if pong_tasklet_component is None: + # continue + + # logger.info("TEST SUCCESSFUL: All ping and pong components were found in show tasklet info") + # break + + # else: + # assert False, "Did not find all ping and pong component in time" + + #def 
test_terminate_ping_pong_ns(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + # nsr_configs = nsr_proxy.get_config('/ns-instance-config') + # nsr = nsr_configs.nsr[0] + # nsr_id = nsr.id + + # nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(nsr_id)) \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py new file mode 100644 index 0000000..167e65a --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_3vnfs_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test ExtVNF +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid + +import gi +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') + + + +from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang + +logging.basicConfig(level=logging.DEBUG) + + +RW_VROUTER_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/vrouter" + ) +RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafgen" + ) +RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafsink" + ) +RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/tg_2vrouter_ts" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def 
rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + +@pytest.fixture(scope='session') +def vrouter_vnfd_package_file(): + vrouter_pkg_file = os.path.join( + RW_VROUTER_PKG_INSTALL_DIR, + "vrouter_vnfd_with_epa.tar.gz", + ) + if not os.path.exists(vrouter_pkg_file): + raise_package_error() + + return vrouter_pkg_file + +@pytest.fixture(scope='session') +def tg_vnfd_package_file(): + tg_pkg_file = os.path.join( + RW_TRAFGEN_PKG_INSTALL_DIR, + "trafgen_vnfd_with_epa.tar.gz", + ) + if not os.path.exists(tg_pkg_file): + raise_package_error() + + return tg_pkg_file + +@pytest.fixture(scope='session') +def ts_vnfd_package_file(): + ts_pkg_file = os.path.join( + RW_TRAFSINK_PKG_INSTALL_DIR, + "trafsink_vnfd_with_epa.tar.gz", + ) + if not os.path.exists(ts_pkg_file): + raise_package_error() + + return ts_pkg_file + +@pytest.fixture(scope='session') +def tg_2vrouter_ts_nsd_package_file(): + tg_2vrouter_ts_nsd_pkg_file = os.path.join( + RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR, + "tg_2vrouter_ts_nsd_with_epa.tar.gz", + ) + if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file): + raise_package_error() + + return tg_2vrouter_ts_nsd_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "TG-2Vrouter-TS EPA" + nsr.short_name = "TG-2Vrouter-TS EPA" + nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + #cloud_account.name = "cloudsim_proxy" + #cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "riftuser1" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + 
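+        # The auth_url host ('10.66.4.xx') is a masked placeholder; point it at a reachable Keystone v3 endpoint before running.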
cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file): + logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file) + trans_id = upload_descriptor(logger, tg_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should be one vnfds" + assert "trafgen_vnfd" in [vnfds[0].name] + + def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file): + logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file) + trans_id = upload_descriptor(logger, vrouter_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file): + logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file) + trans_id = upload_descriptor(logger, ts_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 3, "There should be three vnfds" + assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name] + + def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file): + logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file) + trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "tg_vrouter_ts_nsd" + assert nsd.short_name == "tg_2vrouter_ts_nsd" + + def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git 
a/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py new file mode 100644 index 0000000..9570002 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_3vnfs_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test ExtVNF +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid + +import gi +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') + + + +from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang + +logging.basicConfig(level=logging.DEBUG) + + +RW_VROUTER_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/vrouter" + ) +RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafgen" + ) +RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafsink" + ) +RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/tg_2vrouter_ts" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + +@pytest.fixture(scope='module') +def nsm_proxy(request, 
mgmt_session): + return mgmt_session.proxy(RwNsmYang) + +@pytest.fixture(scope='session') +def vrouter_vnfd_package_file(): + vrouter_pkg_file = os.path.join( + RW_VROUTER_PKG_INSTALL_DIR, + "vrouter_vnfd.tar.gz", + ) + if not os.path.exists(vrouter_pkg_file): + raise_package_error() + + return vrouter_pkg_file + +@pytest.fixture(scope='session') +def tg_vnfd_package_file(): + tg_pkg_file = os.path.join( + RW_TRAFGEN_PKG_INSTALL_DIR, + "trafgen_vnfd.tar.gz", + ) + if not os.path.exists(tg_pkg_file): + raise_package_error() + + return tg_pkg_file + +@pytest.fixture(scope='session') +def ts_vnfd_package_file(): + ts_pkg_file = os.path.join( + RW_TRAFSINK_PKG_INSTALL_DIR, + "trafsink_vnfd.tar.gz", + ) + if not os.path.exists(ts_pkg_file): + raise_package_error() + + return ts_pkg_file + +@pytest.fixture(scope='session') +def tg_2vrouter_ts_nsd_package_file(): + tg_2vrouter_ts_nsd_pkg_file = os.path.join( + RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR, + "tg_2vrouter_ts_nsd.tar.gz", + ) + if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file): + raise_package_error() + + return tg_2vrouter_ts_nsd_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "TG-2Vrouter-TS EPA" + nsr.short_name = "TG-2Vrouter-TS EPA" + nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + #cloud_account.name = "cloudsim_proxy" + #cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "riftuser1" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + 
"resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file): + logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file) + trans_id = upload_descriptor(logger, tg_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should be one vnfds" + assert "trafgen_vnfd" in [vnfds[0].name] + + def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file): + logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file) + trans_id = upload_descriptor(logger, vrouter_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file): + logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file) + trans_id = upload_descriptor(logger, ts_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 3, "There should be three vnfds" + assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name] + + def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file): + logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file) + trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "tg_vrouter_ts_nsd" + assert nsd.short_name == "tg_2vrouter_ts_nsd" + + def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py new file mode 100644 index 0000000..fb41684 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +# +# + +@file lp_3vnfs_test.py +@author Austin Cormier (Austin.Cormier@riftio.com) +@date 10/15/2015 +@brief Launchpad Module Test ExtVNF +""" + +import json +import logging +import os +import pytest +import shlex +import requests +import subprocess +import time +import uuid + +import gi +gi.require_version('RwIwpYang', '1.0') +gi.require_version('RwNsrYang', '1.0') +gi.require_version('RwVnfdYang', '1.0') +gi.require_version('RwCloudYang', '1.0') +gi.require_version('RwBaseYang', '1.0') +gi.require_version('RwResourceMgrYang', '1.0') +gi.require_version('RwConmanYang', '1.0') +gi.require_version('RwNsmYang', '1.0') + + +from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang + +logging.basicConfig(level=logging.DEBUG) + + +RW_VROUTER_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/vrouter" + ) +RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafgen" + ) +RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/vnfds/trafsink" + ) +RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join( + os.environ["RIFT_INSTALL"], + "usr/rift/mano/nsds/tg_vrouter_ts" + ) + + +class PackageError(Exception): + pass + + +def raise_package_error(): + raise PackageError("Could not find ns packages") + + +@pytest.fixture(scope='module') +def iwp_proxy(request, mgmt_session): + return mgmt_session.proxy(RwIwpYang) + +@pytest.fixture(scope='module') +def resource_mgr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwResourceMgrYang) + + +@pytest.fixture(scope='module') +def cloud_proxy(request, mgmt_session): + return mgmt_session.proxy(RwCloudYang) + + +@pytest.fixture(scope='module') +def vnfd_proxy(request, mgmt_session): + return mgmt_session.proxy(RwVnfdYang) + + +@pytest.fixture(scope='module') +def vld_proxy(request, mgmt_session): + return mgmt_session.proxy(VldYang) + + +@pytest.fixture(scope='module') +def nsd_proxy(request, mgmt_session): + return mgmt_session.proxy(NsdYang) + + +@pytest.fixture(scope='module') +def nsr_proxy(request, mgmt_session): + return mgmt_session.proxy(NsrYang) + + +@pytest.fixture(scope='module') +def rwnsr_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsrYang) + + +@pytest.fixture(scope='module') +def base_proxy(request, mgmt_session): + return mgmt_session.proxy(RwBaseYang) + +@pytest.fixture(scope='module') +def so_proxy(request, mgmt_session): + return mgmt_session.proxy(RwConmanYang) + +@pytest.fixture(scope='module') +def nsm_proxy(request, mgmt_session): + return mgmt_session.proxy(RwNsmYang) + +@pytest.fixture(scope='session') +def vrouter_vnfd_package_file(): + vrouter_pkg_file = os.path.join( + RW_VROUTER_PKG_INSTALL_DIR, + "vrouter_vnfd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(vrouter_pkg_file): + raise_package_error() + + return vrouter_pkg_file + +@pytest.fixture(scope='session') +def tg_vnfd_package_file(): + 
tg_pkg_file = os.path.join( + RW_TRAFGEN_PKG_INSTALL_DIR, + "trafgen_vnfd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(tg_pkg_file): + raise_package_error() + + return tg_pkg_file + +@pytest.fixture(scope='session') +def ts_vnfd_package_file(): + ts_pkg_file = os.path.join( + RW_TRAFSINK_PKG_INSTALL_DIR, + "trafsink_vnfd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(ts_pkg_file): + raise_package_error() + + return ts_pkg_file + +@pytest.fixture(scope='session') +def tg_vrouter_ts_nsd_package_file(): + tg_vrouter_ts_nsd_pkg_file = os.path.join( + RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR, + "tg_vrouter_ts_nsd_with_epa_sriov.tar.gz", + ) + if not os.path.exists(tg_vrouter_ts_nsd_pkg_file): + raise_package_error() + + return tg_vrouter_ts_nsd_pkg_file + + +def create_nsr_from_nsd_id(nsd_id): + nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr.id = str(uuid.uuid4()) + nsr.name = "TG-Vrouter-TS-EPA-SRIOV" + nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV" + nsr.description = "3 VNFs with Trafgen, Vrouter and Trafsink EPA SRIOV" + nsr.nsd_ref = nsd_id + nsr.admin_status = "ENABLED" + + return nsr + + +def upload_descriptor(logger, descriptor_file, host="127.0.0.1"): + curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format( + file=descriptor_file, + host=host, + ) + logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd) + stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True) + + json_out = json.loads(stdout) + transaction_id = json_out["transaction_id"] + + return transaction_id + + +class DescriptorOnboardError(Exception): + pass + + +def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"): + logger.info("Waiting for onboard trans_id %s to complete", + transaction_id) + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + r = requests.get( + 'http://{host}:4567/api/upload/{t_id}/state'.format( + host=host, t_id=transaction_id + ) + ) + state = r.json() + if state["status"] == "pending": + time.sleep(1) + continue + + elif state["status"] == "success": + logger.info("Descriptor onboard was successful") + return + + else: + raise DescriptorOnboardError(state) + + if state["status"] != "success": + raise DescriptorOnboardError(state) + +@pytest.mark.incremental +class TestLaunchpadStartStop(object): + def test_configure_cloud_account(self, cloud_proxy, logger): + cloud_account = RwCloudYang.CloudAccountConfig() + #cloud_account.name = "cloudsim_proxy" + #cloud_account.account_type = "cloudsim_proxy" + cloud_account.name = "riftuser1" + cloud_account.account_type = "openstack" + cloud_account.openstack.key = 'pluto' + cloud_account.openstack.secret = 'mypasswd' + cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/' + cloud_account.openstack.tenant = 'demo' + cloud_account.openstack.mgmt_network = 'private' + + cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account) + + def test_configure_pools(self, resource_mgr_proxy): + pools = RwResourceMgrYang.ResourcePools.from_dict({ + "pools": [{ "name": "vm_pool_a", + "resource_type": "compute", + "pool_type" : "dynamic"}, + {"name": "network_pool_a", + "resource_type": "network", + "pool_type" : "dynamic",}]}) + + resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools) + + def test_configure_resource_orchestrator(self, so_proxy): + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 
'ro_username' : 'admin', + 'ro_password' : 'admin'}) + so_proxy.merge_config('/rw-conman:cm-config', cfg) + + def test_configure_service_orchestrator(self, nsm_proxy): + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg) + + + def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file): + logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file) + trans_id = upload_descriptor(logger, tg_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 1, "There should be one vnfds" + assert "trafgen_vnfd" in [vnfds[0].name] + + def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file): + logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file) + trans_id = upload_descriptor(logger, vrouter_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 2, "There should be two vnfds" + assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name] + + def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file): + logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file) + trans_id = upload_descriptor(logger, ts_vnfd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = vnfd_proxy.get_config('/vnfd-catalog') + vnfds = catalog.vnfd + assert len(vnfds) == 3, "There should be three vnfds" + assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name] + + def test_onboard_tg_vrouter_ts_nsd(self, logger, nsd_proxy, tg_vrouter_ts_nsd_package_file): + logger.info("Onboarding tg_vrouter_ts nsd package: %s", tg_vrouter_ts_nsd_package_file) + trans_id = upload_descriptor(logger, tg_vrouter_ts_nsd_package_file) + wait_unboard_transaction_finished(logger, trans_id) + + catalog = nsd_proxy.get_config('/nsd-catalog') + nsds = catalog.nsd + assert len(nsds) == 1, "There should only be a single nsd" + nsd = nsds[0] + assert nsd.name == "tg_vrouter_ts_nsd" + + def test_instantiate_tg_vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy): + catalog = nsd_proxy.get_config('/nsd-catalog') + nsd = catalog.nsd[0] + + nsr = create_nsr_from_nsd_id(nsd.id) + nsr_proxy.merge_config('/ns-instance-config', nsr) + + nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata') + nsrs = nsr_opdata.nsr + assert len(nsrs) == 1 + assert nsrs[0].ns_instance_config_ref == nsr.id \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg b/modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg new file mode 100644 index 0000000..9a13244 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/racfg/launchpad_module_test.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_LAUNCHPAD_MODULE_0100", + "commandline":"./launchpad_module_test", + "target_vm":"VM", + "test_description":"Module test for launchpad", + "run_as_root": true, + "status":"working", + "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"], + "timelimit": 600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py b/modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py new file mode 
100755 index 0000000..e692956 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/utest_rwmonitor.py @@ -0,0 +1,454 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import argparse +import asyncio +import concurrent.futures +import logging +import os +import sys +import unittest +import uuid +import xmlrunner + +import gi +gi.require_version('RwcalYang', '1.0') +gi.require_version('RwVnfrYang', '1.0') +gi.require_version('RwTypes', '1.0') + +from gi.repository import ( + RwcalYang, + RwVnfrYang, + RwTypes, + VnfrYang, + NsrYang, + ) + +from rift.tasklets.rwmonitor.core import (RecordManager, NfviMetricsAggregator) + + +class MockTasklet(object): + def __init__(self, dts, log, loop, records): + self.dts = dts + self.log = log + self.loop = loop + self.records = records + self.polling_period = 0 + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16) + + +# The identifier defaults are generated inside the factory bodies; a +# str(uuid.uuid4()) default argument would be evaluated only once, at import +# time, so every no-argument call would share the same identifier. +def make_nsr(ns_instance_config_ref=None): + nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr() + nsr.ns_instance_config_ref = ns_instance_config_ref or str(uuid.uuid4()) + return nsr + +def make_vnfr(id=None): + vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr() + vnfr.id = id or str(uuid.uuid4()) + return vnfr + +def make_vdur(id=None, vim_id=None): + vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur() + vdur.id = id or str(uuid.uuid4()) + vdur.vim_id = vim_id or str(uuid.uuid4()) + return vdur + + +class MockNfviMonitorPlugin(object): + def __init__(self): + self.metrics = dict() + + def nfvi_metrics(self, account, vim_id): + key = (account, vim_id) + + if key in self.metrics: + return RwTypes.RwStatus.SUCCESS, self.metrics[key] + + metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + status = RwTypes.RwStatus.FAILURE + + return status, metrics + + +class TestAggregator(unittest.TestCase): + """ + The NfviMetricsAggregator queries NFVI metrics from VIM components and + aggregates the data at the VNF and NS levels. This test case validates + that the aggregation happens as expected.
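+    (For the data below, counters such as bytes and packets are expected to sum across VDUs, while vCPU utilization aggregates as a capacity-weighted average: (10*4 + 10*4 + 20*8) / 16 vCPUs == 15.0.)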
+ """ + + def setUp(self): + self.nfvi_monitor = MockNfviMonitorPlugin() + self.cloud_account = RwcalYang.CloudAccount( + name="test-account", + account_type="mock", + ), + + # Create a simple record hierarchy to represent the system + self.records = RecordManager() + + nsr = make_nsr('test-nsr') + + vnfr_1 = make_vnfr('test-vnfr-1') + vnfr_2 = make_vnfr('test-vnfr-1') + + vdur_1 = make_vdur(vim_id='test-vdur-1') + vdur_1.vm_flavor.vcpu_count = 4 + vdur_1.vm_flavor.memory_mb = 16e3 + vdur_1.vm_flavor.storage_gb = 1e3 + + vdur_2 = make_vdur(vim_id='test-vdur-2') + vdur_2.vm_flavor.vcpu_count = 4 + vdur_2.vm_flavor.memory_mb = 16e3 + vdur_2.vm_flavor.storage_gb = 1e3 + + vdur_3 = make_vdur(vim_id='test-vdur-3') + vdur_3.vm_flavor.vcpu_count = 8 + vdur_3.vm_flavor.memory_mb = 32e3 + vdur_3.vm_flavor.storage_gb = 1e3 + + nsr.constituent_vnfr_ref.append(vnfr_1.id) + nsr.constituent_vnfr_ref.append(vnfr_2.id) + + vnfr_1.vdur.append(vdur_1) + vnfr_1.vdur.append(vdur_2) + vnfr_2.vdur.append(vdur_3) + + self.records.add_nsr(nsr) + self.records.add_vnfr(vnfr_1) + self.records.add_vnfr(vnfr_2) + + # Populate the NFVI monitor with static data + vdu_metrics_1 = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + vdu_metrics_1.vcpu.utilization = 10.0 + vdu_metrics_1.memory.used = 2e9 + vdu_metrics_1.storage.used = 1e10 + vdu_metrics_1.network.incoming.bytes = 1e5 + vdu_metrics_1.network.incoming.packets = 1e3 + vdu_metrics_1.network.incoming.byte_rate = 1e6 + vdu_metrics_1.network.incoming.packet_rate = 1e4 + vdu_metrics_1.network.outgoing.bytes = 1e5 + vdu_metrics_1.network.outgoing.packets = 1e3 + vdu_metrics_1.network.outgoing.byte_rate = 1e6 + vdu_metrics_1.network.outgoing.packet_rate = 1e4 + + vdu_metrics_2 = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + vdu_metrics_2.vcpu.utilization = 10.0 + vdu_metrics_2.memory.used = 2e9 + vdu_metrics_2.storage.used = 1e10 + vdu_metrics_2.network.incoming.bytes = 1e5 + vdu_metrics_2.network.incoming.packets = 1e3 + vdu_metrics_2.network.incoming.byte_rate = 1e6 + vdu_metrics_2.network.incoming.packet_rate = 1e4 + vdu_metrics_2.network.outgoing.bytes = 1e5 + vdu_metrics_2.network.outgoing.packets = 1e3 + vdu_metrics_2.network.outgoing.byte_rate = 1e6 + vdu_metrics_2.network.outgoing.packet_rate = 1e4 + + vdu_metrics_3 = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics() + vdu_metrics_3.vcpu.utilization = 20.0 + vdu_metrics_3.memory.used = 28e9 + vdu_metrics_3.storage.used = 1e10 + vdu_metrics_3.network.incoming.bytes = 1e5 + vdu_metrics_3.network.incoming.packets = 1e3 + vdu_metrics_3.network.incoming.byte_rate = 1e6 + vdu_metrics_3.network.incoming.packet_rate = 1e4 + vdu_metrics_3.network.outgoing.bytes = 1e5 + vdu_metrics_3.network.outgoing.packets = 1e3 + vdu_metrics_3.network.outgoing.byte_rate = 1e6 + vdu_metrics_3.network.outgoing.packet_rate = 1e4 + + metrics = self.nfvi_monitor.metrics + metrics[(self.cloud_account, vdur_1.vim_id)] = vdu_metrics_1 + metrics[(self.cloud_account, vdur_2.vim_id)] = vdu_metrics_2 + metrics[(self.cloud_account, vdur_3.vim_id)] = vdu_metrics_3 + + def test_aggregation(self): + """ + The hierarchy of the network service tested here is, + + test-nsr + |-- test-vnfr-1 + | |-- test-vdur-1 + | \-- test-vdur-2 + \-- test-vnfr-2 + \-- test-vdur-3 + + """ + loop = asyncio.get_event_loop() + + tasklet = MockTasklet( + dts=None, + loop=loop, + log=logging.getLogger(), + records=self.records, + ) + + # Create an instance of the NfviMetricsAggregator using a mock cloud + # account and NFVI monitor + 
aggregator = NfviMetricsAggregator( + tasklet=tasklet, + cloud_account=self.cloud_account, + nfvi_monitor=self.nfvi_monitor, + ) + + # Run the event loop to retrieve the metrics from the aggregator + task = loop.create_task(aggregator.request_ns_metrics('test-nsr')) + loop.run_until_complete(task) + + ns_metrics = task.result() + + # Validate the metrics returned by the aggregator + self.assertEqual(ns_metrics.vm.active_vm, 3) + self.assertEqual(ns_metrics.vm.inactive_vm, 0) + + self.assertEqual(ns_metrics.vcpu.total, 16) + self.assertEqual(ns_metrics.vcpu.utilization, 15.0) + + self.assertEqual(ns_metrics.memory.used, 32e9) + self.assertEqual(ns_metrics.memory.total, 64e9) + self.assertEqual(ns_metrics.memory.utilization, 50.0) + + self.assertEqual(ns_metrics.storage.used, 30e9) + self.assertEqual(ns_metrics.storage.total, 3e12) + self.assertEqual(ns_metrics.storage.utilization, 1.0) + + self.assertEqual(ns_metrics.network.incoming.bytes, 3e5) + self.assertEqual(ns_metrics.network.incoming.packets, 3e3) + self.assertEqual(ns_metrics.network.incoming.byte_rate, 3e6) + self.assertEqual(ns_metrics.network.incoming.packet_rate, 3e4) + + self.assertEqual(ns_metrics.network.outgoing.bytes, 3e5) + self.assertEqual(ns_metrics.network.outgoing.packets, 3e3) + self.assertEqual(ns_metrics.network.outgoing.byte_rate, 3e6) + self.assertEqual(ns_metrics.network.outgoing.packet_rate, 3e4) + + def test_publish_nfvi_metrics(self): + loop = asyncio.get_event_loop() + + class RegistrationHandle(object): + """ + Normally the aggregator uses the DTS RegistrationHandle to publish + the NFVI metrics. This placeholder class is used to record the + first NFVI metric data published by the aggregator, and then + removes the NSR so that the aggregator terminates. + + """ + + def __init__(self, test): + self.xpath = None + self.data = None + self.test = test + + def deregister(self): + pass + + def create_element(self, xpath, data): + pass + + def update_element(self, xpath, data): + # Record the results + self.xpath = xpath + self.data = data + + # Removing the NSR from the record manager will cause the + # coroutine responsible for publishing the NFVI metric data to + # terminate + self.test.records.remove_nsr('test-nsr') + + @asyncio.coroutine + def delete_element(self, xpath): + assert xpath == self.xpath + + class Dts(object): + """ + Placeholder Dts class that is used solely for the purpose of + returning a RegistrationHandle to the aggregator. + + """ + def __init__(self, test): + self.handle = RegistrationHandle(test) + + @asyncio.coroutine + def register(self, *args, **kwargs): + return self.handle + + dts = Dts(self) + + tasklet = MockTasklet( + dts=dts, + loop=loop, + log=logging.getLogger(), + records=self.records, + ) + + # Create an instance of the NfviMetricsAggregator using a mock cloud + # account and NFVI monitor + aggregator = NfviMetricsAggregator( + tasklet=tasklet, + cloud_account=self.cloud_account, + nfvi_monitor=self.nfvi_monitor, + ) + + # Create a coroutine wrapper to timeout the test if it takes too long. 
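+        # (asyncio.wait_for cancels the wrapped coroutine and raises asyncio.TimeoutError if it has not finished within the timeout.)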
+ @asyncio.coroutine + def timeout_wrapper(): + coro = aggregator.publish_nfvi_metrics('test-nsr') + yield from asyncio.wait_for(coro, timeout=1) + + loop.run_until_complete(timeout_wrapper()) + + # Verify the data published by the aggregator + self.assertEqual(dts.handle.data.vm.active_vm, 3) + self.assertEqual(dts.handle.data.vm.inactive_vm, 0) + + self.assertEqual(dts.handle.data.vcpu.total, 16) + self.assertEqual(dts.handle.data.vcpu.utilization, 15.0) + + self.assertEqual(dts.handle.data.memory.used, 32e9) + self.assertEqual(dts.handle.data.memory.total, 64e9) + self.assertEqual(dts.handle.data.memory.utilization, 50.0) + + self.assertEqual(dts.handle.data.storage.used, 30e9) + self.assertEqual(dts.handle.data.storage.total, 3e12) + self.assertEqual(dts.handle.data.storage.utilization, 1.0) + + self.assertEqual(dts.handle.data.network.incoming.bytes, 3e5) + self.assertEqual(dts.handle.data.network.incoming.packets, 3e3) + self.assertEqual(dts.handle.data.network.incoming.byte_rate, 3e6) + self.assertEqual(dts.handle.data.network.incoming.packet_rate, 3e4) + + self.assertEqual(dts.handle.data.network.outgoing.bytes, 3e5) + self.assertEqual(dts.handle.data.network.outgoing.packets, 3e3) + self.assertEqual(dts.handle.data.network.outgoing.byte_rate, 3e6) + self.assertEqual(dts.handle.data.network.outgoing.packet_rate, 3e4) + + +class TestRecordManager(unittest.TestCase): + def setUp(self): + pass + + def test_add_and_remove_nsr(self): + records = RecordManager() + + # Create an empty NSR and add it to the record manager + nsr = make_nsr() + records.add_nsr(nsr) + + # The record manager should ignore this NSR because it contains no + # VNFRs + self.assertFalse(records.has_nsr(nsr.ns_instance_config_ref)) + + + # Now add a VNFR (with a VDUR) to the NSR and, once again, add it to + # the record manager + vdur = make_vdur() + vnfr = make_vnfr() + + vnfr.vdur.append(vdur) + + nsr.constituent_vnfr_ref.append(vnfr.id) + records.add_nsr(nsr) + + # The mapping from the NSR to the VNFR has been added, but the + # relationship between the VNFR and the VDUR is not added. + self.assertTrue(records.has_nsr(nsr.ns_instance_config_ref)) + self.assertFalse(records.has_vnfr(vnfr.id)) + + + # Try adding the same NSR again. The record manager should be + # unchanged. + records.add_nsr(nsr) + + self.assertEqual(1, len(records._nsr_to_vnfrs.keys())) + self.assertEqual(1, len(records._nsr_to_vnfrs.values())) + + + # Now remove the NSR and check that the internal structures have been + # properly cleaned up. + records.remove_nsr(nsr.ns_instance_config_ref) + + self.assertFalse(records.has_nsr(nsr.ns_instance_config_ref)) + self.assertFalse(records.has_vnfr(vnfr.id)) + + def test_add_and_remove_vnfr(self): + records = RecordManager() + + # Create an empty VNFR and add it to the record manager + vnfr = make_vnfr() + records.add_vnfr(vnfr) + + # The record manager should ignore this VNFR because it contains no + # VDURs + self.assertFalse(records.has_vnfr(vnfr.id)) + + + # Now add a VDUR to the VNFR and, once again, add it to the record + # manager. + vdur = make_vdur() + vnfr.vdur.append(vdur) + + records.add_vnfr(vnfr) + + # The mapping from the VNFR to the VDUR has been added, and the VDUR + # has been added to the internal dictionary for mapping a vim_id to a + # VDUR. + self.assertTrue(records.has_vnfr(vnfr.id)) + self.assertIn(vdur.vim_id, records._vdurs) + + + # Try adding the same VNFR again. The record manager should be + # unchanged.
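+        # (add_vnfr should be idempotent here: re-adding an already-registered record must not create duplicate entries.)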
+ records.add_vnfr(vnfr) + + self.assertEqual(1, len(records._vnfr_to_vdurs.keys())) + self.assertEqual(1, len(records._vnfr_to_vdurs.values())) + self.assertEqual(1, len(records._vdurs)) + + + # Now remove the VNFR and check that the internal structures have been + # properly cleaned up. + records.remove_vnfr(vnfr.id) + + self.assertFalse(records.has_vnfr(vnfr.id)) + self.assertNotIn(vdur.vim_id, records._vdurs) + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='TEST %(message)s') + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + + args = parser.parse_args(argv) + + # Set the global logging level + logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR) + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter). + unittest.main(argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner( + output=os.environ["RIFT_MODULE_TEST"])) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/utest_rwnsm.py b/modules/core/mano/rwlaunchpad/test/utest_rwnsm.py new file mode 100755 index 0000000..44e6dda --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/utest_rwnsm.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import argparse +import logging +import os +import sys +import unittest +import uuid +import xmlrunner + +from gi.repository import ( + NsdYang, + NsrYang, + ) + +logger = logging.getLogger('test-rwnsmtasklet') + +import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet +import rift.tasklets.rwnsmtasklet.xpath as rwxpath + +class TestGiXpath(unittest.TestCase): + def setUp(self): + rwxpath.reset_cache() + + def test_nsd_elements(self): + """ + Test that a particular element in a list is correctly retrieved. In + this case, we are trying to retrieve an NSD from the NSD catalog. + + """ + # Create the initial NSD catalog + nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog() + + # Create an NSD, set its 'id', and add it to the catalog + nsd_id = str(uuid.uuid4()) + nsd_catalog.nsd.append( + NsdYang.YangData_Nsd_NsdCatalog_Nsd( + id=nsd_id, + ) + ) + + # Retrieve the NSD using an xpath expression + xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id) + nsd = rwxpath.getxattr(nsd_catalog, xpath) + + self.assertEqual(nsd_id, nsd.id) + + # Modify the name of the NSD using an xpath expression + rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name") + + name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name") + self.assertEqual("test-name", name) + + def test_nsd_scalar_fields(self): + """ + Test that setxattr correctly sets the value specified by an xpath.
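+        Unset leaves are expected to read back as None through getxattr until a value is written with setxattr.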
+ + """ + # Define a simple NSD + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + + # Check that the unset fields are in fact set to None + self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name")) + self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name")) + + # Set the values of the 'name' and 'short-name' fields + rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name") + rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name") + + # Check that the 'name' and 'short-name' fields are correctly set + self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name")) + self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name")) + + +class TestInputParameterSubstitution(unittest.TestCase): + def setUp(self): + self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger) + + def test_null_arguments(self): + """ + If None is passed to the substitutor for either the NSD or the NSR + config, no exception should be raised. + + """ + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + + self.substitute_input_parameters(None, None) + self.substitute_input_parameters(nsd, None) + self.substitute_input_parameters(None, nsr_config) + + def test_illegal_input_parameter(self): + """ + In the NSD there is a list of the parameters that are allowed to be + sbustituted by input parameters. This test checks that when an input + parameter is provided in the NSR config that is not in the NSD, it is + not applied. + + """ + # Define the original NSD + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsd.name = "robert" + nsd.short_name = "bob" + + # Define which parameters may be modified + nsd.input_parameter_xpath.append( + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + label="NSD Name", + ) + ) + + # Define the input parameters that are intended to be modified + nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr_config.input_parameter.extend([ + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + value="alice", + ), + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name", + value="alice", + ), + ]) + + self.substitute_input_parameters(nsd, nsr_config) + + # Verify that only the parameter in the input_parameter_xpath list is + # modified after the input parameters have been applied. + self.assertEqual("alice", nsd.name) + self.assertEqual("bob", nsd.short_name) + + def test_substitution(self): + """ + Test that substitution of input parameters occurs as expected. 
+ + """ + # Define the original NSD + nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd() + nsd.name = "robert" + nsd.short_name = "bob" + + # Define which parameters may be modified + nsd.input_parameter_xpath.extend([ + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + label="NSD Name", + ), + NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name", + label="NSD Short Name", + ), + ]) + + # Define the input parameters that are intended to be modified + nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr() + nsr_config.input_parameter.extend([ + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name", + value="robert", + ), + NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter( + xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name", + value="bob", + ), + ]) + + self.substitute_input_parameters(nsd, nsr_config) + + # Verify that both the 'name' and 'short-name' fields are correctly + # replaced. + self.assertEqual("robert", nsd.name) + self.assertEqual("bob", nsd.short_name) + + +def main(argv=sys.argv[1:]): + logging.basicConfig(format='TEST %(message)s') + + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='store_true') + + args = parser.parse_args(argv) + + # Set the global logging level + logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.FATAL) + + # Make the test logger very quiet + logger.addHandler(logging.NullHandler()) + + # The unittest framework requires a program name, so use the name of this + # file instead (we do not want to have to pass a fake program name to main + # when this is called from the interpreter). + unittest.main(argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner( + output=os.environ["RIFT_MODULE_TEST"])) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwlaunchpad/test/utest_uploader.py b/modules/core/mano/rwlaunchpad/test/utest_uploader.py new file mode 100755 index 0000000..058ab18 --- /dev/null +++ b/modules/core/mano/rwlaunchpad/test/utest_uploader.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+#
+#
+
+
+import argparse
+import io
+import logging
+import os
+import random
+import string
+import sys
+import unittest
+import xmlrunner
+
+from rift.tasklets.rwlaunchpad.uploader import (
+        boundary_search,
+        extract_package,
+        )
+
+
+message_template = """
+----------------------------------------
+POST /{url} HTTP/1.1
+User-Agent: curl/7.32.0
+Host: localhost:1337
+Accept: */*
+Content-Length: {length}
+Expect: 100-continue
+Content-Type: multipart/form-data; boundary={boundary}
+
+{boundary}
+Content-Disposition: form-data; name=descriptor
+
+{binary}
+{boundary}--
+"""
+
+def random_string(ncharacters):
+    refs = string.ascii_lowercase + '\n'
+    return ''.join(random.choice(refs) for _ in range(ncharacters))
+
+class TestBoundarySearch(unittest.TestCase):
+    """
+    The boundary_search function is used to efficiently search for a boundary
+    string of a message that has been saved to file. It searches the file
+    without loading it all into memory.
+
+    """
+    def setUp(self):
+        self.log = logging.getLogger('test')
+        self.boundary = "------------------------test-boundary"
+
+    def test(self):
+        """
+        Create a message that contains 3 instances of the boundary
+        interspersed with random characters. The message is presented to the
+        boundary_search function as a BytesIO so that it can treat it as a
+        file.
+
+        """
+        # Construct the message
+        message = self.boundary
+        message += random_string(32)
+        message += self.boundary
+        message += random_string(64)
+        message += self.boundary
+
+        # Search for the boundaries
+        indices = boundary_search(io.BytesIO(message.encode()), self.boundary)
+
+        # Check the results
+        self.assertEqual(0, indices[0])
+        self.assertEqual(32 + len(self.boundary), indices[1])
+        self.assertEqual(96 + 2 * len(self.boundary), indices[2])
+
+
+class TestExtractPackage(unittest.TestCase):
+    def setUp(self):
+        self.log = logging.getLogger('devnull')
+        self.log.addHandler(logging.NullHandler())
+        self.boundary = "------------------------test-boundary"
+        self.pkgfile = "/tmp/test-extract-package"
+        self.package = random_string(128)
+        self.url = "api/upload"
+
+    def test(self):
+        """
+        This test takes a known message (form-data) and extracts the 'package'
+        data from it.
+
+        """
+        try:
+            message = message_template.format(
+                    length=len(self.package),
+                    boundary=self.boundary,
+                    binary=self.package,
+                    url=self.url,
+                    )
+
+            extract_package(
+                    self.log,
+                    io.BytesIO(message.encode()),
+                    self.boundary,
+                    self.pkgfile,
+                    )
+
+            # Read the package file that was extracted to disk, and compare
+            # its full contents (not just the first line) with the expected
+            # data.
+            with open(self.pkgfile) as fp:
+                for u, v in zip(fp.read(), self.package):
+                    self.assertEqual(u, v)
+
+        finally:
+            # Clean up any files the test may have created. Note that the
+            # cleanup must check the package *file* path, not the package
+            # contents.
+            if os.path.exists(self.pkgfile):
+                os.remove(self.pkgfile)
+
+            if os.path.exists(self.pkgfile + ".partial"):
+                os.remove(self.pkgfile + ".partial")
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+ unittest.main(argv=[__file__] + argv, + testRunner=xmlrunner.XMLTestRunner( + output=os.environ["RIFT_MODULE_TEST"])) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/CMakeLists.txt b/modules/core/mano/rwmc/CMakeLists.txt new file mode 100644 index 0000000..f1a5c92 --- /dev/null +++ b/modules/core/mano/rwmc/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(PKG_NAME rwmc) +set(PKG_VERSION 1.0) +set(PKG_RELEASE 1) +set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION}) + +set(subdirs + plugins + ra + test + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) + +install( + PROGRAMS + bin/cloudsim_http_proxy.sh + DESTINATION usr/bin + COMPONENT ${PKG_LONG_NAME} +) diff --git a/modules/core/mano/rwmc/Makefile b/modules/core/mano/rwmc/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh b/modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh new file mode 100755 index 0000000..f2319d4 --- /dev/null +++ b/modules/core/mano/rwmc/bin/cloudsim_http_proxy.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# + + +function die { + echo "$@" + exit 1 +} + +which tinyproxy 2>/dev/null || die "You must install tinyproxy (sudo yum install tinyproxy)" + +tiny_cfg=$(mktemp) + +trap "rm ${tiny_cfg}" EXIT + +# Some default tinyproxy config to act as a very simple http proxy +cat << EOF > ${tiny_cfg} +User tinyproxy +Group tinyproxy +Port 9999 +Timeout 600 +DefaultErrorFile "/usr/share/tinyproxy/default.html" +StatFile "/usr/share/tinyproxy/stats.html" +LogFile "/var/log/tinyproxy/tinyproxy.log" +LogLevel Info +PidFile "/run/tinyproxy/tinyproxy.pid" +MaxClients 100 +MinSpareServers 5 +MaxSpareServers 20 +StartServers 10 +MaxRequestsPerChild 0 +ViaProxyName "tinyproxy" +EOF + +echo "Running TinyProxy in the foreground. Ctrl-C to exit." 
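+# -d keeps tinyproxy in the foreground (so Ctrl-C works as advertised above)
+# and -c points it at the generated config file.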
+tinyproxy -c ${tiny_cfg} -d \ No newline at end of file diff --git a/modules/core/mano/rwmc/include/riftware/rwmc_log.h b/modules/core/mano/rwmc/include/riftware/rwmc_log.h new file mode 100644 index 0000000..bd6f00e --- /dev/null +++ b/modules/core/mano/rwmc/include/riftware/rwmc_log.h @@ -0,0 +1,40 @@ +/* * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ + + +/** + * @file rwmc_log.h + * @author Anil Gunturu (anil.gunturu@riftio.com) + * @date 08/14/2015 + * @brief Internal logging macros for rwmc + * + */ + +#include "rw-mc-log.pb-c.h" +#include "rw-log.pb-c.h" +#include "rwlog.h" + +// logging macros +#define RWMC_LOG_HANDLE(_inst) \ + ((_inst)->rwtasklet_info->rwlog_instance) + +#define RWMC_LOG_EVENT(__inst__, __evt__, ...) \ + RWLOG_EVENT(RWMC_LOG_HANDLE(__inst__), RwMcLog_notif_##__evt__, __VA_ARGS__) diff --git a/modules/core/mano/rwmc/include/riftware/rwmctasklet.h b/modules/core/mano/rwmc/include/riftware/rwmctasklet.h new file mode 100644 index 0000000..057cfd4 --- /dev/null +++ b/modules/core/mano/rwmc/include/riftware/rwmctasklet.h @@ -0,0 +1,84 @@ +/* * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +/* + * + * + * + */ + + +#ifndef __rwmctasklet_H__ +#define __rwmctasklet_H__ + +#include +#include "rwtasklet.h" +#include "rwdts.h" +#include "rwmc_log.h" + +struct rwmctasklet_component_s { + CFRuntimeBase _base; + /* ADD ADDITIONAL FIELDS HERE */ +}; + +RW_TYPE_DECL(rwmctasklet_component); +RW_CF_TYPE_EXTERN(rwmctasklet_component_ptr_t); + + +struct rwmctasklet_instance_s { + CFRuntimeBase _base; + rwtasklet_info_ptr_t rwtasklet_info; + rwmctasklet_component_ptr_t component; + + rwdts_member_reg_handle_t dts_member_handle; + rwdts_api_t *dts_h; + rwdts_appconf_t *dts_mgmt_handle; + + + + /* ADD ADDITIONAL FIELDS HERE */ +}; + +struct rwmctasklet_scratchpad_s { + char reason[256]; + struct rwmctasklet_instance_s *instance; +}; +RW_TYPE_DECL(rwmctasklet_scratchpad); +RW_CF_TYPE_EXTERN(rwmctasklet_scratchpad_ptr_t); + + +RW_TYPE_DECL(rwmctasklet_instance); +RW_CF_TYPE_EXTERN(rwmctasklet_instance_ptr_t); + +rwmctasklet_component_ptr_t rwmctasklet_component_init(void); + +void rwmctasklet_component_deinit(rwmctasklet_component_ptr_t component); + +rwmctasklet_instance_ptr_t rwmctasklet_instance_alloc( + rwmctasklet_component_ptr_t component, + struct rwtasklet_info_s * rwtasklet_info, + RwTaskletPlugin_RWExecURL *instance_url); + +void rwmctasklet_instance_free( + rwmctasklet_component_ptr_t component, + rwmctasklet_instance_ptr_t instance); + +void rwmctasklet_instance_start( + rwmctasklet_component_ptr_t component, + rwmctasklet_instance_ptr_t instance); + +#endif //__rwmctasklet_H__ + diff --git a/modules/core/mano/rwmc/plugins/CMakeLists.txt b/modules/core/mano/rwmc/plugins/CMakeLists.txt new file mode 100644 index 0000000..dd64b02 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/CMakeLists.txt @@ -0,0 +1,19 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +set(subdirs + rwmctasklet + yang + ) + +## +# Include the subdirs +## +rift_add_subdirs(SUBDIR_LIST ${subdirs}) diff --git a/modules/core/mano/rwmc/plugins/Makefile b/modules/core/mano/rwmc/plugins/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml b/modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml new file mode 100644 index 0000000..ff12cc2 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/cli/cli_rwmc.xml @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt b/modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt new file mode 100644 index 0000000..402c281 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/cli/cli_rwmc_schema_listing.txt @@ -0,0 +1,54 @@ +ietf-inet-types +ietf-l2-topology +ietf-netconf-notifications +ietf-network +ietf-network-topology +ietf-restconf-monitoring +ietf-yang-types +mano-types +nsd +nsr +rw-base +rwcal +rw-cli-ext +rw-cloud +rw-config-agent +rw-conman +rw-debug +rw-dts +rw-dtsperf +rw-dtsperfmgr +rw-launchpad +rw-log +rwlog-mgmt +rw-manifest +rw-mc +rw-memlog +rw-mgmtagt +rw-mgmt-schema +rwmsg-data +rw-netconf +rw-notify-ext +rw-nsd +rw-nsm +rw-nsr +rw-pb-ext +rw-resource-mgr +rw-restportforward +rwsdn +rw-sdn +rwshell-mgmt +rw-sorch +rw-topology +rw-vcs +rwvcs-types +rw-vld +rw-vlr +rw-vnfd +rw-vnfr +rw-yang-types +vld +vlr +vnfd +vnffgd +vnfr diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt b/modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt new file mode 100644 index 0000000..72b5aa7 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/CMakeLists.txt @@ -0,0 +1,29 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 05/15/2015 +# + +include(rift_plugin) + +set(TASKLET_NAME rwmctasklet) + +## +# This function creates an install target for the plugin artifacts +## +rift_install_python_plugin(${TASKLET_NAME}-python ${TASKLET_NAME}.py) + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. +rift_python_install_tree( + FILES + rift/tasklets/${TASKLET_NAME}/__init__.py + rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py + rift/tasklets/${TASKLET_NAME}/launchpad.py + rift/tasklets/${TASKLET_NAME}/salt.py + rift/tasklets/${TASKLET_NAME}/util.py + COMPONENT ${PKG_LONG_NAME} + PYTHON3_ONLY) diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/Makefile b/modules/core/mano/rwmc/plugins/rwmctasklet/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. 
; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py new file mode 100644 index 0000000..388fbaf --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/__init__.py @@ -0,0 +1,17 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .rwmctasklet import MissionControlTasklet +from . import launchpad diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py new file mode 100644 index 0000000..e52afe6 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/launchpad.py @@ -0,0 +1,495 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import ncclient +import ncclient.asyncio_manager +import os +import time +from datetime import timedelta + +from . 
import salt
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+from gi.repository import RwYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang, RwLaunchpadYang
+
+
+class JobNotStarted(Exception):
+    pass
+
+
+class LaunchpadStartError(Exception):
+    pass
+
+
+class LaunchpadConfigError(Exception):
+    pass
+
+
+class Launchpad(object):
+    def __init__(self, mgmt_domain_name, node_id=None, ip_address=None):
+        self._mgmt_domain_name = mgmt_domain_name
+        self._node_id = node_id
+        self._ip_address = ip_address
+
+    def __repr__(self):
+        return "Launchpad(mgmt_domain_name={}, node_id={}, ip_address={})".format(
+                self._mgmt_domain_name, self._node_id, self._ip_address
+                )
+
+    @property
+    def ip_address(self):
+        return self._ip_address
+
+    @ip_address.setter
+    def ip_address(self, ip_address):
+        self._ip_address = ip_address
+
+    @property
+    def node_id(self):
+        return self._node_id
+
+    @node_id.setter
+    def node_id(self, node_id):
+        self._node_id = node_id
+
+    @property
+    def mgmt_domain_name(self):
+        return self._mgmt_domain_name
+
+    @property
+    def exe_path(self):
+        return "{}/demos/launchpad.py".format(os.environ["RIFT_INSTALL"])
+
+    @property
+    def args(self):
+        return "-m ethsim --ip-list=\"{}\"".format(self.ip_address)
+
+
+class LaunchpadConfigurer(object):
+    NETCONF_PORT=2022
+    NETCONF_USER="admin"
+    NETCONF_PW="admin"
+
+    def __init__(self, log, loop, launchpad, vm_pool_mgr, network_pool_mgr):
+        self._log = log
+        self._loop = loop
+        self._launchpad = launchpad
+        self._vm_pool_mgr = vm_pool_mgr
+        self._network_pool_mgr = network_pool_mgr
+
+        self._manager = None
+
+        self._model = RwYang.Model.create_libncx()
+        self._model.load_schema_ypbc(RwCloudYang.get_schema())
+        self._model.load_schema_ypbc(RwBaseYang.get_schema())
+        self._model.load_schema_ypbc(RwResourceMgrYang.get_schema())
+        self._model.load_schema_ypbc(RwNsmYang.get_schema())
+        self._model.load_schema_ypbc(RwConmanYang.get_schema())
+        self._model.load_schema_ypbc(RwLaunchpadYang.get_schema())
+        self._cloud_account = None
+
+    @staticmethod
+    def wrap_netconf_config_xml(xml):
+        # Wrap the payload in the netconf <config> element expected by
+        # edit-config.
+        xml = '<config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
+        return xml
+
+    @asyncio.coroutine
+    def _connect(self, timeout_secs=240):
+
+        start_time = time.time()
+        while (time.time() - start_time) < timeout_secs:
+
+            try:
+                self._log.debug("Attempting Launchpad netconf connection.")
+
+                manager = yield from ncclient.asyncio_manager.asyncio_connect(
+                        loop=self._loop,
+                        host=self._launchpad.ip_address,
+                        port=LaunchpadConfigurer.NETCONF_PORT,
+                        username=LaunchpadConfigurer.NETCONF_USER,
+                        password=LaunchpadConfigurer.NETCONF_PW,
+                        allow_agent=False,
+                        look_for_keys=False,
+                        hostkey_verify=False,
+                        )
+
+                return manager
+
+            except ncclient.transport.errors.SSHError as e:
+                self._log.warning("Netconf connection to launchpad %s failed: %s",
+                                  self._launchpad, str(e))
+
+            yield from asyncio.sleep(5, loop=self._loop)
+
+        raise LaunchpadConfigError("Failed to connect to Launchpad within %s seconds" %
+                                   timeout_secs)
+
+    @asyncio.coroutine
+    def _configure_launchpad_mode(self):
+        """ configure launchpad mode """
+        cfg = RwLaunchpadYang.YangData_RwLaunchpad_LaunchpadConfig.from_dict({'operational_mode': 'MC_MANAGED'})
+        xml = cfg.to_xml_v2(self._model)
+        netconf_xml = self.wrap_netconf_config_xml(xml)
+
+        self._log.debug("Sending launchpad mode config xml %s to launchpad %s",
netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config(target="running", + config=netconf_xml,) + + self._log.debug("Received edit config response: %s", str(response)) + + @asyncio.coroutine + def _configure_service_orchestrator(self): + @asyncio.coroutine + def configure_service_orchestrator_endpoint(): + """ Configure Service Orchestrator Information to NSM Tasklet""" + cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1', + 'cm_port' : 2022, + 'cm_username' : 'admin', + 'cm_password' : 'admin'}) + + xml = cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending cm-endpoint config xml to %s: %s", + netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config(target="running", + config=netconf_xml,) + self._log.debug("Received edit config response: %s", str(response)) + + @asyncio.coroutine + def configure_resource_orchestrator_endpoint(): + """ Configure Resource Orchestrator Information to SO Tasklet""" + cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1', + 'ro_port' : 2022, + 'ro_username' : 'admin', + 'ro_password' : 'admin'}) + xml = cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending ro-endpoint config xml to %s: %s", + netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config(target="running", + config=netconf_xml,) + self._log.debug("Received edit config response: %s", str(response)) + + yield from configure_service_orchestrator_endpoint() + yield from configure_resource_orchestrator_endpoint() + + + @asyncio.coroutine + def _configure_cloud_account(self, cloud_account): + self._log.debug("Configuring launchpad %s cloud account: %s", + self._launchpad, cloud_account) + + cloud_account_cfg = RwCloudYang.CloudAccount.from_dict( + cloud_account.account.as_dict() + ) + + xml = cloud_account_cfg.to_xml_v2(self._model) + netconf_xml = self.wrap_netconf_config_xml(xml) + + self._log.debug("Sending configure cloud account xml to %s: %s", + netconf_xml, self._launchpad.ip_address) + + response = yield from self._manager.edit_config( + target="running", + config=netconf_xml, + ) + + self._log.debug("Received edit config response: %s", str(response)) + + @asyncio.coroutine + def _wait_until_system_ready(self, timeout_secs=60): + self._log.debug("Waiting for all tasklets in launchpad %s to be ready", self._launchpad) + + start_time = time.time() + while (time.time() - start_time) < timeout_secs: + yield from asyncio.sleep(1, loop=self._loop) + + if self._manager is None: + self._log.info("Reconnecting to launchpad") + self._manager = yield from self._connect() + + try: + response = yield from self._manager.get(('xpath', '/vcs/info')) + except (ncclient.NCClientError, ncclient.operations.errors.TimeoutExpiredError) as e: + self._log.error("Caught error when requesting tasklet info: %s", str(e)) + self._manager = None + continue + + try: + response_xml = response.data_xml.decode() + except Exception as e: + self._log.error("ncclient_manager failed to decode xml: %s", str(e)) + self._log.error("raw ncclient response: %s", response.xml) + continue + + response_xml = response_xml[response_xml.index(' len(self._unallocated_ids): + raise AllocateError("Not enough unallocated resources in pool %s. 
" + "(num_resources: %s, num_unallocated: %s)", + self, len(self._resource_ids), len(self._unallocated_ids)) + + ids = [] + for i in range(num): + ids.append(self._unallocated_ids.pop()) + + return ids + + def deallocate(self, resource_ids): + for id in resource_ids: + if id not in self._resource_ids: + raise DeallocateError("Unknown resource id: %s", id) + + for id in resource_ids: + self._unallocated_ids.add(id) + + +class PoolResourceAllocator(object): + def __init__(self, log, loop, mgmt_domain, pool_mgr, num_allocate): + self._log = log + self._loop = loop + self._mgmt_domain = mgmt_domain + self._pool_mgr = pool_mgr + self._num_allocate = num_allocate + + self._pool = None + self._resources = None + + def __del__(self): + if self._resources is not None: + self.deallocate() + + @property + def pool(self): + return self._pool + + @property + def resources(self): + return self._resources + + def has_static_resources(self): + for pool in self._pool_mgr.list_mgmt_domain_pools(self._mgmt_domain.name): + if pool.resource_ids: + return True + + return False + + def get_cloud_account(self): + for pool in self._pool_mgr.list_mgmt_domain_pools(self._mgmt_domain.name): + return pool.cloud_account + + raise CloudAccountError("Could not find cloud account associated with mgmt_domain: %s", + self._mgmt_domain.name) + + @asyncio.coroutine + def allocate(self): + self._log.info("Entered Pool Resource allocate") + if self.pool is not None or self.resources is not None: + raise AllocateError("Resources already allocated") + + self._log.info("Starting %s pool allocation for %s resouces", + self._pool, self._num_allocate) + while self._resources is None: + self._log.info("Pool resources is None, waiting for resources to allocate") + # Add a callback notification to the pool for when resources + # are available. 
+            yield from asyncio.sleep(3, loop=self._loop)
+
+            try:
+                current_pools = self._pool_mgr.list_mgmt_domain_pools(self._mgmt_domain.name)
+            except Exception as e:
+                # The lookup itself failed, so current_pools may not be bound
+                # here; log the exception instead.
+                self._log.warning("Mgmt Domain lookup failed (possibly due to "
+                                  "mgmt-domain being deleted): %s", str(e))
+                break
+
+            for pool in current_pools:
+                try:
+                    self._resources = pool.allocate(self._num_allocate)
+                    self._pool = pool
+                    # Stop at the first pool that satisfies the request
+                    break
+                except AllocateError as e:
+                    self._log.debug("Could not allocate resources from pool %s: %s",
+                                    pool, str(e))
+
+        return self._resources
+
+    def deallocate(self):
+        if self._resources is None:
+            self._log.warning("Nothing to deallocate")
+            return
+
+        self._pool.deallocate(self._resources)
+
+        self._resources = None
+        self._pool = None
+
+
+class ResourcePoolManager(object):
+    def __init__(self, log, mgmt_domains, cloud_accounts):
+        self._log = log
+        self._mgmt_domains = mgmt_domains
+        self._cloud_accounts = cloud_accounts
+
+        self._resource_pools = {}
+
+    @property
+    def id_field(self):
+        raise NotImplementedError()
+
+    def list_cloud_resources(self, cloud_account):
+        raise NotImplementedError()
+
+    def _find_resource_id_pool(self, resource_id, cloud_account):
+        for pool in self._resource_pools.values():
+            if resource_id in pool.resource_ids:
+                return pool
+
+        return None
+
+    def _get_mgmt_domain(self, mgmt_domain_name):
+        try:
+            return self._mgmt_domains[mgmt_domain_name]
+        except KeyError as e:
+            raise MgmtDomainNotFound(e)
+
+    def _get_cloud_account(self, cloud_account_name):
+        if cloud_account_name not in self._cloud_accounts:
+            raise CloudAccountNotFound("Cloud account name not found: %s", cloud_account_name)
+
+        cloud_account = self._cloud_accounts[cloud_account_name]
+
+        return cloud_account
+
+    def _assign_resource_pool_mgmt_domain(self, pool, mgmt_domain):
+        try:
+            self._log.debug("Assigning pool (%s) to mgmt_domain (%s)", pool, mgmt_domain)
+            pool.mgmt_domain = mgmt_domain
+        except PoolError as e:
+            raise AssignResourceError(e)
+
+        self._log.info("Assigned pool (%s) to mgmt_domain (%s)", pool, mgmt_domain)
+
+    def _unassign_resource_pool_mgmt_domain(self, pool):
+        try:
+            mgmt_domain = pool.mgmt_domain
+            if mgmt_domain is None:
+                self._log.warning("Pool does not have a mgmt_domain assigned.")
+                return
+
+            self._log.debug("Unassigning pool (%s) from mgmt_domain (%s)", pool, mgmt_domain)
+            pool.mgmt_domain = None
+        except PoolError as e:
+            raise AssignResourceError(e)
+
+        self._log.info("Unassigned mgmt_domain (%s) from pool: %s", mgmt_domain, pool)
+
+    def _assign_mgmt_domain(self, mgmt_domain, pool):
+        self._log.debug("Assigning pool %s to mgmt_domain %s", pool, mgmt_domain)
+        pool.mgmt_domain = mgmt_domain
+
+    def _unassign_mgmt_domain(self, mgmt_domain, pool):
+        self._log.debug("Unassigning pool %s from mgmt_domain %s", pool, mgmt_domain)
+        pool.mgmt_domain = None
+
+    def list_cloud_pools(self, cloud_account_name):
+        cloud_pools = []
+        cloud_account = self._get_cloud_account(cloud_account_name)
+        for pool in self._resource_pools.values():
+            if pool.cloud_account == cloud_account:
+                cloud_pools.append(pool)
+
+        return cloud_pools
+
+    def list_mgmt_domain_pools(self, mgmt_domain_name):
+        mgmt_domain_pools = []
+        mgmt_domain = self._get_mgmt_domain(mgmt_domain_name)
+        for pool in self._resource_pools.values():
+            if pool.mgmt_domain == mgmt_domain:
+                mgmt_domain_pools.append(pool)
+
+        return mgmt_domain_pools
+
+    def list_available_cloud_resources(self, cloud_account_name, cloud_resources=None):
+        cloud = self._get_cloud_account(cloud_account_name)
+        resources = []
+
+        # If cloud_resources wasn't passed in, then fetch the latest resources
+        # from the cloud.
+        if cloud_resources is None:
+            cloud_resources = self.list_cloud_resources(cloud_account_name)
+
+        for resource in cloud_resources:
+            if self._find_resource_id_pool(
+                    getattr(resource, self.id_field),
+                    cloud,
+                    ) is None:
+                resources.append(resource)
+
+        return resources
+
+    def list_available_resources(self, pool_name, cloud_resources=None):
+        pool = self.get_pool(pool_name)
+        cloud_account = pool.cloud_account
+
+        return self.list_available_cloud_resources(cloud_account.name, cloud_resources)
+
+    def get_pool(self, pool_name):
+        try:
+            return self._resource_pools[pool_name]
+        except KeyError as e:
+            raise PoolNotFoundError(e)
+
+    def delete_mgmt_domain_pool(self, mgmt_domain_name, pool_name):
+        mgmt_domain = self._get_mgmt_domain(mgmt_domain_name)
+        pool = self.get_pool(pool_name)
+
+        self._log.debug("Deleting mgmt_domain %s pool: %s",
+                        mgmt_domain, pool)
+
+        self._unassign_mgmt_domain(mgmt_domain, pool)
+
+    def update_mgmt_domain_pools(self, mgmt_domain_name, pool_name):
+        mgmt_domain = self._get_mgmt_domain(mgmt_domain_name)
+        pool = self.get_pool(pool_name)
+
+        self._log.debug("Updating mgmt_domain %s pools: %s",
+                        mgmt_domain, pool)
+
+        self._assign_mgmt_domain(mgmt_domain, pool)
+
+    def add_id_to_pool(self, pool_name, resource_id):
+        pool = self.get_pool(pool_name)
+        resource_list = self.list_cloud_resources(pool.cloud_account.name)
+        resource_ids = [getattr(r, self.id_field) for r in resource_list]
+        if resource_id not in resource_ids:
+            msg = ("Could not find resource_id %s in cloud account %s" %
+                   (resource_id, pool.cloud_account.name))
+            raise AddResourceError(msg)
+
+        # _find_resource_id_pool expects (resource_id, cloud_account)
+        find_pool = self._find_resource_id_pool(resource_id, pool.cloud_account)
+        if find_pool is not None:
+            msg = ("resource_id %s in cloud account %s already added to pool %s" %
+                   (resource_id, pool.cloud_account.name, find_pool.name))
+            raise AddResourceError(msg)
+
+        self._log.debug("Adding id %s to pool %s", resource_id, pool)
+        pool.add_resource_id(resource_id)
+
+    def remove_id_from_pool(self, pool_name, resource_id):
+        pool = self.get_pool(pool_name)
+        try:
+            self._log.debug("Removing id %s from pool %s", resource_id, pool)
+            pool.remove_resource_id(resource_id)
+        except ValueError as e:
+            self._log.error("Could not remove unknown resource_id(%s) from pool(%s)",
+                            resource_id, pool_name)
+            raise RemoveResourceError(e)
+
+        self._log.info("Removed Resource (%s) from pool: %s", resource_id, pool)
+
+    def update_dynamic_scaling(self, pool_name, dynamic_scaling):
+        pool = self.get_pool(pool_name)
+        pool.set_dynamic_scaling(dynamic_scaling)
+        self._log.info("Updated Resource pool Dynamic Scaling to %s", dynamic_scaling)
+
+    def add_resource_pool(self, pool_name, cloud_account_name, assigned_ids, is_dynamic_scaling):
+        if pool_name in self._resource_pools:
+            self._log.warning("Pool name already exists: %s" % pool_name)
+            return
+
+        avail_resources = self.list_available_cloud_resources(cloud_account_name)
+        avail_ids = [getattr(a, self.id_field) for a in avail_resources]
+        for assign_id in assigned_ids:
+            if assign_id not in avail_ids:
+                raise AddPoolError("Resource ID already assigned or not found: %s", assign_id)
+
+        cloud_account = self._get_cloud_account(cloud_account_name)
+
+        pool = ResourcePool(
+                self._log,
+                pool_name,
+                cloud_account,
+                is_dynamic_scaling,
+                )
+
+        self._resource_pools[pool_name] = pool
+
+        self._log.info("Added Resource Pool: %s", pool)
+
+    def update_resource_pool(self, pool_name,
                             cloud_account_name, assigned_ids, is_dynamic_scaling):
+        pool = self.get_pool(pool_name)
+        cloud_account = self._get_cloud_account(cloud_account_name)
+
+        if pool.cloud_account != cloud_account:
+            raise PoolError("Cannot modify a resource pool's cloud account")
+
+        current_ids = pool.resource_ids
+
+        added_ids = set(assigned_ids) - set(current_ids)
+        for id in added_ids:
+            pool.add_resource_id(id)
+
+        removed_ids = set(current_ids) - set(assigned_ids)
+        for id in removed_ids:
+            pool.remove_resource_id(id)
+
+        pool.set_dynamic_scaling(is_dynamic_scaling)
+
+        self._log.info("Updated Resource Pool: %s", pool)
+
+    def delete_resource_pool(self, pool_name):
+        pool = self.get_pool(pool_name)
+        if pool.resource_ids:
+            self._log.warning("Resource pool still has Resources: %s. Disassociating them from the pool.", pool.resource_ids)
+            for resourceid in pool.resource_ids:
+                self.remove_id_from_pool(pool_name, resourceid)
+
+        if pool.mgmt_domain:
+            raise DeletePoolError("Management Domain %s still associated with Resource Pool: %s",
+                                  pool.mgmt_domain.name, pool_name)
+
+        del self._resource_pools[pool_name]
+
+        self._log.info("Removed Resource Pool: %s", pool)
+
+
+class VMPoolManager(ResourcePoolManager):
+    @property
+    def id_field(self):
+        return "vm_id"
+
+    def list_cloud_resources(self, cloud_account_name):
+        cloud = self._get_cloud_account(cloud_account_name)
+        resources = cloud.list_vms()
+        return resources.vminfo_list
+
+
+class NetworkPoolManager(ResourcePoolManager):
+    @property
+    def id_field(self):
+        return "network_id"
+
+    def list_cloud_resources(self, cloud_account_name):
+        cloud = self._get_cloud_account(cloud_account_name)
+        resources = cloud.list_networks()
+        return resources.networkinfo_list
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unfortunately, it is currently difficult to figure out what exactly has
+    # changed in this xact without Pbdelta support (RIFT-4916).
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates.
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ResourcePoolDtsConfigHandler(object):
+    def __init__(self, dts, log, pool_mgr, xpath):
+        self._dts = dts
+        self._log = log
+        self._pool_mgr = pool_mgr
+        self._xpath = xpath
+
+        self._pool_reg = None
+
+    def _delete_pool(self, pool_name):
+        self._log.info("Deleting pool %s", pool_name)
+
+        self._pool_mgr.delete_resource_pool(pool_name)
+
+    def _add_pool(self, pool_cfg):
+        self._log.info("Adding pool: %s", pool_cfg)
+
+        self._pool_mgr.add_resource_pool(
+                pool_name=pool_cfg.name,
+                cloud_account_name=pool_cfg.cloud_account,
+                assigned_ids=[a.id for a in pool_cfg.assigned],
+                is_dynamic_scaling=pool_cfg.dynamic_scaling,
+                )
+
+    def _update_pool(self, pool_cfg):
+        self._log.info("Updating pool: %s", pool_cfg)
+
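+        # Delegates to the pool manager, which diffs the assigned IDs against
+        # the pool's current IDs and applies the adds/removes.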
self._pool_mgr.update_resource_pool( + pool_name=pool_cfg.name, + cloud_account_name=pool_cfg.cloud_account, + assigned_ids=[a.id for a in pool_cfg.assigned], + is_dynamic_scaling=pool_cfg.dynamic_scaling, + ) + + def register(self): + """ Register for Resource Pool create/update/delete/read requests from dts """ + + def apply_config(dts, acg, xact, action, _): + """Apply the pending pool configuration""" + + self._log.debug("Got pool apply config (xact: %s) (action: %s)", + xact, action) + + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. Skipping apply config") + return + + return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ prepare callback from dts for resource pool """ + + action = xact_info.handle.get_query_action() + + self._log.debug("Got resource pool prepare config (msg %s) (action %s)", + msg, action) + + fref = ProtobufC.FieldReference.alloc() + pb_msg = msg.to_pbcm() + fref.goto_whole_message(pb_msg) + + if action == rwdts.QueryAction.UPDATE: + # Got UPDATE action in prepare callback. Check what got Created/Updated in a Resource Pool + # It could either be a create of a new pool or updates for existing pool. + # Separating the creation of Pool and adding resources to the pool. + # In case of updates, we do not get the entire existing config, but only what changed + + # Create a new pool, return if it already exists + fref.goto_proto_name(pb_msg,"name") + if fref.is_field_present(): + self._add_pool(msg) + + # Now either a resource ID is assigned to a newly created pool + # or a pool is updated with a resource ID. + fref.goto_proto_name(pb_msg,"assigned") + if fref.is_field_present(): + ids = msg.get_assigned() + for assign_id in ids: + assign_id_pb = assign_id.to_pbcm() + fref.goto_proto_name(assign_id_pb,"id") + if fref.is_field_present(): + self._pool_mgr.add_id_to_pool(msg.get_name(), assign_id.get_id()) + + # Dynamic scaling attribute was updated + fref.goto_proto_name(pb_msg, "dynamic_scaling") + if fref.is_field_present(): + self._pool_mgr.update_dynamic_scaling(msg.get_name(), msg.get_dynamic_scaling()) + + + elif action == rwdts.QueryAction.DELETE: + # Got DELETE action in prepare callback + # Check what got deleted - it could be either + # the pool itself, or its cloud account, or its assigned IDs. + + # Did the entire pool get deleted? + # no [vm|network]-pool pool + if fref.is_field_deleted(): + self._delete_pool(msg.name); + + # Did the assigned ID get deleted? 
+ # no [vm|network]-pool pool assigned + fref.goto_proto_name(pb_msg,"assigned") + if fref.is_field_deleted(): + ids = msg.get_assigned() + for assign_id in ids: + assign_id_pb = assign_id.to_pbcm() + fref.goto_proto_name(assign_id_pb,"id") + if fref.is_field_present(): + self._pool_mgr.remove_id_from_pool(msg.get_name(), assign_id.get_id()) + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Resource Pool config using xpath: %s", + self._xpath, + ) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + with self._dts.appconf_group_create(handler=acg_handler) as acg: + self._pool_reg = acg.register( + xpath="C," + self._xpath, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class PoolDtsOperdataHandler(object): + def __init__(self, dts, log, pool_mgr): + self._dts = dts + self._log = log + self._pool_mgr = pool_mgr + + @property + def pool_gi_cls(self): + raise NotImplementedError() + + @property + def id_field(self): + raise NotImplementedError() + + @property + def name_field(self): + raise NotImplementedError() + + def get_show_pool_xpath(self, pool_name=None): + raise NotImplementedError() + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare_pool(xact_info, action, ks_path, msg): + path_entry = self.pool_gi_cls.schema().keyspec_to_entry(ks_path) + pool_name = path_entry.key00.name + self._log.debug("Got show %s request: %s", + str(self.pool_gi_cls), ks_path.create_string()) + + if not pool_name: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + pool = self._pool_mgr.get_pool(pool_name) + self._log.debug("Showing pool: %s", pool) + except Exception as e: + self._log.warning("Could not get pool: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + cloud_resources = self._pool_mgr.list_cloud_resources(pool.cloud_account.name) + available_resources = self._pool_mgr.list_available_resources(pool_name, cloud_resources) + unreserved_pool_resources = pool.unallocated_ids + + def find_cloud_resource(resource_id): + for resource in cloud_resources: + if getattr(resource, self.id_field) == resource_id: + return resource + + raise ResourceNotFoundError( + "Could not find resource id %s in pool %s cloud account %s" % + (resource_id, pool, pool.cloud_account) + ) + + msg = self.pool_gi_cls(name=pool_name) + if pool.mgmt_domain is not None: + msg.mgmt_domain = pool.mgmt_domain.name + + for avail in available_resources: + new_avail = msg.available.add() + new_avail.id = getattr(avail, self.id_field) + new_avail.name = getattr(avail, self.name_field) + + for assigned_id in pool.resource_ids: + cloud_resource = find_cloud_resource(assigned_id) + self._log.debug("Found cloud resource: %s", cloud_resource) + assigned = msg.assigned_detail.add() + assigned.id = assigned_id + assigned.is_reserved = assigned_id not in unreserved_pool_resources + assigned.resource_info.from_dict(cloud_resource.as_dict()) + + msg.dynamic_scaling = pool.is_dynamic_scaling + + self._log.debug("Responding to show pool: %s", msg) + + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=self.get_show_pool_xpath(pool_name), + msg=msg, + ) + + yield from self._dts.register( + xpath=self.get_show_pool_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare_pool), + flags=rwdts.Flag.PUBLISHER, + ) + + + +class 
NetworkPoolDtsOperdataHandler(PoolDtsOperdataHandler):
+    def __init__(self, dts, log, network_pool_mgr):
+        super().__init__(dts, log, network_pool_mgr)
+
+    @property
+    def pool_gi_cls(self):
+        return RwMcYang.NetworkPool
+
+    @property
+    def id_field(self):
+        return "network_id"
+
+    @property
+    def name_field(self):
+        return "network_name"
+
+    def get_show_pool_xpath(self, pool_name=None):
+        path = "D,/rw-mc:network-pool/pool{}".format(
+                "[rw-mc:name='%s']" % pool_name if pool_name is not None else ""
+                )
+
+        return path
+
+
+class VMPoolDtsOperdataHandler(PoolDtsOperdataHandler):
+    def __init__(self, dts, log, vm_pool_mgr):
+        super().__init__(dts, log, vm_pool_mgr)
+
+    @property
+    def pool_gi_cls(self):
+        return RwMcYang.VmPool
+
+    @property
+    def id_field(self):
+        return "vm_id"
+
+    @property
+    def name_field(self):
+        return "vm_name"
+
+    def get_show_pool_xpath(self, pool_name=None):
+        path = "D,/rw-mc:vm-pool/pool{}".format(
+                "[rw-mc:name='%s']" % pool_name if pool_name is not None else ""
+                )
+
+        return path
+
+
+class CloudAccountDtsConfigHandler(object):
+    XPATH = "/rw-mc:cloud-account/account"
+
+    def __init__(self, dts, loop, log, cloud_accounts):
+        self._dts = dts
+        self._loop = loop
+        self._log = log
+        self._cloud_accounts = cloud_accounts
+
+        self._cloud_reg = None
+
+    def _add_cloud(self, cfg):
+        self._log.info("Adding cloud account: %s", cfg)
+        # Check if the cloud account already exists. If it does, this is
+        # really an update of the cloud account, and the rest of the details
+        # are handled in _update_cloud.
+        if cfg.name in self._cloud_accounts:
+            self._log.warning("Cloud account name %s already exists!", cfg.name)
+            if cfg.has_field('account_type'):
+                raise CloudAccountError("Cannot change cloud's account-type")
+
+            return False
+
+        # If this is a new cloud account, then the account-type field is
+        # mandatory.
+        # NOTE: Right now, account-type is not mandatory in yang due to a bug,
+        # so we need to check for it and artificially enforce it to be
+        # mandatory.
+        if cfg.has_field('account_type'):
+            cls = get_cloud_account_cls_from_type(cfg.account_type)
+        else:
+            raise CloudAccountError("Missing mandatory 'cloud-account' field")
+
+        account = cls.from_cfg(self._log, cfg)
+
+        self._cloud_accounts[account.name] = account
+        return True
+
+    def _delete_cloud(self, name):
+        self._log.info("Deleting cloud account: %s", name)
+
+        if name not in self._cloud_accounts:
+            self._log.warning("Cloud name doesn't exist!")
+            return
+
+        del self._cloud_accounts[name]
+
+    def _update_cloud(self, cfg):
+        self._log.info("Updating cloud account: %s", cfg)
+
+        if cfg.name not in self._cloud_accounts:
+            self._log.warning("Cloud name doesn't exist!")
+            return
+
+        account = self._cloud_accounts[cfg.name]
+        account.update_from_cfg(cfg)
+        self._log.debug("After update, new account details: %s", account.account)
+
+    def register(self):
+        """ Register for Cloud Account create/update/delete/read requests from dts """
+
+        def apply_config(dts, acg, xact, action, _):
+            """Apply the pending cloud account configuration"""
+
+            self._log.debug("Got cloud account apply config (xact: %s) (action: %s)",
+                            xact, action)
+
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the
+                # current config. Since confd doesn't actually persist data
+                # this never has any data, so skip this for now.
+                self._log.debug("No xact handle. 
Skipping apply config") + return + + #return RwTypes.RwStatus.SUCCESS + + @asyncio.coroutine + def on_prepare(dts, acg, xact, xact_info, ks_path, msg): + """ Prepare callback from DTS for Cloud Account """ + + action = xact_info.handle.get_query_action() + self._log.debug("Got cloud account prepare config (msg %s) (action %s)", + msg, action) + + @asyncio.coroutine + def start_cloud_account_validation(cloud_name): + account = self._cloud_accounts[cloud_name] + yield from account.validate_cloud_account_credentials(self._loop) + + fref = ProtobufC.FieldReference.alloc() + pb_msg = msg.to_pbcm() + fref.goto_whole_message(pb_msg) + is_new_account = True + + if action == rwdts.QueryAction.UPDATE: + # We get an UPDATE if either a new cloud-account is created or one + # of its fields is updated. + # Separating the creation of cloud-account from updating its fields + fref.goto_proto_name(pb_msg,"name") + if fref.is_field_present(): + is_new_account = self._add_cloud(msg) + + if not is_new_account: + # This was an Update of the fields of the cloud account + # Need to check which account-type's fields were updated + self._update_cloud(msg) + + # Asynchronously check the cloud accounts credentials as soon as a + # new cloud account is created or an existing account is updated + self._loop.create_task(start_cloud_account_validation(msg.name)) + + elif action == rwdts.QueryAction.DELETE: + # Got DELETE action in prepare callback + # We only allow the deletion of cloud account itself, not its fields. + + fref.goto_whole_message(pb_msg) + if fref.is_field_deleted(): + # Cloud account was deleted + self._delete_cloud(msg.name); + + else: + self._log.error("Action (%s) NOT SUPPORTED", action) + xact_info.respond_xpath(rwdts.XactRspCode.NACK) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + self._log.debug("Registering for Cloud Account config using xpath: %s", + CloudAccountDtsConfigHandler.XPATH) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + with self._dts.appconf_group_create(handler=acg_handler) as acg: + self._cloud_reg = acg.register( + xpath="C," + CloudAccountDtsConfigHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY, + on_prepare=on_prepare + ) + + +class CloudAccountDtsOperdataHandler(object): + def __init__(self, dts, loop, log, cloud_accounts, + vm_pool_mgr, network_pool_mgr): + self._dts = dts + self._loop = loop + self._log = log + self._cloud_accounts = cloud_accounts + self._vm_pool_mgr = vm_pool_mgr + self._network_pool_mgr = network_pool_mgr + + def _register_show_pools(self): + def get_xpath(cloud_name=None): + return "D,/rw-mc:cloud-account/account{}/pools".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + self._log.debug("Got show cloud pools request: %s", ks_path.create_string()) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + account = self._cloud_accounts[cloud_account_name] + if not account: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + # If cloud account's credentials are not even valid, don't even try to fetch data using CAL APIs + # as they will throw an exception & tracebacks. 
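+            # (credential_status is populated asynchronously by the validation
+            # task that is kicked off when the account is configured.)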
+ if account.credential_status != "Validated": + self._log.warning("Cloud Account Credentials are not valid: %s", account.credential_status_details) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + cloud_vm_pools = self._vm_pool_mgr.list_cloud_pools(cloud_account_name) + cloud_network_pools = self._network_pool_mgr.list_cloud_pools(cloud_account_name) + except Exception as e: + self._log.warning("Could not get cloud pools: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + cloud_pools = RwMcYang.CloudPools() + + for vm in cloud_vm_pools: + cloud_pools.vm.add().name = vm.name + + for network in cloud_network_pools: + cloud_pools.network.add().name = network.name + + self._log.debug("Responding to cloud pools request: %s", cloud_pools) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=cloud_pools, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + def _register_show_resources(self): + def get_xpath(cloud_name=None): + return "D,/rw-mc:cloud-account/account{}/resources".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + xpath = ks_path.create_string() + self._log.debug("Got show cloud resources request: %s", xpath) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + account = self._cloud_accounts[cloud_account_name] + if not account: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + # If cloud account's credentials are not even valid, don't even try to fetch data using CAL APIs + # as they will throw an exception & tracebacks. 
+ if account.credential_status != "Validated": + self._log.warning("Cloud Account Credentials are not valid: %s", account.credential_status_details) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + respond_types = ["vm", "network"] + if "vm" in xpath: + respond_types = ["vm"] + + if "network" in xpath: + respond_types = ["network"] + + try: + if "vm" in respond_types: + vms = self._vm_pool_mgr.list_cloud_resources(cloud_account_name) + avail_vms = self._vm_pool_mgr.list_available_cloud_resources(cloud_account_name, vms) + avail_vm_ids = [v.vm_id for v in avail_vms] + + if "network" in respond_types: + networks = self._network_pool_mgr.list_cloud_resources(cloud_account_name) + avail_networks = self._network_pool_mgr.list_available_cloud_resources(cloud_account_name, networks) + avail_network_ids = [n.network_id for n in avail_networks] + + except Exception as e: + self._log.error("Could not get cloud resources: %s", e, exc_info=True) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + avail = RwMcYang.CloudResources() + + if "vm" in respond_types: + for vm in vms: + add_vm = avail.vm.add() + add_vm.id = vm.vm_id + add_vm.name = vm.vm_name + add_vm.available = add_vm.id in avail_vm_ids + + if "network" in respond_types: + for network in networks: + add_network = avail.network.add() + add_network.id = network.network_id + add_network.name = network.network_name + add_network.available = add_network.id in avail_network_ids + + self._log.debug("Responding to cloud resources request: %s", avail) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=avail, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + def _register_show_status(self): + def get_xpath(cloud_name=None): + return "D,/rw-mc:cloud-account/account{}/connection".format( + "[name='%s']" % cloud_name if cloud_name is not None else '' + ) + + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.CloudAccount.schema().keyspec_to_entry(ks_path) + cloud_account_name = path_entry.key00.name + self._log.debug("Got show cloud connection status request: %s", ks_path.create_string()) + + if not cloud_account_name: + self._log.warning("Cloud account name %s not found", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + account = self._cloud_accounts[cloud_account_name] + if not account: + self._log.warning("Cloud account %s does not exist", cloud_account_name) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + cred_status = account.credential_status + cred_details = account.credential_status_details + except Exception as e: + self._log.error("Could not get cloud status: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + cloud_creds = RwMcYang.CloudStatus() + if cred_status is not None: + cloud_creds.status = cred_status + cloud_creds.details = cred_details + else: + cloud_creds.status = "Validating..." + cloud_creds.details = "Connection status is being validated, please wait..." 
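+            # (A None credential_status means the validation task has not yet
+            # finished, so an in-progress status is reported instead of an error.)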
+ + self._log.debug("Responding to cloud connection status request: %s", cloud_creds) + xact_info.respond_xpath( + rwdts.XactRspCode.MORE, + xpath=get_xpath(cloud_account_name), + msg=cloud_creds, + ) + + xact_info.respond_xpath(rwdts.XactRspCode.ACK) + + yield from self._dts.register( + xpath=get_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + @asyncio.coroutine + def register(self): + yield from self._register_show_pools() + yield from self._register_show_resources() + yield from self._register_show_status() + +class SDNAccountDtsConfigHandler(object): + XPATH="/rw-mc:sdn/account" + + def __init__(self, dts, log, sdn_accounts): + self._dts = dts + self._log = log + self._sdn_accounts = sdn_accounts + + self._sdn_reg = None + + def _add_sdn(self, cfg): + self._log.info("Adding sdn account: %s", cfg) + if cfg.name in self._sdn_accounts: + self._log.warning("SDN name already exists!") + return + + # Right now we only have one SDN Account of type ODL; + # when we support more SDN account types, we should + # create a similar funtion to get sdn account class from type, + # like 'get_cloud_account_cls_from_type' + cls = OdlSDNAccount + account = cls.from_cfg(self._log, cfg) + self._sdn_accounts[account.name] = account + + def _delete_sdn(self, name): + self._log.info("Deleting sdn account: %s", name) + + if name not in self._sdn_accounts: + self._log.warning("SDN name doesn't exist!") + return + + del self._sdn_accounts[name] + + def _update_sdn(self, cfg): + self._log.info("Updating sdn account: %s", cfg) + + if cfg.name not in self._sdn_accounts: + self._log.warning("SDN name doesn't exist!") + return + + account = self._sdn_accounts[cfg.name] + account.update_from_cfg(cfg) + + def register(self): + def apply_config(dts, acg, xact, action, _): + """Apply the pending sdn account configuration""" + + self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", + xact, action) + + try: + if xact.xact is None: + # When RIFT first comes up, an INSTALL is called with the current config + # Since confd doesn't actally persist data this never has any data so + # skip this for now. + self._log.debug("No xact handle. 
Skipping apply config") + return + + sdn_add_cfgs, sdn_delete_cfgs, sdn_update_cfgs = get_add_delete_update_cfgs( + dts_member_reg=self._sdn_reg, + xact=xact, + key_name="name", + ) + + # Handle Deletes + for cfg in sdn_delete_cfgs: + self._delete_sdn(cfg.name) + + # Handle Adds + for cfg in sdn_add_cfgs: + self._add_sdn(cfg) + + # Handle Updates + for cfg in sdn_update_cfgs: + self._update_sdn(cfg) + + except Exception as e: + self._log.warning("Could not apply config for SDN account: %s", e) + + + self._log.debug("Registering for SDN Account config using xpath: %s", + SDNAccountDtsConfigHandler.XPATH) + + acg_handler = rift.tasklets.AppConfGroup.Handler( + on_apply=apply_config, + ) + with self._dts.appconf_group_create(acg_handler) as acg: + self._sdn_reg = acg.register( + xpath="C," + SDNAccountDtsConfigHandler.XPATH, + flags=rwdts.Flag.SUBSCRIBER, + ) + +class MgmtDomainDtsConfigHandler(object): + XPATH = "C,/rw-mc:mgmt-domain/rw-mc:domain" + + def __init__(self, dts, loop, log, mgmt_domains, + vm_pool_mgr, network_pool_mgr, lp_minions): + self._dts = dts + self._loop = loop + self._log = log + self._mgmt_domains = mgmt_domains + self._vm_pool_mgr = vm_pool_mgr + self._network_pool_mgr = network_pool_mgr + self._lp_minions = lp_minions + + self._fed_reg = None + self._vm_pool_configured = False + self._net_pool_configured = False + + def _delete_mgmt_domain_vm_pool(self, mgmt_domain_name, vm_pool_name): + self._log.debug("Deleting vm pool %s from mgmt_domain %s", vm_pool_name, mgmt_domain_name) + self._vm_pool_mgr.delete_mgmt_domain_pool(mgmt_domain_name, vm_pool_name) + self._vm_pool_configured = False + + def _delete_mgmt_domain_net_pool(self, mgmt_domain_name, net_pool_name): + self._log.debug("Deleting network pool %s from mgmt_domain %s", net_pool_name, mgmt_domain_name) + self._network_pool_mgr.delete_mgmt_domain_pool(mgmt_domain_name, net_pool_name) + self._net_pool_configured = False + + def _delete_mgmt_domain(self, fed_cfg): + self._log.debug("Deleting mgmt_domain: %s", fed_cfg.name) + + if self._mgmt_domains[fed_cfg.name].launchpad_state is "started": + # Launchpad is running, can not delete Mgmt-domin. 
+            raise DeleteMgmtDomainError("Cannot delete Mgmt-domain - Launchpad is still running!")
+
+        for vm_pool in self._vm_pool_mgr.list_mgmt_domain_pools(fed_cfg.name):
+            self._delete_mgmt_domain_vm_pool(fed_cfg.name, vm_pool.name)
+
+        for net_pool in self._network_pool_mgr.list_mgmt_domain_pools(fed_cfg.name):
+            self._delete_mgmt_domain_net_pool(fed_cfg.name, net_pool.name)
+
+        # We need to free up launchpad resources when a mgmt-domain is deleted
+        mgmt_domain = self._mgmt_domains[fed_cfg.name]
+        if mgmt_domain.launchpad_state in ["pending", "configuring"]:
+            # Mgmt-domain was deleted while the launchpad was in pending/configuring
+            # state. stop_launchpad() is a coroutine (see MgmtDomainDtsRpcHandler),
+            # so schedule it on the event loop; calling it bare would never run it.
+            self._loop.create_task(mgmt_domain.stop_launchpad())
+
+        mgmt_domain.release_launchpad()
+
+        del self._mgmt_domains[fed_cfg.name]
+
+    def _update_mgmt_domain_pools(self, name, fed_cfg):
+        self._log.debug("Updating mgmt_domain pools %s", name)
+
+        for vm_pool in fed_cfg.pools.vm:
+            self._vm_pool_mgr.update_mgmt_domain_pools(fed_cfg.name, vm_pool.name)
+            self._vm_pool_configured = True
+
+        for network_pool in fed_cfg.pools.network:
+            self._network_pool_mgr.update_mgmt_domain_pools(fed_cfg.name, network_pool.name)
+            self._net_pool_configured = True
+
+    def _add_mgmt_domain(self, fed_cfg):
+        self._log.debug("Creating new mgmt_domain: %s", fed_cfg.name)
+        if fed_cfg.name in self._mgmt_domains:
+            self._log.warning("Mgmt Domain name %s already exists!", fed_cfg.name)
+            return
+
+        mgmt_domain = MgmtDomain(
+            self._loop,
+            self._log,
+            fed_cfg.name,
+            self._vm_pool_mgr,
+            self._network_pool_mgr,
+            self._lp_minions,
+            )
+
+        self._mgmt_domains[fed_cfg.name] = mgmt_domain
+
+    def _update_mgmt_domain(self, fed_cfg):
+        self._log.debug("Updating mgmt_domain: %s", fed_cfg)
+
+        self._update_mgmt_domain_pools(fed_cfg.name, fed_cfg)
+
+        # Start the launchpad ONLY IF both the VM and network pools have been configured
+        if self._vm_pool_configured and self._net_pool_configured:
+            mgmt_domain = self._mgmt_domains[fed_cfg.name]
+            mgmt_domain.allocate_start_configure_launchpad_task()
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Mgmt Domain create/update/delete/read requests from dts """
+
+        def apply_config(dts, acg, xact, action, _):
+            """Apply the pending mgmt_domain configuration"""
+
+            self._log.debug("Got mgmt_domain apply config (xact: %s) (action: %s)",
+                            xact, action)
+
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config.
+                # Since confd doesn't actually persist data, this never has any data, so
+                # skip this for now.
+                self._log.debug("No xact handle. Skipping apply config")
+                return
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg):
+            """ Prepare callback from DTS for mgmt domain """
+
+            action = xact_info.handle.get_query_action()
+
+            self._log.debug("Got mgmt domain prepare config (msg %s) (action %s)",
+                            msg, action)
+
+            fref = ProtobufC.FieldReference.alloc()
+            pb_msg = msg.to_pbcm()
+            fref.goto_whole_message(pb_msg)
+
+            if action == rwdts.QueryAction.UPDATE:
+                # We get an UPDATE if either a new mgmt-domain is created or a pool is added/updated.
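+                # (Same FieldReference delta-inspection pattern as the cloud-account
+                # handler: whichever top-level field is present in the message is
+                # what this UPDATE actually changed.)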
+                # Separate the creation of the mgmt-domain from adding its pools
+                fref.goto_proto_name(pb_msg, "name")
+                if fref.is_field_present():
+                    self._add_mgmt_domain(msg)
+
+                fref.goto_proto_name(pb_msg, "pools")
+                if fref.is_field_present():
+                    self._update_mgmt_domain(msg)
+
+            elif action == rwdts.QueryAction.DELETE:
+                # Got DELETE action in prepare callback.
+                # Check what got deleted - it could be either
+                # the mgmt_domain itself, or its network pool or its vm pool.
+
+                # Did the entire mgmt_domain get deleted?
+                #     no mgmt-domain domain
+                fref.goto_whole_message(pb_msg)
+                if fref.is_field_deleted():
+                    self._delete_mgmt_domain(msg)
+
+                # Did the assigned pools get deleted?
+                #     no mgmt-domain domain pools
+                # or did a specific pool get deleted?
+                #     no mgmt-domain domain pools [vm|network]
+                # In either case, we get a DELETE call for each pool separately.
+                fref.goto_proto_name(pb_msg, "pools")
+                if fref.is_field_deleted():
+                    self._log.info("Removing pool: %s from mgmt-domain: %s", msg.pools, msg.get_name())
+
+                    pools = msg.get_pools()
+
+                    pools_pb = pools.to_pbcm()
+                    fref.goto_proto_name(pools_pb, "vm")
+                    vmpool = pools.vm
+                    if fref.is_field_deleted():
+                        self._delete_mgmt_domain_vm_pool(msg.get_name(), vmpool[0].name)
+
+                    fref.goto_proto_name(pools_pb, "network")
+                    netpool = pools.network
+                    if fref.is_field_deleted():
+                        self._delete_mgmt_domain_net_pool(msg.get_name(), netpool[0].name)
+
+            else:
+                self._log.error("Action (%s) NOT SUPPORTED", action)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                # Return here so the transaction is not also ACKed below
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug("Registering for mgmt_domain config using xpath: %s",
+                        MgmtDomainDtsConfigHandler.XPATH)
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(on_apply=apply_config)
+        with self._dts.appconf_group_create(handler=acg_handler) as acg:
+            self._fed_reg = acg.register(
+                    xpath=MgmtDomainDtsConfigHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                    on_prepare=on_prepare
+                    )
+
+
+class MgmtDomainDtsRpcHandler(object):
+    START_LAUNCHPAD_XPATH = "/rw-mc:start-launchpad"
+    STOP_LAUNCHPAD_XPATH = "/rw-mc:stop-launchpad"
+
+    def __init__(self, dts, log, mgmt_domains):
+        self._dts = dts
+        self._log = log
+        self._mgmt_domains = mgmt_domains
+
+        self.pending_msgs = []
+
+    @asyncio.coroutine
+    def register(self):
+        @asyncio.coroutine
+        def on_prepare_start(xact_info, action, ks_path, msg):
+            self._log.debug("Got launchpad start request: %s", msg)
+
+            name = msg.mgmt_domain
+            if name not in self._mgmt_domains:
+                err_msg = "Launchpad name %s not found" % name
+                self._log.error(err_msg)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                return
+
+            mgmt_domain = self._mgmt_domains[name]
+
+            try:
+                mgmt_domain.allocate_start_configure_launchpad_task()
+            except Exception as e:
+                self._log.error("Failed to start launchpad: %s", str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                return
+
+            xact_info.respond_xpath(
+                rwdts.XactRspCode.ACK,
+                xpath="O," + MgmtDomainDtsRpcHandler.START_LAUNCHPAD_XPATH,
+                )
+
+        @asyncio.coroutine
+        def on_prepare_stop(xact_info, action, ks_path, msg):
+            self._log.debug("Got launchpad stop request: %s", msg)
+
+            name = msg.mgmt_domain
+            if name not in self._mgmt_domains:
+                err_msg = "Launchpad name %s not found" % name
+                self._log.error(err_msg)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                return
+
+            mgmt_domain = self._mgmt_domains[name]
+            try:
+                yield from mgmt_domain.stop_launchpad()
+            except Exception as e:
+                self._log.exception("Failed to stop launchpad: %s", str(e))
+
xact_info.respond_xpath(rwdts.XactRspCode.NACK) + return + + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath="O," + MgmtDomainDtsRpcHandler.STOP_LAUNCHPAD_XPATH + ) + + yield from self._dts.register( + xpath="I," + MgmtDomainDtsRpcHandler.START_LAUNCHPAD_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare_start), + flags=rwdts.Flag.PUBLISHER + ) + + yield from self._dts.register( + xpath="I," + MgmtDomainDtsRpcHandler.STOP_LAUNCHPAD_XPATH, + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare_stop), + flags=rwdts.Flag.PUBLISHER + ) + + +class MgmtDomainDtsOperdataHandler(object): + def __init__(self, dts, log, mgmt_domains): + self._dts = dts + self._log = log + self._mgmt_domains = mgmt_domains + + def _get_respond_xpath(self, mgmt_domain_name=None): + return "D,/rw-mc:mgmt-domain/domain{}/launchpad".format( + "[name='%s']" % mgmt_domain_name if mgmt_domain_name is not None else "" + ) + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + path_entry = RwMcYang.MgmtDomain.schema().keyspec_to_entry(ks_path) + mgmt_domain_name = path_entry.key00.name + self._log.debug("Got show mgmt_domain launchpad request: %s", ks_path.create_string()) + + if not mgmt_domain_name: + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + mgmt_domain = self._mgmt_domains.get(mgmt_domain_name, None) + if mgmt_domain is None: + self._log.warning("Could not find management domain: %s", mgmt_domain) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + try: + lp_state = mgmt_domain.launchpad_state + lp_state_details= mgmt_domain.launchpad_state_details + lp_uptime = mgmt_domain.launchpad_uptime + lp_create_time = mgmt_domain.launchpad_create_time + + lp_ip = None + if mgmt_domain.launchpad_vm_info is not None: + if mgmt_domain.launchpad_vm_info.public_ip: + lp_ip = mgmt_domain.launchpad_vm_info.public_ip + else: + lp_ip = mgmt_domain.launchpad_vm_info.management_ip + + except Exception as e: + self._log.warning("Could not get mgmt-domain launchpad info: %s", e) + xact_info.respond_xpath(rwdts.XactRspCode.NA) + return + + msg = RwMcYang.MgmtDomainLaunchpad() + msg.state = lp_state + msg.state_details = lp_state_details + msg.uptime = lp_uptime + if lp_create_time is not None: + msg.create_time = lp_create_time + if lp_ip is not None: + msg.ip_address = lp_ip + + self._log.debug("Responding to mgmt_domain pools request: %s", msg) + xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=self._get_respond_xpath(mgmt_domain_name), + msg=msg, + ) + + yield from self._dts.register( + xpath=self._get_respond_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + +class MCUptimeDtsOperdataHandler(object): + def __init__(self, dts, log, start_time): + self._dts = dts + self._log = log + self._mc_start_time = start_time + + + def get_start_time(self): + return self._mc_start_time + + def _get_uptime_xpath(self): + return "D,/rw-mc:mission-control" + + @asyncio.coroutine + def register(self): + @asyncio.coroutine + def on_prepare(xact_info, action, ks_path, msg): + self._log.debug("Got show MC uptime request: %s", ks_path.create_string()) + + msg = RwMcYang.Uptime() + uptime_secs = float(time.time() - self.get_start_time()) + uptime_str = str(timedelta(seconds = uptime_secs)) + msg.uptime = uptime_str + msg.create_time = self.get_start_time() + + self._log.debug("Responding to MC Uptime request: %s", msg) + 
xact_info.respond_xpath( + rwdts.XactRspCode.ACK, + xpath=self._get_uptime_xpath(), + msg=msg, + ) + + yield from self._dts.register( + xpath=self._get_uptime_xpath(), + handler=rift.tasklets.DTS.RegistrationHandler( + on_prepare=on_prepare), + flags=rwdts.Flag.PUBLISHER, + ) + + +fallback_launchpad_resources = None +fallback_launchpad_public_ip = None + +def construct_fallback_launchpad_vm_pool(): + global fallback_launchpad_resources + global fallback_launchpad_public_ip + + if "RIFT_LP_NODES" not in os.environ: + return + + fallback_launchpad_resources = [] + for node in os.environ["RIFT_LP_NODES"].split(":"): + node_ip_id = node.split("|") + assert len(node_ip_id) == 2 + fallback_launchpad_resources.append(node_ip_id) + + if "RIFT_LP_PUBLIC_IP" not in os.environ: + fallback_launchpad_public_ip = None + return + + fallback_launchpad_public_ip = os.environ["RIFT_LP_PUBLIC_IP"] + + + +class MissionControlTasklet(rift.tasklets.Tasklet): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.rwlog.set_category('rw-mc') + self._dts = None + self._mgmt_domains = {} + self._domain_config_hdl = None + self._domain_rpc_hdl = None + self._pool_config_hdl = None + self._cloud_account_config_hdl = None + self._sdn_account_config_hdl = None + self._error_test_rpc_hdl = None + self._start_time = time.time() + + self._cloud_accounts = {} + self._sdn_accounts = {} + + self._lp_minions = {} + + self._vm_pool_mgr = VMPoolManager( + self.log, + self._mgmt_domains, + self._cloud_accounts, + ) + self._network_pool_mgr = NetworkPoolManager( + self.log, + self._mgmt_domains, + self._cloud_accounts, + ) + + def initialize_lxc(self): + self.log.info("Enabling Container Cal Debug Logging") + SimCloudAccount.enable_debug_logging(self.log.handlers) + + def start(self): + super().start() + self.log.info("Starting Mission Control Tasklet") + + CloudAccount.log_hdl = self.log_hdl + SDNAccount.log_hdl = self.log_hdl + + # Initialize LXC to the extent possible until RIFT-8483, RIFT-8485 are completed + self.initialize_lxc() + + # Use a fallback set of launchpad VM's when provided and no static + # resources are selected + construct_fallback_launchpad_vm_pool() + + self.log.debug("Registering with dts") + self._dts = rift.tasklets.DTS( + self.tasklet_info, + RwMcYang.get_schema(), + self.loop, + self.on_dts_state_change + ) + + self.log.debug("Created DTS Api GI Object: %s", self._dts) + + @asyncio.coroutine + def init(self): + """Initialize application. 
During this state transition all DTS + registrations and subscriptions required by application should be started + """ + + self._lp_minions = yield from launchpad.get_previous_lp( + self.log, self.loop) + + self._uptime_operdata_hdl = MCUptimeDtsOperdataHandler( + self._dts, + self._log, + self._start_time, + ) + yield from self._uptime_operdata_hdl.register() + + self._domain_config_hdl = MgmtDomainDtsConfigHandler( + self._dts, + self.loop, + self.log, + self._mgmt_domains, + self._vm_pool_mgr, + self._network_pool_mgr, + self._lp_minions, + ) + yield from self._domain_config_hdl.register() + + self._domain_rpc_hdl = MgmtDomainDtsRpcHandler( + self._dts, + self.log, + self._mgmt_domains, + ) + yield from self._domain_rpc_hdl.register() + + self._domain_operdata_hdl = MgmtDomainDtsOperdataHandler( + self._dts, + self.log, + self._mgmt_domains, + ) + yield from self._domain_operdata_hdl.register() + + self._vm_pool_config_hdl = ResourcePoolDtsConfigHandler( + self._dts, + self.log, + self._vm_pool_mgr, + "/vm-pool/pool", + ) + self._vm_pool_config_hdl.register() + + self._network_pool_config_hdl = ResourcePoolDtsConfigHandler( + self._dts, + self.log, + self._network_pool_mgr, + "/network-pool/pool", + ) + self._network_pool_config_hdl.register() + + self._vm_pool_operdata_hdl = VMPoolDtsOperdataHandler( + self._dts, + self.log, + self._vm_pool_mgr, + ) + yield from self._vm_pool_operdata_hdl.register() + + self._network_pool_operdata_hdl = NetworkPoolDtsOperdataHandler( + self._dts, + self.log, + self._network_pool_mgr, + ) + yield from self._network_pool_operdata_hdl.register() + + self._cloud_account_config_hdl = CloudAccountDtsConfigHandler( + self._dts, + self.loop, + self.log, + self._cloud_accounts, + ) + self._cloud_account_config_hdl.register() + + self._cloud_account_operdata_hdl = CloudAccountDtsOperdataHandler( + self._dts, + self.loop, + self.log, + self._cloud_accounts, + self._vm_pool_mgr, + self._network_pool_mgr, + ) + yield from self._cloud_account_operdata_hdl.register() + + self._sdn_account_config_hdl = SDNAccountDtsConfigHandler( + self._dts, + self.log, + self._sdn_accounts, + ) + self._sdn_account_config_hdl.register() + + @asyncio.coroutine + def run(self): + pass + + @asyncio.coroutine + def on_dts_state_change(self, state): + """Take action according to current dts state to transition + application into the corresponding application state + + Arguments + state - current dts state + """ + switch = { + rwdts.State.INIT: rwdts.State.REGN_COMPLETE, + rwdts.State.CONFIG: rwdts.State.RUN, + } + + handlers = { + rwdts.State.INIT: self.init, + rwdts.State.RUN: self.run, + } + + # Transition application to next state + handler = handlers.get(state, None) + if handler is not None: + yield from handler() + + # Transition dts to next state + next_state = switch.get(state, None) + if next_state is not None: + self._dts.handle.set_state(next_state) \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py new file mode 100644 index 0000000..c7b7962 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/salt.py @@ -0,0 +1,284 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import asyncio
+import json
+import logging
+import shlex
+import subprocess
+
+from . import util
+
+
+class SaltCommandFailed(util.CommandFailed):
+    pass
+
+
+class SaltCommandNotStarted(Exception):
+    pass
+
+
+class MinionConnectionNotFound(Exception):
+    pass
+
+
+def execute_salt_cmd(log, target, cmd):
+    saltcmd = "salt {target} cmd.run '{cmd}' --out txt".format(
+        target=target,
+        cmd=cmd
+        )
+    log.info("Executing command: %s", saltcmd)
+
+    try:
+        stdout = subprocess.check_output(
+            shlex.split(saltcmd),
+            universal_newlines=True,
+            )
+    except subprocess.CalledProcessError as e:
+        log.error("Failed to execute subprocess command %s (exception %s)", cmd, str(e))
+        raise
+
+    return stdout
+
+def get_launchpad_hostname(log, node_id):
+    '''
+    Find the hostname for the launchpad VM
+    '''
+    cmd = "hostnamectl --static"
+
+    try:
+        stdout = execute_salt_cmd(log, node_id, cmd)
+    except Exception as e:
+        log.error("Failed to get Launchpad hostname (exception %s)", str(e))
+        # Re-raise; there is no output to parse below
+        raise
+
+    for line in stdout.split("\n"):
+        (nodeid, hostname) = line.split(": ")
+        if nodeid is None:
+            raise SaltCommandFailed("Salt did not return proper node id (expected: %s) (received: %s)"
+                                    % (node_id, stdout))
+
+        log.info("command (%s) returned result (%s) (id: %s)", cmd, hostname, nodeid)
+        return hostname
+
+    raise SaltCommandFailed("Salt command did not return any output")
+
+@asyncio.coroutine
+def is_node_connected(log, loop, node_id):
+    try:
+        stdout, _ = yield from util.run_command(
+            loop, 'salt %s test.ping' % node_id
+            )
+    except (util.CommandFailed, subprocess.CalledProcessError):
+        # util.run_command signals a failed command with CommandFailed; a
+        # failed test.ping means the minion cannot be assumed connected.
+        log.warning("test.ping command failed against node_id: %s", node_id)
+        return False
+
+    up_minions = stdout.splitlines()
+    for line in up_minions:
+        if "True" in line:
+            return True
+
+    return False
+
+
+@asyncio.coroutine
+def find_job(log, loop, node_id, job_id):
+    cmd = "salt -t 60 {node_id} saltutil.find_job {job_id} --output json".format(
+        node_id=node_id, job_id=job_id)
+
+    try:
+        output, _ = yield from util.run_command(loop, cmd)
+    except util.CommandFailed as e:
+        raise SaltCommandFailed("Salt command failed: %s" % str(e))
+
+    if not output:
+        raise SaltCommandFailed("Empty response from command: %s" % cmd)
+
+    try:
+        resp = json.loads(output)
+    except ValueError:
+        raise SaltCommandFailed("Failed to parse find_job output: %s" % output)
+
+    if node_id not in resp:
+        raise SaltCommandFailed("Expected %s in find_job response" % node_id)
+
+    if "jid" in resp[node_id]:
+        return resp[node_id]
+
+    return None
+
+@asyncio.coroutine
+def find_running_minions(log, loop):
+    '''
+    Queries Salt for running jobs and returns a dict mapping node_id to job_id
+    '''
+    cmd = "salt -t 60 '*' saltutil.running --output json --out-indent -1"
+
+    try:
+        output, _ = yield from util.run_command(loop, cmd)
+    except util.CommandFailed as e:
+        raise SaltCommandFailed("Salt command failed: %s" % str(e))
+
+    if not output:
+        raise SaltCommandFailed("Empty response from command: %s" % cmd)
+
+    minions = {}
+    for line in output.split("\n"):
+        # Interested in only those minions which have a "tgt" attribute in the result,
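+        # as this points to a running target id minion. An illustrative line for
+        # a busy minion (ids assumed) looks roughly like:
+        #     {"minion-0": [{"tgt": "minion-0", "jid": "20160331120000000000", ...}]}
+        # Only the node id key and the job's "jid" are used below.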
+        if "tgt" in line:
+            try:
+                resp = json.loads(line)
+            except ValueError:
+                raise SaltCommandFailed("Failed to parse find_running_minions output: %s" % output)
+
+            # Get the job id ('jid') from the minion response and populate the dict,
+            # using node_id as key and job_id as value.
+            for key in resp:
+                minions[key] = resp[key][0]['jid']
+
+    log.info("Salt minions found: %s", minions)
+    return minions
+
+class SaltAsyncCommand(object):
+    def __init__(self, log, loop, target, command):
+        self._log = log
+        self._loop = loop
+        self._target = target
+        self._command = command
+
+        self._job_id = None
+
+    def _set_command(self, command):
+        self._command = command
+
+    def _set_job_id(self, job_id):
+        self._job_id = job_id
+
+    @asyncio.coroutine
+    def start(self):
+        cmd = "salt --async {target} cmd.run '{cmd}'".format(
+            target=self._target,
+            cmd=self._command,
+            )
+
+        stdout, stderr = yield from util.run_command(self._loop, cmd)
+
+        for line in stdout.split("\n"):
+            if "job ID:" in line:
+                job_id = line.split(" ")[-1]
+                if job_id == "0":
+                    raise SaltCommandFailed("Did not create a job id for async command: %s" % stdout)
+
+                self._job_id = job_id
+
+                self._log.debug("Salt command (%s) started on node (%s) (jid: %s)",
+                                cmd, self._target, self._job_id)
+                return
+
+        raise SaltCommandFailed("Did not find async job id in output")
+
+    @asyncio.coroutine
+    def is_running(self):
+        if not self._job_id:
+            raise SaltCommandNotStarted()
+
+        @asyncio.coroutine
+        def job_exists():
+            try:
+                job = yield from find_job(self._log, self._loop, self._target, self._job_id)
+            except SaltCommandFailed as e:
+                # A failing salt minion command is not a reliable indication that
+                # the process actually died.
+                self._log.warning("Ignoring find salt job %s error: %s", self._job_id, str(e))
+                return True
+
+            return job is not None
+
+        for _ in range(3):
+            if (yield from job_exists()):
+                return True
+
+        return False
+
+    @asyncio.coroutine
+    def wait(self):
+        while True:
+            is_running = yield from self.is_running()
+            if not is_running:
+                return
+
+            # A bare asyncio.sleep() call only creates a coroutine; it must be
+            # yielded from to actually pause between polls.
+            yield from asyncio.sleep(.25)
+
+    @asyncio.coroutine
+    def stop(self):
+        if not self._job_id:
+            raise SaltCommandNotStarted()
+
+        cmd = "salt {target} saltutil.term_job {job_id}".format(
+            target=self._target,
+            job_id=self._job_id,
+            )
+
+        yield from util.run_command(self._loop, cmd)
+
+    @asyncio.coroutine
+    def kill(self):
+        if not self._job_id:
+            raise SaltCommandNotStarted()
+
+        cmd = "salt {target} saltutil.kill_job {job_id}".format(
+            target=self._target,
+            job_id=self._job_id,
+            )
+
+        yield from util.run_command(self._loop, cmd)
+
+
+@asyncio.coroutine
+def test_salt(loop, node):
+    logger.debug("Checking if node is connected")
+    assert (yield from is_node_connected(logger, loop, node))
+
+    logger.debug("Running sleep 10 command")
+    async_cmd = SaltAsyncCommand(logger, loop, node, "sleep 10")
+    yield from async_cmd.start()
+
+    logger.debug("Check if sleep command is running")
+    is_running = yield from async_cmd.is_running()
+    assert is_running
+
+    logger.debug("Stop the sleep command")
+    yield from async_cmd.stop()
+
+    logger.debug("Check if sleep command is no longer running")
+    is_running = yield from async_cmd.is_running()
+    assert not is_running
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("salt-test")
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-n", "--node", required=True, help="A connected minion")
+    args = parser.parse_args()
+
+    loop = asyncio.get_event_loop()
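+    # Example standalone invocation (hypothetical minion id):
+    #     python3 salt.py --node launchpad-vm-0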
loop.run_until_complete(test_salt(loop, args.node)) \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py new file mode 100644 index 0000000..a3a16b4 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rift/tasklets/rwmctasklet/util.py @@ -0,0 +1,38 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +import asyncio +import subprocess + + +class CommandFailed(Exception): + pass + + +@asyncio.coroutine +def run_command(loop, cmd): + cmd_proc = yield from asyncio.create_subprocess_shell( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, loop=loop + ) + stdout, stderr = yield from cmd_proc.communicate() + if cmd_proc.returncode != 0: + raise CommandFailed("Starting async command (%s) failed (rc=%s). (stderr: %s)", + cmd, cmd_proc.returncode, stderr) + + return stdout.decode(), stderr.decode() \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py b/modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py new file mode 100755 index 0000000..b81b2f2 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/rwmctasklet/rwmctasklet.py @@ -0,0 +1,30 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + +# Workaround RIFT-6485 - rpmbuild defaults to python2 for +# anything not in a site-packages directory so we have to +# install the plugin implementation in site-packages and then +# import it from the actual plugin. 
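+# The subclass below simply re-exports the site-packages implementation under
+# the module path the plugin is loaded from.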
+ +import rift.tasklets.rwmctasklet + +class Tasklet(rift.tasklets.rwmctasklet.MissionControlTasklet): + pass + +# vim: sw=4 \ No newline at end of file diff --git a/modules/core/mano/rwmc/plugins/yang/CMakeLists.txt b/modules/core/mano/rwmc/plugins/yang/CMakeLists.txt new file mode 100644 index 0000000..41bbba1 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/CMakeLists.txt @@ -0,0 +1,34 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Austin Cormier +# Creation Date: 2015/08/01 +# + +## +# Yang targets +## + +rift_add_yang_target( + TARGET rwmc_yang + YANG_FILES rw-mc.yang + LIBRARIES + rwcloud_yang_gen + DEPENDS + rwcloud_yang + COMPONENT ${PKG_LONG_NAME} + LIBRARIES + rwsdn_yang_gen +) + +## +# Install the XML file +## +install( + FILES + ../cli/cli_rwmc.xml + ../cli/cli_rwmc_schema_listing.txt + DESTINATION usr/data/manifest + COMPONENT ${PKG_LONG_NAME} +) + diff --git a/modules/core/mano/rwmc/plugins/yang/Makefile b/modules/core/mano/rwmc/plugins/yang/Makefile new file mode 100644 index 0000000..345c5f3 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/Makefile @@ -0,0 +1,24 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Tim Mortsolf +# Creation Date: 11/25/2013 +# + +## +# Define a Makefile function: find_upwards(filename) +# +# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc., +# until the file is found or the root directory is reached +## +find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done)) + +## +# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top +## +makefile.top := $(call find_upward, "Makefile.top") + +## +# If Makefile.top was found, then include it +## +include $(makefile.top) diff --git a/modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml b/modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml new file mode 100755 index 0000000..2bd7b84 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/rw-mc.cli.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang b/modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang new file mode 100644 index 0000000..d527230 --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/rw-mc.tailf.yang @@ -0,0 +1,78 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + * + */ + +module rw-mc-annotation +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-mc-annotation"; + prefix "rw-mc-ann"; + + import rw-mc + { + prefix rw-mc; + } + + import tailf-common { + prefix tailf; + } + + tailf:annotate "/rw-mc:opdata" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:start-launchpad" { + tailf:actionpoint rw_action; + } + + tailf:annotate "/rw-mc:mgmt-domain/rw-mc:domain/rw-mc:launchpad" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:stop-launchpad" { + tailf:actionpoint rw_action; + } + + tailf:annotate "/rw-mc:vm-pool/rw-mc:pool/rw-mc:available" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:network-pool/rw-mc:pool/rw-mc:available" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:network-pool/rw-mc:pool/rw-mc:assigned-detail" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:vm-pool/rw-mc:pool/rw-mc:mgmt-domain" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:vm-pool/rw-mc:pool/rw-mc:assigned-detail" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:network-pool/rw-mc:pool/rw-mc:mgmt-domain" { + 
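+    // (As elsewhere in this module: "base_show" callpoints hand operational
+    // reads to the tasklet's DTS publishers, while "rw_action" actionpoints
+    // route the start-launchpad/stop-launchpad RPCs.)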
tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:cloud-account/rw-mc:account/rw-mc:pools" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:cloud-account/rw-mc:account/rw-mc:resources" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:cloud-account/rw-mc:account/rw-mc:connection" { + tailf:callpoint base_show; + } + + tailf:annotate "/rw-mc:mission-control" { + tailf:callpoint base_show; + } +} diff --git a/modules/core/mano/rwmc/plugins/yang/rw-mc.yang b/modules/core/mano/rwmc/plugins/yang/rw-mc.yang new file mode 100755 index 0000000..a16a08e --- /dev/null +++ b/modules/core/mano/rwmc/plugins/yang/rw-mc.yang @@ -0,0 +1,519 @@ + +/* + * + * (c) Copyright RIFT.io, 2013-2016, All Rights Reserved + * + */ + + + +/** + * @file rw-mc.yang + * @author Austin Cormier + * @author Joshua Downer + * @date 2015/07/30 + * @brief Mission Control Yang + */ + +module rw-mc +{ + namespace "http://riftio.com/ns/riftware-1.0/rw-mc"; + prefix "rw-mc"; + + + import rw-pb-ext { + prefix "rwpb"; + } + + import rw-cli-ext { + prefix "rwcli"; + } + + import rw-yang-types { + prefix "rwt"; + } + + import rwcal { + prefix "rwcal"; + } + + import rwsdn { + prefix "rwsdn"; + } + + import rw-notify-ext { + prefix "rwnotify"; + } + + import rw-log { + prefix "rwlog"; + } + + revision 2014-07-30 { + description + "Initial revision."; + } + + typedef launchpad-state { + description "State of the launchpad within the mgmt-domain"; + type enumeration { + enum pending; + enum crashed; + enum stopping; + enum stopped; + enum starting; + enum configuring; + enum started; + } + } + + container mission-control { + rwpb:msg-new Uptime; + description "Show Mission Control related information"; + config false; + + leaf uptime { + description "Show the Mission Control uptime"; + type string; + } + + leaf create-time { + description + "Creation timestamp of the Mission Control. 
+ The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + } + + container sdn { + rwpb:msg-new SDNConfig; + list account { + rwpb:msg-new SDNAccount; + description "Configure SDN Accounts"; + + key "name"; + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + uses rwsdn:sdn-provider-auth; + } + } + + container cloud-account { + rwpb:msg-new CloudConfig; + list account { + rwpb:msg-new CloudAccount; + description "Configure Cloud Accounts"; + + max-elements 8; + key "name"; + + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + uses rwcal:provider-auth; + + container resources { + rwpb:msg-new CloudResources; + description "The list of available resources belonging to this cloud account"; + config false; + + list vm { + description "The list of available VM's belonging to this cloud account"; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + leaf available { + type boolean; + } + } + + list network { + description "The list of available Network's belonging to this cloud account"; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + leaf available { + type boolean; + } + } + + } + + container pools { + rwpb:msg-new CloudPools; + description "The lists of various pools associated with this cloud account"; + config false; + + list vm { + key "name"; + leaf name{ + type string; + } + } + list network { + key "name"; + leaf name{ + type string; + } + } + } + + container connection { + rwpb:msg-new CloudStatus; + description "The status of Cloud Account credientials"; + config false; + + leaf status { + description "Status of Cloud Account's current credentials"; + type string; + } + + leaf details { + description "Details of Cloud Account's connection status"; + type string; + } + } + } + } + + grouping common-pool-attrs { + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + leaf mgmt-domain { + description "Mgmt-domain this pool is assigned to"; + config false; + type leafref { + path "/rw-mc:mgmt-domain/rw-mc:domain/name"; + } + } + + leaf cloud-account { + description "The cloud account to use for this vm pool"; + mandatory true; + type leafref { + path "../../../cloud-account/account/name"; + } + } + + leaf dynamic-scaling { + description "Denotes whether the pool is Static or can grow Dynamically"; + type boolean; + default false; + } + } + + container vm-pool { + list pool { + rwpb:msg-new VmPool; + description "Configure VM Pools"; + + max-elements 128; + key "name"; + + uses common-pool-attrs; + + list available { + description "The list of available VM's belonging to this pools cloud account"; + config false; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + } + + list assigned { + description "The list of created VM's belonging to this pool"; + key "id"; + leaf id { + type string; + } + } + + list assigned-detail { + description "The list of created VM's belonging to this pool"; + config false; + key "id"; + leaf id { + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + } + + container resource-info { + description "Detailed resource information provided by the CAL"; + rwpb:msg-new VmPoolResourceInfo; + config false; + + uses rwcal:vm-info-item; + } + } + } + } + + container network-pool { + list pool { + rwpb:msg-new NetworkPool; + description "Configure Network Pools"; + + max-elements 128; + key "name"; + + uses 
common-pool-attrs; + + list available { + description "The list of available Networks's belonging to this pools cloud account"; + config false; + key "id"; + leaf id { + type string; + } + leaf name { + type string; + } + } + + list assigned { + description "The list of created networks's belonging to this pool"; + key "id"; + leaf id { + type string; + } + } + + list assigned-detail { + description "The list of created Networks belonging to this pool"; + config false; + key "id"; + leaf id { + type string; + } + + leaf is_reserved { + description "Flag indicating whether resource is reserved"; + type boolean; + } + + container resource-info { + description "Detailed resource information provided by the CAL"; + rwpb:msg-new NetworkPoolResourceInfo; + + uses rwcal:network-info-item; + } + } + } + } + + container mgmt-domain { + rwpb:msg-new MgmtDomainConfig; + rwcli:new-mode "mgmt-domain"; + description "Configure Management Domain"; + + list domain { + rwpb:msg-new MgmtDomain; + key "name"; + + leaf name { + mandatory true; + type string { + length "1..255"; + } + } + + container pools { + rwpb:msg-new MgmtDomainPools; + description "The lists of various pools associated with this mgmt domain"; + + list vm { + key "name"; + leaf name { + type leafref { + path "/rw-mc:vm-pool/rw-mc:pool/name"; + } + } + } + list network { + key "name"; + leaf name { + type leafref { + path "/rw-mc:network-pool/rw-mc:pool/name"; + } + } + } + } + + container launchpad { + rwpb:msg-new MgmtDomainLaunchpad; + config false; + leaf state { + description "State of the mgmt-domain's launchpad"; + type launchpad-state; + } + + leaf state-details { + description "Details of the Launchpad's current state"; + type string; + } + + leaf ip_address { + description "VM IP address in use by the launchpad"; + type string; + } + + leaf uptime { + description "Show the Launchpad uptime"; + type string; + } + + leaf create-time { + description + "Creation timestamp of this Launchpad. 
+ The timestamp is expressed as seconds + since unix epoch - 1970-01-01T00:00:00Z"; + type uint32; + } + } + } + } + + container opdata { + rwpb:msg-new Opdata; + config false; + list foodata { + key name; + leaf name { + type string; + } + } + } + + rpc start-launchpad { + input { + rwpb:msg-new StartLaunchpadInput; + leaf mgmt-domain { + mandatory true; + type leafref { + path "/rw-mc:mgmt-domain/rw-mc:domain/name"; + } + } + } + } + + rpc stop-launchpad { + input { + rwpb:msg-new StopLaunchpadInput; + leaf mgmt-domain { + mandatory true; + type leafref { + path "/rw-mc:mgmt-domain/rw-mc:domain/name"; + } + } + } + } + + + /* + * Generic Logger Log Events - ID space 120000 - 120099 + */ + notification debug { + rwpb:msg-new Debug; + rwnotify:log-event-id 120000; + description + "Generic Debug Log"; + uses rwlog:severity-debug; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification info { + rwpb:msg-new Info; + rwnotify:log-event-id 120001; + description + "Generic Info Log"; + uses rwlog:severity-info; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification warn { + rwpb:msg-new Warn; + rwnotify:log-event-id 120002; + description + "Generic Warning Log"; + uses rwlog:severity-warning; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification error { + rwpb:msg-new Error; + rwnotify:log-event-id 120003; + description + "Generic Warning Log"; + uses rwlog:severity-error; + leaf category { + type string; + } + leaf log { + type string; + } + } + + notification critical { + rwpb:msg-new Critical; + rwnotify:log-event-id 120004; + description + "Generic Critical Log"; + uses rwlog:severity-critical; + leaf category { + type string; + } + leaf log { + type string; + } + } + + /* + * END - generic log events + */ + +} diff --git a/modules/core/mano/rwmc/ra/CMakeLists.txt b/modules/core/mano/rwmc/ra/CMakeLists.txt new file mode 100644 index 0000000..fe900b1 --- /dev/null +++ b/modules/core/mano/rwmc/ra/CMakeLists.txt @@ -0,0 +1,45 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Paul Laidler +# Creation Date: 09/16/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS + mission_control_systest + mission_control_delete_systest + mission_control_negative_systest + mission_control_negative_cloud_account_systest + mission_control_negative_mgmt_domain_systest + mission_control_negative_vmpool_systest + mission_control_reload_systest + DESTINATION usr/rift/systemtest/mission_control + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + pytest/conftest.py + pytest/test_mission_control.py + pytest/test_mission_control_delete.py + pytest/test_mission_control_negative.py + pytest/test_mission_control_negative_cloud_account.py + pytest/test_mission_control_negative_mgmt_domain.py + pytest/test_mission_control_negative_vmpool.py + DESTINATION usr/rift/systemtest/pytest/mission_control + COMPONENT ${PKG_LONG_NAME}) + +install( + FILES + racfg/mission_control_systest_cloudsim.racfg + racfg/mission_control_systest_openstack.racfg + racfg/mission_control_delete_systest_cloudsim.racfg + racfg/mission_control_reload_systest_openstack.racfg + racfg/mission_control_systest_cloudsim_negative.racfg + racfg/mission_control_systest_openstack_negative.racfg + DESTINATION + usr/rift/systemtest/mission_control + COMPONENT ${PKG_LONG_NAME}) + diff --git a/modules/core/mano/rwmc/ra/mission_control_delete_systest b/modules/core/mano/rwmc/ra/mission_control_delete_systest new file 
mode 100755 index 0000000..9c7d177 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_delete_systest @@ -0,0 +1,43 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/02 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_delete.py" + +test_prefix="mission_control_delete_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} + diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest b/modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest new file mode 100755 index 0000000..5536e08 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_cloud_account_systest @@ -0,0 +1,42 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_cloud_account.py" + +test_prefix="mission_control_negative_cloud_account_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest b/modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest new file mode 100755 index 0000000..174f05c --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_mgmt_domain_systest @@ -0,0 +1,42 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_mgmt_domain.py" + +test_prefix="mission_control_negative_mgmt_domain_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? 
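+    # (A non-zero rc below aborts the remaining repeat_system cycles.)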
+ echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_systest b/modules/core/mano/rwmc/ra/mission_control_negative_systest new file mode 100755 index 0000000..407f3b7 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_systest @@ -0,0 +1,44 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v -p no:cacheprovider \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_vmpool.py \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_cloud_account.py \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_mgmt_domain.py" + +test_prefix="mission_control_negative_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest b/modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest new file mode 100755 index 0000000..0f21832 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_negative_vmpool_systest @@ -0,0 +1,42 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/12/07 +# + +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control_negative_vmpool.py" + +test_prefix="mission_control_negative_vmpool_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? 
+ echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_reload_systest b/modules/core/mano/rwmc/ra/mission_control_reload_systest new file mode 100755 index 0000000..54f5df8 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_reload_systest @@ -0,0 +1,45 @@ +#!/bin/bash +# +# +# Author(s): Varun Prasad +# Creation Date: 2015/12/22 +# +# Helper script for invoking the mission control system test using the systest_wrapper +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh + +SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'not Teardown' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py" + +REBOOT_SCRIPT_TEST="py.test -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or Teardown' \ + ${PYTEST_DIR}/mission_control/test_mission_control.py" + +test_prefix="mission_control_reload_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/mission_control_systest b/modules/core/mano/rwmc/ra/mission_control_systest new file mode 100755 index 0000000..2bbd951 --- /dev/null +++ b/modules/core/mano/rwmc/ra/mission_control_systest @@ -0,0 +1,43 @@ +#!/bin/bash +# +# +# Author(s): Paul Laidler +# Creation Date: 2015/09/15 +# +source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh +restconf=true + +SCRIPT_TEST="py.test -v \ + ${PYTEST_DIR}/mission_control/test_mission_control.py \ + ${PYTEST_DIR}/mission_control/launchpad/test_startstop.py" + +test_prefix="mission_control_systest" +test_cmd="" +repeat_system=1 + +# Parse commonline argument and set test variables +parse_args "${@}" + +# Construct the test command based on the test variables +construct_test_comand + +# Execute from pytest root directory to pick up conftest.py +cd "${PYTEST_DIR}" + +test_rc=0 +for i in $(seq ${repeat_system}); +do + echo "CYCLE: $i" + eval ${test_cmd} + test_rc=$? + echo "DEBUG: Got test command rc: $test_rc" + if [[ ${test_rc} -ne 0 ]]; then + echo "Exiting with test_rc: $test_rc" + break + fi +done + +# unit test XML files are converted to pretty printed format +pretty_print_junit_xml + +exit ${test_rc} diff --git a/modules/core/mano/rwmc/ra/pytest/conftest.py b/modules/core/mano/rwmc/ra/pytest/conftest.py new file mode 100644 index 0000000..375016d --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/conftest.py @@ -0,0 +1,202 @@ + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+import pytest
+import os
+import subprocess
+import sys
+
+import rift.auto.log
+import rift.auto.session
+import rift.vcs.vcs
+import logging
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwMcYang', '1.0')
+
+from gi.repository import RwMcYang, RwCloudYang
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account_name():
+    '''fixture which returns the name used to identify the cloud account'''
+    return 'cloud-0'
+
+@pytest.fixture(scope='session', autouse=True)
+def mgmt_domain_name():
+    '''fixture which returns the name used to identify the mgmt_domain'''
+    return 'mgmt-0'
+
+@pytest.fixture(scope='session', autouse=True)
+def vm_pool_name():
+    '''fixture which returns the name used to identify the vm resource pool'''
+    return 'vm-0'
+
+@pytest.fixture(scope='session', autouse=True)
+def network_pool_name():
+    '''fixture which returns the name used to identify the network resource pool'''
+    return 'net-0'
+
+@pytest.fixture(scope='session', autouse=True)
+def port_pool_name():
+    '''fixture which returns the name used to identify the port resource pool'''
+    return 'port-0'
+
+@pytest.fixture(scope='session', autouse=True)
+def sdn_account_name():
+    '''fixture which returns the name used to identify the sdn account'''
+    return 'sdn-0'
+
+@pytest.fixture(scope='session', autouse=True)
+def sdn_account_type():
+    '''fixture which returns the account type used by the sdn account'''
+    return 'odl'
+
+@pytest.fixture(scope='session', autouse=True)
+def _riftlog_scraper_session(log_manager, confd_host):
+    '''Fixture which returns an instance of rift.auto.log.FileSource to scrape riftlog
+
+    Arguments:
+        log_manager - manager of logging sources and sinks
+        confd_host - host on which confd is running (mgmt_ip)
+    '''
+    scraper = rift.auto.log.FileSource(host=confd_host, path='/var/log/rift/rift.log')
+    scraper.skip_to('Configuration management startup complete.')
+    log_manager.source(source=scraper)
+    return scraper
+
+@pytest.fixture(scope='session')
+def cloud_module(standalone_launchpad):
+    '''Fixture containing the module which defines cloud account
+
+    Depending on whether or not the system is being run with a standalone
+    launchpad, a different module will be used to configure the cloud
+    account
+
+    Arguments:
+        standalone_launchpad - fixture indicating if the system is being run with a standalone launchpad
+
+    Returns:
+        module to be used when configuring a cloud account
+    '''
+    cloud_module = RwMcYang
+    if standalone_launchpad:
+        cloud_module = RwCloudYang
+    return cloud_module
+
+@pytest.fixture(scope='session')
+def cloud_xpath(standalone_launchpad):
+    '''Fixture containing the xpath that should be used to configure a cloud account
+
+    Depending on whether or not the system is being run with a standalone
+    launchpad, a different xpath will be used to configure the cloud
+    account
+
+    Arguments:
+        standalone_launchpad - fixture indicating if the system is being run with a standalone launchpad
+
+    Returns:
+        xpath to be used when configuring a cloud account
+    '''
+    xpath = '/cloud-account/account'
+    if standalone_launchpad:
+        xpath = '/cloud/account'
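+    # Mirrors the cloud_module fixture above: a standalone launchpad models the
+    # account at '/cloud/account' (RwCloudYang), mission control at
+    # '/cloud-account/account' (RwMcYang).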
+    return xpath
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account(cloud_module, cloud_account_name, cloud_host, cloud_type):
+    '''fixture which returns an instance of RwMcYang.CloudAccount
+
+    Arguments:
+        cloud_module - fixture: module defining cloud account
+        cloud_account_name - fixture: name used for cloud account
+        cloud_host - fixture: cloud host address
+        cloud_type - fixture: cloud account type
+
+    Returns:
+        An instance of CloudAccount
+    '''
+    account = None
+
+    if cloud_type == 'lxc':
+        account = cloud_module.CloudAccount.from_dict({
+            "name": cloud_account_name,
+            "account_type": "cloudsim"})
+
+    elif cloud_type == 'openstack':
+        username = 'pluto'
+        password = 'mypasswd'
+        auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
+        project_name = os.getenv('PROJECT_NAME', 'demo')
+        mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+        account = cloud_module.CloudAccount.from_dict({
+            'name': cloud_account_name,
+            'account_type': 'openstack',
+            'openstack': {
+                'admin': True,
+                'key': username,
+                'secret': password,
+                'auth_url': auth_url,
+                'tenant': project_name,
+                'mgmt_network': mgmt_network}})
+
+    return account
+
+@pytest.fixture(scope='session')
+def _launchpad_scraper_session(request, log_manager, mgmt_domain_name):
+    '''fixture which returns an instance of rift.auto.log.FileSource to scrape the launchpad
+
+    Arguments:
+        log_manager - manager of log sources and sinks
+        mgmt_domain_name - the management domain created for the launchpad
+    '''
+    if request.config.getoption("--lp-standalone"):
+        return
+
+    scraper = rift.auto.log.FileSource(host=None, path='/var/log/launchpad_console.log')
+    log_manager.source(source=scraper)
+    return scraper
+
+@pytest.fixture(scope='function', autouse=False)
+def _connect_launchpad_scraper(request, _launchpad_scraper_session, mgmt_session, mgmt_domain_name, standalone_launchpad):
+    '''Determines the address of the launchpad and connects the launchpad scraper to it
+    Needed because the launchpad address isn't known at the start of the test session.
+
+    Arguments:
+        mgmt_session - management interface session
+        _launchpad_scraper_session - scraper responsible for collecting launchpad_console log
+        mgmt_domain_name - mgmt-domain in which the launchpad is located
+    '''
+    if standalone_launchpad:
+        return
+
+    if not _launchpad_scraper_session.connected():
+        proxy = mgmt_session.proxy(RwMcYang)
+        launchpad_address = proxy.get("/mgmt-domain/domain[name='%s']/launchpad/ip_address" % mgmt_domain_name)
+        if launchpad_address:
+            _launchpad_scraper_session.connect(launchpad_address)
+
+@pytest.fixture(scope='session')
+def launchpad_scraper(_launchpad_scraper_session):
+    '''Fixture exposing the scraper used to scrape the launchpad console log
+
+    Arguments:
+        _launchpad_scraper_session - instance of rift.auto.log.FileSource targeting the launchpad console log
+    '''
+    return _launchpad_scraper_session
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control.py
new file mode 100755
index 0000000..666fc9b
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#
+#
+
+@file test_mission_control.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/19/2015
+@brief System test of basic mission control functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+
+from gi.repository import RwMcYang
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    '''fixture which returns a proxy to RwMcYang
+
+    Arguments:
+        request - pytest fixture request
+        mgmt_session - mgmt_session fixture - instance of a rift.auto.session class
+    '''
+    return mgmt_session.proxy(RwMcYang)
+
+
+@pytest.mark.setup('launchpad')
+@pytest.mark.incremental
+class TestMissionControlSetup:
+    def test_create_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type):
+        '''Configure sdn account
+
+        Asserts:
+            SDN name and account type.
+        '''
+        sdn_account = RwMcYang.SDNAccount(
+                name=sdn_account_name,
+                account_type=sdn_account_type)
+        xpath = "/sdn/account[name='%s']" % sdn_account_name
+        proxy.create_config(xpath, sdn_account)
+
+        sdn_account = proxy.get(xpath)
+        assert sdn_account.account_type == sdn_account_type
+        assert sdn_account.name == sdn_account_name
+
+    def test_create_cloud_account(self, mgmt_session, cloud_module, cloud_xpath, cloud_account):
+        '''Configure a cloud account
+
+        Asserts:
+            Cloud name and cloud type details
+        '''
+        proxy = mgmt_session.proxy(cloud_module)
+        proxy.create_config(cloud_xpath, cloud_account)
+        xpath = '{}[name="{}"]'.format(cloud_xpath, cloud_account.name)
+        response = proxy.get(xpath)
+        assert response.name == cloud_account.name
+        assert response.account_type == cloud_account.account_type
+
+
+    @pytest.mark.feature('mission-control')
+    def test_create_mgmt_domain(self, proxy, mgmt_domain_name):
+        '''Configure mgmt domain
+
+        Asserts:
+            The launchpad configuration is created and updated successfully.
+        '''
+        xpath = '/mgmt-domain/domain'
+        domain_config = RwMcYang.MgmtDomain(
+                name=mgmt_domain_name)
+        proxy.create_config(xpath, domain_config)
+
+        xpath += "[name='{}']".format(mgmt_domain_name)
+        proxy.merge_config(xpath, domain_config)
+
+        response = proxy.get(xpath)
+        assert response.launchpad.state == 'pending'
+
+    @pytest.mark.feature('mission-control')
+    def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name):
+        '''Configure vm pool
+
+        Asserts:
+            Newly configured vm pool has no resources assigned to it
+        '''
+        pool_config = RwMcYang.VmPool(
+                name=vm_pool_name,
+                cloud_account=cloud_account_name,
+                dynamic_scaling=True,
+                )
+        proxy.create_config('/vm-pool/pool', pool_config)
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert assigned_ids == []  # pool contained resources before any were assigned
+
+
+    @pytest.mark.feature('mission-control')
+    def test_assign_vm_resource_to_vm_pool(self, proxy, cloud_account_name, vm_pool_name, launchpad_vm_id):
+        '''Configure a vm resource by adding it to a vm pool
+
+        Asserts:
+            Cloud account has available resources
+            VM pool has available resources
+            Cloud account and vm pool agree on available resources
+            Configured resource is reflected as assigned in operational data post assignment
+        '''
+        account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name)
+        if launchpad_vm_id:
+            cloud_vm_ids = [vm.id for vm in account.resources.vm if vm.id == launchpad_vm_id]
+        else:
+            cloud_vm_ids = [vm.id for vm in account.resources.vm]
+        assert cloud_vm_ids != []
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        if launchpad_vm_id:
+            available_ids = [vm.id for vm in pool.available if vm.id == launchpad_vm_id]
+        else:
+            available_ids = [vm.id for vm in pool.available]
+        # NOTE: Desired API - request for a list of leaf elements
+        # available_ids = proxy.get("/vm-pool/pool[name='%s']/available/id" % vm_pool_name)
+        assert available_ids != []  # Assert pool has available resources
+        assert set(cloud_vm_ids).difference(set(available_ids)) == set([])  # Assert not split brain
+
+        pool_config = RwMcYang.VmPool.from_dict({
+            'name':vm_pool_name,
+            'cloud_account':cloud_account_name,
+            'dynamic_scaling': True,
+            'assigned':[{'id':available_ids[0]}]})
+        proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config)
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert available_ids[0] in assigned_ids  # Configured resource shows as assigned
+
+
+    @pytest.mark.feature('mission-control')
+    def test_create_network_pool(self, proxy, cloud_account_name, network_pool_name):
+        '''Configure network pool
+
+        Asserts:
+            Newly configured network pool has no resources assigned to it
+        '''
+        pool_config = RwMcYang.NetworkPool(
+                name=network_pool_name,
+                cloud_account=cloud_account_name,
+                dynamic_scaling=True,
+                )
+
+        proxy.create_config('/network-pool/pool', pool_config)
+
+        pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name)
+        assigned_ids = [network.id for network in pool.assigned]
+        assert assigned_ids == []  # pool contained resources before any were assigned
+
+
+    @pytest.mark.feature('mission-control')
+    def test_assign_network_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name):
+        '''Configure mgmt_domain by adding a network pool to it
+        '''
+        pool_config = RwMcYang.MgmtDomainPools_Network(name=network_pool_name)
+
proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain_name, pool_config) + + + @pytest.mark.feature('mission-control') + def test_assign_vm_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name): + '''Configure mgmt_domain by adding a VM pool to it + ''' + pool_config = RwMcYang.MgmtDomainPools_Vm(name=vm_pool_name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain_name, pool_config) + + + @pytest.mark.feature('mission-control') + def test_wait_for_launchpad_started(self, proxy, mgmt_domain_name): + '''Wait for the launchpad to start + + Additionally begins the launchpad scraper. + + Asserts: + Launchpad reaches state 'started' + ''' + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=400, + fail_on=['crashed']) + + +@pytest.mark.incremental +@pytest.mark.depends('launchpad') +class TestMissionControl: + + def test_show_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type): + '''Showing sdn account configuration + + Asserts: + sdn_account.account_type is what was configured + ''' + xpath = "/sdn/account[name='%s']" % sdn_account_name + sdn_account = proxy.get_config(xpath) + assert sdn_account.account_type == sdn_account_type + + + @pytest.mark.feature('mission-control') + def test_launchpad_stats(self, proxy, mgmt_domain_name): + '''Verify launchpad stats + + Asserts: + Create time and uptime are configured for launchpad + ''' + xpath = "/mgmt-domain/domain[name='{}']/launchpad/uptime".format(mgmt_domain_name) + uptime = proxy.get(xpath) + assert len(uptime) > 0 + + xpath = "/mgmt-domain/domain[name='{}']/launchpad/create-time".format(mgmt_domain_name) + create_time = proxy.get(xpath) + assert int(create_time) > 0 + + @pytest.mark.feature('mission-control') + def test_mission_control_stats(self, proxy, mgmt_domain_name): + '''Verify Mission Control stats + + Asserts: + Create time and uptime are configured for MissionControl + ''' + xpath = "/mission-control/uptime" + uptime = proxy.get(xpath) + assert len(uptime) > 0 + + xpath = "/mission-control/create-time" + create_time = proxy.get(xpath) + assert int(create_time) > 0 + +@pytest.mark.teardown('launchpad') +@pytest.mark.incremental +class TestMissionControlTeardown: + + @pytest.mark.feature('mission-control') + def test_stop_launchpad(self, proxy, mgmt_domain_name): + '''Invoke stop launchpad RPC + + Asserts: + Launchpad begins test in state 'started' + Launchpad finishes test in state 'stopped' + ''' + + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'started', + timeout=10, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name, + 'stopped', + timeout=120, + fail_on=['crashed']) + + + @pytest.mark.feature('mission-control') + def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name): + '''Unconfigure mgmt domain: remove a vm pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain_name, vm_pool_name) + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_remove_network_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name): + '''Unconfigure mgmt_domain: remove a network pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain_name, 
network_pool_name) + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_delete_mgmt_domain(self, proxy, mgmt_domain_name): + '''Unconfigure mgmt_domain: delete mgmt_domain''' + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool: remove a vm resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id) + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_delete_vm_pool(self, proxy, vm_pool_name): + '''Unconfigure vm_pool''' + xpath = "/vm-pool/pool[name='%s']" % vm_pool_name + proxy.delete_config(xpath) + + @pytest.mark.feature('mission-control') + def test_delete_network_pool(self, proxy, network_pool_name): + '''Unconfigure network pool''' + xpath = "/network-pool/pool[name='%s']" % network_pool_name + proxy.delete_config(xpath) + + def test_delete_odl_sdn_account(self, proxy, sdn_account_name): + '''Unconfigure sdn account''' + xpath = "/sdn/account[name='%s']" % sdn_account_name + proxy.delete_config(xpath) + + def test_delete_cloud_account(self, mgmt_session, cloud_module, cloud_xpath, cloud_account_name): + '''Unconfigure cloud_account''' + proxy = mgmt_session.proxy(cloud_module) + xpath = "{}[name='{}']".format(cloud_xpath, cloud_account_name) + proxy.delete_config(xpath) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py new file mode 100755 index 0000000..91833c7 --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_delete.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +# +# + +@file test_mission_control_delete.py +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 06/19/2015 +@brief System test exercising delete of mission control configuration +""" + +import pytest +import rift.auto.proxy +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session class + ''' + return mgmt_session.proxy(RwMcYang) + +@pytest.fixture(scope='module') +def mgmt_domain(mgmt_domain_name): + mgmt_domain = RwMcYang.MgmtDomain(name=mgmt_domain_name) + return mgmt_domain + +@pytest.fixture(scope='module') +def vm_pool(vm_pool_name, cloud_account_name): + vm_pool = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + return vm_pool + +@pytest.fixture(scope='module') +def network_pool(network_pool_name, cloud_account_name): + network_pool = RwMcYang.NetworkPool( + name=network_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + return network_pool + +@pytest.fixture(scope='module') +def sdn_account(sdn_account_name, sdn_account_type): + sdn_account = RwMcYang.SDNAccount( + name=sdn_account_name, + account_type=sdn_account_type, + ) + return sdn_account + +@pytest.fixture(scope='function', autouse=True) +def launchpad_setup(request, proxy, cloud_account, mgmt_domain, vm_pool, network_pool, sdn_account): + def _teardown(): + launchpad_state = proxy.get("/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name) + if launchpad_state: + if launchpad_state in ['configuring', 'starting']: + launchpad_state = proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'started', + timeout=200, + fail_on=['crashed']) + + if launchpad_state == 'started': + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain.name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + launchpad_state = proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'stopped', + timeout=200, + fail_on=['crashed']) + + if proxy.get_config("/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain.name, vm_pool.name)): + proxy.delete_config("/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain.name, vm_pool.name)) + + if proxy.get_config("/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain.name, network_pool.name)): + proxy.delete_config("/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain.name, network_pool.name)) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) + if pool: + for vm_id in [vm.id for vm in pool.assigned]: + proxy.delete_config("/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, vm_id)) + + if proxy.get_config("/vm-pool/pool[name='%s']" % vm_pool.name): + proxy.delete_config("/vm-pool/pool[name='%s']" % vm_pool.name) + + if proxy.get_config("/network-pool/pool[name='%s']" % network_pool.name): + proxy.delete_config("/network-pool/pool[name='%s']" % network_pool.name) + + if proxy.get_config("/mgmt-domain/domain[name='%s']" % mgmt_domain.name): + proxy.delete_config("/mgmt-domain/domain[name='%s']" % mgmt_domain.name) + + if proxy.get_config("/cloud-account/account[name='%s']" % cloud_account.name): + proxy.delete_config("/cloud-account/account[name='%s']" % 
cloud_account.name) + + if proxy.get_config("/sdn/account[name='%s']" % sdn_account.name): + proxy.delete_config("/sdn/account[name='%s']" % sdn_account.name) + + def _setup(): + proxy.create_config('/cloud-account/account', cloud_account) + proxy.create_config('/mgmt-domain/domain', mgmt_domain) + proxy.create_config('/vm-pool/pool', vm_pool) + proxy.create_config('/network-pool/pool', network_pool) + proxy.create_config('/sdn/account', sdn_account) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) + available_ids = [vm.id for vm in pool.available] + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool.name, + 'cloud_account':cloud_account.name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool.name, pool_config) + + mgmt_vm_pool = RwMcYang.MgmtDomainPools_Vm(name=vm_pool.name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain.name, mgmt_vm_pool) + + mgmt_network_pool = RwMcYang.MgmtDomainPools_Network(name=network_pool.name) + proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain.name, mgmt_network_pool) + + # Teardown any existing launchpad configuration, and set it back up again + _teardown() + _setup() + + + +class DeleteResources: + def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain, vm_pool): + '''Unconfigure mgmt domain: remove a vm pool''' + # Can't remove vm pool without removing resources first +# pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) +# if pool: +# for vm_id in [vm.id for vm in pool.assigned]: +# proxy.delete_config("/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, vm_id)) + + xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain.name, vm_pool.name) + proxy.delete_config(xpath) + + def test_remove_network_pool_from_mgmt_domain(self, proxy, mgmt_domain, network_pool): + '''Unconfigure mgmt_domain: remove a network pool''' + xpath = "/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain.name, network_pool.name) + proxy.delete_config(xpath) + + def test_delete_mgmt_domain(self, proxy, vm_pool, mgmt_domain): + '''Unconfigure mgmt_domain: delete mgmt_domain''' + # Can't remove vm pool without removing resources first +# pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) +# if pool: +# for vm_id in [vm.id for vm in pool.assigned]: +# proxy.delete_config("/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, vm_id)) + + xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain.name + proxy.delete_config(xpath) + + def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool): + '''Unconfigure vm_pool: remove a vm resource + + Asserts: + Resource is no longer assigned after being unconfigured + ''' + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool.name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids != [] # Assert resource is still assigned + + for assigned_id in assigned_ids: + xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool.name, assigned_id) + proxy.delete_config(xpath) + + def test_delete_vm_pool(self, proxy, vm_pool): + '''Unconfigure vm_pool''' + # Can't delete vm pool without removing it from mgmt domain first + with pytest.raises(rift.auto.proxy.ProxyRequestError) as excinfo: + xpath = "/vm-pool/pool[name='%s']" % vm_pool.name + proxy.delete_config(xpath) + assert 'illegal reference' in str(excinfo.value) + + def test_delete_network_pool(self, proxy, network_pool): + '''Unconfigure network pool''' + # Can't 
delete network pool without removing it from mgmt domain first + with pytest.raises(rift.auto.proxy.ProxyRequestError) as excinfo: + xpath = "/network-pool/pool[name='%s']" % network_pool.name + proxy.delete_config(xpath) + assert 'illegal reference' in str(excinfo.value) + + def test_delete_cloud_account(self, proxy, cloud_account): + '''Unconfigure cloud_account''' + # Can't delete cloud account without first deleting all of the pools associated with it + with pytest.raises(rift.auto.proxy.ProxyRequestError) as excinfo: + xpath = "/cloud-account/account[name='%s']" % cloud_account.name + proxy.delete_config(xpath) + assert 'illegal reference' in str(excinfo.value) + + + def test_delete_odl_sdn_account(self, proxy, sdn_account): + '''Unconfigure sdn account''' + xpath = "/sdn/account[name='%s']" % sdn_account.name + proxy.delete_config(xpath) + + +class TestDeleteFromStartingLaunchpad(DeleteResources): + pass + +@pytest.mark.slow +class TestDeleteFromStoppedLaunchpad(DeleteResources): + @pytest.fixture(scope='function', autouse=True) + def launchpad_stopped(self, launchpad_setup, proxy, mgmt_domain): + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'started', + timeout=200, + fail_on=['crashed']) + stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain.name) + stop_launchpad_output = proxy.rpc(stop_launchpad_input) + proxy.wait_for( + "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain.name, + 'stopped', + timeout=200, + fail_on=['crashed']) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py new file mode 100755 index 0000000..e22e3fb --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +import pytest + +dirpath = os.path.dirname(__file__) + +options = '-v' +pytest.main([options, os.path.join(dirpath, 'test_mission_control_negative_cloud_account.py')]) +pytest.main([options, os.path.join(dirpath, 'test_mission_control_negative_mgmt_domain.py')]) +pytest.main([options, os.path.join(dirpath, 'test_mission_control_negative_vmpool.py')]) \ No newline at end of file diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py new file mode 100755 index 0000000..a3dd58f --- /dev/null +++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_cloud_account.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python3 + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#
+#
+
+@file test_mission_control_negative_cloud_account.py
+@author
+@date 12/04/2015
+@brief System test of negative (failure) mission control functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+
+from gi.repository import GLib, RwMcYang
+from rift.auto.session import ProxyRequestError
+
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    '''fixture which returns a proxy to RwMcYang
+
+    Arguments:
+        request - pytest fixture request
+        mgmt_session - mgmt_session fixture - instance of a rift.auto.session
+        class
+
+    '''
+    return mgmt_session.proxy(RwMcYang)
+
+
+@pytest.fixture(scope='session')
+def cloud_account_type(request, cloud_type):
+    '''Workaround for the mixed labeling of 'lxc' and 'cloudsim'
+
+    Arguments:
+        cloud_type - The cloud type supplied via pytest command line parameter
+
+    '''
+    if cloud_type == 'lxc':
+        return 'cloudsim'
+    else:
+        return cloud_type
+
+
+@pytest.mark.incremental
+class TestCloudAccount:
+    '''Tests behaviors and properties common to all cloud account types'''
+
+    #
+    # Test cloud_name
+    #
+
+    def test_create_cloud_account_with_no_name(self, proxy, cloud_account_type):
+        '''Test that a cloud account cannot be created if no name is provided
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_type - a pytest fixture for the cloud account type
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        properties = {
+            'account_type': cloud_account_type,
+        }
+
+        cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/cloud-account/account', cloud_account)
+
+    def test_create_cloud_account_with_empty_name(self, proxy, cloud_account_type):
+        '''Test that a cloud account cannot be created if name is an empty string
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_type - a pytest fixture for the cloud account type
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        properties = {
+            'account_type': cloud_account_type,
+            'name': '',
+        }
+
+        cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/cloud-account/account', cloud_account)
+
+    def test_create_cloud_account_with_null_name(self, proxy, cloud_account_type):
+        '''Test that a cloud account cannot be created if name is null
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_type - a pytest fixture for the cloud account type
+
+        Asserts:
+            TypeError is raised
+
+        '''
+        properties = {
+            'account_type': cloud_account_type,
+            'name': None,
+        }
+
+        with pytest.raises(TypeError):
+            cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+
+    #
+    # Test cloud account type
+    #
+
+    def _test_create_cloud_account_with_no_type(self, proxy, cloud_account_name):
+        '''Test that a cloud account cannot be created if no type is provided
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        properties = {
+            'name': cloud_account_name,
+        }
+
+        cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/cloud-account/account', cloud_account)
+
+    def test_create_cloud_account_with_empty_type(self, proxy, cloud_account_name):
+        '''Test that a cloud account cannot be created if cloud type is an empty string
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            gi.repository.GLib.Error is raised
+
+        '''
+        properties = {
+            'account_type': '',
+            'name': cloud_account_name,
+        }
+
+        with pytest.raises(GLib.Error):
+            cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+
+    def test_create_cloud_account_with_invalid_type(self, proxy, cloud_account_name):
+        '''Test that a cloud account cannot be created if the cloud type is invalid
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            gi.repository.GLib.Error is raised
+
+        '''
+        properties = {
+            'account_type': 'Nemesis',
+            'name': cloud_account_name,
+        }
+
+        with pytest.raises(GLib.Error):
+            cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+
+    def test_create_cloud_account_with_null_type(self, proxy, cloud_account_name):
+        '''Test that a cloud account cannot be created if the cloud type is null
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            TypeError is raised
+
+        '''
+        properties = {
+            'account_type': None,
+            'name': cloud_account_name,
+        }
+
+        with pytest.raises(TypeError):
+            cloud_account = RwMcYang.CloudAccount.from_dict(properties)
+
+    #
+    # Test change cloud type
+    #
+
+    def test_create_cloud_account(self, proxy, cloud_account_name, cloud_account):
+        '''Creates a cloud account for subsequent tests
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            cloud_account - a pytest fixture for the cloud account
+
+        Asserts:
+            None
+
+        '''
+        assert cloud_account_name == cloud_account.name
+        proxy.create_config('/cloud-account/account', cloud_account)
+
+    def test_change_cloud_account_type(self, proxy, cloud_account_name):
+        '''Test that a cloud account type cannot be changed
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        account_type_map = {
+            'cloudsim': 'openstack',
+            'openstack': 'cloudsim',
+        }
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        cloud_account = proxy.get(xpath)
+        updated_cloud_account = RwMcYang.CloudAccount.from_dict({
+            'name': cloud_account.name,
+            'account_type': account_type_map[cloud_account.account_type],
+        })
+        with pytest.raises(ProxyRequestError):
+            proxy.merge_config(xpath, updated_cloud_account)
+
+    def test_create_cloud_account_with_duplicate_name(self, proxy, cloud_account_name,
+            cloud_account):
+        '''Attempt to create a cloud account with a duplicate name
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        assert cloud_account_name == cloud_account.name
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/cloud-account/account', cloud_account)
+
+    def test_delete_cloud_account_with_vm_pool_with_vm_resources(self, proxy,
+            cloud_account_name, vm_pool_name):
+        '''Tests that a cloud account cannot be deleted if it has a vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            vm_pool_name - a pytest fixture for the primary vm pool name
+
+        Asserts:
+            A cloud account exists for the cloud_account_name
+            Newly configured vm pool has no resources assigned to it
+            Cloud account has available resources
+            VM pool has available resources
+            Cloud account and vm pool agree on available resources
+            Configured resource is reflected as assigned in operational data
+            post assignment
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        cloud_account = proxy.get(xpath)
+        assert cloud_account is not None
+
+        pool_config = RwMcYang.VmPool(
+            name=vm_pool_name,
+            cloud_account=cloud_account_name,
+            dynamic_scaling=True,
+        )
+        proxy.create_config('/vm-pool/pool', pool_config)
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) == 0  # pool contained resources before any were assigned
+
+        account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name)
+        cloud_vm_ids = [vm.id for vm in account.resources.vm]
+        assert len(cloud_vm_ids) >= 1
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        available_ids = [vm.id for vm in pool.available]
+        assert len(available_ids) >= 1  # Assert pool has available resources
+        # Assert not split brain
+        assert set(cloud_vm_ids).difference(set(available_ids)) == set([])
+
+        pool_config = RwMcYang.VmPool.from_dict({
+            'name':vm_pool_name,
+            'cloud_account':cloud_account_name,
+            'assigned':[{'id':available_ids[0]}]})
+        proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config)
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert available_ids[0] in assigned_ids  # Configured resource shows as assigned
+
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        with pytest.raises(ProxyRequestError):
+            proxy.delete_config(xpath)
+
+
+@pytest.mark.incremental
+class TestCloudAccountNegativeTeardown:
+
+    def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: Remove the primary vm pool resource(s)
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            Resource is assigned before unassigning
+            Resource is no longer assigned after being unconfigured
+
+        '''
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) >= 1  # Assert resource is still assigned
+
+        for assigned_id in assigned_ids:
+            xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id)
+            proxy.delete_config(xpath)
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) == 0  # Assert resource is not assigned
+
+    def test_delete_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: Remove the primary vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/vm-pool/pool[name='%s']" % vm_pool_name
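+        # The pool can be deleted here because the preceding teardown test
+        # unassigned every VM resource from it.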
+        proxy.delete_config(xpath)
+
+    def test_delete_cloud_account(self, proxy, cloud_account_name):
+        '''Unconfigure cloud_account
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        proxy.delete_config(xpath)
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py
new file mode 100755
index 0000000..d1aaaa6
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_mgmt_domain.py
@@ -0,0 +1,497 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#
+#
+
+@file test_mission_control_negative_mgmt_domain.py
+@author RIFT.io
+@date 12/4/2015
+@brief System test of negative (failure) mission control functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+
+from gi.repository import RwMcYang
+from rift.auto.session import ProxyRequestError
+from rift.auto.session import ProxyExpectTimeoutError
+
+
+def start_launchpad(proxy, mgmt_domain_name):
+    '''Invoke start launchpad RPC
+
+    Arguments:
+        proxy - a proxy to RwMcYang
+        mgmt_domain_name - the management domain name string
+
+    Asserts:
+        Launchpad begins test in state 'stopped'
+        Launchpad finishes test in state 'started'
+
+    '''
+    proxy.wait_for(
+        "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+        'stopped',
+        timeout=10,
+        fail_on=['crashed'])
+    start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name)
+    start_launchpad_output = proxy.rpc(start_launchpad_input)
+    proxy.wait_for(
+        "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+        'started',
+        timeout=120,
+        fail_on=['crashed'])
+
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    '''fixture which returns a proxy to RwMcYang
+
+    Arguments:
+        request - pytest fixture request
+        mgmt_session - mgmt_session fixture - instance of a rift.auto.session
+        class
+
+    '''
+    return mgmt_session.proxy(RwMcYang)
+
+
+@pytest.mark.incremental
+class TestMgmtDomainNegativeSetup:
+    '''Stand up the objects needed for the lifecycle of this test script'''
+
+    def test_create_cloud_account(self, proxy, logger, cloud_account):
+        '''Configure a cloud account
+
+        This creates a cloud account to test other objects
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            logger - a pytest fixture to an instance of Logger
+            cloud_account - a pytest fixture to a cloud account object
+
+        Asserts:
+            None
+
+        '''
+        proxy.create_config('/cloud-account/account', cloud_account)
+
+    def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name):
+        '''Configure vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            vm_pool_name - a pytest fixture for the VM pool name
+ + Asserts: + A cloud account exists for the cloud_account_name + Newly configured vm pool has no resources assigned to it + + ''' + xpath = "/cloud-account/account[name='%s']" % cloud_account_name + cloud_account = proxy.get(xpath) + assert cloud_account is not None + + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name, + dynamic_scaling=True, + ) + proxy.create_config('/vm-pool/pool', pool_config) + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert len(assigned_ids) == 0 + + def test_create_mgmt_domain(self, proxy, mgmt_domain_name): + '''Configure a management domain + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + None + + ''' + domain_config = RwMcYang.MgmtDomain(name=mgmt_domain_name) + proxy.create_config('/mgmt-domain/domain', domain_config) + + +@pytest.mark.incremental +class TestMgmtDomain: + '''Test negative cases for the management domain''' + + # + # Creation tests + # + + def test_create_mgmt_domain_with_no_name(self, proxy): + '''Test that a mgmt domain cannot be created if name is not present + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + properties = { } + mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/mgmt-domain/domain', mgmt_domain) + + def test_create_mgmt_domain_with_blank_name(self, proxy): + '''Test that a management domain cannot be created if name is an empty + string + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + + Asserts: + rift.auto.proxy.ProxyRequestError is raised + + ''' + properties = { + 'name': '', + } + mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/mgmt-domain/domain', mgmt_domain) + + def test_create_mgmt_domain_with_null_name(self, proxy): + '''Test that a management domain cannot be created if name is null + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + + Asserts: + TypeError is raised + + ''' + properties = { + 'name':None, + } + with pytest.raises(TypeError): + mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + + def test_create_mgmt_domain_with_duplicate_name(self, proxy, mgmt_domain_name): + '''Test that a management domain cannot be created when a management + domain with the same name already exists + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + management domain exists for the mgmt_domain_name + rift.auto.proxy.ProxyRequestError is raised + + ''' + mgmt_domain = proxy.get("/mgmt-domain/domain[name='%s']" % mgmt_domain_name) + assert mgmt_domain is not None + + properties = { + 'name': mgmt_domain.name, + } + duplicate_mgmt_domain = RwMcYang.MgmtDomain.from_dict(properties) + with pytest.raises(ProxyRequestError): + proxy.create_config('/mgmt-domain/domain', duplicate_mgmt_domain) + + # + # Launchpad related tests + # + + def test_verify_launchpad_not_started(self, proxy, mgmt_domain_name): + '''Verifies that the launchpad is not started + + Arguments: + proxy - a pytest fixture proxy to RwMcYang + mgmt_domain_name - a pytest fixture for the management domain name + + Asserts: + rift.auto.session.ProxyExpectTimeoutError is raised + + ''' + with pytest.raises(ProxyExpectTimeoutError): + 
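+            # This wait is expected to time out (asserted via pytest.raises):
+            # nothing has started the launchpad at this point in the test flow.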
+            proxy.wait_for(
+                "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+                'started',
+                timeout=10,
+                fail_on=['crashed'])
+            stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name)
+            stop_launchpad_output = proxy.rpc(stop_launchpad_input)
+            proxy.wait_for(
+                "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+                'stopped',
+                timeout=120,
+                fail_on=['crashed'])
+
+    def test_start_launchpad_when_no_vm_pool_assigned(self, proxy, mgmt_domain_name):
+        '''Verify that the launchpad cannot start when the management domain
+        does not have a vm pool assigned
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain name
+
+        Asserts:
+            rift.auto.session.ProxyExpectTimeoutError is raised
+
+        '''
+        with pytest.raises(ProxyExpectTimeoutError):
+            proxy.wait_for(
+                "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+                'started',
+                timeout=120,
+                fail_on=['crashed'])
+
+    def test_start_lp_with_empty_vm_pool(self, proxy, mgmt_domain_name, vm_pool_name):
+        '''Tests that starting the launchpad fails when the vm pool has no vm
+        resources; configures mgmt_domain by adding the (still empty) VM pool to it
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain name
+            vm_pool_name - a pytest fixture for the vm pool name
+
+        Asserts:
+            rift.auto.session.ProxyExpectTimeoutError is raised
+
+        '''
+        with pytest.raises(ProxyExpectTimeoutError):
+            pool_config = RwMcYang.MgmtDomainPools_Vm(name=vm_pool_name)
+            proxy.create_config(
+                "/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain_name,
+                pool_config,
+            )
+            proxy.wait_for(
+                "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+                'started',
+                timeout=120,
+                fail_on=['crashed'])
+
+    def test_launchpad_starts_when_vm_pool_has_a_vm_resource(self, proxy,
+            cloud_account_name, vm_pool_name, mgmt_domain_name, network_pool_name):
+        '''Tests that a launchpad can now start when the vm pool has a vm
+        resource
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            vm_pool_name - a pytest fixture for the VM pool name
+            mgmt_domain_name - a pytest fixture for the management domain name
+            network_pool_name - a pytest fixture for the network pool name
+
+        Asserts:
+            Cloud account has available resources
+            VM pool has available resources
+            Cloud account and vm pool agree on available resources
+            Configured resource is reflected as assigned in operational data
+            post assignment
+            Launchpad reaches state 'started'
+
+        '''
+        account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name)
+        cloud_vm_ids = [vm.id for vm in account.resources.vm]
+        assert len(cloud_vm_ids) >= 1
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        available_ids = [vm.id for vm in pool.available]
+        assert len(available_ids) >= 1
+        # Assert not split brain
+        assert set(cloud_vm_ids).difference(set(available_ids)) == set([])
+
+        pool_config = RwMcYang.VmPool.from_dict({
+            'name':vm_pool_name,
+            'cloud_account':cloud_account_name,
+            'assigned':[{'id':available_ids[0]}]})
+        proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config)
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert available_ids[0] in assigned_ids
+
+        # Create NW pool
+        pool_config = RwMcYang.NetworkPool(
+            name=network_pool_name,
+            cloud_account=cloud_account_name,
+            dynamic_scaling=True,
+        )
+        proxy.create_config('/network-pool/pool', pool_config)
+        pool_config = RwMcYang.MgmtDomainPools_Network(name=network_pool_name)
+        proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain_name, pool_config)
+
+
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=400,
+            fail_on=['crashed'])
+
+    def test_delete_mgmt_domain_with_running_launchpad(self, proxy, mgmt_domain_name):
+        '''Test that a management domain cannot be deleted when the launchpad
+        is running
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name
+        with pytest.raises(ProxyRequestError):
+            proxy.delete_config(xpath)
+
+    def test_stop_launchpad(self, proxy, mgmt_domain_name):
+        '''Stop launchpad before we leave this class
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain
+
+        Asserts:
+            Launchpad begins test in state 'started'
+            Launchpad finishes test in state 'stopped'
+
+        '''
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=10,
+            fail_on=['crashed'])
+        stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name)
+        stop_launchpad_output = proxy.rpc(stop_launchpad_input)
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'stopped',
+            timeout=120,
+            fail_on=['crashed'])
+
+
+@pytest.mark.incremental
+class TestMgmtDomainNegativeTeardown:
+
+    @pytest.mark.xfail(raises=ProxyExpectTimeoutError)
+    def test_delete_mgmt_domain_with_attached_pools(self, proxy, mgmt_domain_name):
+        '''Test that deleting a management domain while a pool is attached will
+        fail
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain name
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name
+        proxy.delete_config(xpath)
+
+    def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain_name,
+            vm_pool_name):
+        '''Unconfigure mgmt domain: remove a vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain name
+            vm_pool_name - a pytest fixture for the vm pool name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (
+            mgmt_domain_name, vm_pool_name)
+        proxy.delete_config(xpath)
+
+    def test_delete_mgmt_domain(self, proxy, mgmt_domain_name):
+        '''Unconfigure mgmt_domain: delete mgmt_domain
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            mgmt_domain_name - a pytest fixture for the management domain name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name
+        proxy.delete_config(xpath)
+
+    def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: remove a vm resource
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            Resource is no longer assigned after being unconfigured
+
+        '''
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) >= 1
+
+        for assigned_id in assigned_ids:
+            xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (
+                vm_pool_name, assigned_id)
+            proxy.delete_config(xpath)
+
+    def test_delete_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: Remove the primary vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/vm-pool/pool[name='%s']" % vm_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_nw_pool(self, proxy, network_pool_name):
+        '''Unconfigure network_pool: Remove the primary network pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            network_pool_name - a pytest fixture for the network pool name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/network-pool/pool[name='%s']" % network_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_cloud_account(self, proxy, cloud_account_name):
+        '''Unconfigure cloud_account
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        proxy.delete_config(xpath)
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py
new file mode 100755
index 0000000..b78c283
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_negative_vmpool.py
@@ -0,0 +1,528 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#
+#
+
+@file test_mission_control_negative_vmpool.py
+@author
+@date 11/23/2015
+@brief System test of negative (failure) mission control functionality
+"""
+
+import logging
+import pytest
+
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+
+from rift.auto.session import ProxyRequestError
+from gi.repository import RwMcYang
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    '''Fixture which returns a proxy to RwMcYang
+
+    Arguments:
+        mgmt_session - mgmt_session fixture - instance of a rift.auto.session
+        class
+
+    '''
+    return mgmt_session.proxy(RwMcYang)
+
+@pytest.fixture(scope='session')
+def secondary_vm_pool_name(request):
+    '''Fixture which returns the secondary vm pool name'''
+    return 'vm-pool-2'
+
+def show_cloud_account(logger, cloud_account):
+    '''Helper method to output vm and network ids for debugging
+
+    Here is a sample cloud account resources dict:
+        resources= {'vm': [
+            {'name': 'rift-s1', 'available': True, 'id': '1'}]}
+
+    Arguments:
+        logger - logging object to which to send output
+        cloud_account - cloud_account object to interrogate
+
+    '''
+    logger.debug('Showing cloud account. name=%s' % cloud_account.name)
+    logger.debug('account.resources=%s' % cloud_account.resources)
+    cloud_vm_ids = [vm.id for vm in cloud_account.resources.vm]
+    logger.debug('cloud vm ids: %s' % cloud_vm_ids)
+    cloud_network_ids = [network.id for network in cloud_account.resources.network]
+    logger.debug('cloud network ids: %s' % cloud_network_ids)
+
+
+@pytest.mark.incremental
+class TestVmPoolNegativeSetup:
+    '''Performs module level setup'''
+
+    def test_create_cloud_account(self, proxy, logger, cloud_account):
+        '''Configure a cloud account
+
+        This creates a cloud account to test other objects
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            logger - a pytest fixture to an instance of Logger
+            cloud_account - a pytest fixture to a cloud account object
+
+        Asserts:
+            None
+
+        '''
+        proxy.create_config('/cloud-account/account', cloud_account)
+        #show_cloud_account(logger, cloud_account)
+
+    def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name):
+        '''Configure vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            A cloud account exists for the cloud_account_name
+            Newly configured vm pool has no resources assigned to it
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        cloud_account = proxy.get(xpath)
+        assert cloud_account is not None
+
+        pool_config = RwMcYang.VmPool(
+            name=vm_pool_name,
+            cloud_account=cloud_account_name,
+            dynamic_scaling=True,
+        )
+        proxy.create_config('/vm-pool/pool', pool_config)
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) == 0
+
+    # def test_stub(self, proxy):
+    #     '''The decorator is a fix to prevent the test script from failing due
+    #     to the following error:
+
+    #     "Unable to resolve dependency ('launchpad',). failed to order test:"
+
+    #     Arguments:
+    #         proxy - a pytest fixture proxy to RwMcYang
+
+    #     Asserts:
+    #         True
+
+    #     '''
+    #     assert True
+
+
+@pytest.mark.incremental
+class TestVmPoolNegative:
+    '''This class is a container for testing VM pool negative cases.
+
+    The following aspects are tested:
+        * create a vm pool object
+        * assign resources to a pool that have already been assigned to another pool
+
+    '''
+
+    #
+    # Create: VM pool name tests
+    #
+
+    def test_create_vm_pool_with_missing_pool_name(self, proxy, cloud_account):
+        '''Tests that a vm pool cannot be created without a name
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account - a pytest fixture for the cloud account
+
+        Asserts:
+            cloud_account has a name
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        assert cloud_account.name is not None
+
+        pool_config = RwMcYang.VmPool(
+            cloud_account=cloud_account.name,
+            dynamic_scaling=True,
+        )
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/vm-pool/pool', pool_config)
+
+    def test_create_vm_pool_with_blank_pool_name(self, proxy, cloud_account):
+        '''Tests that a vm pool cannot be created with a blank name
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account - a pytest fixture for the cloud account
+
+        Asserts:
+            Cloud account has a name
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        assert cloud_account.name is not None
+
+        pool_config = RwMcYang.VmPool(
+            name='',
+            cloud_account=cloud_account.name,
+            dynamic_scaling=True,
+        )
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/vm-pool/pool', pool_config)
+
+    def test_create_vm_pool_with_null_pool_name(self, proxy, cloud_account):
+        '''Tests that a vm pool cannot be created with a null (None) name
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account - a pytest fixture for the cloud account
+
+        Asserts:
+            Cloud account has a name
+            TypeError is raised
+
+        '''
+        assert cloud_account.name is not None
+        with pytest.raises(TypeError):
+            pool_config = RwMcYang.VmPool(
+                name=None,
+                cloud_account=cloud_account.name,
+                dynamic_scaling=True,
+            )
+            #proxy.create_config('/vm-pool/pool', pool_config)
+
+    def test_create_vm_pool_with_duplicate_name(self, proxy, vm_pool_name,
+                                                cloud_account_name):
+        '''Tests that a vm pool cannot be created with a name that already exists
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the vm pool name
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            Cloud account has a name
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        cloud_account = proxy.get(xpath)
+        assert cloud_account.name is not None
+
+        pool_config = RwMcYang.VmPool(
+            name=vm_pool_name,
+            cloud_account=cloud_account.name,
+            dynamic_scaling=True,
+        )
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/vm-pool/pool', pool_config)
+
+    #
+    # Cloud name tests
+    #
+
+    @pytest.mark.xfail(raises=ProxyRequestError)
+    def test_create_vm_pool_with_missing_cloud_name(self, proxy, secondary_vm_pool_name):
+        '''Tests that a vm pool cannot be created without a cloud account name
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            Secondary vm pool name exists
+            Secondary vm pool does not exist
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        assert secondary_vm_pool_name is not None
+        assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None
+
+        pool_config = RwMcYang.VmPool(
+            name=secondary_vm_pool_name,
+            dynamic_scaling=True,
+        )
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/vm-pool/pool', pool_config)
+
+    def test_create_vm_pool_with_blank_cloud_name(self, proxy, secondary_vm_pool_name):
+        '''Tests that a vm pool cannot be created with a blank cloud account name
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            Secondary vm pool name exists
+            Secondary vm pool does not exist
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        assert secondary_vm_pool_name is not None
+        assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None
+
+        pool_config = RwMcYang.VmPool(
+            name=secondary_vm_pool_name,
+            cloud_account='',
+            dynamic_scaling=True,
+        )
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/vm-pool/pool', pool_config)
+
+    def _test_create_vm_pool_with_null_cloud_name(self, proxy, secondary_vm_pool_name):
+        '''Tests that a vm pool cannot be created if the cloud name is None
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            Secondary vm pool name exists
+            Secondary vm pool does not exist
+            TypeError is raised
+
+        '''
+        assert secondary_vm_pool_name is not None
+        assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None
+        with pytest.raises(TypeError):
+            pool_config = RwMcYang.VmPool(
+                name=secondary_vm_pool_name,
+                cloud_account=None,
+                dynamic_scaling=True,
+            )
+            #proxy.create_config('/vm-pool/pool', pool_config)
+
+    def test_create_vm_pool_with_bogus_cloud_name(self, proxy, secondary_vm_pool_name):
+        '''Tests that a vm pool cannot be created if the cloud account does not exist
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            Secondary vm pool name exists
+            Secondary vm pool does not exist
+            Cloud account does not exist for the bogus cloud account name
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        assert secondary_vm_pool_name is not None
+        assert proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name) is None
+
+        bogus_cloud_account_name = 'bogus-cloud-account-name'
+        cloud_account = proxy.get("/cloud-account/account[name='%s']" % bogus_cloud_account_name)
+        assert cloud_account is None
+
+        pool_config = RwMcYang.VmPool(
+            name=secondary_vm_pool_name,
+            cloud_account=bogus_cloud_account_name,
+            dynamic_scaling=True,
+        )
+        with pytest.raises(ProxyRequestError):
+            proxy.create_config('/vm-pool/pool', pool_config)
+
+    #
+    # Test VM pool assignments
+    #
+
+    def test_assign_vm_resource_to_vm_pool(self, proxy, cloud_account_name,
+                                           vm_pool_name):
+        '''Configure a vm resource by adding it to a vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            vm_pool_name - a pytest fixture for the primary vm pool name
+
+        Asserts:
+            Cloud account has available resources
+            VM pool has available resources
+            Cloud account and vm pool agree on available resources
+            Configured resource is reflected as assigned in operational data
+            post assignment
+
+        '''
+        account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name)
+        cloud_vm_ids = [vm.id for vm in account.resources.vm]
+        assert len(cloud_vm_ids) >= 1
+
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        available_ids = [vm.id for vm in pool.available]
+        assert len(available_ids) >= 1  # Assert pool has available resources
+        # Assert not split brain
+        assert set(cloud_vm_ids).difference(set(available_ids)) == set([])
+
+        pool_config = RwMcYang.VmPool.from_dict({
+            'name': vm_pool_name,
+            'cloud_account': cloud_account_name,
+            'assigned': [{'id': available_ids[0]}]}
+        )
+        proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config)
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert available_ids[0] in assigned_ids  # Configured resource shows as assigned
+
+    def test_create_vm_pool_2(self, proxy, cloud_account_name, secondary_vm_pool_name):
+        '''Configure vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            Cloud account exists for the given cloud_account_name
+            Newly configured vm pool has no resources assigned to it
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        cloud_account = proxy.get(xpath)
+        assert cloud_account is not None
+
+        pool_config = RwMcYang.VmPool(
+            name=secondary_vm_pool_name,
+            cloud_account=cloud_account_name,
+            dynamic_scaling=True,
+        )
+        proxy.create_config('/vm-pool/pool', pool_config)
+        pool = proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) == 0  # pool contained resources before any were assigned
+
+    @pytest.mark.skipif(True, reason="Assigned VMs are able to be shared between VM pools")
+    @pytest.mark.xfail(raises=ProxyRequestError)
+    def test_assign_allocated_vm_to_vm_pool_2(self, proxy, cloud_account_name,
+                                              vm_pool_name, secondary_vm_pool_name):
+        '''This test tries to assign a vm from one vm pool to another vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+            vm_pool_name - a pytest fixture for the primary vm pool name
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            Prior to Pool 2 assignment, verifies that pool 1 has assigned id(s)
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        pool_1 = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        pool_2 = proxy.get("/vm-pool/pool[name='%s']" % secondary_vm_pool_name)
+        assigned_ids = [vm.id for vm in pool_1.assigned]
+        assert len(assigned_ids) >= 1
+
+        pool_config = RwMcYang.VmPool.from_dict({
+            'name': secondary_vm_pool_name,
+            'assigned': [{'id': assigned_ids[0]}]})
+        with pytest.raises(ProxyRequestError):
+            proxy.merge_config(
+                "/vm-pool/pool[name='%s']" % secondary_vm_pool_name, pool_config
+            )
+
+
+@pytest.mark.incremental
+class TestVmPoolNegativeTeardown:
+    '''This class serves to do cleanup for the VM pool negative tests'''
+
+    def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: Remove the primary vm pool resource(s)
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            Resource is assigned before unassigning
+            Resource is no longer assigned after being unconfigured
+
+        '''
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) >= 1  # Assert resource is still assigned
+
+        for assigned_id in assigned_ids:
+            xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id)
+            proxy.delete_config(xpath)
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert len(assigned_ids) == 0  # Assert resource is not assigned
+
+    def test_delete_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: Remove the primary vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            vm_pool_name - a pytest fixture for the VM pool name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/vm-pool/pool[name='%s']" % vm_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_cloud_account_expect_fail(self, proxy, cloud_account_name):
+        '''Unconfigure cloud_account
+
+        This should fail because we have not deleted vm pool 2
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            rift.auto.proxy.ProxyRequestError is raised
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        with pytest.raises(ProxyRequestError):
+            proxy.delete_config(xpath)
+
+    def test_delete_vm_pool_2(self, proxy, secondary_vm_pool_name):
+        '''Unconfigure secondary vm pool
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            secondary_vm_pool_name - a pytest fixture for the secondary vm pool name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/vm-pool/pool[name='%s']" % secondary_vm_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_cloud_account(self, proxy, cloud_account_name):
+        '''Unconfigure cloud_account
+
+        Arguments:
+            proxy - a pytest fixture proxy to RwMcYang
+            cloud_account_name - a pytest fixture for the cloud account name
+
+        Asserts:
+            None
+
+        '''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        proxy.delete_config(xpath)
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py b/modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py
new file mode 100755
index 0000000..5b6e2cf
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/pytest/test_mission_control_static.py
@@ -0,0 +1,396 @@
+#!/usr/bin/env python3

+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
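+
+# NOTE: the tests below all drive the RwMcYang model through the same proxy
+# pattern: write config with create_config/replace_config at a keyed xpath,
+# read it back with get, and poll operational state with wait_for. A minimal
+# sketch of that flow, with illustrative names only ('pool-1' and 'cloud-1'
+# are not fixtures from this suite):
+#
+#     pool_config = RwMcYang.VmPool(name='pool-1', cloud_account='cloud-1')
+#     proxy.create_config('/vm-pool/pool', pool_config)
+#     pool = proxy.get("/vm-pool/pool[name='pool-1']")
+#     assert [vm.id for vm in pool.assigned] == []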
+ +""" +# +# + +@file test_launchpad_startstop.py +@author Paul Laidler (Paul.Laidler@riftio.com) +@date 06/19/2015 +@brief System test of basic mission control functionality +""" + +import pytest + +import gi +gi.require_version('RwMcYang', '1.0') + +from gi.repository import RwMcYang + +@pytest.fixture(scope='module') +def proxy(request, mgmt_session): + '''fixture which returns a proxy to RwMcYang + + Arguments: + request - pytest fixture request + mgmt_session - mgmt_session fixture - instance of a rift.auto.session class + ''' + return mgmt_session.proxy(RwMcYang) + +@pytest.mark.setup('launchpad') +@pytest.mark.incremental +class TestMissionControlSetup: + def test_create_cloud_account(self, proxy, cloud_account): + '''Configure a cloud account''' + proxy.create_config('/cloud-account/account', cloud_account) + + def test_create_mgmt_domain(self, proxy, mgmt_domain_name): + '''Configure mgmt domain''' + domain_config = RwMcYang.MgmtDomain( + name=mgmt_domain_name) + proxy.create_config('/mgmt-domain/domain', domain_config) + + def test_create_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure vm pool + + Asserts : + Newly configured vm pool has no resources assigned to it + ''' + pool_config = RwMcYang.VmPool( + name=vm_pool_name, + cloud_account=cloud_account_name) + proxy.create_config('/vm-pool/pool', pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were assigned + + def test_assign_vm_resource_to_vm_pool(self, proxy, cloud_account_name, vm_pool_name): + '''Configure a vm resource by adding it to a vm pool + + Asserts: + Cloud account has available resources + VM pool has has available resources + Cloud account and vm pool agree on available resources + Configured resource is reflected as assigned in operational data post assignment + ''' + account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name) + cloud_vm_ids = [vm.id for vm in account.resources.vm] + assert cloud_vm_ids != [] + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + available_ids = [vm.id for vm in pool.available] + # NOTE: Desired API - request for a list of leaf elements + # available_ids = proxy.get("/vm-pool/pool[name='%s']/available/id" % vm_pool_name) + assert available_ids != [] # Assert pool has available resources + assert set(cloud_vm_ids).difference(set(available_ids)) == set([]) # Assert not split brain + + pool_config = RwMcYang.VmPool.from_dict({ + 'name':vm_pool_name, + 'cloud_account':cloud_account_name, + 'assigned':[{'id':available_ids[0]}]}) + proxy.replace_config("/vm-pool/pool[name='%s']" % vm_pool_name, pool_config) + + pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name) + assigned_ids = [vm.id for vm in pool.assigned] + print(available_ids[0], assigned_ids) + assert available_ids[0] in assigned_ids # Configured resource shows as assigned + + def test_create_network_pool(self, proxy, cloud_account_name, network_pool_name): + '''Configure network pool + + Asserts : + Newly configured network pool has no resources assigned to it + ''' + pool_config = RwMcYang.NetworkPool( + name=network_pool_name, + cloud_account=cloud_account_name) + proxy.create_config('/network-pool/pool', pool_config) + + pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name) + assigned_ids = [network.id for network in pool.assigned] + assert assigned_ids == [] # pool contained resources before any were 
+
+    def test_assign_network_resource_to_network_pool(self, proxy, cloud_account_name, network_pool_name):
+        '''Configure a network resource by adding it to a network pool
+
+        Asserts:
+            Cloud account has available resources
+            Network pool has available resources
+            Cloud account and network pool agree on available resources
+            Configured resource is reflected as assigned in operational data post assignment
+        '''
+        account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name)
+        cloud_network_ids = [network.id for network in account.resources.network]
+        assert cloud_network_ids != []
+
+        pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name)
+        available_ids = [network.id for network in pool.available]
+        assert available_ids != []  # Assert pool has available resources
+        assert set(cloud_network_ids).difference(set(available_ids)) == set([])  # Assert not split brain
+
+        pool_config = RwMcYang.NetworkPool.from_dict({
+            'name': network_pool_name,
+            'cloud_account': cloud_account_name,
+            'assigned': [{'id': available_ids[0]}]})
+        proxy.replace_config("/network-pool/pool[name='%s']" % network_pool_name, pool_config)
+
+        pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name)
+        assigned_ids = [network.id for network in pool.assigned]
+        assert available_ids[0] in assigned_ids  # Configured resource shows as assigned
+
+    def test_assign_network_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name):
+        '''Configure mgmt_domain by adding a network pool to it
+        '''
+        pool_config = RwMcYang.MgmtDomainPools_Network(name=network_pool_name)
+        proxy.create_config("/mgmt-domain/domain[name='%s']/pools/network" % mgmt_domain_name, pool_config)
+
+    def test_create_port_pool(self, proxy, cloud_account_name, port_pool_name):
+        '''Configure port pool
+
+        Asserts:
+            Newly configured port pool has no resources assigned to it
+        '''
+        pool_config = RwMcYang.PortPool(
+            name=port_pool_name,
+            cloud_account=cloud_account_name)
+        proxy.create_config('/port-pool/pool', pool_config)
+
+        pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name)
+        assigned_ids = [port.id for port in pool.assigned]
+        assert assigned_ids == []  # pool contained resources before any were assigned
+
+    def test_assign_port_resource_to_port_pool(self, proxy, cloud_account_name, port_pool_name):
+        '''Configure a port resource by adding it to a port pool
+
+        Asserts:
+            Cloud account has available resources
+            Port pool has available resources
+            Cloud account and port pool agree on available resources
+            Configured resource is reflected as assigned in operational data post assignment
+        '''
+        account = proxy.get("/cloud-account/account[name='%s']" % cloud_account_name)
+        cloud_port_ids = [port.id for port in account.resources.port]
+        assert cloud_port_ids != []
+
+        pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name)
+        available_ids = [port.id for port in pool.available]
+        assert available_ids != []  # Assert pool has available resources
+        assert set(cloud_port_ids).difference(set(available_ids)) == set([])  # Assert not split brain
+
+        pool_config = RwMcYang.PortPool.from_dict({
+            'name': port_pool_name,
+            'cloud_account': cloud_account_name,
+            'assigned': [{'id': available_ids[0]}]})
+        proxy.replace_config("/port-pool/pool[name='%s']" % port_pool_name, pool_config)
+
+        pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name)
+        assigned_ids = [port.id for port in pool.assigned]
+        assert available_ids[0] in assigned_ids  # Configured resource shows as assigned
+
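+    # NOTE: the three assign_*_resource tests above share one
+    # read-modify-write idiom; condensed, with an illustrative pool name
+    # ('pool-1' is not a fixture from this suite):
+    #
+    #     pool = proxy.get("/port-pool/pool[name='pool-1']")
+    #     first_free = [port.id for port in pool.available][0]
+    #     pool_config = RwMcYang.PortPool.from_dict({
+    #         'name': 'pool-1', 'assigned': [{'id': first_free}]})
+    #     proxy.replace_config("/port-pool/pool[name='pool-1']", pool_config)
+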
+    def test_assign_port_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, port_pool_name):
+        '''Configure mgmt_domain by adding a port pool to it
+        '''
+        pool_config = RwMcYang.MgmtDomainPools_Port(name=port_pool_name)
+        proxy.create_config("/mgmt-domain/domain[name='%s']/pools/port" % mgmt_domain_name, pool_config)
+
+    def test_assign_vm_pool_to_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name):
+        '''Configure mgmt_domain by adding a VM pool to it
+        '''
+        pool_config = RwMcYang.MgmtDomainPools_Vm(name=vm_pool_name)
+        proxy.create_config("/mgmt-domain/domain[name='%s']/pools/vm" % mgmt_domain_name, pool_config)
+
+    def test_wait_for_launchpad_started(self, proxy, mgmt_domain_name):
+        '''Wait for the launchpad to start
+
+        Additionally begins the launchpad scraper.
+
+        Asserts:
+            Launchpad reaches state 'started'
+        '''
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=200,
+            fail_on=['crashed'])
+
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMissionControl:
+
+    def test_stop_launchpad(self, proxy, mgmt_domain_name):
+        '''Invoke stop launchpad RPC
+
+        Asserts:
+            Launchpad begins test in state 'started'
+            Launchpad finishes test in state 'stopped'
+        '''
+
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=10,
+            fail_on=['crashed'])
+        stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name)
+        stop_launchpad_output = proxy.rpc(stop_launchpad_input)
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'stopped',
+            timeout=60,
+            fail_on=['crashed'])
+
+    def test_start_launchpad(self, proxy, mgmt_domain_name, launchpad_scraper):
+        '''Invoke start launchpad RPC
+
+        Asserts:
+            Launchpad begins test in state 'stopped'
+            Launchpad finishes test in state 'started'
+        '''
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'stopped',
+            timeout=10,
+            fail_on=['crashed'])
+        start_launchpad_input = RwMcYang.StartLaunchpadInput(mgmt_domain=mgmt_domain_name)
+        start_launchpad_output = proxy.rpc(start_launchpad_input)
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=200,
+            fail_on=['crashed'])
+        launchpad_scraper.reset()
+
+    def test_stop_launchpad_redux(self, proxy, mgmt_domain_name):
+        '''Invoke stop launchpad RPC... Again...
+
+        Asserts:
+            Launchpad begins test in state 'started'
+            Launchpad finishes test in state 'stopped'
+        '''
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'started',
+            timeout=10,
+            fail_on=['crashed'])
+        stop_launchpad_input = RwMcYang.StopLaunchpadInput(mgmt_domain=mgmt_domain_name)
+        stop_launchpad_output = proxy.rpc(stop_launchpad_input)
+        proxy.wait_for(
+            "/mgmt-domain/domain[name='%s']/launchpad/state" % mgmt_domain_name,
+            'stopped',
+            timeout=60,
+            fail_on=['crashed'])
+
+
+@pytest.mark.teardown('launchpad')
+@pytest.mark.incremental
+class TestMissionControlTeardown:
+    def test_remove_vm_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, vm_pool_name):
+        '''Unconfigure mgmt domain: remove a vm pool'''
+        xpath = "/mgmt-domain/domain[name='%s']/pools/vm[name='%s']" % (mgmt_domain_name, vm_pool_name)
+        proxy.delete_config(xpath)
+
+    def test_remove_network_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, network_pool_name):
+        '''Unconfigure mgmt_domain: remove a network pool'''
+        xpath = "/mgmt-domain/domain[name='%s']/pools/network[name='%s']" % (mgmt_domain_name, network_pool_name)
+        proxy.delete_config(xpath)
+
+    def test_remove_port_pool_from_mgmt_domain(self, proxy, mgmt_domain_name, port_pool_name):
+        '''Unconfigure mgmt_domain: remove a port pool'''
+        xpath = "/mgmt-domain/domain[name='%s']/pools/port[name='%s']" % (mgmt_domain_name, port_pool_name)
+        proxy.delete_config(xpath)
+
+    def test_delete_mgmt_domain(self, proxy, mgmt_domain_name):
+        '''Unconfigure mgmt_domain: delete mgmt_domain'''
+        xpath = "/mgmt-domain/domain[name='%s']" % mgmt_domain_name
+        proxy.delete_config(xpath)
+
+    def test_remove_vm_resource_from_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool: remove a vm resource
+
+        Asserts:
+            Resource is no longer assigned after being unconfigured
+        '''
+        pool = proxy.get("/vm-pool/pool[name='%s']" % vm_pool_name)
+        assigned_ids = [vm.id for vm in pool.assigned]
+        assert assigned_ids != []  # Assert resource is still assigned
+
+        for assigned_id in assigned_ids:
+            xpath = "/vm-pool/pool[name='%s']/assigned[id='%s']" % (vm_pool_name, assigned_id)
+            proxy.delete_config(xpath)
+
+    def test_remove_network_resource_from_network_pool(self, proxy, network_pool_name):
+        '''Unconfigure network_pool: remove a network resource
+
+        Asserts:
+            Resource is no longer assigned after being unconfigured
+        '''
+        pool = proxy.get("/network-pool/pool[name='%s']" % network_pool_name)
+        assigned_ids = [network.id for network in pool.assigned]
+        assert assigned_ids != []  # Assert resource is still assigned
+
+        for assigned_id in assigned_ids:
+            xpath = "/network-pool/pool[name='%s']/assigned[id='%s']" % (network_pool_name, assigned_id)
+            proxy.delete_config(xpath)
+
+    def test_remove_port_resource_from_port_pool(self, proxy, port_pool_name):
+        '''Unconfigure port_pool: remove a port resource
+
+        Asserts:
+            Resource is no longer assigned after being unconfigured
+        '''
+        pool = proxy.get("/port-pool/pool[name='%s']" % port_pool_name)
+        assigned_ids = [port.id for port in pool.assigned]
+        assert assigned_ids != []  # Assert resource is still assigned
+
+        for assigned_id in assigned_ids:
+            xpath = "/port-pool/pool[name='%s']/assigned[id='%s']" % (port_pool_name, assigned_id)
+            proxy.delete_config(xpath)
+
+    def test_delete_vm_pool(self, proxy, vm_pool_name):
+        '''Unconfigure vm_pool'''
+        xpath = "/vm-pool/pool[name='%s']" % vm_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_network_pool(self, proxy, network_pool_name):
+        '''Unconfigure network pool'''
+        xpath = "/network-pool/pool[name='%s']" % network_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_port_pool(self, proxy, port_pool_name):
+        '''Unconfigure port_pool'''
+        xpath = "/port-pool/pool[name='%s']" % port_pool_name
+        proxy.delete_config(xpath)
+
+    def test_delete_cloud_account(self, proxy, cloud_account_name):
+        '''Unconfigure cloud_account'''
+        xpath = "/cloud-account/account[name='%s']" % cloud_account_name
+        proxy.delete_config(xpath)
+
+    def test_create_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type):
+        '''Configure sdn account'''
+        sdn_account = RwMcYang.SDNAccount(
+            name=sdn_account_name,
+            account_type=sdn_account_type)
+        xpath = "/sdn/account[name='%s']" % sdn_account_name
+        proxy.create_config(xpath, sdn_account)
+
+    def test_show_odl_sdn_account(self, proxy, sdn_account_name, sdn_account_type):
+        '''Showing sdn account configuration
+
+        Asserts:
+            sdn_account.account_type is what was configured
+        '''
+        xpath = "/sdn/account[name='%s']" % sdn_account_name
+        sdn_account = proxy.get_config(xpath)
+        assert sdn_account.account_type == sdn_account_type
+
+    def test_delete_odl_sdn_account(self, proxy, sdn_account_name):
+        '''Unconfigure sdn account'''
+        xpath = "/sdn/account[name='%s']" % sdn_account_name
+        proxy.delete_config(xpath)
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg
new file mode 100644
index 0000000..20f1ed3
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/racfg/mission_control_delete_systest_cloudsim.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_MISSION_CONTROL_DELETE_CLOUDSIM",
+  "commandline":"./mission_control_delete_systest",
+  "target_vm":"VM",
+  "test_description":"System test targeting deleting mission control configuration",
+  "run_as_root": true,
+  "status":"working",
+  "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"],
+  "timelimit": 2400,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg
new file mode 100644
index 0000000..cbe7b08
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/racfg/mission_control_reload_systest_openstack.racfg
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_MISSION_CONTROL_RELOAD_OPENSTACK",
+  "commandline":"./mission_control_reload_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ",
+  "test_description":"System test for mission control reload(Openstack)",
+  "run_as_root": false,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg
new file mode 100644
index 0000000..d2131be
--- /dev/null
+++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim.racfg
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_MISSION_CONTROL_CLOUDSIM",
+  "commandline":"./mission_control_systest",
+  "target_vm":"VM",
+  "test_description":"System test for mission control",
+  "run_as_root": true,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","cloudsim"],
+  "timelimit": 1400,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
"memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg new file mode 100644 index 0000000..eda42cd --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_cloudsim_negative.racfg @@ -0,0 +1,19 @@ +{ + "test_name":"TC_MISSION_CONTROL_CLOUDSIM_NEGATIVE", + "commandline":"./mission_control_negative_systest", + "target_vm":"VM", + "test_description":"System test for mission control negative cases", + "run_as_root": true, + "status":"broken", + "keywords":["nightly","smoke"], + "timelimit": 600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg new file mode 100644 index 0000000..8bb59f5 --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_MISSION_CONTROL_OPENSTACK", + "commandline":"./mission_control_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo", + "test_description":"System test for mission control(Openstack)", + "run_as_root": false, + "status":"broken", + "keywords":["nightly","smoke","MANO","openstack"], + "timelimit": 1800, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg new file mode 100644 index 0000000..6db3394 --- /dev/null +++ b/modules/core/mano/rwmc/ra/racfg/mission_control_systest_openstack_negative.racfg @@ -0,0 +1,18 @@ +{ + "test_name":"TC_MISSION_CONTROL_OPENSTACK_NEGATIVE", + "commandline":"./mission_control_negative_systest --cloud-type 'openstack' --cloud-host '10.66.4.115' --sysinfo ", + "test_description":"System test for mission control(Openstack) negative cases", + "run_as_root": false, + "status":"working", + "keywords":["nightly","smoke", "openstack"], + "timelimit": 1600, + "networks":[], + "vms":[ + { + "name": "VM", + "memory": 8192, + "cpus": 4 + } + ] +} + diff --git a/modules/core/mano/rwmc/test/CMakeLists.txt b/modules/core/mano/rwmc/test/CMakeLists.txt new file mode 100644 index 0000000..47b7219 --- /dev/null +++ b/modules/core/mano/rwmc/test/CMakeLists.txt @@ -0,0 +1,15 @@ +# +# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved +# +# Author(s): Joshua Downer +# Author(s): Austin Cormier +# Creation Date: 5/12/2015 +# + +cmake_minimum_required(VERSION 2.8) + +install( + PROGRAMS mission_control.py + DESTINATION usr/rift/systemtest/mission_control + COMPONENT ${PKG_LONG_NAME} + ) diff --git a/modules/core/mano/rwmc/test/README b/modules/core/mano/rwmc/test/README new file mode 100644 index 0000000..2e91ca0 --- /dev/null +++ b/modules/core/mano/rwmc/test/README @@ -0,0 +1,10 @@ +To start mission control run the following command: + +./mission_control.py -m ethsim -c --skip-prepare-vm + +To run the mock +./mission_control.py -m ethsim -c --skip-prepare-vm --mock + +To invoke the mission control UI +http://10.0.106.51:8000/index.html?api_server=http://10.0.106.51#/ + diff --git a/modules/core/mano/rwmc/test/mission_control.py b/modules/core/mano/rwmc/test/mission_control.py new file mode 100755 index 0000000..9d13a3b --- /dev/null +++ 
+++ b/modules/core/mano/rwmc/test/mission_control.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python3

+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+
+import ipaddress
+import logging
+import os
+import shlex
+import socket
+import subprocess
+import sys
+
+import rift.vcs
+import rift.vcs.demo
+import rift.vcs.vms
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+
+logger = logging.getLogger(__name__)
+
+
+class MinionConnectionError(Exception):
+    pass
+
+
+class MissionControlUI(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.MC.UI"):
+        super(MissionControlUI, self).__init__(
+            name=name,
+            exe="./usr/share/rw.ui/webapp/scripts/launch_ui.sh",
+        )
+
+    @property
+    def args(self):
+        return ' '
+
+
+class Demo(rift.vcs.demo.Demo):
+    def __init__(self, use_mock=False, skip_ui=False, disable_cnt_mgr=False):
+
+        procs = [
+            rift.vcs.RiftCli(),
+            rift.vcs.DtsRouterTasklet(),
+            rift.vcs.MsgBrokerTasklet(),
+            rift.vcs.RestconfTasklet(),
+            rift.vcs.Watchdog(),
+            rift.vcs.RestPortForwardTasklet(),
+            rift.vcs.CalProxy(),
+        ]
+
+        if not use_mock:
+            procs.append(rift.vcs.MissionControl())
+            if not disable_cnt_mgr:
+                procs.append(rift.vcs.ContainerManager())
+        else:
+            procs.extend([rift.vcs.CrossbarServer(), rift.vcs.DtsMockServerTasklet()])
+
+        if not skip_ui:
+            procs.extend([MissionControlUI()])
+
+        super(Demo, self).__init__(
+            # Construct the system. This system consists of 1 cluster in 1
+            # colony. The master cluster houses CLI and management VMs
+            sysinfo = rift.vcs.SystemInfo(
+                colonies=[
+                    rift.vcs.Colony(
+                        clusters=[
+                            rift.vcs.Cluster(
+                                name='master',
+                                virtual_machines=[
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-mission-control',
+                                        ip='127.0.0.1',
+                                        tasklets=[
+                                            rift.vcs.uAgentTasklet(),
+                                        ],
+                                        procs=procs,
+                                    ),
+                                ]
+                            )
+                        ]
+                    )
+                ]
+            ),
+
+            # Define the generic portmap.
+            port_map = {},
+
+            # Define a mapping from the placeholder logical names to the real
+            # port names for each of the different modes supported by this demo.
+            port_names = {
+                'ethsim': {
+                },
+                'pci': {
+                }
+            },
+
+            # Define the connectivity between logical port names.
+            port_groups = {},
+        )
+
+
+def check_salt_master_running():
+    cmd = "systemctl status salt-master.service | grep Active | awk '{print $2}'"
+    salt_master_status = subprocess.check_output(cmd, universal_newlines=True, shell=True).rstrip('\n')
+    if salt_master_status != 'active':
+        logger.error("Salt master is not running on the host.")
+        logger.error("Start the salt master (systemctl start salt-master.service) and re-run mission control.")
+        exit(1)
+
+
+def clear_salt_keys():
+    # clear all the previously installed salt keys
+    logger.info("Removing all unconnected salt keys")
+    stdout = subprocess.check_output(
+        shlex.split('salt-run manage.down'),
+        universal_newlines=True,
+    )
+
+    down_minions = stdout.splitlines()
+
+    for line in down_minions:
+        salt_id = line.strip().replace("- ", "")
+        logger.info("Removing old unconnected salt id: %s", salt_id)
+        minion_keys_stdout = subprocess.check_output(
+            shlex.split('salt-key -f {}'.format(salt_id)),
+            universal_newlines=True)
+
+        minion_keys = minion_keys_stdout.splitlines()
+        for key_line in minion_keys:
+            if "Keys" in key_line:
+                continue
+
+            key_split = key_line.split(":")
+            if len(key_split) < 2:
+                continue
+
+            key = key_split[0]
+
+            # Delete the minion key
+            logger.info("Deleting minion %s key: %s", salt_id, key)
+            subprocess.check_call(shlex.split('salt-key -d {} -y'.format(key)))
+
+
+def is_node_connected(node_id):
+    try:
+        stdout = subprocess.check_output(
+            shlex.split('salt %s test.ping' % node_id),
+            universal_newlines=True,
+        )
+    except subprocess.CalledProcessError:
+        msg = "test.ping command failed against node_id: %s" % node_id
+        logger.warning(msg)
+        raise MinionConnectionError(msg)
+
+    up_minions = stdout.splitlines()
+    for line in up_minions:
+        if "True" in line:
+            return True
+
+    return False
+
+def construct_lp_public_ip_env_var(lp_public_ip):
+    ipaddress.IPv4Address(lp_public_ip)
+    os.environ["RIFT_LP_PUBLIC_IP"] = lp_public_ip
+
+def construct_lp_node_env_var(lp_salt_node_ip_ids):
+    format_msg = "--lp-node-id parameter must be in the following format <vm_ip>:<node_id>"
+    env_node_ip_str = ""
+    for node_ip_id in lp_salt_node_ip_ids:
+        if ":" not in node_ip_id:
+            raise ValueError(format_msg)
+
+        ip_id_list = node_ip_id.split(":")
+        if len(ip_id_list) != 2:
+            raise ValueError(format_msg)
+
+        node_ip, node_id = ip_id_list
+
+        # Validate the VM ip address provided
+        ipaddress.IPv4Address(node_ip)
+
+        if not is_node_connected(node_id):
+            logger.warning("Salt minion id %s is not connected", node_id)
+
+        env_node_ip_str += "{}|{}:".format(node_ip, node_id)
+
+    env_node_ip_str = env_node_ip_str.rstrip(":")
+
+    os.environ["RIFT_LP_NODES"] = env_node_ip_str
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(
+        level=logging.INFO,
+        format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser(conflict_handler='resolve')
+
+    parser.add_argument(
+        "--mock",
+        help="Start the DTS mock server",
+        action="store_true",
+    )
+
+    parser.add_argument(
+        "--no-cntr-mgr",
+        action="store_true",
+        help="Disable the container manager"
+    )
+
+    parser.add_argument(
+        "--skip-ui",
+        help="Do not start UI services (MissionControlUI and Composer)",
+        action="store_true",
+    )
+
+    parser.add_argument(
+        "--lp-node-id",
+        help="Use provided vm ip and salt node id's as launchpad VM's if "
+             "no static resources allocated. "
+             "Pass in as <vm_ip>:<node_id>",
+        action='append',
+    )
+
+    parser.add_argument(
+        "--lp-public-ip",
+        help="Use provided vm public/floating ip as launchpad VM's public ip. "
+             "Pass in as <public_ip>",
+    )
+
+
+    args = parser.parse_args(argv)
+
+    # Disable loading any kernel modules for the mission control VM
+    os.environ["NO_KERNEL_MODS"] = "1"
+
+    if args.lp_node_id:
+        construct_lp_node_env_var(args.lp_node_id)
+
+    if args.lp_public_ip:
+        construct_lp_public_ip_env_var(args.lp_public_ip)
+
+    if not args.mock:
+        # Ensure that salt master is running.
+        check_salt_master_running()
+
+        # Clear salt keys to clear out any old/duplicate keys
+        #clear_salt_keys()
+
+        # Initialize the virsh ahead of time to ensure container NAT
+        # is functional. This really should go into cloudsim container
+        # initialization.
+        if not args.no_cntr_mgr:
+            rift.rwcal.cloudsim.net.virsh_initialize_default()
+
+    # load demo info and create Demo object
+    demo = Demo(use_mock=args.mock, skip_ui=args.skip_ui, disable_cnt_mgr=args.no_cntr_mgr)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
+        northbound_listing="cli_rwmc_schema_listing.txt")
+
+    confd_ip = socket.gethostbyname(socket.gethostname())
+    rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-nc.py b/modules/core/mano/rwmc/test/perf/dts-perf-nc.py
new file mode 100755
index 0000000..5f97b9a
--- /dev/null
+++ b/modules/core/mano/rwmc/test/perf/dts-perf-nc.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python2

+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
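+
+# NOTE: this script boots the dts-perf system on a remote host over ssh,
+# waits for confd to accept connections on port 8008, then times batches of
+# netconf operations. Each run_*_perf_test helper below returns elapsed
+# seconds, so a throughput figure is just count/elapsed; for example
+# (numbers illustrative only):
+#
+#     elapsed = run_rpc_perf_test(nc_proxy, num_rpcs=100)
+#     print("rpc rate: %.1f ops/sec" % (100 / elapsed))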
+
+
+#
+#
+
+import subprocess
+import contextlib
+import rift.auto.proxy
+import sys
+import os
+import time
+import rw_peas
+import requests
+import argparse
+import socket
+
+import gi
+gi.require_version('RwMcYang', '1.0')
+gi.require_version('YangModelPlugin', '1.0')
+
+
+
+from gi.repository import RwMcYang
+
+# stress the system using netconf
+
+yang = rw_peas.PeasPlugin('yangmodel_plugin-c', 'YangModelPlugin-1.0')
+yang_model_api = yang.get_interface('Model')
+yang_model = yang_model_api.alloc()
+mc_module = yang_model_api.load_module(yang_model, 'rw-mc')
+
+@contextlib.contextmanager
+def start_system(host, port):
+    print("Starting system")
+
+    # Retrieve the necessary rift paths
+    rift_root = os.environ["RIFT_ROOT"]
+    rift_install = os.environ["RIFT_INSTALL"]
+    rift_artifacts = os.environ["RIFT_ARTIFACTS"]
+
+    cmd = "{RIFT_INSTALL}/demos/dts-perf-system.py -m ethsim -c --ip-list {host} --skip-prepare-vm".format(RIFT_INSTALL=rift_install, host=host)
+    rift_shell_cmd = "sudo {RIFT_ROOT}/rift-shell -e -- {cmd}".format(cmd=cmd, RIFT_ROOT=rift_root)
+    remote_cmd = "shopt -s huponexit; cd {RIFT_ROOT}; {rift_shell_cmd}".format(RIFT_ROOT=rift_root, rift_shell_cmd=rift_shell_cmd)
+    ssh_opt = "-o ConnectTimeout=5 -o StrictHostKeyChecking=no"
+
+    cmd = 'ssh {ssh_opt} {host} -t -t "{remote_cmd}"'.format(
+        ssh_opt=ssh_opt,
+        remote_cmd=remote_cmd,
+        host=host,
+    )
+
+    fout = open(os.path.join(rift_artifacts, "dts-perf.stdout"), "w")
+    ferr = open(os.path.join(rift_artifacts, "dts-perf.stderr"), "w")
+
+    process = subprocess.Popen(
+        cmd,
+        shell=True,
+        stdout=fout,
+        stderr=ferr,
+        stdin=subprocess.PIPE,
+    )
+
+    # Wait for confd to become available
+    while True:
+        try:
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.connect((host, 8008))
+            sock.close()
+            break
+
+        except socket.error:
+            time.sleep(1)
+
+    print("System ready")
+
+    try:
+        yield
+    finally:
+        print("Killing confd")
+        process.terminate()
+        process.wait()
+
+def run_rpc_perf_test(proxy, num_rpcs=1):
+    start_time = time.time()
+
+    for i in range(1, num_rpcs + 1):
+        start = RwMcYang.StartLaunchpadInput()
+        start.federation_name = "lp_%s" % i
+        print(proxy.rpc(start.to_xml(yang_model)))
+
+    stop_time = time.time()
+
+    print("Retrieved %s rpc in %s seconds" % (num_rpcs, stop_time - start_time))
+    return (stop_time - start_time)
+
+
+def run_federation_config_http_perf_test(num_federations=1):
+    session = requests.Session()
+
+    start_time = time.time()
+    for i in range(1, num_federations + 1):
+        req = session.post(
+            url="http://localhost:8008/api/config",
+            json={"federation": {"name": "foo_%s" % i}},
+            headers={'Content-Type': 'application/vnd.yang.data+json'},
+            auth=('admin', 'admin')
+        )
+        req.raise_for_status()
+    stop_time = time.time()
+
+    print("Configured %s federations using restconf in %s seconds" % (num_federations, stop_time - start_time))
+    return (stop_time - start_time)
+
+def run_opdata_get_opdata_perf_test(proxy, num_gets=1):
+    start_time = time.time()
+
+    for i in range(1, num_gets + 1):
+        print(proxy.get_from_xpath(filter_xpath="/opdata"))
+
+    stop_time = time.time()
+    print("Retrieved %s opdata in %s seconds" % (num_gets, stop_time - start_time))
+    return (stop_time - start_time)
+
+def run_federation_config_perf_test(proxy, num_federations=1):
+    start_time = time.time()
+
+    for i in range(1, num_federations + 1):
+        fed = RwMcYang.FederationConfig()
+        fed.name = "foobar_%s" % i
+        print(proxy.merge_config(fed.to_xml(yang_model)))
+
+    stop_time = time.time()
+
+    print("Configured %s federations using netconf in %s seconds" % (num_federations, stop_time - start_time))
+    return (stop_time - start_time)
+
+def run_federation_get_config_perf_test(proxy, num_gets=1):
+    start_time = time.time()
+
+    for i in range(1, num_gets + 1):
+        print(proxy.get_config(filter_xpath="/federation"))
+
+    stop_time = time.time()
+
+    print("Retrieved %s federations in %s seconds" % (num_gets, stop_time - start_time))
+    return (stop_time - start_time)
+
+def main(argv=sys.argv[1:]):
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--host', required=True)
+    parser.add_argument('--port', type=int, default=8888)
+    parser.add_argument('--output', default='dts-perf-results.tsv')
+    parser.add_argument('--uri', default="/federation")
+    parser.add_argument('--num-conn', type=int, default=5000)
+    parser.add_argument('--timeout', type=int, default=5)
+    parser.add_argument('--low-rate', type=int, default=20)
+    parser.add_argument('--high-rate', type=int, default=200)
+    parser.add_argument('--rate-step', type=int, default=20)
+
+    args = parser.parse_args(argv)
+
+    with start_system(args.host, args.port):
+        nc_proxy = rift.auto.proxy.NetconfProxy()
+        nc_proxy.connect()
+        n_fed = 10
+        n_fed_get = 100
+        n_opdata_get = 100
+        n_rpc = 100
+        config_time = run_federation_config_perf_test(nc_proxy, num_federations=n_fed)
+        config_get_time = run_federation_get_config_perf_test(nc_proxy, num_gets=n_fed_get)
+        opdata_get_time = run_opdata_get_opdata_perf_test(nc_proxy, num_gets=n_opdata_get)
+        rpc_time = run_rpc_perf_test(nc_proxy, num_rpcs=n_rpc)
+
+        print("")
+        print("..............................................")
+        print("CONFD Performance Results Using Netconf Client")
+        print("..............................................")
+        print("Rate of config writes: %d" % (n_fed/config_time))
+        print("Rate of config reads : %d" % (n_fed_get/config_get_time))
+        print("Rate of opdata reads : %d" % (n_opdata_get/opdata_get_time))
+        print("Rate of rpc calls    : %d" % (n_rpc/rpc_time))
+        print("* Config read is reading a list with %d entries" % n_fed)
+        print("* Opdata read is reading a list with 5 entries")
+        print("..............................................")
+
+if __name__ == "__main__":
+    if "RIFT_ROOT" not in os.environ:
+        print("Must be in rift shell to run.")
+        sys.exit(1)
+
+    os.chdir(os.environ["RIFT_INSTALL"])
+    main()
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-system.py b/modules/core/mano/rwmc/test/perf/dts-perf-system.py
new file mode 100755
index 0000000..741001c
--- /dev/null
+++ b/modules/core/mano/rwmc/test/perf/dts-perf-system.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python

+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
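+
+# NOTE: this demo script assembles a single-VM rift.vcs system that runs the
+# uAgent alongside the CLI, mission control, DTS router, and message broker
+# processes plus the dts-perf webserver; the first entry of --ip-list is
+# reused as the webserver's bind host (see the @HACK notes in main() below).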
+
+
+#
+#
+
+
+import logging
+import os
+import sys
+
+import rift.vcs
+import rift.vcs.demo
+import rift.vcs.vms
+
+logger = logging.getLogger(__name__)
+
+class Webserver(rift.vcs.NativeProcess):
+    def __init__(self, host, name="rw.perf.webserver"):
+        super(Webserver, self).__init__(
+            name=name,
+            exe="./usr/local/bin/dts-perf-webserver.py",
+            args="--host={}".format(host),
+        )
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # @HACK over-ride the mode (it is not important for this system)
+    argv.extend(('--mode', 'ethsim'))
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser(conflict_handler='resolve')
+
+    args = parser.parse_args(argv)
+
+    # @HACK There should be one host IP provided; use it as the host of the
+    # webserver.
+    host = args.ip_list[0]
+
+    # Construct the system. This system consists of 1 cluster in 1
+    # colony. The master cluster houses CLI and management VMs
+    sysinfo = rift.vcs.SystemInfo(
+        colonies=[
+            rift.vcs.Colony(
+                clusters=[
+                    rift.vcs.Cluster(
+                        name='master',
+                        virtual_machines=[
+                            rift.vcs.VirtualMachine(
+                                name='vm-mission-control',
+                                ip='127.0.0.1',
+                                tasklets=[
+                                    rift.vcs.uAgentTasklet(),
+                                ],
+                                procs=[
+                                    rift.vcs.CliTasklet(),
+                                    rift.vcs.MissionControl(),
+                                    rift.vcs.DtsRouterTasklet(),
+                                    rift.vcs.MsgBrokerTasklet(),
+                                    Webserver(host),
+                                ],
+                            ),
+                        ]
+                    )
+                ]
+            )
+        ]
+    )
+
+
+    # Define the generic portmap.
+    port_map = {}
+
+    # Define a mapping from the placeholder logical names to the real
+    # port names for each of the different modes supported by this demo.
+    port_names = {
+        'ethsim': {
+        },
+        'pci': {
+        }
+    }
+
+    # Define the connectivity between logical port names.
+    port_groups = {}
+
+    # load demo info and create Demo object
+    demo = rift.vcs.demo.Demo(sysinfo=sysinfo,
+                              port_map=port_map,
+                              port_names=port_names,
+                              port_groups=port_groups)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
+        northbound_listing="cli_rwfpath_schema_listing.txt")
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
\ No newline at end of file
diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-test.py b/modules/core/mano/rwmc/test/perf/dts-perf-test.py
new file mode 100755
index 0000000..71d989b
--- /dev/null
+++ b/modules/core/mano/rwmc/test/perf/dts-perf-test.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python

+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
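+
+# NOTE: this driver checks that the autobench and httperf binaries are
+# installed, launches the remote dts-perf system over ssh, and then shells
+# out to autobench. str(Autobench(...)) yields the full command line; with
+# the argparse defaults below and an illustrative host of 10.0.0.1 it would
+# be (wrapped here for readability):
+#
+#     autobench --single_host --host1 10.0.0.1 --port1 8888
+#         --uri1 /federation --file dts-perf-results.tsv --num_conn 5000
+#         --timeout 5 --low_rate 20 --high_rate 200 --rate_step 20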
+
+
+#
+#
+
+
+import argparse
+import collections
+import os
+import socket
+import sys
+import subprocess
+import time
+
+
+class ProcessError(Exception):
+    pass
+
+
+def check_dependency(package):
+    requirement = subprocess.Popen(
+        'which {}'.format(package),
+        shell=True,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+
+    _, stderr = requirement.communicate()
+    requirement.wait()
+
+    if stderr:
+        print("'{}' is required to test the system".format(package))
+        sys.exit(1)
+
+
+class Autobench(collections.namedtuple(
+        "Autobench", [
+            "host",
+            "port",
+            "uri",
+            "file",
+            "num_connections",
+            "timeout",
+            "low_rate",
+            "high_rate",
+            "rate_step",
+        ]
+    )):
+    def __repr__(self):
+        args = [
+            "autobench --single_host",
+            "--host1 {}".format(self.host),
+            "--port1 {}".format(self.port),
+            "--uri1 {}".format(self.uri),
+            "--file {}".format(self.file),
+            "--num_conn {}".format(self.num_connections),
+            "--timeout {}".format(self.timeout),
+            "--low_rate {}".format(self.low_rate),
+            "--high_rate {}".format(self.high_rate),
+            "--rate_step {}".format(self.rate_step),
+        ]
+
+        return ' '.join(args)
+
+
+def launch_remote_system(host, port, autobench):
+    # Check dependencies
+    check_dependency('autobench')
+    check_dependency('httperf')
+
+    # Retrieve the necessary rift paths
+    rift_root = os.environ["RIFT_ROOT"]
+    rift_install = os.environ["RIFT_INSTALL"]
+    rift_artifacts = os.environ["RIFT_ARTIFACTS"]
+
+    cmd = "{RIFT_INSTALL}/demos/dts-perf-system.py -m ethsim --ip-list {host} --skip-prepare-vm".format(RIFT_INSTALL=rift_install, host=host)
+    rift_shell_cmd = "sudo {RIFT_ROOT}/rift-shell -e -- {cmd}".format(cmd=cmd, RIFT_ROOT=rift_root)
+    remote_cmd = "shopt -s huponexit; cd {RIFT_ROOT}; {rift_shell_cmd}".format(RIFT_ROOT=rift_root, rift_shell_cmd=rift_shell_cmd)
+    ssh_opt = "-o ConnectTimeout=5 -o StrictHostKeyChecking=no"
+
+    cmd = 'ssh {ssh_opt} {host} -t -t "{remote_cmd}"'.format(
+        ssh_opt=ssh_opt,
+        remote_cmd=remote_cmd,
+        host=host,
+    )
+
+    try:
+        print('starting system')
+
+        fout = open(os.path.join(rift_artifacts, "dts-perf.stdout"), "w")
+        ferr = open(os.path.join(rift_artifacts, "dts-perf.stderr"), "w")
+
+        process = subprocess.Popen(
+            cmd,
+            shell=True,
+            stdout=fout,
+            stderr=ferr,
+            stdin=subprocess.PIPE,
+        )
+
+        # Wait for confd to become available
+        while True:
+            try:
+                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                sock.connect((host, 8008))
+                sock.close()
+                break
+
+            except socket.error:
+                time.sleep(1)
+
+        print('system ready')
+
+        # Launch autobench on another process
+        print('testing started')
+        test = subprocess.Popen(
+            str(autobench),
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+
+        (stdout, stderr) = test.communicate()
+        test.wait()
+
+        if test.stderr is not None:
+            print(stderr)
+
+        print('testing complete')
+
+    except Exception as e:
+        print(str(e))
+
+    finally:
+        process.terminate()
+        process.wait()
+
+        fout.close()
+        ferr.close()
+
+
+def main(argv=sys.argv[1:]):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--host', required=True)
+    parser.add_argument('--port', type=int, default=8888)
+    parser.add_argument('--output', default='dts-perf-results.tsv')
+    parser.add_argument('--uri', default="/federation")
+    parser.add_argument('--num-conn', type=int, default=5000)
+    parser.add_argument('--timeout', type=int, default=5)
+    parser.add_argument('--low-rate', type=int, default=20)
+    parser.add_argument('--high-rate', type=int, default=200)
+    parser.add_argument('--rate-step', type=int, default=20)
+
parser.parse_args(argv) + + autobench = Autobench( + host=args.host, + port=args.port, + uri=args.uri, + file=args.output, + num_connections=args.num_conn, + timeout=args.timeout, + low_rate=args.low_rate, + high_rate=args.high_rate, + rate_step=args.rate_step, + ) + + launch_remote_system(args.host, args.port, autobench) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/dts-perf-webserver.py b/modules/core/mano/rwmc/test/perf/dts-perf-webserver.py new file mode 100755 index 0000000..e178f4d --- /dev/null +++ b/modules/core/mano/rwmc/test/perf/dts-perf-webserver.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python + +# Copyright 2016 RIFT.IO Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# + + +import uuid +import sys + +import json +import requests +import tornado.ioloop +import tornado.options +from tornado.options import options +import tornado.web +import tornado.escape + + + +class FederationHandler(tornado.web.RequestHandler): + def get(self): + headers = {'content-type': 'application/vnd.yang.data+json'} + name = str(uuid.uuid4().hex) + auth = ('admin', 'admin') + data = json.dumps({'federation': {'name': name}}) + url = "http://{host}:8008/api/config".format(host=options.host) + + response = requests.post(url, headers=headers, auth=auth, data=data) + if not response.ok: + print(response.status_code, response.reason) + print(response.text) + + +class OperationalHandler(tornado.web.RequestHandler): + def get(self): + headers = {'content-type': 'application/vnd.yang.operational+json'} + auth = ('admin', 'admin') + url = "http://{host}:8008/api/operational/federation".format(host=options.host) + + response = requests.get(url, headers=headers, auth=auth) + if not response.ok: + print(response.status_code, response.reason) + print(response.text) + + +class ConfigHandler(tornado.web.RequestHandler): + def get(self): + headers = {'content-type': 'application/vnd.yang.config+json'} + auth = ('admin', 'admin') + url = "http://{host}:8008/api/config/".format(host=options.host) + + response = requests.get(url, headers=headers, auth=auth) + if not response.ok: + print(response.status_code, response.reason) + print(response.text) + + + +def main(): + tornado.options.define("host") + + try: + tornado.options.parse_command_line() + + if options.host is None: + raise tornado.options.Error('A host must be specified') + + app = tornado.web.Application([ + (r"/federation", FederationHandler), + (r"/operational", OperationalHandler), + (r"/config", ConfigHandler), + ]) + + app.listen(8888) + tornado.ioloop.IOLoop.current().start() + + except tornado.options.Error as e: + print("{}\n\n".format(str(e))) + tornado.options.print_help() + sys.exit(1) + + except Exception as e: + print(str(e)) + sys.exit(1) + + except (KeyboardInterrupt, SystemExit): + pass + + finally: + tornado.ioloop.IOLoop.current().stop() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/modules/core/mano/rwmc/test/perf/test.sh 
b/modules/core/mano/rwmc/test/perf/test.sh
new file mode 100755
index 0000000..938328c
--- /dev/null
+++ b/modules/core/mano/rwmc/test/perf/test.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+#
+
+# This script tests the throughput of NETCONF get operations.
+# Adjust the iter and loop variables below to change the load.
+
+NETCONF_CONSOLE_DIR=${RIFT_ROOT}/.install/usr/local/confd/bin
+
+iter=100
+loop=30
+
+for i in `seq 1 $loop`;
+do
+    echo "Background script $i"
+    ${NETCONF_CONSOLE_DIR}/netconf-console-tcp -s all --iter=$iter --get -x /opdata&
+done
+
+wait
+
+total=$(($iter * $loop))
+echo "Total number of netconf operations=$total"
\ No newline at end of file
diff --git a/modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt b/modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt
new file mode 100644
index 0000000..3031b19
--- /dev/null
+++ b/modules/core/mano/rwso/plugins/cli/cli_so_schema_listing.txt
@@ -0,0 +1,31 @@
+rw-base
+rw-mgmtagt
+rw-manifest
+rw-vcs
+rwlog-mgmt
+rw-dts
+rwmsg-data
+rw-dtsperf
+rwshell-mgmt
+rw-debug
+rw-dtsperfmgr
+rw-memlog
+mano-base
+rw-sorch
+rw-restportforward
+mano-types
+rw-yang-types
+rw-log
+rwvcs-types
+rw-netconf
+rwcal
+rw-pb-ext
+rw-notify-ext
+rw-mgmt-schema
+rw-cli-ext
+ietf-inet-types
+ietf-yang-types
+vnfr
+nsr
+ietf-restconf-monitoring
+ietf-netconf-notifications
diff --git a/modules/ui/composer/CMakeLists.txt b/modules/ui/composer/CMakeLists.txt
new file mode 100644
index 0000000..2331bd0
--- /dev/null
+++ b/modules/ui/composer/CMakeLists.txt
@@ -0,0 +1,62 @@
+#
+# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
+#
+# Author(s): Kiran Kashalkar
+# Creation Date: 08/18/2015
+#
+
+##
+# DEPENDENCY ALERT
+# The submodule dependencies must be specified in the
+# .gitmodules.deps file at the top level (supermodule) directory.
+# If this submodule depends on other submodules, remember to update
+# the .gitmodules.deps file.
+##
+
+cmake_minimum_required(VERSION 2.8)
+
+##
+# Set the path to the top level cmake modules directory
+##
+set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/modules")
+
+##
+# DO NOT add any code before this and DO NOT
+# include this file anywhere else
+##
+include(rift_submodule)
+
+##
+# Submodule specific includes will go here,
+# These are specified here, since these variables are accessed
+# from multiple sub directories. If the variable is subdirectory
+# specific it must be declared in the subdirectory.
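+#
+# (The rift_externalproject_add() call below drives the webapp's npm/grunt
+# build: build.sh produces the dist/ bundle and install.sh copies dist/ and
+# scripts/ into ${CMAKE_INSTALL_PREFIX}/usr/share/composer and into the
+# build cache.)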
+##
+
+rift_externalproject_add(
+  webapp
+  SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/webapp
+  CONFIGURE_COMMAND echo
+  BUILD_COMMAND
+    ${CMAKE_CURRENT_BINARY_DIR}/webapp/webapp-build/scripts/build.sh
+  INSTALL_COMMAND
+    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/install.sh
+    ${CMAKE_CURRENT_BINARY_DIR}/webapp/webapp-build
+    ${CMAKE_INSTALL_PREFIX}/usr/share/composer
+    ${RIFT_SUBMODULE_INSTALL_PREFIX}/webapp/${CMAKE_INSTALL_PREFIX}/usr/share/composer
+
+  BCACHE_COMMAND echo
+)
+
+##
+# Include the subdirs
+##
+#set(subdirs
+#  api
+#  )
+#rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+##
+# This macro adds targets for documentation, unittests, code coverage and packaging
+##
+rift_add_submodule_targets(SUBMODULE_PACKAGE_NAME "composer")
diff --git a/modules/ui/composer/foss.txt b/modules/ui/composer/foss.txt
new file mode 100644
index 0000000..e69de29
diff --git a/modules/ui/composer/manifest/LICENSE b/modules/ui/composer/manifest/LICENSE
new file mode 100644
index 0000000..e69de29
diff --git a/modules/ui/composer/scripts/.install.sh.swp b/modules/ui/composer/scripts/.install.sh.swp
new file mode 100644
index 0000000..96fed75
Binary files /dev/null and b/modules/ui/composer/scripts/.install.sh.swp differ
diff --git a/modules/ui/composer/scripts/install.sh b/modules/ui/composer/scripts/install.sh
new file mode 100755
index 0000000..a52a3fb
--- /dev/null
+++ b/modules/ui/composer/scripts/install.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
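+# Arguments (as passed by the INSTALL_COMMAND in modules/ui/composer/CMakeLists.txt):
+#   $1 - webapp build directory containing the dist/ and scripts/ trees
+#   $2 - install destination (usr/share/composer under the install prefix)
+#   $3 - build-cache destination that mirrors the same layout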
+# + +# +# + + +source_dir=$1 +dest_dir=$2 +bcache_dir=$3 + +mkdir -p $dest_dir +mkdir -p $bcache_dir +cp -Lrf $source_dir/dist $dest_dir +cp -Lrf $source_dir/scripts $dest_dir +cp -Lrf $source_dir/dist $bcache_dir +cp -Lrf $source_dir/scripts $bcache_dir \ No newline at end of file diff --git a/modules/ui/composer/webapp/.editorconfig b/modules/ui/composer/webapp/.editorconfig new file mode 100644 index 0000000..c308ed0 --- /dev/null +++ b/modules/ui/composer/webapp/.editorconfig @@ -0,0 +1,13 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +trim_trailing_whitespace = false diff --git a/modules/ui/composer/webapp/.eslintignore b/modules/ui/composer/webapp/.eslintignore new file mode 100644 index 0000000..f89c3b3 --- /dev/null +++ b/modules/ui/composer/webapp/.eslintignore @@ -0,0 +1 @@ +react/ \ No newline at end of file diff --git a/modules/ui/composer/webapp/.eslintrc b/modules/ui/composer/webapp/.eslintrc new file mode 100644 index 0000000..7c4493d --- /dev/null +++ b/modules/ui/composer/webapp/.eslintrc @@ -0,0 +1,34 @@ +{ + "plugins": [ + "react" + ], + "ecmaFeatures": { + "jsx": true, + "modules": true + }, + "env": { + "browser": true, + "amd": true, + "es6": true + }, + "globals": { + "module": 1 + }, + "rules": { + "quotes": [ + 1, + "single" + ], + "no-undef": 2, + "global-strict": 0, + "no-extra-semi": 1, + "no-underscore-dangle": 0, + "no-console": 0, + "no-alert": 0, + "no-debugger": 0, + "indent": [ + 2, + "tab" + ] + } +} diff --git a/modules/ui/composer/webapp/.gitattributes b/modules/ui/composer/webapp/.gitattributes new file mode 100644 index 0000000..176a458 --- /dev/null +++ b/modules/ui/composer/webapp/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/modules/ui/composer/webapp/.yo-rc.json b/modules/ui/composer/webapp/.yo-rc.json new file mode 100644 index 0000000..fc40714 --- /dev/null +++ b/modules/ui/composer/webapp/.yo-rc.json @@ -0,0 +1,8 @@ +{ + "generator-react-webpack": { + "app-name": "composer", + "architecture": "flux", + "styles-language": "scss", + "component-suffix": "js" + } +} \ No newline at end of file diff --git a/modules/ui/composer/webapp/Gruntfile.js b/modules/ui/composer/webapp/Gruntfile.js new file mode 100644 index 0000000..f253ae3 --- /dev/null +++ b/modules/ui/composer/webapp/Gruntfile.js @@ -0,0 +1,172 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +/* + * + * + */ +'use strict'; + +var mountFolder = function (connect, dir) { + return connect.static(require('path').resolve(dir)); +}; + +var webpackDistConfig = require('./webpack.dist.config.js'), + webpackDevConfig = require('./webpack.config.js'); + +module.exports = function (grunt) { + // Let *load-grunt-tasks* require everything + require('load-grunt-tasks')(grunt); + + // Read configuration from package.json + var pkgConfig = grunt.file.readJSON('package.json'); + + grunt.initConfig({ + pkg: pkgConfig, + + version: { + project: { + src: ['package.json'] + }, + src: { + options: { + prefix: 'semver ' + }, + src: ['src/**/*.js'] + } + }, + + webpack: { + options: webpackDistConfig, + dist: { + cache: false + } + }, + + 'webpack-dev-server': { + options: { + hot: true, + port: 9000, + webpack: webpackDevConfig, + publicPath: '/assets/', + contentBase: './<%= pkg.src %>/' + }, + + start: { + keepAlive: true + } + }, + + connect: { + options: { + port: 9000 + }, + + dist: { + options: { + keepalive: true, + middleware: function (connect) { + return [ + mountFolder(connect, pkgConfig.dist) + ]; + } + } + } + }, + + open: { + options: { + delay: 500 + }, + dev: { + path: 'http://localhost:<%= connect.options.port %>/webpack-dev-server/' + }, + dist: { + path: 'http://localhost:<%= connect.options.port %>/' + } + }, + + karma: { + unit: { + configFile: 'karma.conf.js' + } + }, + + copy: { + dist: { + files: [ + // includes files within path + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/*'], + dest: '<%= pkg.dist %>/', + filter: 'isFile' + }, + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/images/*'], + dest: '<%= pkg.dist %>/images/' + }, + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/images/logos/*'], + dest: '<%= pkg.dist %>/images/logos/' + }, + { + flatten: true, + expand: true, + src: ['<%= pkg.src %>/assets/*'], + dest: '<%= pkg.dist %>/assets/' + } + ] + } + }, + + clean: { + dist: { + files: [{ + dot: true, + src: [ + '<%= pkg.dist %>' + ] + }] + } + } + }); + + grunt.registerTask('serve', function (target) { + if (target === 'dist') { + return grunt.task.run(['build', 'open:dist', 'connect:dist']); + } + + grunt.task.run([ + 'open:dev', + 'webpack-dev-server' + ]); + }); + + grunt.registerTask('patch', ['version:project:patch', 'version:src', 'build:dist']); + + grunt.registerTask('test', ['karma']); + + grunt.registerTask('build', ['clean', 'copy', 'webpack']); + + grunt.registerTask('default', []); +}; diff --git a/modules/ui/composer/webapp/README.md b/modules/ui/composer/webapp/README.md new file mode 100644 index 0000000..94bbe81 --- /dev/null +++ b/modules/ui/composer/webapp/README.md @@ -0,0 +1,46 @@ +RIFT.io UI +=== +Currently this repo only contains one module. + +# Development Setup + +## Requirements + +``` +npm install -g babel +npm install -g grunt +``` + +## Helpful + +``` +npm install -g yo +npm install -g generator-react-webpack # https://github.com/newtriks/generator-react-webpack +``` + +# Build Steps + +``` +npm install +grunt build:dist # production build +grunt build # dev build +grunt test # run tests +``` + +# Development Steps + +``` +grunt serve # start webpack dev server source-maps +grunt serve:dist # start dev server with dist runtime +``` + +## Known Issues +`grunt serve:dist` fails for unknown reason. 
Workaround: use `python -m SimpleHTTPServer 8099` to serve the dist build.
+
+# Useful Libs
+
+- [http://numeraljs.com/](http://numeraljs.com/)
+
+- [http://momentjs.com/docs/](http://momentjs.com/docs/)
+
+# How the Code Works
+
+See `./src/README.md`.
diff --git a/modules/ui/composer/webapp/codeStyleSettings.xml b/modules/ui/composer/webapp/codeStyleSettings.xml
new file mode 100644
index 0000000..9d12ff8
--- /dev/null
+++ b/modules/ui/composer/webapp/codeStyleSettings.xml
@@ -0,0 +1,95 @@
+ + + + + +
\ No newline at end of file
diff --git a/modules/ui/composer/webapp/karma.conf.js b/modules/ui/composer/webapp/karma.conf.js
new file mode 100644
index 0000000..db495be
--- /dev/null
+++ b/modules/ui/composer/webapp/karma.conf.js
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ *
+ *
+ */
+'use strict';
+
+var path = require('path');
+
+module.exports = function (config) {
+	config.set({
+		basePath: '',
+		frameworks: ['jasmine', 'es6-shim'],
+		files: [
+			'test/spec/**/*.js'
+		],
+		preprocessors: {
+			'test/spec/**/*.js': ['webpack']
+		},
+		webpack: require('./webpack.config.js'),
+		webpackMiddleware: {
+			noInfo: true,
+			stats: {
+				colors: true
+			}
+		},
+		webpackServer: {
+			noInfo: true // please don't spam the console when running in karma!
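+			// (karma-phantomjs-launcher is loaded in the plugins list below, so
+			// switching browsers to ['PhantomJS'] should allow headless runs, e.g. in CI)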
+ }, + exclude: [], + port: 8080, + logLevel: config.LOG_INFO, + colors: true, + autoWatch: true, + browsers: ['Chrome'], + reporters: ['dots'], + captureTimeout: 60000, + singleRun: false, + plugins: [ + require('karma-webpack'), + require('karma-jasmine'), + require('karma-chrome-launcher'), + require('karma-phantomjs-launcher'), + require('karma-es6-shim') + ] + }); +}; diff --git a/modules/ui/composer/webapp/license-flat-icon.pdf b/modules/ui/composer/webapp/license-flat-icon.pdf new file mode 100755 index 0000000..4969d07 Binary files /dev/null and b/modules/ui/composer/webapp/license-flat-icon.pdf differ diff --git a/modules/ui/composer/webapp/license-info.txt b/modules/ui/composer/webapp/license-info.txt new file mode 100644 index 0000000..7554df7 --- /dev/null +++ b/modules/ui/composer/webapp/license-info.txt @@ -0,0 +1,2 @@ + +flat-icon-license.pdf - license for the icons used in this project diff --git a/modules/ui/composer/webapp/package.json b/modules/ui/composer/webapp/package.json new file mode 100644 index 0000000..550c395 --- /dev/null +++ b/modules/ui/composer/webapp/package.json @@ -0,0 +1,84 @@ +{ + "name": "composer", + "version": "0.0.79", + "description": "", + "repository": "", + "private": true, + "src": "src", + "test": "test", + "dist": "dist", + "mainInput": "ComposerApp", + "mainOutput": "main", + "dependencies": { + "alt": "^0.17.9", + "babel-polyfill": "^6.2.0", + "change-case": "^2.3.0", + "classnames": "^2.2.1", + "d3": "^3.5.10", + "dropzone": "^4.2.0", + "es5-shim": "^4.3.1", + "events": "^1.1.0", + "flux": "^2.1.1", + "grunt-cli": "~0.1.13", + "jquery": "^2.1.4", + "loaders.css": "^0.1.1", + "lodash": "^3.10.1", + "moment": "^2.10.6", + "normalize.css": "^3.0.3", + "numeral": "^1.5.3", + "object-assign": "^4.0.1", + "open-iconic": "^1.1.1", + "prismjs": "^1.3.0", + "react": "^0.14.7", + "react-addons-css-transition-group": "^0.14.7", + "react-addons-pure-render-mixin": "^0.14.7", + "react-crouton": "~0.2.7", + "react-dom": "^0.14.3", + "react-popout": "^0.4.0" + }, + "devDependencies": { + "babel": "^6.1.18", + "babel-core": "^6.2.1", + "babel-loader": "^6.2.0", + "babel-preset-es2015": "^6.1.18", + "babel-preset-react": "^6.1.18", + "css-loader": "^0.23.0", + "eslint": "^1.10.2", + "eslint-loader": "^1.1.1", + "eslint-plugin-react": "^3.11.1", + "grunt": "^0.4.5", + "grunt-contrib-clean": "^0.7.0", + "grunt-contrib-connect": "^0.11.2", + "grunt-contrib-copy": "^0.8.2", + "grunt-karma": "^0.12.1", + "grunt-open": "^0.2.3", + "grunt-version": "^1.0.0", + "grunt-webpack": "^1.0.11", + "image-webpack-loader": "^1.6.2", + "imagemin": "^4.0.0", + "install": "^0.4.2", + "jasmine-core": "^2.4.1", + "json-loader": "^0.5.4", + "karma": "^0.13.15", + "karma-chrome-launcher": "^0.2.2", + "karma-es6-shim": "^0.2.3", + "karma-jasmine": "^0.3.6", + "karma-phantomjs-launcher": "^0.2.1", + "karma-phantomjs-shim": "^1.2.0", + "karma-script-launcher": "^0.1.0", + "karma-sourcemap-loader": "^0.3.7", + "karma-webpack": "^1.7.0", + "load-grunt-tasks": "^3.3.0", + "node-sass": "^3.4.2", + "npm": "^3.7.1", + "phantomjs": "^1.9.19", + "react-addons-test-utils": "^0.14.7", + "react-hot-loader": "^1.3.0", + "sass-loader": "^3.1.2", + "style-loader": "^0.13.0", + "uninstall": "0.0.0-reserved", + "url-loader": "^0.5.7", + "webpack": "^1.12.9", + "webpack-dev-server": "^1.14.0" + } +} diff --git a/modules/ui/composer/webapp/scripts/build.sh b/modules/ui/composer/webapp/scripts/build.sh new file mode 100755 index 0000000..02aa73b --- /dev/null +++ 
b/modules/ui/composer/webapp/scripts/build.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+#
+
+
+# change to the directory of this script
+THIS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+cd $THIS_DIR
+cd ..
+
+echo "DIR is"
+pwd
+echo "Building composer webapp"
+npm install
+./node_modules/.bin/grunt build
\ No newline at end of file
diff --git a/modules/ui/composer/webapp/scripts/launch_composer.sh b/modules/ui/composer/webapp/scripts/launch_composer.sh
new file mode 100755
index 0000000..11e2aaa
--- /dev/null
+++ b/modules/ui/composer/webapp/scripts/launch_composer.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+#
+
+usage() {
+    echo "usage: launch_composer.sh [--enable-https --keyfile-path= --certfile-path=]"
+}
+
+start_servers() {
+    cd $THIS_DIR
+    echo "Killing any previous instance of server_composer_ui.py"
+    ps -ef | awk '/[s]cripts\/server_composer_ui.py/{print $2}' | xargs kill -9
+
+    echo "Running Python webserver. HTTPS Enabled: ${ENABLE_HTTPS}"
+    cd ../dist
+    if [ ! -z "${ENABLE_HTTPS}" ]; then
+        ../scripts/server_composer_ui.py --enable-https --keyfile-path="${KEYFILE_PATH}" --certfile-path="${CERTFILE_PATH}"&
+    else
+        # run in the background, like the HTTPS branch; the loop below keeps
+        # this script alive
+        ../scripts/server_composer_ui.py &
+    fi
+}
+
+# Begin work
+for i in "$@"
+do
+case $i in
+    -k=*|--keyfile-path=*)
+    KEYFILE_PATH="${i#*=}"
+    shift # past argument=value
+    ;;
+    -c=*|--certfile-path=*)
+    CERTFILE_PATH="${i#*=}"
+    shift # past argument=value
+    ;;
+    -h|--help)
+    usage
+    exit
+    ;;
+    -e|--enable-https)
+    ENABLE_HTTPS=YES
+    shift # past argument=value
+    ;;
+    *)
+    # unknown option
+    ;;
+esac
+done
+
+if [[ ! -z "${ENABLE_HTTPS}" ]]; then
+    if [ -z "${KEYFILE_PATH}" ] || [ -z "${CERTFILE_PATH}" ]; then
+        usage
+        exit
+    fi
+fi
+
+# change to the directory of this script
+THIS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+# Call function to start web and API servers
+start_servers
+
+while true; do
+    sleep 5
+done
\ No newline at end of file
diff --git a/modules/ui/composer/webapp/scripts/server_composer_ui.py b/modules/ui/composer/webapp/scripts/server_composer_ui.py
new file mode 100755
index 0000000..e0dd7b8
--- /dev/null
+++ b/modules/ui/composer/webapp/scripts/server_composer_ui.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+#
+
+from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
+import socketserver
+import mimetypes
+import argparse
+import sys
+import os
+import ssl
+
+PORT = 9000
+
+DEFAULT_ENABLE_HTTPS = False
+DEFAULT_KEYFILE_PATH = None
+DEFAULT_CERTFILE_PATH = None
+
+def start_server(
+        port=PORT,
+        enable_https=DEFAULT_ENABLE_HTTPS,
+        keyfile_path=DEFAULT_KEYFILE_PATH,
+        certfile_path=DEFAULT_CERTFILE_PATH):
+    Handler = SimpleHTTPRequestHandler
+    Handler.extensions_map['.svg'] = 'image/svg+xml'
+    httpd = socketserver.TCPServer(('', port), Handler)
+
+    if enable_https:
+
+        httpd.socket = ssl.wrap_socket(httpd.socket,
+                server_side=True,
+                certfile=certfile_path,
+                keyfile=keyfile_path)
+
+    print("Serving at port: {}. HTTPS Enabled: {}".format(port, enable_https))
+    httpd.serve_forever()
+
+
+def main(argv=sys.argv[1:]):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-p", "--port",
+            type=int,
+            default=PORT,
+            help="Run on the given port")
+    parser.add_argument("-e", "--enable-https",
+            action="store_true",
+            default=False,
+            help="Enable HTTPS. Make sure certfile-path and keyfile-path are also specified")
+    parser.add_argument("-k", "--keyfile-path",
+            default=DEFAULT_KEYFILE_PATH,
+            help="Path to the key file")
+    parser.add_argument("-c", "--certfile-path",
+            default=DEFAULT_CERTFILE_PATH,
+            help="Path to the cert file")
+
+    args = parser.parse_args()
+
+    # To use the debugger, uncomment this line before the code you want to step through:
+    #import pdb; pdb.set_trace()
+
+    if args.enable_https:
+        if not (args.keyfile_path and args.certfile_path):
+            parser.print_help()
+            sys.exit(2)
+
+    # pass the parsed port through; previously --port was parsed but ignored
+    start_server(args.port, args.enable_https, args.keyfile_path, args.certfile_path)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/modules/ui/composer/webapp/scripts/update-node-modules.sh b/modules/ui/composer/webapp/scripts/update-node-modules.sh
new file mode 100755
index 0000000..11b7789
--- /dev/null
+++ b/modules/ui/composer/webapp/scripts/update-node-modules.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
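+# What this script does, in order: clean the npm cache, delete the existing
+# node_modules tree, reinstall the runtime and dev dependencies one package
+# at a time (the install order matters), and finish with a build and a
+# dev-server run.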
+#
+
+#
+#
+
+
+# the order of the install is important
+
+#npm install -g grunt-cli
+
+npm cache clean
+
+rm -R node_modules
+
+# dependencies
+npm install --save alt
+npm install --save change-case
+npm install --save classnames
+npm install --save d3
+npm install --save dropzone
+npm install --save es5-shim
+npm install --save events
+npm install --save flux
+npm install --save highlight.js
+npm install --save jquery
+npm install --save lodash
+npm install --save moment
+npm install --save normalize.css
+npm install --save numeral
+npm install --save object-assign
+npm install --save react
+npm install --save react-dom
+npm install --save react-addons-pure-render-mixin
+npm install --save react-highlight
+npm install --save react-tooltip
+npm install --save babel-polyfill
+
+# dev-dependencies
+npm install --save-dev imagemin
+npm install --save-dev jasmine-core
+npm install --save-dev babel
+npm install --save-dev babel-core
+npm install --save-dev eslint
+npm install --save-dev karma
+npm install --save-dev grunt
+npm install --save-dev webpack
+npm install --save-dev node-sass
+npm install --save-dev phantomjs
+
+npm install --save-dev grunt-contrib-clean
+npm install --save-dev grunt-contrib-connect
+npm install --save-dev grunt-contrib-copy
+npm install --save-dev grunt-karma
+npm install --save-dev grunt-open
+npm install --save-dev load-grunt-tasks
+
+npm install --save-dev karma-jasmine
+npm install --save-dev karma-phantomjs-launcher
+npm install --save-dev karma-script-launcher
+npm install --save-dev karma-webpack
+
+npm install --save-dev webpack-dev-server
+npm install --save-dev grunt-webpack
+npm install --save-dev react-hot-loader
+npm install --save-dev image-webpack-loader
+npm install --save-dev sass-loader
+npm install --save-dev style-loader
+npm install --save-dev url-loader
+npm install --save-dev babel-preset-es2015
+npm install --save-dev babel-preset-react
+npm install --save-dev json-loader
+npm install --save-dev babel-loader
+npm install --save-dev css-loader
+npm install --save-dev eslint-loader
+npm install --save-dev eslint-plugin-react
+
+grunt build
+grunt serve
\ No newline at end of file
diff --git a/modules/ui/composer/webapp/src/README.md b/modules/ui/composer/webapp/src/README.md
new file mode 100644
index 0000000..9dec53b
--- /dev/null
+++ b/modules/ui/composer/webapp/src/README.md
@@ -0,0 +1,69 @@
+
+The application enables editing of CONFD YANG instances.
+
+Catalog Panel - loads the NSD, VNFD, and PNFD catalogs from the server and updates the internal indexes used throughout
+    the UI
+Canvas Panel - graphical editor of the relations and networks of NSD and VNFD descriptors
+Details Panel - schema driven editor of every property in the YANG models
+Forwarding Graphs Tray - editing FG RSP, Classifier and MatchAttribute properties
+
+# Details Panel
+
+ - To get an object to show up in the Details Panel it must be defined in the DescriptorModelMeta.json schema file.
+
+ - The panel only needs the DescriptorModelMeta.json file to define the JSON to be created / edited.
+
+ - To make an object appear in the Details Panel you need to add it to the "containersList" in the DescriptorModelFactor.js class.
+
+# Canvas Panel
+
+ - is coded specifically to enable graphical editing of certain descriptor model elements and is the least flexible
+
+ - To make an object "selectable" it must have a data-uid field.
+
+ The canvas panel uses D3 to render the graphical editing experience.
+
+# State Management
+
+There are two layers of state: 1) model state and 2) UI state.
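+
+Both layers come together in one convention: edit a deep copy of the model, then dispatch a change action so the store
+can publish the new state. A minimal sketch of that flow, assuming a hypothetical `renameDescriptor` helper (the
+`catalogItemDescriptorChanged` action itself is real and is described under Model State below; the import path will
+vary with the caller's location):
+
+```
+import CatalogItemsActions from '../actions/CatalogItemsActions';
+
+// Hypothetical helper: mutate a deep copy, never the stored model, then
+// notify the CatalogDataStore so it can setState() and trigger a UI update.
+function renameDescriptor(catalogItemModel, newName) {
+	const copy = JSON.parse(JSON.stringify(catalogItemModel));
+	copy.name = newName;
+	CatalogItemsActions.catalogItemDescriptorChanged(copy);
+	return copy;
+}
+```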
+
+The YANG models are wrapped in Class objects to facilitate contextual operations that may change either state, like
+adding and removing property values, accessing the underlying uiState object, etc. These classes are defined in the
+`src/libraries/model/` directory.
+
+## Model State
+
+The UI uses the Alt.js implementation of Flux. Alt.js provides the actions and state of the application. Model state is
+managed by the CatalogDataStore. Any change made to the model must notify the CatalogDataStore. Upon notification of a
+change, the Alt DataStore calls setState() with a deep clone of the model, causing a UI update.
+
+You will see `CatalogItemsActions.catalogItemDescriptorChanged(catalogItemModel)` everywhere a change is made to the
+model. In essence, the UI treats the model as immutable. While the object is technically mutable, the UI modifies a copy
+of the model, so for changes to 'stick' the UI must notify the CatalogDataStore.
+
+## UI State
+
+UI state is managed in a few different ways depending on the specific need of the UI. The three ways are: 1) a field
+named 'uiState' added to each YANG model instance when the catalog is loaded from the server; 2) React Component state not
+saved in the Alt DataStore; and 3) module variables.
+
+Ideally, all UI state would use the latter two methods. The 'uiState' field poses a potential name collision with the YANG
+model (not likely, but if it happened it could be really bad for the application!).
+
+## ReactJS and d3
+
+The components built using d3 own the management of the DOM under the SVGSVGElement. ReactJS manages the content DOM element
+above the SVG element. This is a clean separation of concerns. Any model or UI state changes are handled by the model
+classes, so d3 stays agnostic about state management. ReactJS is not responsible for the DOM below the
+SVG content DIV and so does not care about any of the DOM manipulations that d3 makes.
+
+All of the UI is driven by the model, which is always passed down through the props of the parent ReactJS Component. The
+d3 components provide a way to pass the model and UI state into them. For an example of this, look at the
+`CatalogItemCanvasEditor::componentDidMount()` method. You will see the parent content div and the model data are given
+to the `DescriptorGraph()` d3 component.
+
+The d3 graphing components are located in the `src/libraries/graph/` directory.
+
+
+
+
diff --git a/modules/ui/composer/webapp/src/actions/CanvasEditorActions.js b/modules/ui/composer/webapp/src/actions/CanvasEditorActions.js
new file mode 100644
index 0000000..792b8c9
--- /dev/null
+++ b/modules/ui/composer/webapp/src/actions/CanvasEditorActions.js
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ *
+ *
+ */
+/**
+ * Created by onvelocity on 10/14/15.
+ */ +import alt from '../alt'; + +class CanvasEditorActions { + + constructor() { + this.generateActions('showMoreInfo', 'showLessInfo', 'toggleShowMoreInfo', 'applyDefaultLayout', 'setCanvasZoom', 'addVirtualLinkDescriptor', 'addForwardingGraphDescriptor', 'addVirtualDeploymentDescriptor'); + } + +} + +export default alt.createActions(CanvasEditorActions); diff --git a/modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js b/modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js new file mode 100644 index 0000000..7c0634c --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CanvasPanelTrayActions.js @@ -0,0 +1,34 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * + * + */ +/** + * Created by onvelocity on 2/4/16. + */ +import alt from '../alt'; + +class CanvasPanelTrayActions { + + constructor() { + this.generateActions('open', 'close', 'toggleOpenClose'); + } + +} + +export default alt.createActions(CanvasPanelTrayActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js b/modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js new file mode 100644 index 0000000..2230d5c --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogDataSourceActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +class CatalogDataSourceActions { + + constructor() { + this.generateActions('loadCatalogsSuccess', 'loadCatalogsError', 'deleteCatalogItemSuccess', 'deleteCatalogItemError', 'saveCatalogItemSuccess', 'saveCatalogItemError'); + } + +} + +export default alt.createActions(CatalogDataSourceActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogFilterActions.js b/modules/ui/composer/webapp/src/actions/CatalogFilterActions.js new file mode 100644 index 0000000..51a9556 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogFilterActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +class CatalogFilterActions { + + constructor() { + this.generateActions('filterByType'); + } + +} + +export default alt.createActions(CatalogFilterActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogItemsActions.js b/modules/ui/composer/webapp/src/actions/CatalogItemsActions.js new file mode 100644 index 0000000..13835af --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogItemsActions.js @@ -0,0 +1,39 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +/* + This class manages Catalog Data State + */ + +class CatalogItemsActions { + + constructor() { + this.generateActions('catalogItemMetaDataChanged', 'catalogItemDescriptorChanged', 'createCatalogItem', 'editCatalogItem', 'duplicateSelectedCatalogItem', 'selectCatalogItem', 'deleteSelectedCatalogItem', 'cancelCatalogItemChanges', 'saveCatalogItem', 'exportSelectedCatalogItems'); + } + +} + +export default alt.createActions(CatalogItemsActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js b/modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js new file mode 100644 index 0000000..d27c59f --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogPackageManagerActions.js @@ -0,0 +1,32 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +/* + * + * + */ +import alt from '../alt'; + +class CatalogPackageManagerActions { + + constructor() { + this.generateActions('downloadCatalogPackage', 'downloadCatalogPackageStatusUpdated', 'downloadCatalogPackageError', 'uploadCatalogPackage', 'uploadCatalogPackageStatusUpdated', 'uploadCatalogPackageError', 'removeCatalogPackage'); + } + +} + +export default alt.createActions(CatalogPackageManagerActions); diff --git a/modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js b/modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js new file mode 100644 index 0000000..ec08422 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/CatalogPanelTrayActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; + +class CatalogPanelTrayActions { + + constructor() { + this.generateActions('open', 'close', 'toggleOpenClose'); + } + +} + +export default alt.createActions(CatalogPanelTrayActions); diff --git a/modules/ui/composer/webapp/src/actions/ComposerAppActions.js b/modules/ui/composer/webapp/src/actions/ComposerAppActions.js new file mode 100644 index 0000000..0387f7a --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/ComposerAppActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by kkashalk on 11/30/15. + */ +import alt from '../alt'; + +class ComposerAppActions { + + constructor() { + this.generateActions('showError', 'clearError', 'setDragState', 'propertySelected', 'showJsonViewer', 'closeJsonViewer', 'selectModel', 'outlineModel', 'clearSelection', 'enterFullScreenMode', 'exitFullScreenMode'); + } + +} + +export default alt.createActions(ComposerAppActions); \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/actions/ModalOverlayActions.js b/modules/ui/composer/webapp/src/actions/ModalOverlayActions.js new file mode 100644 index 0000000..c308e65 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/ModalOverlayActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 10/14/15. + */ +import alt from '../alt'; + +class ModalOverlayActions { + + constructor() { + this.generateActions('showModalOverlay', 'hideModalOverlay'); + } + +} + +export default alt.createActions(ModalOverlayActions); diff --git a/modules/ui/composer/webapp/src/actions/PanelResizeAction.js b/modules/ui/composer/webapp/src/actions/PanelResizeAction.js new file mode 100644 index 0000000..bdf0344 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/PanelResizeAction.js @@ -0,0 +1,73 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +import alt from '../alt'; +import changeCase from 'change-case' + +/* + This class manages Composer Layout State + */ + +const cleanNameRegExp = /(-is-tray-open|panel-)/i; + +class PanelResizeAction { + + resize(e) { + + /* we expect two types of resize events: + * window resize - invoked by window + * resize-manager resize - invoked by ResizeManager + * + * normalize the data needed by the Composer Layout or ignore invalid ones + * + * */ + + if (!e) { + return false; + } + + if (e.detail && e.detail.side) { + // a ResizeManager event + this.dispatch(PanelResizeAction.buildResizeManagerInfo(e)) + } else { + // a window event + this.dispatch(PanelResizeAction.buildWindowResizeInfo(e)); + } + + } + + static buildWindowResizeInfo(e) { + return e; + } + + static buildResizeManagerInfo(e) { + const info = Object.assign({originalEvent: e}, e.detail); + const name = changeCase.paramCase(info.target.className.replace(cleanNameRegExp, '')); + info.type = 'resize-manager.resize.' + name; + return info; + } + +} + +export default alt.createActions(PanelResizeAction); diff --git a/modules/ui/composer/webapp/src/actions/RiftHeaderActions.js b/modules/ui/composer/webapp/src/actions/RiftHeaderActions.js new file mode 100644 index 0000000..c046a12 --- /dev/null +++ b/modules/ui/composer/webapp/src/actions/RiftHeaderActions.js @@ -0,0 +1,35 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by kkashalk on 11/10/15. + */ +import alt from '../alt'; + +class RiftHeaderActions { + + constructor() { + this.generateActions('requestLaunchpadConfigSuccess'); + } + +} + +export default alt.createActions(RiftHeaderActions); diff --git a/modules/ui/composer/webapp/src/alt.js b/modules/ui/composer/webapp/src/alt.js new file mode 100644 index 0000000..b87da11 --- /dev/null +++ b/modules/ui/composer/webapp/src/alt.js @@ -0,0 +1,30 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 8/11/15. + */ +'use strict'; + +var Alt = require('alt'); +var alt = new Alt(); + +export default alt; diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Black-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Black-webfont.woff new file mode 100755 index 0000000..0229086 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Black-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-BlackItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-BlackItalic-webfont.woff new file mode 100755 index 0000000..1875c0b Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-BlackItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Bold-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Bold-webfont.woff new file mode 100755 index 0000000..0c69948 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Bold-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-BoldItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-BoldItalic-webfont.woff new file mode 100755 index 0000000..99de61a Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-BoldItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Italic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Italic-webfont.woff new file mode 100755 index 0000000..dd74244 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Italic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Light-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Light-webfont.woff new file mode 100755 index 0000000..cc534a3 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Light-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-LightItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-LightItalic-webfont.woff new file mode 100755 index 0000000..3071ff4 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-LightItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Medium-webfont.woff 
b/modules/ui/composer/webapp/src/assets/Roboto-Medium-webfont.woff new file mode 100755 index 0000000..cd810ef Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Medium-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-MediumItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-MediumItalic-webfont.woff new file mode 100755 index 0000000..69a1458 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-MediumItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Regular-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Regular-webfont.woff new file mode 100755 index 0000000..bfa05d5 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Regular-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-Thin-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-Thin-webfont.woff new file mode 100755 index 0000000..f10b831 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-Thin-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/Roboto-ThinItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/Roboto-ThinItalic-webfont.woff new file mode 100755 index 0000000..9ef17a8 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/Roboto-ThinItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-Bold-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Bold-webfont.woff new file mode 100755 index 0000000..235c963 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Bold-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-BoldItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-BoldItalic-webfont.woff new file mode 100755 index 0000000..df69b95 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/RobotoCondensed-BoldItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-Italic-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Italic-webfont.woff new file mode 100755 index 0000000..67804e1 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Italic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-Light-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Light-webfont.woff new file mode 100755 index 0000000..c414478 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Light-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-LightItalic-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-LightItalic-webfont.woff new file mode 100755 index 0000000..5536e16 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/RobotoCondensed-LightItalic-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/RobotoCondensed-Regular-webfont.woff b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Regular-webfont.woff new file mode 100755 index 0000000..ec28f95 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/RobotoCondensed-Regular-webfont.woff differ diff --git a/modules/ui/composer/webapp/src/assets/big-honking-catalog.json b/modules/ui/composer/webapp/src/assets/big-honking-catalog.json new file mode 100644 index 0000000..25dce0c --- /dev/null +++ 
b/modules/ui/composer/webapp/src/assets/big-honking-catalog.json @@ -0,0 +1,1901 @@ +[ + { + "id": "GUID-1", + "name": "RIFT.ware™ NS Descriptors Catalog", + "short-name": "rift.ware-nsd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "nsd", + "meta": { + "icon-svg": "data:image/svg+xml,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%0A%3C!--%20Generator%3A%20Adobe%20Illustrator%2018.0.0%2C%20SVG%20Export%20Plug-In%20.%20SVG%20Version%3A%206.00%20Build%200)%20%20--%3E%0A%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%0A%3Csvg%20version%3D%221.1%22%20id%3D%22connection-icon-1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%0A%09%20viewBox%3D%220%200%2050%2050%22%20style%3D%22enable-background%3Anew%200%200%2050%2050%3B%22%20xml%3Aspace%3D%22preserve%22%3E%0A%09%3Cpath%20d%3D%22M15%2030c-2.8%200-5-2.2-5-5s2.2-5%205-5%205%202.2%205%205-2.2%205-5%205zm0-8c-1.7%200-3%201.3-3%203s1.3%203%203%203%203-1.3%203-3-1.3-3-3-3z%22%2F%3E%3Cpath%20d%3D%22M35%2020c-2.8%200-5-2.2-5-5s2.2-5%205-5%205%202.2%205%205-2.2%205-5%205zm0-8c-1.7%200-3%201.3-3%203s1.3%203%203%203%203-1.3%203-3-1.3-3-3-3z%22%2F%3E%3Cpath%20d%3D%22M35%2040c-2.8%200-5-2.2-5-5s2.2-5%205-5%205%202.2%205%205-2.2%205-5%205zm0-8c-1.7%200-3%201.3-3%203s1.3%203%203%203%203-1.3%203-3-1.3-3-3-3z%22%2F%3E%3Cpath%20d%3D%22M19.007%2025.885l12.88%206.44-.895%201.788-12.88-6.44z%22%2F%3E%3Cpath%20d%3D%22M30.993%2015.885l.894%201.79-12.88%206.438-.894-1.79z%22%2F%3E%3C%2Fsvg%3E" + }, + "descriptors": [ + { + "description": "VPNaaS Provider NSD for Corporation X", + "version": 1, + "constituent-vnfd": [ + { + "vnf-configuration": { + "config-type": "netconf", + "config-template": "\n \n \n trafgen\n 0\n \n trafgen-lb\n \n N1TenGi-1\n \n trafgen_vnfd/cp0\n \n \n \n vlan200\n \n trafgen_vnfd/cp0\n 200\n \n \n
200.1.1.1/24 \n \n \n
\n \n trafgen_vnfd/cp0\n \n \n rw_trafgen\n rw_trafgen\n \n 2\n 200\n \n \n direct\n \n \n \n 10\n \n \n \n \n \n \n \n 200.1.1.2\n \n \n \n 200.1.1.1\n 200.1.1.1\n 200.1.1.1\n 1\n \n \n 50.1.1.1\n 50.1.1.1\n 50.1.1.1\n 1\n \n \n 10000\n 10000\n 10128\n 1\n \n \n 5678\n 5678\n 5678\n 1\n \n \n 1024\n 1024\n 1024\n 1\n \n \n 200\n 200\n 200\n 1\n \n \n \n \n
\n
\n \n \n syslog\n \n 514\n \n \n ", + "input-params": { + "config-delay": 180, + "config-priority": 1 + }, + "netconf": { + "target": "running", + "port": 2022 + } + }, + "vnfd-id-ref": "2544951a-d6b5-11e5-9bb7-001b21b98a89", + "member-vnf-index": 1 + }, + { + "vnf-configuration": { + "input-params": { + "config-delay": 0, + "config-priority": 2 + }, + "config-type": "script", + "config-template": "\n#!/usr/bin/expect -f\nset login \"root\"\nset addr \nset pw \"6windos\"\nspawn ssh -1 $login@$addr\n\nset timeout 60\n\nexpect \"yes/no\" {\n send \"yes\\r\"\n expect \"*?assword:\" { send \"$pw\\r\" }\n } \"*?assword:\" { send \"$pw\\r\" }\n\nexpect \"# \"\nsend \"cli\\r\"\nexpect \"{}\"\nsend \"edit conf running\\r\"\nexpect \"ing}\"\n\n# Setup eth1 config\nsend \"eth1\\r\"\nexpect \"eth1}\"\nsend \"ipaddress dhcpv4\\r\"\nexpect \"dhcpv4}\"\nsend \"dhcp enable\\r\"\nexpect \"dhcpv4}\"\nsend \"exit\\r\"\nexpect \"eth1}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n# Setup eth2 config\nsend \"eth2\\r\"\nexpect \"eth2}\"\nsend \"ipaddress dhcpv4\\r\"\nexpect \"dhcpv4}\"\nsend \"dhcp enable\\r\"\nexpect \"dhcpv4}\"\nsend \"exit\\r\"\nexpect \"eth2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup loopback\nsend \"loopback0\\r\"\nexpect \"back0}\"\nsend \"ipaddress 2.2.2.2/32\\r\"\nexpect \"back0}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup VRF 2\nsend \"vrf2\\r\"\nexpect \"vrf2}\"\nsend \"forwarding ipv4 enable\\r\"\nexpect \"vrf2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Applyconfig\nsend \"addrunning\\r\"\nexpect \"ing}\"\nsend \"exit\\r\"\nexpect \"{}\"\n\n# Get MAC of eth2\nsend \"show interface eth2\\r\"\nexpect -re {ether (\\S+)}\nset macaddr $expect_out(1,string)\nputs $macaddr\n\n# Enter into edit config mode again\nsend \"edit conf running\\r\"\n\n#Setup route for GRE tunnel\nsend \"rtg\\r\"\nexpect \"rtg}\"\nsend \"route 3.3.3.3/32 100.0.108.60\\r\"\nexpect \"rtg}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup VLAN on eth2 and vrf2\nsend \"vlan2\\r\"\nexpect \"vlan2}\"\nsend \"bind eth2 id 200\\r\"\nexpect \"vlan2}\"\nsend \"mac-address $macaddr\\r\"\nexpect \"vlan2}\"\nsend \"ipaddress 200.1.1.2/24\\r\"\nexpect \"vlan2}\"\nsend \"vrf-id 2\\r\"\nexpect \"vlan2}\"\nsend \"interface up\\r\"\nexpect \"vlan2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup GRE on vrf2\nsend \"gre2\\r\"\nexpect \"gre2}\"\nsend \"bind 2.2.2.2 3.3.3.3 \\r\"\nexpect \"gre2}\"\nsend \"endpoints 5.1.1.1 5.1.1.2\\r\"\nexpect \"gre2}\"\nsend \"vrf-id 2\\r\"\nexpect \"gre2}\"\nsend \"checksum-input disable\\r\"\nexpect \"gre2}\"\nsend \"interface up\\r\"\nexpect \"gre2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup routing in VRF2 for VLAN2 traffic\nsend \"rtg\\r\"\nexpect \"rtg}\"\nsend \"route 50.1.1.1/32 5.1.1.2 vrf-id 2\\r\"\nexpect \"rtg}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Applyconfig\nsend \"addrunning\\r\"\nexpect \"ing}\"\nsend \"exit\\r\"\nexpect \"{}\"\n ", + "script": { + "script-type": "expect" + } + }, + "vnfd-id-ref": "23028096-d6b5-11e5-8e3f-001b21b98a89", + "member-vnf-index": 2 + }, + { + "vnf-configuration": { + "input-params": { + "config-delay": 0, + "config-priority": 0 + } + }, + "vnfd-id-ref": "2407d7a2-d6b5-11e5-95d4-001b21b98a89", + "member-vnf-index": 3 + }, + { + "vnfd-id-ref": "23095c22-d6b5-11e5-9e33-001b21b98a89", + "member-vnf-index": 4 + }, + { + "vnfd-id-ref": "2329d470-d6b5-11e5-98fb-001b21b98a89", + "member-vnf-index": 5 + } + ], + "short-name": "Vpn_Provider_Site_CorpA_nsd", + "vld": [ + { + "description": "Corporate LAN for A,SRIOV based", + "type": "ELAN", + "version": 1, + 
"provider-network": { + "physical-network": "physnet3", + "overlay-type": "FLAT" + }, + "short-name": "Corp-A Prov-link", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-connection-point-ref": "trafgen_vnfd/cp0", + "vnfd-id-ref": "2544951a-d6b5-11e5-9bb7-001b21b98a89" + }, + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "turborouter_vnfd/cp0", + "vnfd-id-ref": "23028096-d6b5-11e5-8e3f-001b21b98a89" + }, + { + "member-vnf-index-ref": 3, + "vnfd-connection-point-ref": "cwims_vnfd/cp0", + "vnfd-id-ref": "2407d7a2-d6b5-11e5-95d4-001b21b98a89" + }, + { + "member-vnf-index-ref": 4, + "vnfd-connection-point-ref": "Riverbed_SteelHead_vnfd/cp0", + "vnfd-id-ref": "23095c22-d6b5-11e5-9e33-001b21b98a89" + }, + { + "member-vnf-index-ref": 5, + "vnfd-connection-point-ref": "F5_BigIP_vnfd/cp0", + "vnfd-id-ref": "2329d470-d6b5-11e5-98fb-001b21b98a89" + } + ], + "name": "Corp-A Prov-link", + "vendor": "RIFT.io", + "id": "28b2c8a2-d6b5-11e5-9a50-001b21b98a89" + }, + { + "description": "HA link between BigIPs", + "type": "ELAN", + "version": 1, + "short-name": "Corp-A HA-link", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 5, + "vnfd-connection-point-ref": "F5_BigIP_vnfd/cp1", + "vnfd-id-ref": "2329d470-d6b5-11e5-98fb-001b21b98a89" + } + ], + "name": "Corp-A HA-link", + "vendor": "RIFT.io", + "id": "28b329be-d6b5-11e5-9a50-001b21b98a89" + }, + { + "description": "Egress link between BigIPs", + "type": "ELAN", + "version": 1, + "short-name": "Corp-A Egress-link", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 5, + "vnfd-connection-point-ref": "F5_BigIP_vnfd/cp2", + "vnfd-id-ref": "2329d470-d6b5-11e5-98fb-001b21b98a89" + } + ], + "name": "Corp-A Egress-link", + "vendor": "RIFT.io", + "id": "28b33d32-d6b5-11e5-9a50-001b21b98a89" + }, + { + "description": "External network VIRTIO based", + "type": "ELAN", + "version": 1, + "short-name": "multisite", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "turborouter_vnfd/cp1", + "vnfd-id-ref": "23028096-d6b5-11e5-8e3f-001b21b98a89" + } + ], + "name": "multisite", + "vendor": "RIFT.io", + "id": "28b350ce-d6b5-11e5-9a50-001b21b98a89" + } + ], + "name": "Vpn_Provider_Site_CorpA_nsd", + "vendor": "RIFT.io", + "id": "28922610-d6b5-11e5-9a50-001b21b98a89", + "meta": { + "instance-ref-count": 0 + } + }, + { + "description": "VPNaaS Provider NSD for Corporation X", + "version": 1, + "constituent-vnfd": [ + { + "vnf-configuration": { + "config-type": "netconf", + "config-template": "\n \n \n trafgen\n 0\n \n trafgen-lb\n \n N1TenGi-1\n \n trafgen_vnfd/cp0\n \n \n \n vlan200\n \n trafgen_vnfd/cp0\n 200\n \n \n
200.1.1.1/24 \n \n \n
\n \n trafgen_vnfd/cp0\n \n \n rw_trafgen\n rw_trafgen\n \n 2\n 200\n \n \n direct\n \n \n \n 10\n \n \n \n \n \n \n \n 200.1.1.2\n \n \n \n 200.1.1.1\n 200.1.1.1\n 200.1.1.1\n 1\n \n \n 50.1.1.1\n 50.1.1.1\n 50.1.1.1\n 1\n \n \n 10000\n 10000\n 10128\n 1\n \n \n 5678\n 5678\n 5678\n 1\n \n \n 1024\n 1024\n 1024\n 1\n \n \n 200\n 200\n 200\n 1\n \n \n \n \n
\n
\n \n \n syslog\n \n 514\n \n \n ", + "input-params": { + "config-delay": 180, + "config-priority": 1 + }, + "netconf": { + "target": "running", + "port": 2022 + } + }, + "vnfd-id-ref": "2544951a-d6b5-11e5-9bb7-001b21b98a89", + "member-vnf-index": 1 + }, + { + "vnf-configuration": { + "input-params": { + "config-delay": 0, + "config-priority": 2 + }, + "config-type": "script", + "config-template": "\n#!/usr/bin/expect -f\nset login \"root\"\nset addr \nset pw \"6windos\"\nspawn ssh -1 $login@$addr\n\nset timeout 60\n\nexpect \"yes/no\" {\n send \"yes\\r\"\n expect \"*?assword:\" { send \"$pw\\r\" }\n } \"*?assword:\" { send \"$pw\\r\" }\n\nexpect \"# \"\nsend \"cli\\r\"\nexpect \"{}\"\nsend \"edit conf running\\r\"\nexpect \"ing}\"\n\n# Setup eth1 config\nsend \"eth1\\r\"\nexpect \"eth1}\"\nsend \"ipaddress dhcpv4\\r\"\nexpect \"dhcpv4}\"\nsend \"dhcp enable\\r\"\nexpect \"dhcpv4}\"\nsend \"exit\\r\"\nexpect \"eth1}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n# Setup eth2 config\nsend \"eth2\\r\"\nexpect \"eth2}\"\nsend \"ipaddress dhcpv4\\r\"\nexpect \"dhcpv4}\"\nsend \"dhcp enable\\r\"\nexpect \"dhcpv4}\"\nsend \"exit\\r\"\nexpect \"eth2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup loopback\nsend \"loopback0\\r\"\nexpect \"back0}\"\nsend \"ipaddress 2.2.2.2/32\\r\"\nexpect \"back0}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup VRF 2\nsend \"vrf2\\r\"\nexpect \"vrf2}\"\nsend \"forwarding ipv4 enable\\r\"\nexpect \"vrf2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Applyconfig\nsend \"addrunning\\r\"\nexpect \"ing}\"\nsend \"exit\\r\"\nexpect \"{}\"\n\n# Get MAC of eth2\nsend \"show interface eth2\\r\"\nexpect -re {ether (\\S+)}\nset macaddr $expect_out(1,string)\nputs $macaddr\n\n# Enter into edit config mode again\nsend \"edit conf running\\r\"\n\n#Setup route for GRE tunnel\nsend \"rtg\\r\"\nexpect \"rtg}\"\nsend \"route 3.3.3.3/32 100.0.108.60\\r\"\nexpect \"rtg}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup VLAN on eth2 and vrf2\nsend \"vlan2\\r\"\nexpect \"vlan2}\"\nsend \"bind eth2 id 200\\r\"\nexpect \"vlan2}\"\nsend \"mac-address $macaddr\\r\"\nexpect \"vlan2}\"\nsend \"ipaddress 200.1.1.2/24\\r\"\nexpect \"vlan2}\"\nsend \"vrf-id 2\\r\"\nexpect \"vlan2}\"\nsend \"interface up\\r\"\nexpect \"vlan2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup GRE on vrf2\nsend \"gre2\\r\"\nexpect \"gre2}\"\nsend \"bind 2.2.2.2 3.3.3.3 \\r\"\nexpect \"gre2}\"\nsend \"endpoints 5.1.1.1 5.1.1.2\\r\"\nexpect \"gre2}\"\nsend \"vrf-id 2\\r\"\nexpect \"gre2}\"\nsend \"checksum-input disable\\r\"\nexpect \"gre2}\"\nsend \"interface up\\r\"\nexpect \"gre2}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Setup routing in VRF2 for VLAN2 traffic\nsend \"rtg\\r\"\nexpect \"rtg}\"\nsend \"route 50.1.1.1/32 5.1.1.2 vrf-id 2\\r\"\nexpect \"rtg}\"\nsend \"exit\\r\"\nexpect \"ing}\"\n\n#Applyconfig\nsend \"addrunning\\r\"\nexpect \"ing}\"\nsend \"exit\\r\"\nexpect \"{}\"\n ", + "script": { + "script-type": "expect" + } + }, + "vnfd-id-ref": "23028096-d6b5-11e5-8e3f-001b21b98a89", + "member-vnf-index": 2 + }, + { + "vnf-configuration": { + "input-params": { + "config-delay": 0, + "config-priority": 0 + } + }, + "vnfd-id-ref": "2407d7a2-d6b5-11e5-95d4-001b21b98a89", + "member-vnf-index": 3 + } + ], + "short-name": "Vpn_Provider_Site_CorpB_nsd", + "vld": [ + { + "description": "Corporate LAN for B,SRIOV based", + "type": "ELAN", + "version": 1, + "provider-network": { + "physical-network": "physnet4", + "overlay-type": "FLAT" + }, + "short-name": "Corp-B Prov-link", + "vnfd-connection-point-ref": [ + { + 
"member-vnf-index-ref": 1, + "vnfd-connection-point-ref": "trafgen_vnfd/cp0", + "vnfd-id-ref": "2544951a-d6b5-11e5-9bb7-001b21b98a89" + }, + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "turborouter_vnfd/cp0", + "vnfd-id-ref": "23028096-d6b5-11e5-8e3f-001b21b98a89" + }, + { + "member-vnf-index-ref": 3, + "vnfd-connection-point-ref": "cwims_vnfd/cp0", + "vnfd-id-ref": "2407d7a2-d6b5-11e5-95d4-001b21b98a89" + } + ], + "name": "Corp-B Prov-link", + "vendor": "RIFT.io", + "id": "29405438-d6b5-11e5-a599-001b21b98a89" + }, + { + "description": "External network VIRTIO based", + "type": "ELAN", + "version": 1, + "short-name": "multisite", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "turborouter_vnfd/cp1", + "vnfd-id-ref": "23028096-d6b5-11e5-8e3f-001b21b98a89" + } + ], + "name": "multisite", + "vendor": "RIFT.io", + "id": "2940a00a-d6b5-11e5-a599-001b21b98a89" + } + ], + "name": "Vpn_Provider_Site_CorpB_nsd", + "vendor": "RIFT.io", + "id": "29212fb8-d6b5-11e5-a599-001b21b98a89", + "meta": { + "instance-ref-count": 0 + } + }, + { + "description": "NS Vrouter-Trafsink", + "version": 1, + "constituent-vnfd": [ + { + "vnf-configuration": { + "config-type": "netconf", + "config-template": "\n \n \n trafsink\n 0\n \n lb-trafsink\n \n N3TenGigi-1\n \n trafsink_vnfd/cp0\n \n \n
50.1.1.1/24 \n \n \n
\n \n trafsink_vnfd/cp0\n \n \n rw_trafgen\n rw_trafgen\n \n 2\n \n \n direct\n \n \n \n \n \n \n \n \n \n
\n
\n \n \n syslog\n \n 514\n \n \n\n ", + "input-params": { + "config-delay": 180, + "config-priority": 1 + }, + "netconf": { + "target": "running", + "port": 2022 + }, + "config-access": { + "password": "admin", + "username": "admin" + } + }, + "vnfd-id-ref": "26a08568-d6b5-11e5-a5c6-001b21b98a89", + "member-vnf-index": 1 + }, + { + "vnf-configuration": { + "input-params": { + "config-delay": 0, + "config-priority": 2 + }, + "config-type": "script", + "config-template": "\n#!/usr/bin/expect -f\nset login \"vyatta\"\nset addr \nset pw \"vyatta\"\nspawn ssh $login@$addr\n\nset timeout 60\n\nexpect \"yes/no\" {\n send \"yes\n\"\n expect \"*?assword:\" { send \"$pw\n\" }\n } \"*?assword:\" { send \"$pw\n\" }\n\nexpect \"$ \"\nsend \"configure\n\"\nexpect \"# \"\nsend \"set protocols static interface-route 50.1.1.0/24 next-hop-interface dp0s6\n\"\nexpect \"# \"\nsend \"set protocols static route 2.2.2.2/32 next-hop 100.0.108.5\n\"\nexpect \"# \"\nsend \"set interfaces loopback lo1 address 3.3.3.3/32\n\"\nexpect \"# \"\nsend \"set interfaces tunnel tun0 address 5.1.1.2/24\n\"\nexpect \"# \"\nsend \"set interfaces tunnel tun0 encapsulation gre\n\"\nexpect \"# \"\nsend \"set interfaces tunnel tun0 local-ip 3.3.3.3\n\"\nexpect \"# \"\nsend \"set interfaces tunnel tun0 remote-ip 2.2.2.2\n\"\nexpect \"# \"\nsend \"set protocols static route 200.1.1.1/32 next-hop 5.1.1.1\n\"\nexpect \"# \"\nsend \"commit\n\"\nexpect \"# \"\nsend \"exit \n\"\nexpect \"$ \"\n ", + "script": { + "script-type": "expect" + } + }, + "vnfd-id-ref": "a6ce0f96-d6cc-11e5-9540-001b21b98a89", + "member-vnf-index": 2 + } + ], + "short-name": "Vpn_Remote_Site_nsd", + "vld": [ + { + "description": "Corporate LAN for X,SRIOV based", + "type": "ELAN", + "version": 1, + "provider-network": { + "physical-network": "physnet3", + "overlay-type": "FLAT" + }, + "short-name": "Corp-X Remote Site link", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-connection-point-ref": "trafsink_vnfd/cp0", + "vnfd-id-ref": "26a08568-d6b5-11e5-a5c6-001b21b98a89" + }, + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "vrouter_vnfd/cp0", + "vnfd-id-ref": "a6ce0f96-d6cc-11e5-9540-001b21b98a89" + } + ], + "name": "Corp-X Remote Site link", + "vendor": "RIFT.io", + "id": "b83bb668-d6cf-11e5-99d1-001b21b98a89" + }, + { + "description": "External network VIRTIO based", + "type": "ELAN", + "version": 1, + "short-name": "multisite", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "vrouter_vnfd/cp1", + "vnfd-id-ref": "a6ce0f96-d6cc-11e5-9540-001b21b98a89" + } + ], + "name": "multisite", + "vendor": "RIFT.io", + "id": "b83bff92-d6cf-11e5-99d1-001b21b98a89" + } + ], + "name": "Vpn_Remote_Site_nsd", + "vendor": "RIFT.io", + "id": "b81198ec-d6cf-11e5-99d1-001b21b98a89", + "meta": { + "instance-ref-count": 0 + } + }, + { + "description": "6wind turborouter only", + "version": 1, + "constituent-vnfd": [ + { + "vnf-configuration": { + "initial-config-primitive": [ + { + "parameter": [ + { + "name": "pass", + "value": "6windos" + }, + { + "name": "user", + "value": "root" + }, + { + "name": "vpe-router", + "value": "" + } + ], + "name": "config", + "seq": 1 + }, + { + "parameter": [ + { + "name": "cidr", + "value": "10.10.10.2/30" + }, + { + "name": "iface-name", + "value": "eth1" + } + ], + "name": "configure-interface", + "seq": 2 + } + ], + "config-type": "juju", + "input-params": { + "config-delay": 0, + "config-priority": 0 + }, + "juju": { + "charm": "vpe-router" + } + }, + 
"vnfd-id-ref": "c49db92a-d6d8-11e5-9537-6cb3113b406f", + "member-vnf-index": 1 + } + ], + "short-name": "tr_juju_nsd", + "vld": [ + { + "description": "Link from TR to ELAN", + "type": "ELAN", + "version": 1, + "short-name": "Link1", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-connection-point-ref": "turborouter_vnfd/cp0", + "vnfd-id-ref": "c49db92a-d6d8-11e5-9537-6cb3113b406f" + } + ], + "name": "Link1", + "vendor": "RIFT.io", + "id": "c4f9dc50-d6d8-11e5-864f-6cb3113b406f" + } + ], + "config-primitive": [ + { + "parameter": [ + { + "data-type": "string", + "name": "Corporation Name", + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Tunnel Key", + "default-value": 1, + "mandatory": "true" + } + ], + "name": "Add Corporation", + "parameter-group": [ + { + "parameter": [ + { + "data-type": "string", + "name": "Corp. Gateway", + "default-value": "10.0.1.1", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Corp. Network", + "default-value": "10.0.1.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Interface Name", + "default-value": "eth3", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network", + "default-value": "10.255.255.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network Area", + "default-value": 0, + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Vlan ID", + "default-value": 101, + "mandatory": "true" + } + ], + "name": "PE1", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "Corp. Gateway", + "default-value": "10.0.2.1", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Corp. Network", + "default-value": "10.0.2.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Interface Name", + "default-value": "eth3", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network", + "default-value": "10.255.255.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network Area", + "default-value": 0, + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Vlan ID", + "default-value": 102, + "mandatory": "true" + } + ], + "name": "PE2", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "Corp. Gateway", + "default-value": "10.0.4.1", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Corp. Network", + "default-value": "10.0.4.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Interface Name", + "default-value": "eth4", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network", + "default-value": "10.255.255.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network Area", + "default-value": 0, + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Vlan ID", + "default-value": 108, + "mandatory": "true" + } + ], + "name": "PE3", + "mandatory": "false" + } + ], + "user-defined-script": "/home/rift/.install/usr/bin/add_corporation.py" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "Corporation Name", + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Tunnel Key", + "default-value": 10, + "mandatory": "true" + } + ], + "name": "Add SP Test Corporation", + "parameter-group": [ + { + "parameter": [ + { + "data-type": "string", + "name": "Corp. 
Gateway", + "default-value": "10.0.1.1", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Corp. Network", + "default-value": "10.0.1.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Interface Name", + "default-value": "eth3", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network", + "default-value": "10.255.255.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network Area", + "default-value": 0, + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Vlan ID", + "default-value": 3000, + "mandatory": "true" + } + ], + "name": "PE1", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "Corp. Gateway", + "default-value": "10.0.2.1", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Corp. Network", + "default-value": "10.0.2.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Interface Name", + "default-value": "eth3", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network", + "default-value": "10.255.255.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network Area", + "default-value": 0, + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Vlan ID", + "default-value": 3000, + "mandatory": "true" + } + ], + "name": "PE2", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "Corp. Gateway", + "default-value": "10.0.3.1", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Corp. Network", + "default-value": "10.0.3.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Interface Name", + "default-value": "eth3", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network", + "default-value": "10.255.255.0/24", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "Local Network Area", + "default-value": 0, + "mandatory": "true" + }, + { + "data-type": "integer", + "name": "Vlan ID", + "default-value": 3000, + "mandatory": "true" + } + ], + "name": "PE3", + "mandatory": "false" + } + ], + "user-defined-script": "/home/rift/.install/usr/bin/add_corporation.py" + } + ], + "name": "tr_juju_nsd", + "vendor": "6Wind", + "id": "c4f7fb6a-d6d8-11e5-864f-6cb3113b406f", + "meta": { + "instance-ref-count": 0 + } + }, + { + "description": "NS Clearewater IMS only", + "input-parameter-xpath": [ + { + "label": "net-corp-vlan", + "default-value": 55, + "xpath": "/nsd:nsd-catalog/nsd:nsd[nsd:id=c719e6ba-d6d8-11e5-8962-6cb3113b406f]/nsd:vld[nsd:id=c71b347a-d6d8-11e5-8962-6cb3113b406f]/nsd:provider-network/manotypes:segmentation_id" + } + ], + "version": 1, + "constituent-vnfd": [ + { + "vnf-configuration": { + "config-primitive": [ + { + "parameter": [ + { + "data-type": "string", + "name": "base_number" + }, + { + "data-type": "string", + "name": "home_domain" + }, + { + "data-type": "integer", + "name": "number_count" + }, + { + "data-type": "string", + "name": "password" + } + ], + "name": "config" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "number", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "password", + "mandatory": "true" + } + ], + "name": "create-update-user" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "number", + "mandatory": "true" + } + ], + "name": "delete-user" + } + ], + "config-type": "juju", + "initial-config-primitive": [ + { + "parameter": [ + { + "name": "proxied_ip", + 
"value": "" + } + ], + "name": "config", + "seq": 1 + } + ], + "input-params": { + "config-delay": 0, + "config-priority": 1 + }, + "juju": { + "charm": "clearwater-aio-proxy" + } + }, + "vnfd-id-ref": "c663ea54-d6d8-11e5-9f1a-6cb3113b406f", + "member-vnf-index": 1 + } + ], + "short-name": "cwims_juju_nsd", + "vld": [ + { + "description": "Link from cwims to ELAN", + "type": "ELAN", + "version": 1, + "provider-network": { + "physical-network": "physnet1", + "overlay-type": "VLAN" + }, + "short-name": "net-corp", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-connection-point-ref": "cwims_vnfd/cp0", + "vnfd-id-ref": "c663ea54-d6d8-11e5-9f1a-6cb3113b406f" + } + ], + "name": "net-corp", + "vendor": "RIFT.io", + "id": "c71b347a-d6d8-11e5-8962-6cb3113b406f" + } + ], + "config-primitive": [ + { + "name": "Add User", + "vnf-primitive-group": [ + { + "vnfd-name": "cwims_vnfd", + "member-vnf-index-ref": 1, + "vnfd-id-ref": "c663ea54-d6d8-11e5-9f1a-6cb3113b406f", + "primitive": [ + { + "index": 1, + "name": "create-update-user" + } + ] + } + ] + }, + { + "name": "Delete User", + "vnf-primitive-group": [ + { + "vnfd-name": "cwims_vnfd", + "member-vnf-index-ref": 1, + "vnfd-id-ref": "c663ea54-d6d8-11e5-9f1a-6cb3113b406f", + "primitive": [ + { + "index": 1, + "name": "delete-user" + } + ] + } + ] + }, + { + "name": "Update Domain", + "vnf-primitive-group": [ + { + "vnfd-name": "cwims_vnfd", + "member-vnf-index-ref": 1, + "vnfd-id-ref": "c663ea54-d6d8-11e5-9f1a-6cb3113b406f", + "primitive": [ + { + "index": 1, + "name": "config" + } + ] + } + ] + } + ], + "name": "cwims_juju_nsd", + "vendor": "RIFT.io", + "id": "c719e6ba-d6d8-11e5-8962-6cb3113b406f", + "meta": { + "instance-ref-count": 0 + } + } + ] + }, + { + "id": "GUID-2", + "name": "RIFT.ware™ VNF Descriptors Catalog", + "short-name": "rift.ware-vnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "vnfd", + "meta": { + "icon-svg": "data:image/svg+xml, " + }, + "descriptors": [ + { + "description": "This is a Benu ClientSim VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "clientsim_vnfd/cp0" + }, + { + "name": "eth2", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "clientsim_vnfd/cp1" + } + ], + "vm-flavor": { + "memory-mb": 8192, + "storage-gb": 80, + "vcpu-count": 2 + }, + "name": "master", + "image": "clientsim-benu.qcow2", + "id": "19550ac8-c51c-11e5-b578-001b21b98a89", + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, start, --no-block, ngnix.service ]\n - [ systemctl, start, --no-block, php-fpm.service ]\n" + } + ], + "connection-point": [ + { + "name": "clientsim_vnfd/cp0", + "type": "VPORT" + }, + { + "name": "clientsim_vnfd/cp1", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "19550ac8-c51c-11e5-b578-001b21b98a89", + "dashboard-params": { + "port": 8008, + "path": "/" + }, + "port": 2022 + }, + "version": 1, + "short-name": "clientsim_vnfd", + "name": "clientsim_vnfd", + "vendor": "RIFT.io", + "id": "1953f58e-c51c-11e5-b578-001b21b98a89" + }, + { + "description": "This is a 6Wind TurboRouter VNF", + "vdu": [ + { + "count": 1, + 
"external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "turborouter_vnfd/cp0" + }, + { + "name": "eth2", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "turborouter_vnfd/cp1" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "storage-gb": 40, + "vcpu-count": 2 + }, + "name": "iovdu", + "image": "turbo-router-riftio-4-sriov.img.qcow2", + "id": "2302e4c8-d6b5-11e5-8e3f-001b21b98a89", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "turborouter_vnfd/cp0", + "type": "VPORT" + }, + { + "name": "turborouter_vnfd/cp1", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "2302e4c8-d6b5-11e5-8e3f-001b21b98a89", + "port": 80 + }, + "version": 1, + "short-name": "turborouter_vnfd", + "name": "turborouter_vnfd", + "vendor": "6Wind", + "id": "23028096-d6b5-11e5-8e3f-001b21b98a89" + }, + { + "description": "This is a Riverbed Steelhead VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "Riverbed_SteelHead_vnfd/cp0" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "storage-gb": 40, + "vcpu-count": 2 + }, + "name": "iovdu", + "image": "riverbedimg", + "id": "2309cf5e-d6b5-11e5-9e33-001b21b98a89", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "Riverbed_SteelHead_vnfd/cp0", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "2309cf5e-d6b5-11e5-9e33-001b21b98a89", + "dashboard-params": { + "port": 80, + "path": "/" + }, + "port": 80 + }, + "version": 1, + "short-name": "Riverbed_SteelHead_vnfd", + "name": "Riverbed_SteelHead_vnfd", + "vendor": "Riverbed", + "http-endpoint": [ + { + "password": "password", + "polling_interval_secs": 2, + "path": "rest/info", + "username": "admin" + } + ], + "id": "23095c22-d6b5-11e5-9e33-001b21b98a89" + }, + { + "description": "This is a F5 Big-IP load balancer VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "F5_BigIP_vnfd/cp0" + }, + { + "name": "eth2", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "F5_BigIP_vnfd/cp1" + }, + { + "name": "eth3", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "F5_BigIP_vnfd/cp2" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "storage-gb": 40, + "vcpu-count": 2 + }, + "name": "iovdu", + "image": "BIGIP-11.6.0.6.146.442.qcow2", + "id": "232a52ba-d6b5-11e5-98fb-001b21b98a89", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "F5_BigIP_vnfd/cp0", + "type": "VPORT" + }, + { + "name": "F5_BigIP_vnfd/cp1", + "type": "VPORT" + }, + { + "name": "F5_BigIP_vnfd/cp2", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "232a52ba-d6b5-11e5-98fb-001b21b98a89", + "dashboard-params": { + "https": "true", + "port": 443, + "path": "/" + }, + "port": 80 + }, + "version": 1, + "short-name": "F5_BigIP_vnfd", + "name": "F5_BigIP_vnfd", + "vendor": "F5 Networks", + "http-endpoint": [ + { + "password": "password", + "polling_interval_secs": 2, + "path": "rest/info", + "username": "admin" + } + ], + "id": "2329d470-d6b5-11e5-98fb-001b21b98a89" + }, + { + "description": "This is a Clearwater IMS VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + 
"vnfd-connection-point-ref": "cwims_vnfd/cp0" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "storage-gb": 40, + "vcpu-count": 2 + }, + "name": "iovdu", + "image": "cw-aio.qcow2", + "id": "2408452a-d6b5-11e5-95d4-001b21b98a89", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "cwims_vnfd/cp0", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "2408452a-d6b5-11e5-95d4-001b21b98a89", + "dashboard-params": { + "port": 80, + "path": "/" + }, + "port": 80 + }, + "version": 1, + "short-name": "cwims_vnfd", + "name": "cwims_vnfd", + "vendor": "Clearwater", + "http-endpoint": [ + { + "password": "vyatta", + "polling_interval_secs": 2, + "path": "rest/app/gui/dashboard/sysinfo", + "username": "vyatta" + } + ], + "id": "2407d7a2-d6b5-11e5-95d4-001b21b98a89" + }, + { + "description": "This is a RIFT.ware Trafgen VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "trafgen_vnfd/cp0" + } + ], + "vm-flavor": { + "memory-mb": 16384, + "storage-gb": 32, + "vcpu-count": 4 + }, + "name": "iovdu", + "image": "rift-root-latest-trafgen-k2.qcow2", + "id": "2545bca6-d6b5-11e5-9bb7-001b21b98a89", + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, trafgen.service ]\n - [ systemctl, start, --no-block, trafgen.service ]\n" + } + ], + "connection-point": [ + { + "name": "trafgen_vnfd/cp0", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "2545bca6-d6b5-11e5-9bb7-001b21b98a89", + "port": 2022 + }, + "version": 1, + "short-name": "trafgen_vnfd", + "name": "trafgen_vnfd", + "vendor": "RIFT.io", + "http-endpoint": [ + { + "polling_interval_secs": 3, + "headers": [ + { + "key": "Accept", + "value": "json" + }, + { + "key": "Content-type", + "value": "application/vnd.yang.data+json" + } + ], + "port": 8008, + "password": "fedora", + "path": "api/operational/vnf-opdata/vnf/trafgen,0/port-state", + "username": "fedora" + } + ], + "id": "2544951a-d6b5-11e5-9bb7-001b21b98a89", + "monitoring-param": [ + { + "json-query-params": { + "object-path": "$..*[@.portname is 'trafgen_vnfd/cp0'].counters.'tx-rate-mbps'" + }, + "description": "Outgoing byte rate of interface", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "mbps", + "value-type": "INT", + "name": "Cp0 Tx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 1000 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/trafgen,0/port-state", + "id": 1, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..*[@.portname is 'trafgen_vnfd/cp0'].counters.'rx-rate-mbps'" + }, + "description": "Incoming byte rate of interface", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "mbps", + "value-type": "INT", + "name": "Cp0 Rx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 1000 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/trafgen,0/port-state", + "id": 2, + "json-query-method": "OBJECTPATH" + } + ] + }, + { + "description": "This is a RIFT.ware Trafsink VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "trafsink_vnfd/cp0" + } + ], + "vm-flavor": { + "memory-mb": 16384, + "storage-gb": 32, + "vcpu-count": 4 + }, + "name": "iovdu", + "image": "rift-root-latest-trafgen-j.qcow2", + "id": 
"26a19e3a-d6b5-11e5-a5c6-001b21b98a89", + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, trafsink.service ]\n - [ systemctl, start, --no-block, trafsink.service ]\n" + } + ], + "connection-point": [ + { + "name": "trafsink_vnfd/cp0", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "26a19e3a-d6b5-11e5-a5c6-001b21b98a89", + "port": 2022 + }, + "version": 1, + "short-name": "trafsink_vnfd", + "name": "trafsink_vnfd", + "vendor": "RIFT.io", + "http-endpoint": [ + { + "polling_interval_secs": 3, + "headers": [ + { + "key": "Accept", + "value": "json" + }, + { + "key": "Content-type", + "value": "application/vnd.yang.data+json" + } + ], + "port": 8008, + "password": "fedora", + "path": "api/operational/vnf-opdata/vnf/trafsink,0/port-state", + "username": "fedora" + } + ], + "id": "26a08568-d6b5-11e5-a5c6-001b21b98a89", + "monitoring-param": [ + { + "json-query-params": { + "object-path": "$..*[@.portname is 'trafsink_vnfd/cp0'].counters.'tx-rate-mbps'" + }, + "description": "Outgoing bytes at interface", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "mbps", + "value-type": "INT", + "name": "Cp0 Tx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 1000 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/trafsink,0/port-state", + "id": 1, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..*[@.portname is 'trafsink_vnfd/cp0'].counters.'rx-rate-mbps'" + }, + "description": "Incoming bytes at interface", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "mbps", + "value-type": "INT", + "name": "Cp0 Rx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 1000 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/trafsink,0/port-state", + "id": 2, + "json-query-method": "OBJECTPATH" + } + ] + }, + { + "description": "This is a Benu SSC VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "ssc_vnfd/cp0" + } + ], + "vm-flavor": { + "memory-mb": 8192, + "storage-gb": 48, + "vcpu-count": 2 + }, + "name": "master", + "image": "ssc-benu.qcow2", + "id": "2e981d3a-d00c-11e5-ab30-625662870761", + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n - [ service, startssc, start ]\n" + } + ], + "connection-point": [ + { + "name": "ssc_vnfd/cp0", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "2e981d3a-d00c-11e5-ab30-625662870761", + "dashboard-params": { + "port": 80, + "path": "/" + }, + "port": 2022 + }, + "version": 1, + "short-name": "ssc_vnfd", + "name": "ssc_vnfd", + "vendor": "RIFT.io", + "id": "2e9819f2-d00c-11e5-ab30-625662870761" + }, + { + "description": "This is a Brocade vRouter VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth0", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "vrouter_vnfd/cp0" + }, + { + "name": "eth1", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "vrouter_vnfd/cp1" + } + ], + "vm-flavor": { + "memory-mb": 8192, + "storage-gb": 40, + "vcpu-count": 4 + }, + "name": "iovdu", + "image": "vyatta-7.qcow2", + "id": "a6cf9a28-d6cc-11e5-9540-001b21b98a89", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "vrouter_vnfd/cp0", + "type": "VPORT" + }, + { + 
"name": "vrouter_vnfd/cp1", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "a6cf9a28-d6cc-11e5-9540-001b21b98a89", + "dashboard-params": { + "port": 80, + "path": "/" + }, + "port": 80 + }, + "version": 1, + "short-name": "vrouter_vnfd", + "name": "vrouter_vnfd", + "vendor": "Brocade", + "http-endpoint": [ + { + "password": "vyatta", + "polling_interval_secs": 3, + "port": 80, + "path": "rest/app/gui/dashboard/interfaces", + "username": "vyatta" + }, + { + "password": "vyatta", + "polling_interval_secs": 3, + "port": 80, + "path": "rest/app/gui/dashboard/sysinfo", + "username": "vyatta" + } + ], + "id": "a6ce0f96-d6cc-11e5-9540-001b21b98a89", + "monitoring-param": [ + { + "json-query-params": { + "json-path": "$.system.mem_total" + }, + "description": "Memory usage of vrouter", + "widget-type": "COUNTER", + "group-tag": "Group1", + "units": "KB", + "value-type": "INT", + "name": "Total memory", + "http-endpoint-ref": "rest/app/gui/dashboard/sysinfo", + "id": 1, + "json-query-method": "JSONPATH" + }, + { + "json-query-params": { + "json-path": "$.system.mem" + }, + "description": "Memory utilization of vrouter", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "%", + "value-type": "DECIMAL", + "name": "Memory utilization", + "numeric-constraints": { + "min-value": 0, + "max-value": 100 + }, + "http-endpoint-ref": "rest/app/gui/dashboard/sysinfo", + "id": 2, + "json-query-method": "JSONPATH" + }, + { + "json-query-params": { + "json-path": "$.system.cpu[0].usage" + }, + "description": "CPU utilization (all) of vrouter", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "%", + "value-type": "DECIMAL", + "name": "CPU utilization", + "numeric-constraints": { + "min-value": 0, + "max-value": 100 + }, + "http-endpoint-ref": "rest/app/gui/dashboard/sysinfo", + "id": 3, + "json-query-method": "JSONPATH" + }, + { + "json-query-params": { + "object-path": "$..interfaces[@.name is 'dp0s6']['in']" + }, + "description": "Incoming bytes at interface", + "widget-type": "COUNTER", + "group-tag": "Group2", + "units": "bytes", + "value-type": "INT", + "name": "Interface[dp0s6] Rx", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "id": 4, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..interfaces[@.name is 'dp0s6']['out']" + }, + "description": "Outgoing bytes at interface", + "widget-type": "COUNTER", + "group-tag": "Group2", + "units": "bytes", + "value-type": "INT", + "name": "Interface[dp0s6] Tx", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "id": 5, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..interfaces[@.name is 'tun0']['in']" + }, + "description": "Incoming bytes at interface", + "widget-type": "COUNTER", + "group-tag": "Group2", + "units": "bytes", + "value-type": "INT", + "name": "Interface[tun0] Rx", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "id": 6, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..interfaces[@.name is 'tun0']['out']" + }, + "description": "Outgoing bytes at interface", + "widget-type": "COUNTER", + "group-tag": "Group2", + "units": "bytes", + "value-type": "INT", + "name": "Interface[tun0] Tx", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "id": 7, + "json-query-method": "OBJECTPATH" + } + ] + }, + { + "description": "This is a Benu VSE-SI VNF", + "vdu": [ + { + "count": 1, + "vm-flavor": { + "memory-mb": 16384, + "storage-gb": 64, + "vcpu-count": 
4 + }, + "name": "RW.VM.MASTER", + "image": "Benu_VSE_Image_02_09.qcow2", + "id": "c1bb1a92-aa48-4908-89b2-5a78ff324953", + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nwrite_files:\n - path: /opt/rift/.vnf_start_conf\n content: |\n VNFNAME=\"vsesi\"\n VDUNAME={{ vdu.name }}\n MASTERIP= \nruncmd: \n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, multivmvnf.service ]\n - [ systemctl, start, --no-block, multivmvnf.service ]\n" + }, + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "vsesi_vnfd/cp0" + }, + { + "name": "eth2", + "virtual-interface": { + "type": "SR-IOV" + }, + "vnfd-connection-point-ref": "vsesi_vnfd/cp1" + } + ], + "vm-flavor": { + "memory-mb": 32768, + "storage-gb": 128, + "vcpu-count": 4 + }, + "name": "RW.VM.FASTPATH.LEAD", + "image": "Benu_VSE_Image_02_09.qcow2", + "id": "d1bb1a92-aa48-4908-89b2-5a78ff324953", + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False } \nssh_pwauth: True\nwrite_files:\n - path: /opt/rift/.vnf_start_conf\n content: |\n VNFNAME=\"vsesi\"\n VDUNAME={{ vdu.name }}\n MASTERIP= {{ vdu[c1bb1a92-aa48-4908-89b2-5a78ff324953].mgmt.ip }}\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, multivmvnf.service ]\n - [ systemctl, start, --no-block, multivmvnf.service ]\n - [ systemctl, enable, benuvnf.service ]\n - [ systemctl, start, --no-block, benuvnf.service ]\n" + } + ], + "connection-point": [ + { + "name": "vsesi_vnfd/cp0", + "type": "VPORT" + }, + { + "name": "vsesi_vnfd/cp1", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "c1bb1a92-aa48-4908-89b2-5a78ff324953", + "port": 2022 + }, + "version": 1, + "short-name": "vsesi_vnfd", + "name": "vsesi_vnfd", + "vendor": "Benu", + "http-endpoint": [ + { + "polling_interval_secs": 3, + "headers": [ + { + "key": "Accept", + "value": "json" + }, + { + "key": "Content-type", + "value": "application/vnd.yang.data+json" + } + ], + "port": 8008, + "password": "fedora", + "path": "api/operational/vnf-opdata/vnf/vsesi,0/port-state", + "username": "fedora" + } + ], + "id": "b1633f8c-ce8c-11e5-a62b-001b21b98a89", + "monitoring-param": [ + { + "json-query-params": { + "object-path": "$..*[@.portname is 'vsesi/1/1'].counters.'tx-rate-mbps'" + }, + "description": "Outgoing byte rate of interface", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "mbps", + "value-type": "INT", + "name": "Cp0 Tx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 100 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/vsesi,0/port-state", + "id": 1, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..*[@.portname is 'vsesi/1/1'].counters.'rx-rate-mbps'" + }, + "description": "Incoming byte rate of interface", + "widget-type": "GAUGE", + "group-tag": "Group1", + "units": "mbps", + "value-type": "INT", + "name": "Cp0 Rx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 100 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/vsesi,0/port-state", + "id": 2, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..*[@.portname is 'vsesi/1/2'].counters.'tx-rate-mbps'" + }, + "description": "Outgoing byte rate of interface", + "widget-type": "GAUGE", + "group-tag": "Group2", + "units": "mbps", + "value-type": "INT", + "name": "Cp1 Tx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 100 + }, + 
"http-endpoint-ref": "api/operational/vnf-opdata/vnf/vsesi,0/port-state", + "id": 3, + "json-query-method": "OBJECTPATH" + }, + { + "json-query-params": { + "object-path": "$..*[@.portname is 'vsesi/1/2'].counters.'rx-rate-mbps'" + }, + "description": "Incoming byte rate of interface", + "widget-type": "GAUGE", + "group-tag": "Group2", + "units": "mbps", + "value-type": "INT", + "name": "Cp1 Rx Rate", + "numeric-constraints": { + "min-value": 0, + "max-value": 100 + }, + "http-endpoint-ref": "api/operational/vnf-opdata/vnf/vsesi,0/port-state", + "id": 4, + "json-query-method": "OBJECTPATH" + } + ] + }, + { + "description": "This is a 6Wind TurboRouter VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "turborouter_vnfd/cp0" + }, + { + "name": "eth2", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "turborouter_vnfd/cp1" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "storage-gb": 40, + "vcpu-count": 2 + }, + "name": "iovdu", + "image": "turbo-router-riftio-5.img.qcow2", + "id": "c49dffe8-d6d8-11e5-9537-6cb3113b406f", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "turborouter_vnfd/cp0", + "type": "VPORT" + }, + { + "name": "turborouter_vnfd/cp1", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "c49dffe8-d6d8-11e5-9537-6cb3113b406f", + "port": 80 + }, + "version": 1, + "short-name": "turborouter_vnfd", + "name": "turborouter_vnfd", + "vendor": "6Wind", + "id": "c49db92a-d6d8-11e5-9537-6cb3113b406f" + }, + { + "description": "This is a Clearwater IMS VNF", + "vdu": [ + { + "count": 1, + "external-interface": [ + { + "name": "eth1", + "virtual-interface": { + "type": "VIRTIO" + }, + "vnfd-connection-point-ref": "cwims_vnfd/cp0" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "storage-gb": 40, + "vcpu-count": 2 + }, + "name": "iovdu", + "image": "cw-aio.qcow2", + "id": "c66438c4-d6d8-11e5-9f1a-6cb3113b406f", + "cloud-init": "#cloud-config\n" + } + ], + "connection-point": [ + { + "name": "cwims_vnfd/cp0", + "type": "VPORT" + } + ], + "mgmt-interface": { + "vdu-id": "c66438c4-d6d8-11e5-9f1a-6cb3113b406f", + "dashboard-params": { + "port": 80, + "path": "/" + }, + "port": 80 + }, + "version": 1, + "short-name": "cwims_vnfd", + "name": "cwims_vnfd", + "vendor": "Clearwater", + "http-endpoint": [ + { + "password": "vyatta", + "polling_interval_secs": 2, + "path": "rest/app/gui/dashboard/sysinfo", + "username": "vyatta" + } + ], + "id": "c663ea54-d6d8-11e5-9f1a-6cb3113b406f" + } + ] + }, + { + "id": "GUID-3", + "name": "RIFT.ware™ PNF Descriptors Catalog", + "short-name": "rift.ware-pnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "pnfd", + "meta": { + "icon-svg": "data:image/svg+xml, " + }, + "descriptors": [] + } +] \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/empty-nsd-catalog.json b/modules/ui/composer/webapp/src/assets/empty-nsd-catalog.json new file mode 100644 index 0000000..db379d6 --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/empty-nsd-catalog.json @@ -0,0 +1,290 @@ +[ + { + "id": "GUID-1", + "name": "RIFT.ware™ NS Descriptors Catalog", + "short-name": "rift.ware-nsd-cat", + "description": "RIFT.ware™, an open source NFV 
development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "nsd", + "descriptors": [] + }, + { + "id": "GUID-2", + "name": "RIFT.ware™ VNF Descriptors Catalog", + "short-name": "rift.ware-vnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "vnfd", + "descriptors": [ + { + "id": "ba145e82-626b-11e5-998d-6cb3113b406f", + "name": "ping-vnfd", + "short-name": "ping-vnfd", + "vendor": "RIFT.io", + "description": "This is an example RIFT.ware VNF", + "version": "1.0", + "internal-vld": [ + { + "id": "ba1478fe-626b-11e5-998d-6cb3113b406f", + "name": "fabric", + "short-name": "fabric", + "description": "Virtual link for internal fabric", + "type": "ELAN" + } + ], + "connection-point": [ + { + "name": "ping-vnfd/cp0", + "type": "VPORT" + }, + { + "name": "ping-vnfd/cp1", + "type": "VPORT" + } + ], + "vdu": [ + { + "id": "ba14a504-626b-11e5-998d-6cb3113b406f", + "name": "iovdu", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] + }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "ba153744-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + }, + { + "id": "ba15577e-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + } + ], + "internal-interface": [ + { + "name": "eth0", + "vdu-internal-connection-point-ref": "ba153744-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vdu-internal-connection-point-ref": "ba15577e-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + } + ], + "external-interface": [ + { + "name": "eth0", + "vnfd-connection-point-ref": "ping-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "ping-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + } + ] + }, + { + "id": "ba1947da-626b-11e5-998d-6cb3113b406f", + "name": "pong-vnfd", + "short-name": "pong-vnfd", + "vendor": "RIFT.io", + "description": "This is an example RIFT.ware VNF", + "version": "1.0", + "internal-vld": [ + { + "id": "ba195068-626b-11e5-998d-6cb3113b406f", + "name": "fabric", + "short-name": "fabric", + "description": "Virtual link for internal fabric", + "type": "ELAN" + } + ], + "connection-point": [ + { + "name": "pong-vnfd/cp0", + "type": "VPORT" + }, + { + "name": "pong-vnfd/cp1", + "type": "VPORT" + } + ], + "vdu": [ + { + "id": "ba1960ee-626b-11e5-998d-6cb3113b406f", + 
"name": "iovdu", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] + }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "ba197a98-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + }, + { + "id": "ba198696-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + } + ], + "internal-interface": [ + { + "name": "eth0", + "vdu-internal-connection-point-ref": "ba197a98-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vdu-internal-connection-point-ref": "ba198696-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + } + ], + "external-interface": [ + { + "name": "eth0", + "vnfd-connection-point-ref": "pong-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "pong-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + } + ] + } + ] + }, + { + "id": "GUID-3", + "name": "RIFT.ware™ PNF Descriptors Catalog", + "short-name": "rift.ware-pnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "pnfd", + "descriptors": [] + } +] \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/favicons/android-chrome-144x144.png b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-144x144.png new file mode 100644 index 0000000..4c4a4e9 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-144x144.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/android-chrome-192x192.png b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-192x192.png new file mode 100644 index 0000000..457cbff Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-192x192.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/android-chrome-36x36.png b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-36x36.png new file mode 100644 index 0000000..89bc496 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-36x36.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/android-chrome-48x48.png b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-48x48.png new file mode 100644 index 0000000..27e0eda Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-48x48.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/android-chrome-72x72.png 
b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-72x72.png new file mode 100644 index 0000000..ddb6815 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-72x72.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/android-chrome-96x96.png b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-96x96.png new file mode 100644 index 0000000..063ac0f Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/android-chrome-96x96.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-114x114.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-114x114.png new file mode 100644 index 0000000..978d123 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-114x114.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-120x120.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-120x120.png new file mode 100644 index 0000000..c8c6a18 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-120x120.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-144x144.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-144x144.png new file mode 100644 index 0000000..af3d971 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-144x144.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-152x152.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-152x152.png new file mode 100644 index 0000000..1380273 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-152x152.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-180x180.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-180x180.png new file mode 100644 index 0000000..705da61 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-180x180.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-57x57.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-57x57.png new file mode 100644 index 0000000..98579a5 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-57x57.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-60x60.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-60x60.png new file mode 100644 index 0000000..3556937 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-60x60.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-72x72.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-72x72.png new file mode 100644 index 0000000..188a99a Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-72x72.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-76x76.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-76x76.png new file mode 100644 index 0000000..b6ab765 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-76x76.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-precomposed.png 
b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-precomposed.png new file mode 100644 index 0000000..e2ea832 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon-precomposed.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon.png b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon.png new file mode 100644 index 0000000..705da61 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/apple-touch-icon.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/browserconfig.xml b/modules/ui/composer/webapp/src/assets/favicons/browserconfig.xml new file mode 100644 index 0000000..6ad92c2 --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/favicons/browserconfig.xml @@ -0,0 +1,27 @@ + + + + + + + + + + #da532c + + + \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/favicons/favicon-16x16.png b/modules/ui/composer/webapp/src/assets/favicons/favicon-16x16.png new file mode 100644 index 0000000..962310f Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/favicon-16x16.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/favicon-194x194.png b/modules/ui/composer/webapp/src/assets/favicons/favicon-194x194.png new file mode 100644 index 0000000..4fe8e4c Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/favicon-194x194.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/favicon-32x32.png b/modules/ui/composer/webapp/src/assets/favicons/favicon-32x32.png new file mode 100644 index 0000000..d5068bd Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/favicon-32x32.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/favicon-96x96.png b/modules/ui/composer/webapp/src/assets/favicons/favicon-96x96.png new file mode 100644 index 0000000..6c20bb0 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/favicon-96x96.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/favicon.ico b/modules/ui/composer/webapp/src/assets/favicons/favicon.ico new file mode 100644 index 0000000..0b7d4b7 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/favicon.ico differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/manifest.json b/modules/ui/composer/webapp/src/assets/favicons/manifest.json new file mode 100644 index 0000000..74798cf --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/favicons/manifest.json @@ -0,0 +1,41 @@ +{ + "name": "RIFT.ware", + "icons": [ + { + "src": "\/android-chrome-36x36.png", + "sizes": "36x36", + "type": "image\/png", + "density": "0.75" + }, + { + "src": "\/android-chrome-48x48.png", + "sizes": "48x48", + "type": "image\/png", + "density": "1.0" + }, + { + "src": "\/android-chrome-72x72.png", + "sizes": "72x72", + "type": "image\/png", + "density": "1.5" + }, + { + "src": "\/android-chrome-96x96.png", + "sizes": "96x96", + "type": "image\/png", + "density": "2.0" + }, + { + "src": "\/android-chrome-144x144.png", + "sizes": "144x144", + "type": "image\/png", + "density": "3.0" + }, + { + "src": "\/android-chrome-192x192.png", + "sizes": "192x192", + "type": "image\/png", + "density": "4.0" + } + ] +} diff --git a/modules/ui/composer/webapp/src/assets/favicons/mstile-144x144.png b/modules/ui/composer/webapp/src/assets/favicons/mstile-144x144.png new file mode 100644 index 0000000..aece734 Binary files /dev/null and 
b/modules/ui/composer/webapp/src/assets/favicons/mstile-144x144.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/mstile-150x150.png b/modules/ui/composer/webapp/src/assets/favicons/mstile-150x150.png new file mode 100644 index 0000000..8750124 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/mstile-150x150.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/mstile-310x150.png b/modules/ui/composer/webapp/src/assets/favicons/mstile-310x150.png new file mode 100644 index 0000000..db6d760 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/mstile-310x150.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/mstile-310x310.png b/modules/ui/composer/webapp/src/assets/favicons/mstile-310x310.png new file mode 100644 index 0000000..2cacdd1 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/mstile-310x310.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/mstile-70x70.png b/modules/ui/composer/webapp/src/assets/favicons/mstile-70x70.png new file mode 100644 index 0000000..34ab792 Binary files /dev/null and b/modules/ui/composer/webapp/src/assets/favicons/mstile-70x70.png differ diff --git a/modules/ui/composer/webapp/src/assets/favicons/safari-pinned-tab.svg b/modules/ui/composer/webapp/src/assets/favicons/safari-pinned-tab.svg new file mode 100644 index 0000000..fa3c696 --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/favicons/safari-pinned-tab.svg @@ -0,0 +1,69 @@ [SVG markup lost in extraction; of the 69 added lines only the metadata text survives: "Created by potrace 1.11, written by Peter Selinger 2001-2013". The remainder is presumably the traced path data for the Safari pinned-tab icon.] diff --git a/modules/ui/composer/webapp/src/assets/juju-catalog.json b/modules/ui/composer/webapp/src/assets/juju-catalog.json new file mode 100644 index 0000000..9fe0ceb --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/juju-catalog.json @@ -0,0 +1,936 @@ +[ + { + "id": "GUID-1", + "name": "RIFT.ware™ NS Descriptors Catalog", + "short-name": "rift.ware-nsd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "nsd", + "descriptors": [ + { + "id": "9c450720-d670-11e5-a5c7-6cb3113b406f", + "config-primitive": [ + { + "vnf-primitive-group": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b152c2e6-d34c-11e5-8463-6cb3113b406f", + "primitive": [ + { + "name": "create-update-user", + "index": 1 + } + ], + "vnfd-name": "cwims_vnfd" + } + ], + "name": "Add User" + }, + { + "vnf-primitive-group": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b152c2e6-d34c-11e5-8463-6cb3113b406f", + "primitive": [ + { + "name": "delete-user", + "index": 1 + } + ], + "vnfd-name": "cwims_vnfd" + } + ], + "name": "Delete User" + }, + { + "vnf-primitive-group": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b152c2e6-d34c-11e5-8463-6cb3113b406f", + "primitive": [ + { + "name": "config", + "index": 1 + } + ], + "vnfd-name": "cwims_vnfd" + } + ], + "name": "Update Domain" + } + ], + "description": "NS Clearwater IMS only", + "vendor": "RIFT.io", + "name": "cwims_juju_nsd", + "input-parameter-xpath": [ + { + "label": "net-corp-vlan", + "default-value": 55, + "xpath": "/nsd:nsd-catalog/nsd:nsd[nsd:id=9c450720-d670-11e5-a5c7-6cb3113b406f]/nsd:vld[nsd:id=9c4646c6-d670-11e5-a5c7-6cb3113b406f]/nsd:provider-network/manotypes:segmentation_id" + } + ], + "version": 1, + "short-name":
"cwims_juju_nsd", + "vld": [ + { + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b152c2e6-d34c-11e5-8463-6cb3113b406f", + "vnfd-connection-point-ref": "cwims_vnfd/cp0" + } + ], + "id": "9c4646c6-d670-11e5-a5c7-6cb3113b406f", + "provider-network": { + "overlay-type": "VLAN", + "physical-network": "physnet1" + }, + "description": "Link from cwims to ELAN", + "vendor": "RIFT.io", + "name": "net-corp", + "version": 1, + "short-name": "net-corp", + "type": "ELAN" + } + ], + "constituent-vnfd": [ + { + "vnf-configuration": { + "config-type": "juju", + "initial-config-primitive": [ + { + "parameter": [ + { + "value": "", + "name": "proxied_ip" + } + ], + "seq": 1, + "name": "config" + } + ], + "juju": { + "charm": "clearwater-aio-proxy" + }, + "config-primitive": [ + { + "parameter": [ + { + "data-type": "string", + "name": "base_number" + }, + { + "data-type": "string", + "name": "home_domain" + }, + { + "data-type": "integer", + "name": "number_count" + }, + { + "data-type": "string", + "name": "password" + } + ], + "name": "config" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "number", + "mandatory": "true" + }, + { + "data-type": "string", + "name": "password", + "mandatory": "true" + } + ], + "name": "create-update-user" + }, + { + "parameter": [ + { + "data-type": "string", + "name": "number", + "mandatory": "true" + } + ], + "name": "delete-user" + } + ], + "input-params": { + "config-priority": 1, + "config-delay": 0 + } + }, + "vnfd-id-ref": "b152c2e6-d34c-11e5-8463-6cb3113b406f", + "member-vnf-index": 1 + } + ], + "meta": "{\"instance-ref-count\": 5}" + }, + { + "id": "9d887e58-d50f-11e5-80c8-6cb3113b406f", + "config-primitive": [ + { + "user-defined-script": "/home/rift/.install/usr/bin/add_corporation.py", + "parameter": [ + { + "data-type": "string", + "name": "Corporation Name", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 1, + "name": "Tunnel Key", + "mandatory": "true" + } + ], + "name": "Add Corporation", + "parameter-group": [ + { + "parameter": [ + { + "data-type": "string", + "default-value": "10.0.1.1", + "name": "Corp. Gateway", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.0.1.0/24", + "name": "Corp. Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "eth3", + "name": "Interface Name", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.255.255.0/24", + "name": "Local Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": 0, + "name": "Local Network Area", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 101, + "name": "Vlan ID", + "mandatory": "true" + } + ], + "name": "PE1", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "default-value": "10.0.2.1", + "name": "Corp. Gateway", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.0.2.0/24", + "name": "Corp. 
Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "eth3", + "name": "Interface Name", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.255.255.0/24", + "name": "Local Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": 0, + "name": "Local Network Area", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 102, + "name": "Vlan ID", + "mandatory": "true" + } + ], + "name": "PE2", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "default-value": "10.0.4.1", + "name": "Corp. Gateway", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.0.4.0/24", + "name": "Corp. Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "eth4", + "name": "Interface Name", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.255.255.0/24", + "name": "Local Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": 0, + "name": "Local Network Area", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 108, + "name": "Vlan ID", + "mandatory": "true" + } + ], + "name": "PE3", + "mandatory": "false" + } + ] + }, + { + "user-defined-script": "/home/rift/.install/usr/bin/add_corporation.py", + "parameter": [ + { + "data-type": "string", + "name": "Corporation Name", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 10, + "name": "Tunnel Key", + "mandatory": "true" + } + ], + "name": "Add SP Test Corporation", + "parameter-group": [ + { + "parameter": [ + { + "data-type": "string", + "default-value": "10.0.1.1", + "name": "Corp. Gateway", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.0.1.0/24", + "name": "Corp. Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "eth3", + "name": "Interface Name", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.255.255.0/24", + "name": "Local Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": 0, + "name": "Local Network Area", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 3000, + "name": "Vlan ID", + "mandatory": "true" + } + ], + "name": "PE1", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "default-value": "10.0.2.1", + "name": "Corp. Gateway", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.0.2.0/24", + "name": "Corp. Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "eth3", + "name": "Interface Name", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.255.255.0/24", + "name": "Local Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": 0, + "name": "Local Network Area", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 3000, + "name": "Vlan ID", + "mandatory": "true" + } + ], + "name": "PE2", + "mandatory": "false" + }, + { + "parameter": [ + { + "data-type": "string", + "default-value": "10.0.3.1", + "name": "Corp. Gateway", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.0.3.0/24", + "name": "Corp. 
Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "eth3", + "name": "Interface Name", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": "10.255.255.0/24", + "name": "Local Network", + "mandatory": "true" + }, + { + "data-type": "string", + "default-value": 0, + "name": "Local Network Area", + "mandatory": "true" + }, + { + "data-type": "integer", + "default-value": 3000, + "name": "Vlan ID", + "mandatory": "true" + } + ], + "name": "PE3", + "mandatory": "false" + } + ] + } + ], + "description": "6wind turborouter only", + "vendor": "6Wind", + "name": "tr_juju_nsd", + "version": 1, + "short-name": "tr_juju_nsd", + "vld": [ + { + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b14795b0-d34c-11e5-8e51-6cb3113b406f", + "vnfd-connection-point-ref": "turborouter_vnfd/cp0" + } + ], + "id": "9d8a7b4a-d50f-11e5-80c8-6cb3113b406f", + "description": "Link from TR to ELAN", + "vendor": "RIFT.io", + "name": "Link1", + "version": 1, + "short-name": "Link1", + "type": "ELAN" + } + ], + "constituent-vnfd": [ + { + "vnf-configuration": { + "config-type": "juju", + "initial-config-primitive": [ + { + "parameter": [ + { + "value": "6windos", + "name": "pass" + }, + { + "value": "root", + "name": "user" + }, + { + "value": "", + "name": "vpe-router" + } + ], + "seq": 1, + "name": "config" + }, + { + "parameter": [ + { + "value": "10.10.10.2/30", + "name": "cidr" + }, + { + "value": "eth1", + "name": "iface-name" + } + ], + "seq": 2, + "name": "configure-interface" + } + ], + "juju": { + "charm": "vpe-router" + }, + "input-params": { + "config-priority": 0, + "config-delay": 0 + } + }, + "vnfd-id-ref": "b14795b0-d34c-11e5-8e51-6cb3113b406f", + "member-vnf-index": 1 + } + ], + "meta": { + "instance-ref-count": 4 + } + }, + { + "id": "b53910e0-d34c-11e5-b34f-6cb3113b406f", + "description": "NS Vrouter-only", + "vendor": "RIFT.io", + "name": "vrouter_nsd", + "version": 1, + "short-name": "vrouter_nsd", + "vld": [ + { + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b3ebcd0e-d34c-11e5-b72f-6cb3113b406f", + "vnfd-connection-point-ref": "vrouter_vnfd/cp0" + } + ], + "id": "b5505bb0-d34c-11e5-b34f-6cb3113b406f", + "description": "Link from Vrouter to ELAN", + "vendor": "RIFT.io", + "name": "Link1", + "version": 1, + "short-name": "Link1", + "type": "ELAN" + }, + { + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "b3ebcd0e-d34c-11e5-b72f-6cb3113b406f", + "vnfd-connection-point-ref": "vrouter_vnfd/cp1" + } + ], + "id": "b5508324-d34c-11e5-b34f-6cb3113b406f", + "description": "Link from Vrouter to ELAN", + "vendor": "RIFT.io", + "name": "Link2", + "version": 1, + "short-name": "Link2", + "type": "ELAN" + } + ], + "constituent-vnfd": [ + { + "vnfd-id-ref": "b3ebcd0e-d34c-11e5-b72f-6cb3113b406f", + "member-vnf-index": 1 + } + ] + } + ] + }, + { + "id": "GUID-2", + "name": "RIFT.ware™ VNF Descriptors Catalog", + "short-name": "rift.ware-vnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "vnfd", + "meta": { + "icon-svg": "data:image/svg+xml, " + }, + "descriptors": [ + { + "id": "b14795b0-d34c-11e5-8e51-6cb3113b406f", + "description": "This is a 6Wind TurboRouter VNF", + "vendor": "6Wind", + "name": "turborouter_vnfd", + "version": 1, + 
"short-name": "turborouter_vnfd", + "vdu": [ + { + "id": "b1481ec2-d34c-11e5-8e51-6cb3113b406f", + "count": 1, + "name": "iovdu", + "vm-flavor": { + "memory-mb": 4096, + "vcpu-count": 2, + "storage-gb": 40 + }, + "image": "turbo-router-riftio-5.img.qcow2", + "external-interface": [ + { + "vnfd-connection-point-ref": "turborouter_vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth1" + }, + { + "vnfd-connection-point-ref": "turborouter_vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth2" + } + ], + "cloud-init": "#cloud-config\n" + } + ], + "mgmt-interface": { + "vdu-id": "b1481ec2-d34c-11e5-8e51-6cb3113b406f", + "port": 80 + }, + "connection-point": [ + { + "type": "VPORT", + "name": "turborouter_vnfd/cp0" + }, + { + "type": "VPORT", + "name": "turborouter_vnfd/cp1" + } + ], + "monitoring-param": [ + { + "id": 1, + "units": "KB", + "description": "Memory usage of turborouter", + "name": "Total memory", + "group-tag": "Group1", + "widget-type": "COUNTER", + "http-endpoint-ref": "rest/info", + "value-type": "INT", + "json-query-method": "JSONPATH", + "json-query-params": { + "json-path": "$.system.mem_total" + } + } + ], + "http-endpoint": [ + { + "username": "root", + "polling_interval_secs": 3, + "password": "6windos", + "path": "rest/info", + "port": 80 + } + ] + }, + { + "id": "b152c2e6-d34c-11e5-8463-6cb3113b406f", + "description": "This is a Clearwater IMS VNF", + "vendor": "Clearwater", + "name": "cwims_vnfd", + "version": 1, + "short-name": "cwims_vnfd", + "vdu": [ + { + "id": "b1530f76-d34c-11e5-8463-6cb3113b406f", + "count": 1, + "name": "iovdu", + "vm-flavor": { + "memory-mb": 4096, + "vcpu-count": 2, + "storage-gb": 40 + }, + "image": "cw-aio.qcow2", + "external-interface": [ + { + "vnfd-connection-point-ref": "cwims_vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth1" + } + ], + "cloud-init": "#cloud-config\n" + } + ], + "mgmt-interface": { + "vdu-id": "b1530f76-d34c-11e5-8463-6cb3113b406f", + "dashboard-params": { + "path": "/", + "port": 80 + }, + "port": 80 + }, + "connection-point": [ + { + "type": "VPORT", + "name": "cwims_vnfd/cp0" + } + ], + "http-endpoint": [ + { + "username": "vyatta", + "password": "vyatta", + "path": "rest/app/gui/dashboard/sysinfo", + "polling_interval_secs": 2 + } + ] + }, + { + "id": "b3ebcd0e-d34c-11e5-b72f-6cb3113b406f", + "description": "This is a Brocade vRouter VNF", + "vendor": "Brocade", + "name": "vrouter_vnfd", + "version": 1, + "short-name": "vrouter_vnfd", + "vdu": [ + { + "id": "b3ecbd5e-d34c-11e5-b72f-6cb3113b406f", + "count": 1, + "name": "iovdu", + "vm-flavor": { + "memory-mb": 8192, + "vcpu-count": 4, + "storage-gb": 40 + }, + "image": "vyatta-7.qcow2", + "external-interface": [ + { + "vnfd-connection-point-ref": "vrouter_vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth0" + }, + { + "vnfd-connection-point-ref": "vrouter_vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth1" + } + ], + "cloud-init": "#cloud-config\n" + } + ], + "mgmt-interface": { + "vdu-id": "b3ecbd5e-d34c-11e5-b72f-6cb3113b406f", + "dashboard-params": { + "path": "/", + "port": 80 + }, + "port": 80 + }, + "connection-point": [ + { + "type": "VPORT", + "name": "vrouter_vnfd/cp0" + }, + { + "type": "VPORT", + "name": "vrouter_vnfd/cp1" + } + ], + "monitoring-param": [ + { + "id": 1, + "units": "KB", + "description": "Memory usage of vrouter", + "name": "Total memory", + "group-tag": "Group1", + "widget-type": "COUNTER", + "http-endpoint-ref": 
"rest/app/gui/dashboard/sysinfo", + "value-type": "INT", + "json-query-method": "JSONPATH", + "json-query-params": { + "json-path": "$.system.mem_total" + } + }, + { + "id": 2, + "units": "%", + "description": "Memory utilization of vrouter", + "name": "Memory utilization", + "numeric-constraints": { + "max-value": 100, + "min-value": 0 + }, + "group-tag": "Group1", + "widget-type": "GAUGE", + "http-endpoint-ref": "rest/app/gui/dashboard/sysinfo", + "value-type": "DECIMAL", + "json-query-method": "JSONPATH", + "json-query-params": { + "json-path": "$.system.mem" + } + }, + { + "id": 3, + "units": "%", + "description": "CPU utilization (all) of vrouter", + "name": "CPU utilization", + "numeric-constraints": { + "max-value": 100, + "min-value": 0 + }, + "group-tag": "Group1", + "widget-type": "GAUGE", + "http-endpoint-ref": "rest/app/gui/dashboard/sysinfo", + "value-type": "DECIMAL", + "json-query-method": "JSONPATH", + "json-query-params": { + "json-path": "$.system.cpu[0].usage" + } + }, + { + "id": 4, + "units": "bytes", + "description": "Incoming bytes at interface", + "name": "Interface[dp0s4] Rx", + "group-tag": "Group2", + "widget-type": "COUNTER", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "value-type": "INT", + "json-query-method": "OBJECTPATH", + "json-query-params": { + "object-path": "$..interfaces[@.name is 'dp0s4']['in']" + } + }, + { + "id": 5, + "units": "bytes", + "description": "Outgoing bytes at interface", + "name": "Interface[dp0s4] Tx", + "group-tag": "Group2", + "widget-type": "COUNTER", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "value-type": "INT", + "json-query-method": "OBJECTPATH", + "json-query-params": { + "object-path": "$..interfaces[@.name is 'dp0s4']['out']" + } + }, + { + "id": 6, + "units": "bytes", + "description": "Incoming bytes at interface", + "name": "Interface[dp0s5] Rx", + "group-tag": "Group3", + "widget-type": "COUNTER", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "value-type": "INT", + "json-query-method": "OBJECTPATH", + "json-query-params": { + "object-path": "$..interfaces[@.name is 'dp0s5']['in']" + } + }, + { + "id": 7, + "units": "bytes", + "description": "Outgoing bytes at interface", + "name": "Interface[dp0s5] Tx", + "group-tag": "Group3", + "widget-type": "COUNTER", + "http-endpoint-ref": "rest/app/gui/dashboard/interfaces", + "value-type": "INT", + "json-query-method": "OBJECTPATH", + "json-query-params": { + "object-path": "$..interfaces[@.name is 'dp0s5']['out']" + } + } + ], + "http-endpoint": [ + { + "username": "vyatta", + "polling_interval_secs": 3, + "password": "vyatta", + "path": "rest/app/gui/dashboard/interfaces", + "port": 80 + }, + { + "username": "vyatta", + "polling_interval_secs": 3, + "password": "vyatta", + "path": "rest/app/gui/dashboard/sysinfo", + "port": 80 + } + ] + } + ] + }, + { + "id": "GUID-3", + "name": "RIFT.ware™ PNF Descriptors Catalog", + "short-name": "rift.ware-pnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "pnfd", + "meta": { + "icon-svg": "data:image/svg+xml, " + }, + "descriptors": [] + } +] \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/onvelocity-color-theme.json b/modules/ui/composer/webapp/src/assets/onvelocity-color-theme.json new file mode 100644 index 0000000..f4f488f --- 
/dev/null +++ b/modules/ui/composer/webapp/src/assets/onvelocity-color-theme.json @@ -0,0 +1,33 @@ +{ + "nsd": { + "primary": "#2AA198", + "secondary": "#19615C" + }, + "vld": { + "primary": "#CB4B16", + "secondary": "#8B330F" + }, + "vdu": { + "primary": "#b58900", + "secondary": "#755900" + }, + "vnfd": { + "primary": "#268BD2", + "secondary": "#0F3752" + }, + "constituent-vnfd": { + "primary": "#268BD2", + "secondary": "#0F3752" + }, + "vnffgd": { + "primary": "#859900", + "secondary": "#4E5900" + }, + "common": { + "primary": "black", + "secondary": "white", + "background": "white", + "foreground": "black" + } + +} \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/ping-pong-catalog.json b/modules/ui/composer/webapp/src/assets/ping-pong-catalog.json new file mode 100644 index 0000000..7ae2e35 --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/ping-pong-catalog.json @@ -0,0 +1,767 @@ +[ + { + "id": "GUID-1", + "name": "RIFT.ware™ NS Descriptors Catalog", + "short-name": "rift.ware-nsd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "nsd", + "descriptors": [ + { + "id": "ba1dfbcc-626b-11e5-998d-6cb3113b406f", + "name": "ping-pong-nsd", + "short-name": "ping-pong-nsd", + "vendor": "RIFT.io", + "description": "Toy NS", + "version": "1.0", + "logo": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAWIAAADzCAYAAAC8ERwoAAAABGdBTUEAALGPC/xhBQAAQABJREFUeAHtXQl8FEXWf9UzuTiVJIDgASwocihyBNZbP+9VvxUIeKwoCoTgsetJArg7q3Ltrux6EYIn3hCCfuuxuquCNwkgKiK6XniAkAPlzDXd9f1rQiCEyUz3THVPz0zV7weZ6Xr16r1/97yuevXqFSNVFAJxjkCPqx9L372z7gjStLZM19uKvzqnNowZbZnB0plGDZzzWq7xWs1gdbrXsy3N4FvbdD1i61f3X1AX5+or8RMAAZYAOigVkgSBLlc+0dbYvWcIZ0ZfIoZ/hH+8Lyc6ijhpEcLwCzH2A9p/QQz/iH3OvOyzozrSujUL8xoi5KmaKQQsIaAMsSW4FLGTCHSbVNxGr9ZP9BM7HQb3DBjJYcR5ihMyMMZqOfEPGbEyxvgHqW3avrnpiauqnehb9ZF8CChDnHz33LUaCxfDzt21JzKdTheGlzOWA8Ob6gqBGRnE2WoY5Vc1TXt165K8lTDWGIyrohCIHgFliKPHUHGIAoHc3CWeFca2czH6vApsLoYvNz0Kdk42/YExWgyHyHNVJVPWONmx6ivxEFCGOPHuaVxo1GVs0QDdHzC+v+OcusaF0K0IiZHxFxi935fWiT2+eWHenlbI1GWFQKsIKEPcKjSqQjYC3S4rzmqoNy6H4b0KI+DBsvnHnB9j2yBDkaalP1BZMn5LzOVRAsQNAsoQx82til9BM0cV94WL9Q44VHOdWmyLKVqM1UPPp70pbN7WxfmfxlQW1XlcIKAMcVzcpvgUsvu4RZl1u3fPggGeEEV4WXwq3yQ1o39rmueuypK8d5suqb8KgZYIKEPcEhH1PWoEfD6uPbCueBLcDzMxMuwUNcMEYICFvaeYlnGbclkkwM20QQVliG0ANZlZdh698ESD6w8g+uGEZMYhqO6MdsAg/2nAgGMfWOE7wx+URl1MSgSUIU7K2y5f6axrHmlP2+v+zjm7BjHA6rkKBTFj6zTGr6tcOuWdUGSqLnkQUD+Y5LnXtmmaPXLBqdjtsAgGuIdtnSQgY4S9Pamlem+reHbi1gRUT6lkAQFliC2ApUgPRKD3Da+k/bJ5491YjLs5aRfjDoTE8je4Kio50y6rXjr5DcuNVYOEQUAZ4oS5lc4qkjW2eDBv0J9Cr8c623MC9saYDlfFHytK8merbdMJeH9NqKQMsQmQFMl+BLAIx7JHF93Kid3tmjwQ+8WL608YHb/Yrn3GuI2Pj/8lrhVRwltGQBliy5AlbwORF2K5Xr0AYWkTkhcFezWHMf6Gad5RlSWTPrK3J8XdTQgoQ+ymu+FiWbJzl7TjRvUSjIjPd7GYCSEa3BO1+DelcunkxxJCIaVEWAQ8YSkUQdIjcHhuUXe/XvMGRsInJz0YzgDgxQvvf9v0uzClZsPLbzrTpeollggoQxxL9OOg7+zchYMaDL4coWl94kDcRhGR6wFT/G349xMSuyNdJduOihqcxKFjCihO8vDGiS6ntuv3m4w9G15+PU7kVWJGiIByTUQIXDI06zJ2/gi9gf0bI+H2rtK30aB+hbC5T5GafZ2msXUej2eDkeqvbn/IUdvDnUMnNp9o242eOtN7IhlRT8a13gbnw3BU0iA3LkAyjd1TtTT/VlfdAyWMVASUIZYKZ+Iw65pblIOR8H8QH9zBDVphdPsJAjZe8miel9u0S/kQkQW1suUScdE7tnw/2DD4CBj5c8H/DNcYZsburS7N/4NsnRU/dyCgDLE77oOrpMgatWAo58Z/INQhMRTMj4fzNWLaS6kpqS9vfu6aH5yWpfcVT3XYXrPrfMwIfkuMX4g8yu2clqF5fxrTHqxYmneDijVujkpifFaGODHuozQtAhs1/PrrGAkfKo2pBUbw6VZwjS3MYHzBjyX5myw0tZVUGOWfa3dciTQak+EvH2BrZyGYA5/iytLJ+coYhwApDquUIY7Dm2aXyI
eNfPioBmooxwiws119tMqXURnW0R7oqnVasr5kTH2rdC6oyB49/xS4SW4RkQ2xEAdumjurSqf8KRZ9qz7tQUAZYntwjTuue0d872Mk3N9R4ZGJzKOxWypKJgtXSFyVzLELh5Ffvwt+ZOFPdrBgiZJpI6tL816Q0emRl88/tLaW9TMY748R/2GIK2nfuDbAsT7AxBpBB7hlRITVTrhoduIa/uEzR1pPjX/LmOdzD6V8saXkmkoZ8iQjD2WIk/Gut9D5dN9y77p1G17GD+ucFlW2fcUUeyt+1H88Q8t6pKRkjG5bRw4wzs4tPpkb+j9grIY40F2gC+C3MyXFO/ynxRM3WOmza+6j2Q1GPV4ciBIh3o9x1h8zoMOs8AhB+wsiT77QOFuDUfubKW3brNj0xFXVIehV1V4ElCFWjwJljZpfBCMC36cDRZznxvg9rEPa7KpHrxUjq4Qoge3fRvWNMG53Acu2TigFP/GXHTRt2DcleSJOOmgRuUGyLn1oKGvQL+AssCtyGF64IpbagQIzT/QJjPObCDH812nU6c14f+naBZoyxHYhGyd8s0bPn8ANesgJcWE4viCvdnnV4rwPnegvFn0E/OysYT4M4AVO9A9MX7l+4OSLfD5mNO8ve9SCPsgRfT2cGJfGxOffXJi9nzGK/wkv4ac1SnmyonTiJ0FIkvaSMsRJe+uJuowtGqD7eTlGcBl2w4Cp6iOeNu1+v/XJcbvt7ssN/LNGFd2EWOS58B+n2C4PYzMRYzxDjH47jy4+H6GHN8CJLNwPrv19i7hwGOaH0rTMR34sGVNjO0Yu78C1N8rluMW9eF2ufKKtvmf3avx4+9qqDKOfNU2bVFkyeamt/biQeefcBb/Wdb4EBvFwe8UTi3f0IP4XMc+/srcvudxhjBGuyOfBVTU/kVxVVlFShtgqYglCnzly/iKoMs5OdfAj+1pL0c6tWJz3tZ39uJl3t8uKs+rr9KUYHZ/mZjljLhte2DBG93fQPPNC+bxjLqdNAihDbBOwbmabNXL+5TAMT9spI6aeazxt2AVbn8yvsLOfeODd4+rH0nftrEEKUbooHuSNpYx4eW/Fs3lb9bL8J2Mph9N9K0PsNOIx7u/w3Ic71Rr1n8MoZNsnCvuP5skcWVkyZpd9fcQXZxEi+Oknnz8O18EV8SV5bKTFIuTbHi9dt3Vx/qexkcDZXpUhdhbvmPeGULWHYYSvtUsQ/ICePSpTu2rNwrwGu/qIV76BxbRRCx5ANMOUeNXBYbn9yDw376hO2oxEf56UIXb4yYpld2JrrmGwt+xaTYc74kVs0LhExYqGvsuZo4rEeX/TQ1Op2iYEYKRWeTwpY7eWTPy26Vqi/RXbFlVJAgSGTCpO+XkPvQQjbE8eCeSKSMv0XFT25FWuzhPhhlstTt1o2/9Cho0Vp7tBnjiQoTtmEVe373/Rf3d/9pKlnYRxoFtARId22MQLHIkr5/fVRj5GYf3s0BDuiC/TUj0Xbl6Yt8cO/onIE4nefTgxZFEi6maLTpx31A2jFNE+94ldjLb0EUOmyhDHEHynuj48d0kG/MIFdvQnVrk9xM/b/GxelR38E5lnj0xtIl5iKxJZRxt0u2G5UfW8eKZt4B0zlsoQxwx65zqu5VV5EhO77Bcc5wwha8GlW0unfLP/ovpkFgGxAJWaquXiZfaT2TaKDs41hAHWGFWvi6xxiYKHMsSJcidb0SMwcuA0tZXq6C5zNgdT7BXRMUnu1mImgZ2HVyO1JMJnVTGNAKcTd9exd7tf8ZDNuxZNSxQVIRYkVYkZAtPWdSG95hhiBrYZs16QoyPe9ziOhzXmg2XiaB5xcCdDNi++G6kDRJrBXzAk+AUHEc+iuUPXhZMdxx7djNwD94Sjs1yPxbmBA489eYXvDL/ltqrBQQggN8U8hLfddFCFuhAGAbaRpaaeVPXctZvDELq6WhliJ25P8eoU+oaGE/OfAqN6LAzpMfiLfxyGN5LCdlNWehe67fiQCXQaNxFs+A5DrW6R9NJaG0yld3oYH6RcEq0hZP363t136zHtFi9kVSwhwD5tm85P/f6ZKT9bauYiYmWI7bgZS7iHPi4/AR7UMwP/GJ0C49tGWleMPUFzhl8Vjl/2yAVjDTKeC0dntR5B9pPhkii22k7Rh0YAs5eLMHv5Z2gqVRsMASx6vpeuZZ4dr5nclCEOdlcjueZbn0q1u36DUe4VcCGcFflo10TnzHM2zRn2ejhKTHffxXT3pHB0luoZrT9TyzpebdqwhJppYtyzl3HPLjDdwC5CxrYh0PlHzKZ24Xnejc06daIrfE/B9V8h3VtPDC5cFUYGY/zSgIF9L4lHd5nXrvuYNHynrxxBOl1JNTsvhc6dGvXG42pXYbSJ0oe+GY5957ELjtcbDLlGGJ1qjG5TRjgc+pHXw+3zB2QRPgtGLjVyLtZawsgiJwh7B1b2DZ7iebujwf8bLgNa/9wlqdW0rbfBqS8noy92bA5ljJ8H10rMwsrwArvw03Ub5kL7W6whEHtqNSKO5B40LrJNxEI3jlfnR0fCIvI2bB7NHR72QUNOiWL8KCZF3k+Qloxery6dcnaQGnVJIgLYAn0vniscu2RfwejxcxjfZzXO3zgiSyuXkcshO3dJO65XXYyNKmMg/3kYjqTZp0HrnJmH/reqZEpcuXiUIW79fh5cM738CNL57ZigTcDoIf1gAgeuaJ6Tafaw90L11Cu3uOMOQ98EQyzv7DTEDHu82uCKxZM/DtW3qoseAXHAp1+v+xqx34iYkVkCCeT/jfPj/lGxJO81GGPbpm7iVPDtNTtHYnR/I0aqJ8jUIiwv5DZO5akn/LRswndhaV1CoAyxmRtRsKY3cb/YmYZE6g4cfdOqTGwLZeR0pxbnk7UkxyLdtVike7jl9ai+M1qG0fCoqHioxqYRyBpZ9CcYYp/pBiEIYXBrMXBYhFOf77V66nMItqarcHzTOQY37oVBtvc0mOYSIbyyR6bnFBkj/eZs7fqsNnSEQraw7GgqKHuGeAOmcRypI2NphCEooxfCGWGhDk7rlW4w4Rv+RyioVJ1cBA7JaP93TPG3RcsVRvgNzasNqFqWPzkWRljIX7E0799dtMzj4Yu+Ezo5kxSK0/Dvqow7osXPqfZqRBwM6Xk/ZFDFZpGm8DYnF02CiXLANY/nHJo17D8HXGvxRbglthtGhUy58QNaU1U6ZWiLrtRXmxHIHD1/KsIf50TYTTVenrdUlk5ZFGF7W5p1H7cos273nr9jdHylLR00YwrjVkcp3uOqFk/6b7PLrvyoRsQtb8vU8ouoYtNnMGTTZRqzlt1E8L2GUtu8Fa7ddoNfJFtuTpoaDYcD3oZ6b0a7BxBFUWGVNUbBz6R40o91mxEWemx64qrqqtL8cYxpF9udY0MsFvIG/QGr+MWCXhniJtQL1vaAGwIrrQio59Sj6bJ7/rL3ydc/7LSOcY5YZnkFP5atPTLZYnkcFSezCGx9ctxu0vhss/S4Vzux2eYyGLortpRcU2m2XSzoqkonv9gmnfeHv83m07352WJjUyx0tNKnMsQCrakrryNeJ
0bBF1kBz2HasLHDAXkYP12mXBhdLY2XBQ+ZeruFV7t2GQvgV90UTh4Y4Q+ZJ2UwdjxK30kZru9I68WWZBjkMZD9rkh5mGmHOOd5IrTODG2saJLbEE9d3ZGmlpUAfDF9iVkguqmbr9HycHRZufOPQcha13B0Vuo5Z6Wm6H3cCyxfooJVvUzRKyJTCGx8fHwtCGeGIb6/iyfz15UlE74KQ+e6arzoORYS/4gMdLbFTcNF0c3Qq6e4TvlmAiWvIZ5eNpSY/iFiDEY3w8OdH8XOp7ScVWGFM+i0sDQWCLBIV3mG59C3TTWpXYVIDeEW0VfDIOOvKrIQQAJ5hCKyjQfxQ7wsMc8l1cum3Li+ZExYt9VB7V10oXLp5PsRFXS9XSJh4e5mNyeTT05DXFh+I7YlvwdXRLyM3j5G2Jo//EPKTg5PY4niefPbmY0bApw5iWTdL1LByjvJx5Pz+bIEcXhi4RpCBESL6TtbKTYtVJfmvRCeQ3xQIE79QbhhCvHSwSBWbkFMdpc6vm2CXK7yuCXXD+W+L9OwILeEDENsIXVsL3/0t4utM8MDbgmpO5gwIjb3Iy9YdQIWOJvltcB+Kk53UE35K+Rb2cGM7IomNAKna5mLMI3/UhgpLMj9ZeBxfU+Jp51jobXbX1tdmi/C9UwvUO5vGf6TYfDbRY6M8JTOUySPIRb+4M3bXoMBznUe5ih75PzTcBx63/AK9vVL3LmELc3tmef9cP0G6pnY7BKs8HOplq0g38f2nBwdrMsEvSZmJpyx2zSN/wYLclPjMcOY2Vtzpifzj3jprDBLb56OH17Bt11hnt45yuQwxNNWHwZ/8FswwlJ9qA7eprAj4l8qfugPeeRl0+Pss3AZuPbpz43WfcIiz0BNzbskwgNViQqB6qWT/69y6ZR/RcUkDhqLlw7T0i/DjGyLbHGxkWS8bJ4y+CW+IZ6xtg/puvAHHy8DMId56FjA+Jky2oYdEbMG4ziZsmEx29xouHBlPxNx130QHvgeTVs1QKaMilfiIlBZMn4LJ0++bA3hvju5S+5DPWXzjZZfYhtiERnhr0emMu464BtvHKvC4sQ78PsVk6b9nsQWZo9nIHm1I6nTIR1wCkcKzRnRCRs5toW70Qaj3uForNTDE/mBOXp2oTk6HNdk6G/TtNWDTdIrsiRHQCxEwkUheQbAmW7otm+vtnrr5E1lrfZsN/3U1QPJryMvAz/E7q4s8K+A4X0DMr1O3Ps6Dv/8PmRbC2cvIzynp8ylZo+HlYeUramS8/ObPob9KyIqDP1VKixDKs/hrt//H1YfRWA7AkhYdIPRoK/Hs401EFnFEIb4TlncZPBJzBFxwB/pf9UVRpjRRhjfP2PUOwgj3K74dzlGuY+GNcIW7y6mXBJH/Yynt037xpQInIaYomsi4jwbbqJ/0/SPujddUn8VAq0hULE472scC3Nva/WRXMdvpXeXUcXDI2lrV5vEGxEXfogfev1rAKybXaCF54tTlhlhR5r2GM0e+hYMceNgdW74lpFSYETcQ9aIGLFnP+3d0RVanOkfHgXXT/vQREFqOaFd3WvkW3+qGbdLEA7mLmFbIP3li3a0o7YDFms7IJNZCmn+neT17KROHXfSjX0C57CZY6aoYoUAY+l/J6r5vcxRscH1s6FPWax0atkvfr8JVHzr21HtzhVYPLI2SpMGgfD58nk4u+NB8o3YIY1tGEbiKPadO2r3YAYg5X6CybtVy6acEqZb5OgQO+j4S2HpWiVgKymj8xnk6ym28UZWAqemkIgYwT/eD//E58PxDCB+mSG/QChMWAPqd4J+I2g/x+cvyKN9jlNYvqDunT5XhhrIuKQgUf4CbMrIkyUOIjKWI7XrmbL4RcsncUbEgVOUdz4fEyPMaCtGwH+jTulFdNvxu6O9KVbb76o1OoU2ONY4YuRhzi3BaADwjqLwEVS7tQgMxptmUri6L3EdfmmcdIzk3+Q3QozIwwkXSPQP7MShr3xwQAbdaBRlc3UtXjRY6KU3SfO8QScMWU1jmN5Yqf53GgEtRfur7tcn4p7LcadyOlEMYEzN/BxQNnEMce3Ov+MmneUAZvu7YIQjaLSZ1Pmwe+jmI2r2Vzj7iTU0tA1ncixJxNgPpug5P8YUXSgiTldjO/QH8JsvDEpWvDqFNnJMI2F4DfHP3zMoneyLgTMJ+f+A7f+gz5n0YfkOZOl7HW6mpyi93ctmUpLKFimZ+QlfMQ7EfQvP+RkycACftJ27a08EL3NZDWV0GoJHYhjigrJcLAA5m12JMSwGatfR3GHmRo8hbkLUVZrWlppGclEzwySdkYm8FqIjhrwSEl4BnN1H08s+pJnDV+8T31felWoxFf1Gn4w+uu67HqsPnMPVQSPxnI2kmp3b8PJ4DqGGi2jmMHPRJbGSO6H6Zc/iWZBiiAOwGCRcE8oQS3lGxMGe5Ed2KocKYz+hp98j+qHEoR7DdsN0vW1YInsIOsphy9PIj3SbhWXCPXA0bPsNVCOy4sX4jMDWlesEGacgPHIKRsnr8OaaTYNzlijXReuAyahJ11JKa4yGB/EyTJHBD4vSUjdBRSOTHH9LNBJE01Yk8SH/EtwYMVqxvzD2b2Ipx7vJCAeUFiPiWBQmE3d+JKIavob74X3cz8tcbIRbIj0Q8j5Da8o2UGH5eBKuFFVsQeDHkgnbcAIN4vBlFdZXFqdo+cS3Id60DX5h5DKwuzBmENP+SOk559PswZV2d2eVv0FGbDJKBSITrEobip5LGmGH6sO2uj7I6vcofev/Eot842zrJdkZazgyTFJB3omeQyYVu+LFGb+GuLDstxg15Uu6J6HYVODcsLNoTs5dZo6yD8XIrjqNE8KwYlAYtYlBr+7uUsRIE1+EdKtv0R3l/d0tbPxJhy3PqyRK7d1YTb+SyC9iVvFpiEW8sEH3R6y16YbsW7giTqJZI5abbhIDQuxbiE3EBrbtx0Dd+OiS81OpwfgIBvkv9NePY+M6ig+kLEmZmpEh0xBjvVlC5I8lDYITx6chrt3pw6jj8OAqSbrK6BOsip9Ec4Z8JYmjbWywZQGbOSQWw2ysJlc700LBzpGWlPPbqLpmPRYifx2KVNWZQ2DTE1dVI6pHWqSSxjlmMLEv8WeIRTIfEbVgb3mX0tNOo1lDRYSE64tXk2yIiYXYJNEMDsY2N/umPraGgHBXcP42Fa68FX9Za2TqujkEEDApzRCjR3PPujnRIqaKL0MceIj98xE6ZF/8M0P6xwzvueQ74ZeIUXW4IfK2SnVN4Phxk4tm/FuHVY3f7sQza9BfEVnxT+TYELv5VIkYAbY94qYHNeTKEB+ESbgL01ZdDZKTw5FFUb8Bu6YuJN9QuVP9KAQy0zSjrfdnM3SmaRgzFw7oSfUhhrbaNF9FCI8av5Bqdq1VSfKjehjkGWJucvYXlbjhG8fPiFgseHBuX/4yRptw0tB5tmYDC38/IqLAfvlfYBClRU4gVjPLlCAzB3+H3YWXglYt2pkCrIlIxEwjSX7hqpOarqi/5hGAb0eaIYajSI2IzUMPym21eTDE2ZbamCbGVEfznCc7R7Dp
7iUQItFmlQQ2e1kw82FXc4a9jnyx0+T1nSScGpPk/4emll+UJBpLUxOHqEpMrKVcE+ZvjO/bdPiFbzXfwCKlxifQrGGfWmzlLnLGK2UJhHSDWdm5j5nP7zB7+F8QB7RUVv9JxCeDyHieCsqvTiKdo1YV26uQ3lRWYfatN1kQMT5cE7WV12A0fJgFvSyQsiKaPSIRjIhUXy3j9db24We0Gw9jvN4C8Iq0EQEPnu1HEW+Mbd2qmEHA/GKyGW5mQzVN8YqYyP2GWOzd53xqxBqGasjYx0gAflMoErfXZf2u+LCsUUVPIKDkdJmy6qSfYImfr/8u7EAcA2MszVdtqf+4JoanktMiLOCdHddqOCQ84ogPldUVjk06H7+f+b2veMrcArWsjlvwcb8h/tZ/JZaaj2whd/RfGduDZO5j4vUUht43vJKWOaqogGr0/2LPvMBIanwqTnG2vgFh9ojPgOm86G9OMnJARjFDXwY3xbBk1N6izkdYpA9BzrE2zfN/rt35WVbu/ItDENpaJfXHK11SETdcUPYF+PaRzpvRNCQjny2drwMMu+YW5fgNWoQHyLbsUYzY1qpl+eb9xE16i+iW6toNmMVI/LE0MbfyN3AMktgVuRERJZUYcWKkzjV8PgSfD8ELA6d7c8TzsiMgq4sGJDhuKyX1RLr7hC+taJtMtJmj5m/DPZQ2Km6OHUbbJVpqyg0Vz07c2vy63Z/dbYgLy05BWsS35YPAvqCMdsfF2ykL/XOXpG7Rq/8EIzIVxsMjH5cDOWpMO7qydLJ1g1BYPhKZyEoP5Gb7t80wsm9hYlBGXqOMumSuNTXbEXlLGnYfhwC8QcSNQeDxP8C2l+3Shu5gHXXpPjyWp76EFi92tTje6JCdO2rkxs23VIfRz3hj31pZmv9oyyq7vrtixbBV5TiNa7UumgqmXR9vRjh71II+W4xqGDeO/LfRKG+hLadTQW3dEM/OWYaE6f9C2/Mt9GadNBD7jdOymWcJtqO/DyNqHRnh2yYSqRX3p1ecWnY8QvLEy+QSXB9oXbCoWwykis33gcvEqDklGIPdu+sH2K4SRtsG8UdwYOkVzJN+RWXJ+C129+neEbEIWaut2IrRiWQnOltCc4ePtRtYmfwzc4vOxMxgqV3TsVZlZWxRdWn+1a3Wh6qYsfpX5Pd/CpnTQ5FZrmOsHm2exaygmGblrIzI+FrpdHrZUJzqjBwRNBrNbJ+FHCgauwLP6jMHXkvub9mjF9xgGIZ4STlTGPveQ96LKkonfmJnhy7yjbVQs77qt9KNsEjwnpI6o0VPrv6aNbooD4bgNceNMFDBUr4YEUdW7h76NRj8LbLGwVqxOvC7l9LZUTgh5WqaPfwD242wEEOcozdnxKWUwvpglHw/ZKgNJp0t1xgvphmrjrGFd5wyhRG2Fs0TrZ6cH2lQw3v4HV4YLatQ7d1riHVdvluC8yXxsgiSm7vEg6iIe7nBF+AGxsSFhNCent2veOjwUA9QyLo0JLnBnsiQNGYqxUGt3pRjYBD/QL4c26eJQUW6e/i3MP43YlDcHy+Al4LSyL7IsXGhAad+qIxt+5DFYpqzhhg943fQDgvj/5c1asHN+wSR/MGdrglxgm+N8SN0lTgVhP/Q4x1Es4bYOsWQcX965RZ33G4Yi/EEnCuDXzQ88IBcUbVsSuTT46nlBdg9Fll0CmPfkKZNx67H56LRwZa2hWUX4v4swEyluy38mzNldC1eQo82v5SMnwOL1Ub1LuCeEiv9cULIQ0dlatetWZjXIFMGd46Iaw0RzyfRCIMbo5fiwQh3GTW/13Zd/8ANRjjwoDE6JaoHLitNnKRSYZkHo/nUudsAVxphoczs4S9hZHw8/r1oWTerDTibq1JnEm3Vfh4QSyMsbhtGxhO/qzJePfLy+VLD59xpiInOtPqshqVngWlyWLJYEgg3gJ/YcshwbCzlaN43Jxa5n1gwuu343TBWc5rzDPmZ0S4c1HopRoDXuT58a/bwavirL8ao/ffQUSwi2lSQDa9mp3kMbZIi1mxZgwMHBZtQErlYztxdy17tcuUTbU2QmyJxnyEW/jBOZ5iS3iyRmOLOHv6OWfJY0B2e+3Cn2lo/FuVs2EUYhUK4GfVRn3Sb3rkIImwOKwajz+ELzsFBrYvD0rqJYHbOfTDE52DaJTErWEsF2QQqWDmk5dVk+o49cBpcA84tloYEl+f49+xaFvVvY28f7jPE01eLFIydQ2JgtZKxp6w2cZJevFlrjPpXYIT7Odlvq30hZAdP/BxvChtYVTrlhKj9Yb6e+PFod7faX6ACYYXp7YfR3UM2hKZzae3snLdgiC+wzxiLLezsDpdq74hYVaX5D6VrKd2ZRjehw9g/J5zOgZsCeV6iTy+Am+uyMrUc59EZ/5AqFUvp49ZDQMUbdWO18SKMcGwX5rCbCA9DCQYdT1eU5r2DkYf1zRGhbppvfSrV7vwvZjtHHUiGrcgavx0Z8OTe8wM7ce7b1HK4cvgr+Cdt2rpfeHFPPMcjb/a6/deS91P26PmnIKJhEl5Qo2EM02OIxP3Vy6Ygoiby4r4RMTPk+ofFGXQuPYlZvEnFGzV2RhipfRi9iJ1pl3TVsrpi9JtXuWzy29KNsHg+ff2FD/XOFo/qZvhXz0gYIyyUm5vzNtwUYvOH3kJXCV8x8mL+6RIYJQSLyqVT3sEze+X+UTL7NEaK3ZA9cn5U+xPcNSL2IflKTTniTrnJwytNwK6xqfAP/8UEpeMkiBO+H0b4esc7bjxW6ekUjf11S8nkzxzr38e9VFuGhEDUG1EsK0hreynNGuhochXHdC0ouwn3dp70/sSmJK/Wj+4eJpJhqdICAWRQG8J1ug4vw98Bf0fD3DCoycOLYWELkUx9dZchLljTm3iD9dwGoVRlmlj8WRWKJBZ1yIE6ESPiiG5apPLiQdmFadxDaeneeZuenijitJ0vU8suR6fH05CcaTSG2TBqdF6loD0Kv2FB+X8wqPifoPXRXGRsAaI18qNhkehts655pD3f0XA7M/hkceKMI/oypns0dkpFyeQPrPbnLkMsguQNLjEuE2fRDcnJdNsPPjv34d7cqP8I/i0b/IhBHwG/xuiejDSa+/0zU+zNXBW0+yS9OKOsJ5Jvwp8r2V8Mfz51yzzMVHa5JIW+SW2xOWqHoc/Ab+0PuOZtum7XX/iOvmaezEGVJWMw6DFf3OUj5uwY86KboGT0ttuMsNi6bOgNTzpmhBmtZ0z7dWXplAJlhE08MzJJxLZoRvIjHUQu3p+qL5IpaqLy+qYkbzvcBbchCigHRvJju/XE6PtXXK/+u9V+XGaIDcmGmL9pFRC76Zcb27DYwkfY3Q98ZDpO45qNRbjBVaWTV9ven+ogOALpOfeiojx4ZRRXORsXReuka4rcwmuPytKGIcf2g3YrD2M8AXkpLL0o3eWaKChDYu8oMn4dhLB2WmAV+6DrsbmQOXbhMGrwv4/e7Z0iiVEwaVcrAxyb+3xQr1NXI6exvgYvYHmLR4ywCTO1G80eXHlQf+pCSASyRxVdg/WZ+YgFTAtJGEUlRt8VnrY0cOu
T+RVm2LhsRCz56B9PhmtWlrtNKm7DGvxP4abYaoSxIPdC+/YZQ5URNvP4O0Qj4n4Zf0xqbxzPEa+/RCrPJGEmTt7werB1n7FNdqmMUXFnfQ9/2Cx/9xhi31qcISZzRx0W6lwUGlVfrd+DN/DRZm9MJHR4Cz98hpY1euPj412yDTQSLRK0jSd1FiJWGqRqx3GskyoRIbClJL+cpaTCb0z/jYiBiUZYB7ooa/T8CSZI4UV0S6nTu8oVhbtmNCwO+0Q0U55c/VpyY7Nw2OfEkpIxiRsS1lLlePo+c/B30kfFDDlZJGyvjScYZcpa9dy1myk17QxsYJIbMttMSNyeuWYytbnHEHOjfTP5o//IcECoSwpOXEZgf/T70YOrE8iE8vvqZflYBFTF1QhoKXIXijjPpukfwv+sSqQIBIxxSurpthljzjvtqaOwu+7cY4iJyzXEnH6K9ObIbIf98LlYGDhJJs/mvDTGx8Pn5dwZXs07V5+tISAOJWAk92AC3nCmNSEUdUsEhDFO1zC7YPRdyzoZ35FK9vrOY4t/FYqXewwxl2yIie0MpbgTdSKhD/xEtuWRFRnSEB+8yAldVB+SEGD0pCROjWwMdppUfknK7MeS/E0asUuw2F0jHQLOU40G4+5QfN1jiEmTOyImirkh/n4b/x0Mca9QNyDSOiwyvHzdwMnKHREpgLFqp6U/i0U7rNtKKoz6SeKU9GxErDEiKSbaAQRyKY/JHrWgT2u83WOIGXVoTciIrjMjpobYhwRGcElMjUj2MI3gz/r8kIwOl/t8SACjSnwhMHPQJthhEUsuqfBeVLxaXnyyJKnilU3V0vyn8fuyvDMurL4cyV7JaNUeuMgQ83ZhlbFEEFvXxIOfFo2CIT7GksjmiH9BmNrFXz39ux3myBWV+xBg/5Imk4gn/gHGWBVpCAwY2Pd2zFqk74aEr/jK1k5Fd48hNliqNCQFI7HFN4YFYSu4mfIL8kaMqyydbFu4jXyJFceDEdDeO/haFFca9L5RtFZNWyCwwneG35PCJuGyv0VVdF/hK66v9V8XjIl7DLFsJznCRoIp7MS1QE5UzofK7kvsmsOOuRdl81X8HEZAkxxaqUlOluUwHG7srmLx5I9FxkLZsmHH3dWn+5YftLvWPYaY0x6pSjOKmSEmXb7DX6zm4gw5cVaXKvGOwMwhWzBjq5emBuedpfFSjPYhkKZl/Rm/u2/2XZDwAYv3XT/79IuLW7JyjyGWPSI2+KEtlXXi+94jtkXyc7kFO3S2PJe/US5TxS0mCDSeByjzZBLZEUcxgcVtnf5YMgahbJrIYyy1GIZxUGSGewyxlhgjYmPPrtGYfkj9YeCt/G27DulzpT4NillsEeBc4oYjJvV5iy0w7upduAIRRbFWplQ4KPLsbpcVH3BqiHsMsUFyowA4i4lrAiujo2XeNMGLM+0mlchHNqox5sekPu/KENt4O2EkZ0plz7mnod44wD3hHkNMhty8qsyhc6qa3SFxThaSsJzd7FLUHxGq9mH10sn/FzUjxcBlCDCZK/JyY/BdhlSsxdm6dPIy+PQ/kykHQltHNufnHkPs0UwlUG4ufOjPbGDoevm17JeGC7BlKk0qZ0YLpPJTzNyBACd54ZWc2rhDqcSUAq4J/KzZbMnanRUYuO1l6h5DnJItd0TMeSbNWB0y0YZkYOFC4P8rlSemr542bZ+RylMxcwcCTOKBosyuzH7ugMoNUmRonUoxO5W2WzcwYNvpP7VJN/cYYl/PWmQ/snTyaZMSrf41+LBW6+yo4HS6TLYaZ09tfXLcbpk8FS+3IMA6SpOEsy40vfwIafwUo4MQEBEUWIR//qCKaC4Y/LSm5u4wxL6VHahg5d+IU0aTYFL+GkaOFD4mmGSNXXg0btRhJkhNkzDmLTZNrAjjCwFO4kQaSYUfTrrxGRWU3URLuEcSU8WmBQIexqTOTuEndokhFqcLFKy8BonnvoQRvgV6y32IuIMjYr++D9QW9y+yr4zeryid+ElkjVUrVyMgnnsm96WN3087LBTPozXlq2n6KscGIK7GWbJwp2mZr+PGSVzL4oOzc5cEcuzEbkRcuLIf3uAr8QA9Arzs2RnE2GDnRggyT58OPEFS376Sn0nFLhoE/rz2MDz36dGwaL0tH4TR8QcY4DxIU1fLc3+03mHS1IhjyDDrfVmiwl6mVx8v+DlviJEeEgb4NjLYh+jf3jc3523oozUBRSWCF5QV4oel9qNpqa8F7UhdjH8EaoxetirB8RvjNIVI/xwG+Rxb+0oy5hrJTdhkaDRAQOisIS5Y24Nqyt/BFOov2KYgN8yrtQeC71+ZbI0k2uviJA7wkJYBS+xvryyZ8FW0cqn2LkVAM451RjKOA3mRcnNq2TR1yKgcxL0pnvflcGrkgoCXQJitc4a4cBXObasvhwE+UaYiYXlxti9EJCxthAQ//gwjzLkwxnIKZ2/KYaS4uBIBg5/gmFxidEx8JhWWP09iUVyVqBDY/NyEzxHd9XNUTJo1hqvDQUNcWHYVceNNGKvsZjI49fFkuzvSDR6YXkjrh7E10ngpRu5DgPHBjgvFEeNey1bRHeX9He87gToUmzuwYPeBLJUQOdFT8LJ3RCz8wVPL5pLBH4cRTpUlvCU+wvjPWGPrVBD7bnpYkikMMfdqyhCHwShuq0V4GafjYiI/50dTg7GSppWPiUn/CdMp/1yiKoeJY9XsM8S+9e2otux5TItulyh0ZKx0m/3EjLpHJliwVoy3z0hZF6xGXUsABNaWHQMt5MbLW4FFhLnp/DkqLMuz0kzRNkdA29T8W5SfvQ+uf7yzPYZ46uojqXbne3jzXxylkHKaG2SrnxiHAkozxIz4TyrTmpzb7kouXHPOP9wqAJhdG1SkjHGrAIWswAxYpiGGmazpLt8QCyNMujDCsZl+BYOQ0SnBLku7xuWNiBEG9700uRQj9yHAY+AfDoqCMsZBYTFz0cOkGmKExHWSa4h9q5HsWP833BGHm9HHMRocT0LT1nWxrT9GmbJ4I3RNGWJZYLqRDyNHE1GFhmCfMb42NJ2qbY6AZuibm3+P9rNBRoY8QxzwCeuvwAgLH5gLCiYQjN5DHtF8ymjXlWYN3GqbUJzJ9Pltt01OxTj2CKTnjCZNG4X43tfwbBqxFwjGmNNCKlwp/UCD2OtmjwR6hgdHKMkrTNcyDjpNNCL2vvWp8Ak/jxvqbLazYMKKBM6cniaW+gzNOWFjMBLZ1xALKM0QIz3eHtnyKX4uQsAXSAi/DBItI7HBSaufiOd1PKKKpCaMsqRx4068J2h62UaaOXy1pbaKWAICXIIhFiFqteVP4WE6S4JEEbJgOBWXP42cQU/TnGFrI2QScTOGVXAYUDmFcWWI5SDpfi6NA4Xp5ON/orpVF8MY5+FfrLYkZ5AfeRQK1g53agDj/hsUXMI0rnlqJeb1F71EPyKuLX8QD09ucJFtvCqmdZyeQyR0CaXlvESNIw0bO2ydNQ4D9AKD1glUjUIgFALNR8nT1hxHekMhyMVvSm42wlAyNNZ1Jqp7Ci+G02P5ewovZmwp9FreUaYEzE
PbozPEhWU3Y7PGZJlCheXV6FdbTMxzJ80ZKjOwOmzXrRPwHag7tPV68zXwbEvhY75HRekqBGYNEalPL8PpMjPI778dx75chXWXNMdk5HQSZrjIBUM3O9ZnnHWkM38nDAKlFZ1oe+SLdeLNbdAsadKYYcTY8zDA/WnO8MtptluMMATnTN4CG2cSE4abAVXRuBKBu4d+TXNG5FFGSi9sgP0HFvfqHJOT85to2qrzHesvzjoyEG4mU2SslkZoiO/7Mg3Tp6cce1Mz2khMOx8GeKSrDPDeu4Ht57/IuzFcGWJ5YMY/J9/gzTQ35yYoMhxRQM5l5NP1h8m3Vj2LQZ4gjF6lGmJN8/wS2Yh40zYxEg5kDQoip8RLIgSNPUDp7QfSnJxXJTKWy4qTtBExgomUa0Lu3UkMbnOHf0ydu2OTFJvnUNhbN6qtm5kY4MnVghtcqiH2apG4JgrKzoRa4g1td6nAKPhMjIJvIF9/uYeKSpdcpmuCukkXTzFMDARuPqKG5g6/BSPj02GMf7BfKTaZpq0ebH8/8dUDMrAFMqbJkvqw9lYNceNU5XHhFJUlRFA+DOn6MmgIQtFWBK132UXEEUt0TdAR3cctkrZTz2VQKXFkIDB7+DuU3m4QDPLrMti1ykPEF+u6iIqy9/feqgDurEDqymNlSYadtLvWLMxrsOaaqK2fj5tyhCwhgvJh9BR163QK+Ub8GLTenReluSaEerV7aga5U00llWsQ8PXfRj29F8AYP26vTHwE0mZeY28fccddmiFG9MUGob15Q1xYdiGM8GW2QsaYDyvFV9KNfZxbIZagkMaY1EUUDD9OkCCWYpHoCOQNbcDvZTy2TN9pq6oGzSHfeql+UVvltZF5r9zijpgBHyavC/ax4GXOEPu42LDwV3mdB+PEboQ/+M/Balx/jbOPpMpo4CReVRQCZhGYnfMn0thUs+TW6XgW1exyNlTVupCOtNit6fJGw5CYMx6wHeYMcQ2SSHN5h2O2QEzH9CoPixD3t7geN1/bexrfarIERqw4zvdTRSFgAYHZw7EJg71goYU1UkYT4aJI+gGCrmsnWwMuNLVn7yAuvCGeuhrb+eAysKUgPI3YOEyvFtrC3iGm35TkwUfMNsrrjvfIzl2Y9A+9PDyThFOnjuPwHAZGWNI1Dizc8duk8403hpz/Vp7IjBuHpH4i+IU3xEy/HgNo5BmWXrCzj67GSPgZ6ZxjwBCbOqT+ADjXJd7wGACiunQegal9d1IGOx8zzO9s6ZzxMTgJ+nBbeMcB0y5XFnWGf/jXskTFWtA3VY9eu1PwC22IfavbwCXxB1kdH8CHsbtghJ844Fo8f5HuJyZliOP5eYiV7L6cLeh6FEbG8he8OZKE1bAbYqVarPs1drOLYA9D20wLQiIocN/gLTTTGmOiLaNhESc8OOduCzK7npRrcqeEePMe3yX3oZ6uV1wJ6D4E5oxYgw0fN9sk2CREULSziber2RrMuEamgBgRv9fEr3VDLCIliN/aRCjtL2P1lMLG0xgmXBMJUzSWViZbGd1ouFo2T8UvSRCYPawIxvht+doiF0rdbqkGSb6M8jl2yS0eiNHwiTI5ezRt34ac1g1xbflFMMTy/UEcLom7ctbLVMgNvCpLxmNKyD6VKgtnk3vf8IpzKRClCq+YxRQBLFqQV5tkj4uC/x45i1u3HTFV3J7ODd2Qmu6XEavYsmTSPnsRCkzcRNkF0/eMYXNkc3ULP4C77w0nQya4Jzpv/+n7y2XwUjySEIG7h30BQyzfBch5L5wokjRrGEdePv9QhHddKfUJYvQGclbsy2oc3BBP//AodHqO5I795IFLIoYnaUjVJxgzj1xDLLrg3LBnsTSY/Opa4iHQS5sLF8W+kZc0BTm/WhovlzPaXcduwaCovUwx4R9+oTm/4IZYbxCHGQava97a0mftrzQrZ98qoaWmcUKcToe+KZJ4yBQXJzAd12XU/LNk8lS8kggBsQ2aU4F8jfnZ9NeP28rn6y6O3S4rzkLi9t/LlAoz5z0pmdpLzXm2Zmwln0HHdhOl27xFurlasfn8Y8kYHLPN/im7d52zv/mSzCcnG8Ok5jcn5xXov04qBpzSaVvduVJ5upBZfT2/HYMhuVEijL+8eWHeAYcEH2yIC1f3xWi4n1RMGH+E5hz3s1SermXGnpMtmghle2BdsQ0+e9mSKn6uRED4IhlcFLKL1F1msoWLnl/mqOK++O1JHQ0LqTRii1tKd7AhNvSRLYmi/I4wtbS/R8kjbpp30Tq9hp1N8l86nN8lFg3iBgglqLsQSB+GA3dx5JjMwvlvED2BMNfEK8g5DC+jUYxBaapM7cB0yxGZ2kGz5oMNMdElMjvGzV9Kc07YKJWni5mtLxlTz7i2SLaIeDNnYdEgaV5osvFLen6BRXJ2j2QcOlF92SmSebqCXefRC8bDGJ8qWxhMTR4RieBb8j3QEIsTOBgNbkkU5feE9w23xAdvvfnwFe8LTWlZH/F3zq/CAyJ7xhKxOKphvCGQ8TSey4OMQFRaGIm3FV/saDWI5kWFS7DGjOlpmhY0wdmBhrim4RSp0RJiZ4/YbplkpbJ08pd4of3bDrUNgxdn5z7W1Q7eimeCIyDWaRgtl6olZwkVT9w/d0kqdrSWwA52lIpTI7MXNpfkfR+M74GGmBmnBSOK4tqzUbSN66Ya4/faoYBwUXC99unTfcsT0jdnB2aKZzMEGC1r9k3CR34kzVjbRwIjV7DYYlTfgyiJIXYIA2M7szW+BxpizuQaYk/ai611nOjXK5dO+Rd2zqy2Q08Y4zM//eTzIjt4K54JjgBr8wI2eGDmLbH4G2S7MyUKZ55V1qiiiRgJI+2v/AJ35YuVpflrW+O83xA3rn4ObI3Q8nXGPqOZgzZZbpdIDRj92S51YIwnZI2arxJ12wVwovKdNXArVHtPqnqcx70hzsqdfzEWdWwb3HCv965QmO83xHVlR2NDrcQEM7w8VMfJUFe1NP8lvAlt85HjwZnTObdIbpRLMtyYZNeRseWSIbBlKi9ZxlbZdR698EQy6DmMhj2tEkVRARvwVPXiSatCsdhviLkmbzQsemRaq8PwUAIlXB1jt9qmE5JUGzo91TW3KMe2PhTjxEOAyz1NBgFCcTsizspdeJph6K/CL5xhy43GngJPG3ZLON77DTFxuYbY0D4M13ky1GNUvAJ6PmGXrnBRtPEb/M2sUQsusqsPxTfBEPDuPxlCimacDqUZZT2l8HKQSdboogtJGGHJCX2aq4B1osKtT+ZXNL8W7PN+Q8z5r4IRRHRNLAZkZCR0gh8ruKR40m/FAsk2K22s0OJt3paT8QIWG26y0k7RJikCdw//FlPWX6Rq79fialScPbLoam7w57FpI10qDgcwYx9UlkwOGjd8ABm+7DfEjHVvWRnxd05fkq+/1CxkEcvigoZbSq6p1Di73VZR4KbAQzUPC3hFKrTNVqQTgzmjj+UqwuPCTyzihGGEHzSIPwb97QwB9XuYd3LznMOh8N5viInkGWLGkZBaleYIVJTmPYocpO82v2bHZ4yOJ69bt+FlceKsH
fwVzwRBgNNnUjWJg8iJw3OLum/Rq9+CEZ4iVfcgzLBA94+K0omfBKkKeqmZIebdglJEcpFTdSTNErmNeDN68YaEi0LuFtNgoHE6x7+HNmRjv3ywanVNIYDn8Ce5KODUDheXzFELfleri0guPsJ2MRl952nTzmeln0ZD7FvZAcmj5flKGNnmD7WinNtot5ROWo8V5r85IhfnnQzDeBSuijcPG/vQsY70qTqJHwSQBUyysK7cdp89akGfzFHzXyNuPIlwT3mDzVbAw0i4hnk9I7c+OQ452M2XRkNc31Zu6Aa3b2HKvGrupOzKsnzY71/mlHRwVZxR72/4FAt5z2TnPtzbqX5VPy5HgHO5i3VE7WneD3LtSBQQHjby4aOyRhY9ZHDjMwwy5R77FlIu7dqqxXmWI8YaDbHhlwugGhG3eqtEmswMjY3CcSlbWyWSXdG4kHeZYTR8ljmq6N7u4xZlyu5C8Ys7BPzSJd6xQ64diUDAzmOLf4VZYHE9a/hS7D4FCzsX5A6QUGM0t6p0ckT5dRoNsdeQDaDyER9wiw788mNJ/iam8VxH/MXNu+Y8BbuHbqzdtfsrxFDe0e3SR49oXq0+JxECTMOBDZJLXZ0tO9PCSSkOTMAAIz9zZFGZ3qB/hVngJDznKeHayazHGtAr1w3MnxYpz8a3haFL3NosRNHkJhWJVDsXt0NSoHeyRxXdDKDuj4GYhyCG8s66hlof/Gdv4n4t8rZp87xVv1YM5FZdykLA0BHEI7kww7HRpxj5cj+/0CDjQhyYcCoMr9STNKwgAyP8RQdNu9znizyZUiNwXKslbLaWVpgh97A9aYK5ixGyMT2QOXL+MEg1LiaSwWWBfs/CvT9L372rLmvk/OV4qF7WNO/LW0smIuhflYRFgHm8xOUPiu3AKzt3STuu/Xw0+fkxWOweAp/vbzDy7bu/LyzDxaowtp28nou/WTxpezQiNBriNP8uwvnD0orBlSE2CWb7Dhl5u3bUDoA/K6Y7k/Aoi1nRedgUcp6hN9yPF8QGfF/FmPY1Hv6vPR7+NTPSvxabU0yqpsjcjAAzDoFBc7zk5i7xLDeq5pvoGG4O1gMi9jX0qu7U9M6IgcytyYoIiV2ISr2ocvGk/7ZGY/b63qlEGnbB1ZltE56OMWWIw6MUoNj4+PjabrnFl9QZxvuYXsnbVGOy/xBkx6LuWM4xU8LD7w9MmGoJrgwDh0A1IPLDD19cAx7GBtQ3cPxlnPlxvQHk+Cyu8QYsSopFoYZ9tFKnXiGkT8IqYF5auSz/cXOq8x7m6ORSvZ1encl3w4drqrjI6raQF8/1TqZpF1SW5L3boiqir3sN8SAY4rKIGARvxLODX1dXgyEgjk/pOmrhuX7mXw6D5W7sRASGGD3zwAga7469GuEvRvUBoy2uBC43XWsiCVzc+0X9kY8Ako+bZmqwXvtululGYQhTjbB3mO3x4PmWH7ARRjK51Yx2aBo7r6Ik7wNZjBujJhpPeJU4JKY+sgRMFj6BzR5e78mYjm1MFp2VnnIRQD6T9RY4Hm2BNjypSPR1mBY2WsrPdHcPNMJpCp+wlzznVJRMlmaERZeNhjjQOY/K2XyA/FwZ4gPwMPmlCr4mzZP+ayyYqVzOJjFTZHsRwNZ57sm0kshH7mCJ02bKGxp2+z6e7ay4vWcig6LXc/bW0jyZ7oMAHPsNMaNv5QGEaY+P7+ctj3HCc6osGb+FOqaehpHxfxJeWaWgNAQQi1ZeWTLGXMZD38dICCX7lGK+0ZQynA4zRecyIqyFfIIwk2HhTtqIVOxmxpJ9FSmTg9vhyKW6NXKnPgd3krBXqh69dmePLO03GD08mbBKKsUkI8DfMM2wpkH+b5PRRlP9czrGFJ2LiPA7fCY10/PrraVTvrFLrL2LdQH2X8rtxDgF/D6XyzN5uK1ZmIdIA34VTt74AVoXYgQjPwA/eeBMcE0RL+H1PG1aSU3vL3PbwN5+Tc2osQEjngyxn2l0G07Z+YdpbCMktGlEDGk4x/RalWgQwJuYVy/Ln+5hdD7CZX6Khpdqm7gI4A29XKwvmNaQ87NN05omZOtMkXLWbCOGqRYxIRK5YJjGzq5aOsV2IywU3G+ImSbRNRHA7tSYIJiAnVaU5r+WmqYdB8P8fwmonlIpSgSQtwcfUAkAAA2ISURBVGSBaRY+DlcnO8s0vVlCrydsxjGxQw4jtMPNsowNHaLhGT2S7knpt/e8SUfE2G+IDeFGwBRHVuH8CJqxRmwKUEUCApufzauqKs3/LR6SPLyt90hgqVgkAAJ4HrYc2cnzgmlV6sp/LX+hDtt87xoS1n+KpIwDTcsZC0LGPtM0flpV6ZQJP5ZMcDSn+n5DPHcowtf4p1L11/VLpfJTzAgPyUItRYyOyfyPT+GWsAhwzh4V6wmmFeTsfNO0ZgkZToWGGy0cOQ4qQJy8+wpmmrUaYzN6ZGqDRDKuWEi43xCL3hnJFYJzZYhtuKsVi/O+hkG+hJHnTBjkj2zoQrGMAwQwM6pKb9dmnjVR+UXW6E1Qc1pjgkqQiAV8NxVsyWeLNK82AAm4Zlp6oUnW4kBDrHlkG+KjadrqmCazkYyXq9hVLctbfv3A/CF4m18rpqiuEk4JYzsC2GZ7y6Ynrgq7m22fIIVlp2ARfcC+79I+sBXhWCECCENmdlI4Oifq8Vup0Zj2YCql9q4uzb9aDGyc6DdUHwcaYpYi1xCLng1/XigBVF10CIgcqHibP4rDCnsj1OYmtUU6OjzjpTVGw29WLJ38hCV5DbrREr0ZYiR/ok4dVoQj7Xrpgv54CXQKR2drvcgRwdgcTxvWo7J08vU/LZvwna39WWB+oCGeOWgTfsim4gEt9DGOCj+M7/3lFpSNFalI6i5Cbc70ZPbGwzYW9xEn1qqSiAggXK2OUjz5lnSbXn4E1oAusdTGDDHHcza1785wpLqfnROOxq56+IDfw79Jh6Z3OAKDlsKtT+ZX2NVXpHybb+ho4vEqPli7yU0tg/0NnA7dcB2qfMGq1TW5CJSUjNHBcYn4lz16/imGwW6E7/9ijEZS5fakuMUKAcx87rScA1fn4jfokS4zI1Nb8eGaGC297xAMYXgRBcaXMC31ycqSCYHQ3KoQ9LGuOni3VmH5aWQYK+QKxqooK70H3Xb8brl8FTczCBye+3CnGqMBo2T+O9CPQI7KA2dCZpgoGlcgAP/mU5VL88fB0ISNUtgncGFZJl7EX+K+H7rvmqwPXu0kmpnzfih23a946PDaGv/3eP4OtjehGlqqQ/wv8dWMtOcNxp6vLs2DIY6fcvCIOG3YO1Rb/hNunMTkHDyLqmumApY/xg80iSPp3pjIImhU1DX30WzdqL0ACyf4J4wyPzJxNE10Tdh/jsrUrrFkhAUknM+yxQgz2kR3D/uAZobGva6uQYyG5RphZJtjnH9EGnsb2x/eTqeUd52O/Q2ttbXa4OAUlt1HBr/BGquw1DXk1Y7B2/OHsJSKwDEEcDrIkThiox9O4ujLDXYSHog+WN/uiR9uB8eEUB2FRQDGd63IyicSQoUlbk5QsHJIYL2A25ANkbG/
05zhNzfvLthnnOpSivEqNpJYKxgo6Bj4b4X8GBjC6BN9ibncF+Tx/HfAsX2+WeE7w2+No3upWzHEq04iQ39XvtjsGZo7/Ar5fBVH2QiIkTM3Gnr5Ge/KuNEFE+HOmPKlY2qcghGWFydzpMA4YLsspWBO6EV9CkYmXoy0936mvTRiSy1oxHXQ2Ts9lY2CW/iJY3nSLwukSLUiEkLGqKAcbgM+wkoz07Rez3CaOUwtCpsGrHXC4Ia48QZuxA2UPG3Fz9WjnUuzhply8LcutqpRCCgEwiJQWHYtZrYPh6WLiIB9i0FVr4iaqkYHIRB80UYsBDB64CDqqC/gDW3oj1HBJ/IXDaKWTTFQCCQQAoUr+yHV5b22aaSpXNkysQ1uiEUP6akPwRjvktlZgBen7kR7xMKRKgoBhYAdCMz9vD0ZbBlmtG3tYA+fbR2xjPn28E5Orq0bYt8JvwCSR22BhdNYKlh5jS28FVOFQLIjsG07frfczgTsT9GsgVhEU0UWAq0bYtGDl/0DSTEMWZ0dyIcVkdj7ropCQCEgD4HCMkQx2Ll5Am5LjVtMNCRPvUTlFNoQ3z0c253587YoL3Z6GbSMZpT1tIW/YqoQSDYEGpP6zLVXbf4qzR7xmb19JB/30IZY4MG9f7ZvVIyNHg38ZZWLIvkePKWxZAREvLBB/0So4MGbtKR2pc2Ryk4xCyAQ3hDPHboOcaOP2IjXsWQ0LKfAEd829qJYKwQSFYGCVSdANYSE8kNsVZFRKc3NedvWPpKUeXhDLIDxtL3DlgiKfaDz/lRbq4zxPjzUB4WASQSmlQ8irr+OkbDdIaE15Em9xaRUiswiAuYMsVgh5Wy2Rd7WyDnvB2P8DhbwjrbWUFErBJIUgallx2PDxuvQ3v48v4zNpZmDv0tSpG1X25whFmJkdMZKKfveVok4PxpukJVUsOosW/tRzBUC8Y7A9FU5UAEjYZ5puyqMvqPO3f5iez9J3IF5Q+zrWQtDfL3tWAWmWPq/qLD8Rtv7Uh0oBOINAR+S9xSUTSddfw8+4SxHxNeQhP7mI2oc6StJOwmeayIUGFPLsHedXxuKRFodY69Sesq15Bu8WRpPxUghEK8ITP/wKPLXPwXxT3ZMBcYeQIY12ZkYHRM/XjoyPyJu0iij3R8wMkZ8sQOF8/Oopn4dTVt1qQO9qS4UAu5FoKD8CtIbPoGAThrhz+CSuN29oCSOZNZHxEL3qatPJqa/Bf+UdUMeMXbsDTS9BRmfPo6YhWqoEIg3BO4o70/1/G7MQn/rqOiM1SMSI0f93pxBPTJDLGQrKJsDQyxO3XCuiO3WnC8ib/odFDjo1LmuVU8KAUcRKFzdF2Fpf0SfY50d8OzVkmm30pycexzVOYk7i9wQ+5Dwu7b8NTwkZzqOn3hbEy0m0nBCwLC1jvevOlQI2IXAjLV9yF/3R4xGL0MX8g/7NCM3YyU0O2csdtTikAxVnEAgckMspBN5hfmeMnzq44SwQftgtIK49ghlGP8k34gdQWnURYWAmxEQg5q68vMwqLkaBli4IGJjgAVGjK2CX/g0FSXh7AMTnSEWsooNGCL21/6dPWGQETlSOUboWgllIL7Sl7MlTANVrRCIHQK+9e2obs+ZxI3z8fsZCUE6x06Ypp7ZjzgPLodmDf2p6Yr66wwC0RtiIWdBGdwTwgjanXDEAiiMfQPq97A1+z0kyd6AETO+D99MPrvSelqQTZEmFwJTV3fEOZg9oPTxGCwMxu9EHOg5HL+ZFPcAwXaTh51Ms3I+co9MySOJHEMs8CosH4/tlkgOhOOQ3FoCK8H8O4j3Mwx0A/6Kf378MBr2fW/8vP8a0xqQ1aqRTrQR/ziOzlQl8RHA6ah4NnBYKv4hOzdOEMZno/F74BBVXBN/aS9N42eR/WxvG9YRhrc72rdzN1isAbqNpNnDX3K3nIkrnVyjObX8d8SMx/DgiYdRFYWAQsD1CMClRywXWdVedL2oCSygXEMsgCos+y1GkM9hZJyWwLgp1RQC8Y8AI6Qt0C5BmNqr8a9MfGsg3xALPKatOpsM4wVM4dvENzxKeoVAgiLA2B7S2MXwCYuNUqrEGAF7DLFQqnDVSTDGL2NkjIUKVRQCCgHXICBOZ+fab1SSd9fcESw12FmmrR6MLFGvwRg7kyXKTl0Ub4VAIiDA2A786s/DwtwHiaBOouhgb66IWUM/JOb9NW78mkQBTOmhEIhjBDaQpp2kjLD77qC9hljoO2fIV9QTxpi0v2F1VoV9ue8ZUBIlAwKMPUwZ3qE0a9inyaBuvOlor2uiJRrTys5FrPEihLd1aVmlvisEFAI2ICBcEcQmITICuVlUcSsCzhpigcK0dV1I37MIfuNz3QqKkkshkCAIlFMKu5TuHu5M/vAEAS0WajhviIWWHLvvClfdjL8z8UXFG8fizqs+ExeBQLpY+hv18sygvKFiV6gqLkcgNoa4CZSCtT2I18MYi5R/Lt4a3SSv+qsQcDsCjF4hzTNV+YLdfqMOlC+2hrhJloKVSIKCNzin05suqb8KAYWAFQTYavLw22nWiOVWWiladyDgDkPchEVh2YUwxnPhsujXdEn9VQgoBEIgILIMatp0mjl0sUrkHgInl1e5yxALsJZwD61ddQ2McQH+9XI5fko8hUBsEGDsB3jz7qGMtkXk6y9OrFEljhFwnyFuAjOwoFd2NkbIedgQcjH+qoxuTdiov0mKANJVMnoJI+CHKHXoayq3duI8Bu41xM0x9pV3pRp+DS5NwKJez+ZV6rNCIAkQ+BIJeh4m1mYRzRq4NQn0TToV48MQN90WH9eovhyZ3fh4jJAvwOX2TVXqr0IgsRBgVdiI+hIx7XEc5PlWYummtGmJQHwZ4ubS3/dlGm3adhZ2DeEfH4GqE/BXxSQ3x0h9jh8EAhnRcHAno/eJe16lIUM+oDFMjx8FlKTRIBC/hril1sWrU+hbYxCOssnB6jH+Uj881H2w4JfdklR9VwjEDgHkW2F8MwYQX+P5/BjnKZbjrLiPaNDQDcrwxu6uxLrnxDHErSHpW9mB6lhXrDB3hJ/tEKTlbBM4e4x5vMR1nDem4S/OIQv8xVljTCwK4nhzzvAZf8U5ZMbes8lEXeAzIjtUSQ4EOEaljOOcQubHs4DFsr2fxdmG2t7zDpvOPhT1zc83DJyHiHYe2oPc3L+Q1/sLdeqyWR1VnxyPjtJSIaAQUAgoBBQCCgGFgEJAIaAQMIvA/wNne9RHd0pXfgAAAABJRU5ErkJggg==", + "connection-point": [ + { + "name": "ping-pong-nsd/cp0" + }, + { + "name": "ping-pong-nsd/cp1" + } + ], + "vld": [ + { + "id": "ba1c03a8-626b-11e5-998d-6cb3113b406f", + "name": "ping-pong-vld", + "short-name": "ping-pong-vld", + "vendor": "RIFT.io", + "description": "Toy VL", + "version": "1.0", 
+ "type": "ELAN", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f", + "vnfd-connection-point-ref": "ping-vnfd/cp1" + }, + { + "member-vnf-index-ref": 2, + "vnfd-id-ref": "ba1947da-626b-11e5-998d-6cb3113b406f", + "vnfd-connection-point-ref": "pong-vnfd/cp1" + } + ], + "provider-network": { + "name": "physnet1", + "overlay-type": "VLAN" + } + } + ], + "constituent-vnfd": [ + { + "member-vnf-index": 1, + "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f" + }, + { + "member-vnf-index": 2, + "vnfd-id-ref": "ba1947da-626b-11e5-998d-6cb3113b406f" + } + ], + "vnffgd": [ + { + "id": "e733efb1-6d52-4d97-839e-90c063239d6a", + "name": "vnffgd-2", + "short-name": "FG-1", + "vendor": "", + "description": "", + "version": "", + "rsp": [ + { + "id": "d671ee1d-0076-40a4-b83d-ef5abf000322", + "name": "rsp-1" + } + ], + "classifier": [ + { + "id": "387b9233-7fc3-4e69-9172-597d630a7345", + "name": "classifier-2", + "rsp-id-ref": "d671ee1d-0076-40a4-b83d-ef5abf000322", + "member-vnf-index-ref": "", + "vnfd-id-ref": "", + "vnfd-connection-point-ref": "", + "match-attributes": [ + { + "id": "5d8ed8f9-f6b7-4edb-938a-7b14748c3e52", + "ip-proto": "111", + "source-ip-address": "222", + "destination-ip-address": "333", + "source-port": "444", + "destination-port": "555" + } + ] + } + ] + } + ] + }, + { + "id": "test1-626b-11e5-998d-6cb3113b406f", + "name": "TEST ping-pong-nsd", + "short-name": "test-ping-pong-nsd", + "vendor": "RIFT.io", + "description": "TEST NS", + "version": "1.0", + "meta": "{\"instance-ref-count\": 4}", + "connection-point": [ + { + "name": "ping-pong-nsd/cp0" + }, + { + "name": "ping-pong-nsd/cp1" + } + ], + "vld": [ + { + "id": "test1-ba1c03a8-626b-11e5-998d-6cb3113b406f", + "name": "test1-ping-pong-vld", + "short-name": "test1-ping-pong-vld", + "vendor": "RIFT.io", + "description": "test1 VL", + "version": "1.0", + "type": "ELAN", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f", + "vnfd-connection-point-ref": "ping-vnfd/cp1" + }, + { + "member-vnf-index-ref": 2, + "vnfd-id-ref": "ba1947da-626b-11e5-998d-6cb3113b406f", + "vnfd-connection-point-ref": "pong-vnfd/cp1" + } + ], + "provider-network": { + "name": "physnet1", + "overlay-type": "VLAN" + } + } + ], + "constituent-vnfd": [ + { + "member-vnf-index": 1, + "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f" + }, + { + "member-vnf-index": 2, + "vnfd-id-ref": "ba1947da-626b-11e5-998d-6cb3113b406f" + } + ] + } + ] + }, + { + "id": "GUID-2", + "name": "RIFT.ware™ VNF Descriptors Catalog", + "short-name": "rift.ware-vnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "vnfd", + "descriptors": [ + { + "id": "ba145e82-626b-11e5-998d-6cb3113b406f", + "name": "ping-vnfd", + "short-name": "ping-vnfd", + "vendor": "RIFT.io", + "description": "This is an example RIFT.ware VNF", + "version": "1.0", + "logo": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAWIAAADzCAYAAAC8ERwoAAAABGdBTUEAALGPC/xhBQAAQABJREFUeAHtXQl8FEXWf9UzuTiVJIDgASwocihyBNZbP+9VvxUIeKwoCoTgsetJArg7q3Ltrux6EYIn3hCCfuuxuquCNwkgKiK6XniAkAPlzDXd9f1rQiCEyUz3THVPz0zV7weZ6Xr16r1/97yuevXqFSNVFAJxjkCPqx9L372z7gjStLZM19uKvzqnNowZbZnB0plGDZzzWq7xWs1gdbrXsy3N4FvbdD1i61f3X1AX5+or8RMAAZYAOigVkgSBLlc+0dbYvWcIZ0ZfIoZ/hH+8Lyc6ijhpEcLwCzH2A9p/QQz/iH3OvOyzozrSujUL8xoi5KmaKQQsIaAMsSW4FLGTCHSbVNxGr9ZP9BM7HQb3DBjJYcR5ihMyMMZqOfEPGbEyxvgHqW3avrnpiauqnehb9ZF8CChDnHz33LUaCxfDzt21JzKdTheGlzOWA8Ob6gqBGRnE2WoY5Vc1TXt165K8lTDWGIyrohCIHgFliKPHUHGIAoHc3CWeFca2czH6vApsLoYvNz0Kdk42/YExWgyHyHNVJVPWONmx6ivxEFCGOPHuaVxo1GVs0QDdHzC+v+OcusaF0K0IiZHxFxi935fWiT2+eWHenlbI1GWFQKsIKEPcKjSqQjYC3S4rzmqoNy6H4b0KI+DBsvnHnB9j2yBDkaalP1BZMn5LzOVRAsQNAsoQx82til9BM0cV94WL9Q44VHOdWmyLKVqM1UPPp70pbN7WxfmfxlQW1XlcIKAMcVzcpvgUsvu4RZl1u3fPggGeEEV4WXwq3yQ1o39rmueuypK8d5suqb8KgZYIKEPcEhH1PWoEfD6uPbCueBLcDzMxMuwUNcMEYICFvaeYlnGbclkkwM20QQVliG0ANZlZdh698ESD6w8g+uGEZMYhqO6MdsAg/2nAgGMfWOE7wx+URl1MSgSUIU7K2y5f6axrHmlP2+v+zjm7BjHA6rkKBTFj6zTGr6tcOuWdUGSqLnkQUD+Y5LnXtmmaPXLBqdjtsAgGuIdtnSQgY4S9Pamlem+reHbi1gRUT6lkAQFliC2ApUgPRKD3Da+k/bJ5491YjLs5aRfjDoTE8je4Kio50y6rXjr5DcuNVYOEQUAZ4oS5lc4qkjW2eDBv0J9Cr8c623MC9saYDlfFHytK8merbdMJeH9NqKQMsQmQFMl+BLAIx7JHF93Kid3tmjwQ+8WL608YHb/Yrn3GuI2Pj/8lrhVRwltGQBliy5AlbwORF2K5Xr0AYWkTkhcFezWHMf6Gad5RlSWTPrK3J8XdTQgoQ+ymu+FiWbJzl7TjRvUSjIjPd7GYCSEa3BO1+DelcunkxxJCIaVEWAQ8YSkUQdIjcHhuUXe/XvMGRsInJz0YzgDgxQvvf9v0uzClZsPLbzrTpeollggoQxxL9OOg7+zchYMaDL4coWl94kDcRhGR6wFT/G349xMSuyNdJduOihqcxKFjCihO8vDGiS6ntuv3m4w9G15+PU7kVWJGiIByTUQIXDI06zJ2/gi9gf0bI+H2rtK30aB+hbC5T5GafZ2msXUej2eDkeqvbn/IUdvDnUMnNp9o242eOtN7IhlRT8a13gbnw3BU0iA3LkAyjd1TtTT/VlfdAyWMVASUIZYKZ+Iw65pblIOR8H8QH9zBDVphdPsJAjZe8miel9u0S/kQkQW1suUScdE7tnw/2DD4CBj5c8H/DNcYZsburS7N/4NsnRU/dyCgDLE77oOrpMgatWAo58Z/INQhMRTMj4fzNWLaS6kpqS9vfu6aH5yWpfcVT3XYXrPrfMwIfkuMX4g8yu2clqF5fxrTHqxYmneDijVujkpifFaGODHuozQtAhs1/PrrGAkfKo2pBUbw6VZwjS3MYHzBjyX5myw0tZVUGOWfa3dciTQak+EvH2BrZyGYA5/iytLJ+coYhwApDquUIY7Dm2aXyIeNfPioBmooxwiws119tMqXURnW0R7oqnVasr5kTH2rdC6oyB49/xS4SW4RkQ2xEAdumjurSqf8KRZ9qz7tQUAZYntwjTuue0d872Mk3N9R4ZGJzKOxWypKJgtXSFyVzLELh5Ffvwt+ZOFPdrBgiZJpI6tL816Q0emRl88/tLaW9TMY748R/2GIK2nfuDbAsT7AxBpBB7hlRITVTrhoduIa/uEzR1pPjX/LmOdzD6V8saXkmkoZ8iQjD2WIk/Gut9D5dN9y77p1G17GD+ucFlW2fcUUeyt+1H88Q8t6pKRkjG5bRw4wzs4tPpkb+j9grIY40F2gC+C3MyXFO/ynxRM3WOmza+6j2Q1GPV4ciBIh3o9x1h8zoMOs8AhB+wsiT77QOFuDUfubKW3brNj0xFXVIehV1V4ElCFWjwJljZpfBCMC36cDRZznxvg9rEPa7KpHrxUjq4Qoge3fRvWNMG53Acu2TigFP/GXHTRt2DcleSJOOmgRuUGyLn1oKGvQL+AssCtyGF64IpbagQIzT/QJjPObCDH812nU6c14f+naBZoyxHYhGyd8s0bPn8ANesgJcWE4viCvdnnV4rwPnegvFn0E/OysYT4M4AVO9A9MX7l+4OSLfD5mNO8ve9SCPsgRfT2cGJfGxOffXJi9nzGK/wkv4ac1SnmyonTiJ0FIkvaSMsRJe+uJuowtGqD7eTlGcBl2w4Cp6iOeNu1+v/XJcbvt7ssN/LNGFd2EWOS58B+n2C4PYzMRYzxDjH47jy4+H6GHN8CJLNwPrv19i7hwGOaH0rTMR34sGVNjO0Yu78C1N8rluMW9eF2ufKKtvmf3avx4+9qqDKOfNU2bVFkyeamt/biQeefcBb/Wdb4EBvFwe8UTi3f0IP4XMc+/srcvudxhjBGuyOfBVTU/kVxVVlFShtgqYglCnzly/iKoMs5OdfAj+1pL0c6tWJz3tZ39uJl3t8uKs+rr9KUYHZ/mZjljLhte2DBG93fQPPNC+bxjLqdNAihDbBOwbmabNXL+5TAMT9spI6aeazxt2AVbn8yvsLOfeODd4+rH0nftrEEKUbooHuSNpYx4eW/Fs3lb9bL8J2Mph9N9K0PsNOIx7u/w3Ic71Rr1n8MoZNsnCvuP5skcWVkyZpd9fcQXZxEi+Oknnz8O18EV8SV5bKTFIuTbHi9dt3Vx/qexkcDZXpUhdhbvmPeGULWHYYSvtUsQ/ICePSpTu2rNwrwGu/qIV76BxbRRCx5ANMOUeNXBYbn9yDw376hO2oxEf56UIXb4yYpld2JrrmGwt+xaTYc74kVs0LhExYqGvsuZo4rEeX/TQ1Op2iYEYKRWeTwpY7eWTPy26Vqi/RXbFlVJAgSGTCpO+XkPvQQjbE8eCeSKSMv0XFT25FWuzhPhhlstTt1o2/9Cho0Vp7tBnjiQoTtmEVe373/Rf3d/9pKlnYRxoFtARId22MQLHIkr5/fVRj5GYf3s0BDuiC/TUj0Xbl6Yt8cO/onIE4nefTgxZFEi6maLTpx31A2jFNE+94ld
jLb0EUOmyhDHEHynuj48d0kG/MIFdvQnVrk9xM/b/GxelR38E5lnj0xtIl5iKxJZRxt0u2G5UfW8eKZt4B0zlsoQxwx65zqu5VV5EhO77Bcc5wwha8GlW0unfLP/ovpkFgGxAJWaquXiZfaT2TaKDs41hAHWGFWvi6xxiYKHMsSJcidb0SMwcuA0tZXq6C5zNgdT7BXRMUnu1mImgZ2HVyO1JMJnVTGNAKcTd9exd7tf8ZDNuxZNSxQVIRYkVYkZAtPWdSG95hhiBrYZs16QoyPe9ziOhzXmg2XiaB5xcCdDNi++G6kDRJrBXzAk+AUHEc+iuUPXhZMdxx7djNwD94Sjs1yPxbmBA489eYXvDL/ltqrBQQggN8U8hLfddFCFuhAGAbaRpaaeVPXctZvDELq6WhliJ25P8eoU+oaGE/OfAqN6LAzpMfiLfxyGN5LCdlNWehe67fiQCXQaNxFs+A5DrW6R9NJaG0yld3oYH6RcEq0hZP363t136zHtFi9kVSwhwD5tm85P/f6ZKT9bauYiYmWI7bgZS7iHPi4/AR7UMwP/GJ0C49tGWleMPUFzhl8Vjl/2yAVjDTKeC0dntR5B9pPhkii22k7Rh0YAs5eLMHv5Z2gqVRsMASx6vpeuZZ4dr5nclCEOdlcjueZbn0q1u36DUe4VcCGcFflo10TnzHM2zRn2ejhKTHffxXT3pHB0luoZrT9TyzpebdqwhJppYtyzl3HPLjDdwC5CxrYh0PlHzKZ24Xnejc06daIrfE/B9V8h3VtPDC5cFUYGY/zSgIF9L4lHd5nXrvuYNHynrxxBOl1JNTsvhc6dGvXG42pXYbSJ0oe+GY5957ELjtcbDLlGGJ1qjG5TRjgc+pHXw+3zB2QRPgtGLjVyLtZawsgiJwh7B1b2DZ7iebujwf8bLgNa/9wlqdW0rbfBqS8noy92bA5ljJ8H10rMwsrwArvw03Ub5kL7W6whEHtqNSKO5B40LrJNxEI3jlfnR0fCIvI2bB7NHR72QUNOiWL8KCZF3k+Qloxery6dcnaQGnVJIgLYAn0vniscu2RfwejxcxjfZzXO3zgiSyuXkcshO3dJO65XXYyNKmMg/3kYjqTZp0HrnJmH/reqZEpcuXiUIW79fh5cM738CNL57ZigTcDoIf1gAgeuaJ6Tafaw90L11Cu3uOMOQ98EQyzv7DTEDHu82uCKxZM/DtW3qoseAXHAp1+v+xqx34iYkVkCCeT/jfPj/lGxJO81GGPbpm7iVPDtNTtHYnR/I0aqJ8jUIiwv5DZO5akn/LRswndhaV1CoAyxmRtRsKY3cb/YmYZE6g4cfdOqTGwLZeR0pxbnk7UkxyLdtVike7jl9ai+M1qG0fCoqHioxqYRyBpZ9CcYYp/pBiEIYXBrMXBYhFOf77V66nMItqarcHzTOQY37oVBtvc0mOYSIbyyR6bnFBkj/eZs7fqsNnSEQraw7GgqKHuGeAOmcRypI2NphCEooxfCGWGhDk7rlW4w4Rv+RyioVJ1cBA7JaP93TPG3RcsVRvgNzasNqFqWPzkWRljIX7E0799dtMzj4Yu+Ezo5kxSK0/Dvqow7osXPqfZqRBwM6Xk/ZFDFZpGm8DYnF02CiXLANY/nHJo17D8HXGvxRbglthtGhUy58QNaU1U6ZWiLrtRXmxHIHD1/KsIf50TYTTVenrdUlk5ZFGF7W5p1H7cos273nr9jdHylLR00YwrjVkcp3uOqFk/6b7PLrvyoRsQtb8vU8ouoYtNnMGTTZRqzlt1E8L2GUtu8Fa7ddoNfJFtuTpoaDYcD3oZ6b0a7BxBFUWGVNUbBz6R40o91mxEWemx64qrqqtL8cYxpF9udY0MsFvIG/QGr+MWCXhniJtQL1vaAGwIrrQio59Sj6bJ7/rL3ydc/7LSOcY5YZnkFP5atPTLZYnkcFSezCGx9ctxu0vhss/S4Vzux2eYyGLortpRcU2m2XSzoqkonv9gmnfeHv83m07352WJjUyx0tNKnMsQCrakrryNeJ0bBF1kBz2HasLHDAXkYP12mXBhdLY2XBQ+ZeruFV7t2GQvgV90UTh4Y4Q+ZJ2UwdjxK30kZru9I68WWZBjkMZD9rkh5mGmHOOd5IrTODG2saJLbEE9d3ZGmlpUAfDF9iVkguqmbr9HycHRZufOPQcha13B0Vuo5Z6Wm6H3cCyxfooJVvUzRKyJTCGx8fHwtCGeGIb6/iyfz15UlE74KQ+e6arzoORYS/4gMdLbFTcNF0c3Qq6e4TvlmAiWvIZ5eNpSY/iFiDEY3w8OdH8XOp7ScVWGFM+i0sDQWCLBIV3mG59C3TTWpXYVIDeEW0VfDIOOvKrIQQAJ5hCKyjQfxQ7wsMc8l1cum3Li+ZExYt9VB7V10oXLp5PsRFXS9XSJh4e5mNyeTT05DXFh+I7YlvwdXRLyM3j5G2Jo//EPKTg5PY4niefPbmY0bApw5iWTdL1LByjvJx5Pz+bIEcXhi4RpCBESL6TtbKTYtVJfmvRCeQ3xQIE79QbhhCvHSwSBWbkFMdpc6vm2CXK7yuCXXD+W+L9OwILeEDENsIXVsL3/0t4utM8MDbgmpO5gwIjb3Iy9YdQIWOJvltcB+Kk53UE35K+Rb2cGM7IomNAKna5mLMI3/UhgpLMj9ZeBxfU+Jp51jobXbX1tdmi/C9UwvUO5vGf6TYfDbRY6M8JTOUySPIRb+4M3bXoMBznUe5ih75PzTcBx63/AK9vVL3LmELc3tmef9cP0G6pnY7BKs8HOplq0g38f2nBwdrMsEvSZmJpyx2zSN/wYLclPjMcOY2Vtzpifzj3jprDBLb56OH17Bt11hnt45yuQwxNNWHwZ/8FswwlJ9qA7eprAj4l8qfugPeeRl0+Pss3AZuPbpz43WfcIiz0BNzbskwgNViQqB6qWT/69y6ZR/RcUkDhqLlw7T0i/DjGyLbHGxkWS8bJ4y+CW+IZ6xtg/puvAHHy8DMId56FjA+Jky2oYdEbMG4ziZsmEx29xouHBlPxNx130QHvgeTVs1QKaMilfiIlBZMn4LJ0++bA3hvju5S+5DPWXzjZZfYhtiERnhr0emMu464BtvHKvC4sQ78PsVk6b9nsQWZo9nIHm1I6nTIR1wCkcKzRnRCRs5toW70Qaj3uForNTDE/mBOXp2oTk6HNdk6G/TtNWDTdIrsiRHQCxEwkUheQbAmW7otm+vtnrr5E1lrfZsN/3U1QPJryMvAz/E7q4s8K+A4X0DMr1O3Ps6Dv/8PmRbC2cvIzynp8ylZo+HlYeUramS8/ObPob9KyIqDP1VKixDKs/hrt//H1YfRWA7AkhYdIPRoK/Hs401EFnFEIb4TlncZPBJzBFxwB/pf9UVRpjRRhjfP2PUOwgj3K74dzlGuY+GNcIW7y6mXBJH/Yynt037xpQInIaYomsi4jwbbqJ/0/SPujddUn8VAq0hULE472scC3Nva/WRXMdvpXeXUcXDI2lrV5vEGxEXfogfev1rAKybXaCF54tTlhlhR5r2GM0e+hYMceNgdW74lpFSYETcQ9aIGLFnP+3
d0RVanOkfHgXXT/vQREFqOaFd3WvkW3+qGbdLEA7mLmFbIP3li3a0o7YDFms7IJNZCmn+neT17KROHXfSjX0C57CZY6aoYoUAY+l/J6r5vcxRscH1s6FPWax0atkvfr8JVHzr21HtzhVYPLI2SpMGgfD58nk4u+NB8o3YIY1tGEbiKPadO2r3YAYg5X6CybtVy6acEqZb5OgQO+j4S2HpWiVgKymj8xnk6ym28UZWAqemkIgYwT/eD//E58PxDCB+mSG/QChMWAPqd4J+I2g/x+cvyKN9jlNYvqDunT5XhhrIuKQgUf4CbMrIkyUOIjKWI7XrmbL4RcsncUbEgVOUdz4fEyPMaCtGwH+jTulFdNvxu6O9KVbb76o1OoU2ONY4YuRhzi3BaADwjqLwEVS7tQgMxptmUri6L3EdfmmcdIzk3+Q3QozIwwkXSPQP7MShr3xwQAbdaBRlc3UtXjRY6KU3SfO8QScMWU1jmN5Yqf53GgEtRfur7tcn4p7LcadyOlEMYEzN/BxQNnEMce3Ov+MmneUAZvu7YIQjaLSZ1Pmwe+jmI2r2Vzj7iTU0tA1ncixJxNgPpug5P8YUXSgiTldjO/QH8JsvDEpWvDqFNnJMI2F4DfHP3zMoneyLgTMJ+f+A7f+gz5n0YfkOZOl7HW6mpyi93ctmUpLKFimZ+QlfMQ7EfQvP+RkycACftJ27a08EL3NZDWV0GoJHYhjigrJcLAA5m12JMSwGatfR3GHmRo8hbkLUVZrWlppGclEzwySdkYm8FqIjhrwSEl4BnN1H08s+pJnDV+8T31felWoxFf1Gn4w+uu67HqsPnMPVQSPxnI2kmp3b8PJ4DqGGi2jmMHPRJbGSO6H6Zc/iWZBiiAOwGCRcE8oQS3lGxMGe5Ed2KocKYz+hp98j+qHEoR7DdsN0vW1YInsIOsphy9PIj3SbhWXCPXA0bPsNVCOy4sX4jMDWlesEGacgPHIKRsnr8OaaTYNzlijXReuAyahJ11JKa4yGB/EyTJHBD4vSUjdBRSOTHH9LNBJE01Yk8SH/EtwYMVqxvzD2b2Ipx7vJCAeUFiPiWBQmE3d+JKIavob74X3cz8tcbIRbIj0Q8j5Da8o2UGH5eBKuFFVsQeDHkgnbcAIN4vBlFdZXFqdo+cS3Id60DX5h5DKwuzBmENP+SOk559PswZV2d2eVv0FGbDJKBSITrEobip5LGmGH6sO2uj7I6vcofev/Eot842zrJdkZazgyTFJB3omeQyYVu+LFGb+GuLDstxg15Uu6J6HYVODcsLNoTs5dZo6yD8XIrjqNE8KwYlAYtYlBr+7uUsRIE1+EdKtv0R3l/d0tbPxJhy3PqyRK7d1YTb+SyC9iVvFpiEW8sEH3R6y16YbsW7giTqJZI5abbhIDQuxbiE3EBrbtx0Dd+OiS81OpwfgIBvkv9NePY+M6ig+kLEmZmpEh0xBjvVlC5I8lDYITx6chrt3pw6jj8OAqSbrK6BOsip9Ec4Z8JYmjbWywZQGbOSQWw2ysJlc700LBzpGWlPPbqLpmPRYifx2KVNWZQ2DTE1dVI6pHWqSSxjlmMLEv8WeIRTIfEbVgb3mX0tNOo1lDRYSE64tXk2yIiYXYJNEMDsY2N/umPraGgHBXcP42Fa68FX9Za2TqujkEEDApzRCjR3PPujnRIqaKL0MceIj98xE6ZF/8M0P6xwzvueQ74ZeIUXW4IfK2SnVN4Phxk4tm/FuHVY3f7sQza9BfEVnxT+TYELv5VIkYAbY94qYHNeTKEB+ESbgL01ZdDZKTw5FFUb8Bu6YuJN9QuVP9KAQy0zSjrfdnM3SmaRgzFw7oSfUhhrbaNF9FCI8av5Bqdq1VSfKjehjkGWJucvYXlbjhG8fPiFgseHBuX/4yRptw0tB5tmYDC38/IqLAfvlfYBClRU4gVjPLlCAzB3+H3YWXglYt2pkCrIlIxEwjSX7hqpOarqi/5hGAb0eaIYajSI2IzUMPym21eTDE2ZbamCbGVEfznCc7R7Dp7iUQItFmlQQ2e1kw82FXc4a9jnyx0+T1nSScGpPk/4emll+UJBpLUxOHqEpMrKVcE+ZvjO/bdPiFbzXfwCKlxifQrGGfWmzlLnLGK2UJhHSDWdm5j5nP7zB7+F8QB7RUVv9JxCeDyHieCsqvTiKdo1YV26uQ3lRWYfatN1kQMT5cE7WV12A0fJgFvSyQsiKaPSIRjIhUXy3j9db24We0Gw9jvN4C8Iq0EQEPnu1HEW+Mbd2qmEHA/GKyGW5mQzVN8YqYyP2GWOzd53xqxBqGasjYx0gAflMoErfXZf2u+LCsUUVPIKDkdJmy6qSfYImfr/8u7EAcA2MszVdtqf+4JoanktMiLOCdHddqOCQ84ogPldUVjk06H7+f+b2veMrcArWsjlvwcb8h/tZ/JZaaj2whd/RfGduDZO5j4vUUht43vJKWOaqogGr0/2LPvMBIanwqTnG2vgFh9ojPgOm86G9OMnJARjFDXwY3xbBk1N6izkdYpA9BzrE2zfN/rt35WVbu/ItDENpaJfXHK11SETdcUPYF+PaRzpvRNCQjny2drwMMu+YW5fgNWoQHyLbsUYzY1qpl+eb9xE16i+iW6toNmMVI/LE0MbfyN3AMktgVuRERJZUYcWKkzjV8PgSfD8ELA6d7c8TzsiMgq4sGJDhuKyX1RLr7hC+taJtMtJmj5m/DPZQ2Km6OHUbbJVpqyg0Vz07c2vy63Z/dbYgLy05BWsS35YPAvqCMdsfF2ykL/XOXpG7Rq/8EIzIVxsMjH5cDOWpMO7qydLJ1g1BYPhKZyEoP5Gb7t80wsm9hYlBGXqOMumSuNTXbEXlLGnYfhwC8QcSNQeDxP8C2l+3Shu5gHXXpPjyWp76EFi92tTje6JCdO2rkxs23VIfRz3hj31pZmv9oyyq7vrtixbBV5TiNa7UumgqmXR9vRjh71II+W4xqGDeO/LfRKG+hLadTQW3dEM/OWYaE6f9C2/Mt9GadNBD7jdOymWcJtqO/DyNqHRnh2yYSqRX3p1ecWnY8QvLEy+QSXB9oXbCoWwykis33gcvEqDklGIPdu+sH2K4SRtsG8UdwYOkVzJN+RWXJ+C129+neEbEIWaut2IrRiWQnOltCc4ePtRtYmfwzc4vOxMxgqV3TsVZlZWxRdWn+1a3Wh6qYsfpX5Pd/CpnTQ5FZrmOsHm2exaygmGblrIzI+FrpdHrZUJzqjBwRNBrNbJ+FHCgauwLP6jMHXkvub9mjF9xgGIZ4STlTGPveQ96LKkonfmJnhy7yjbVQs77qt9KNsEjwnpI6o0VPrv6aNbooD4bgNceNMFDBUr4YEUdW7h76NRj8LbLGwVqxOvC7l9LZUTgh5WqaPfwD242wEEOcozdnxKWUwvpglHw/ZKgNJp0t1xgvphmrjrGFd5wyhRG2Fs0TrZ6cH2lQw3v4HV4YLatQ7d1riHVdvluC8yXxsgiSm7vEg6iIe7nBF+AGxsSFhNCent2veOjwUA9QyLo0JLnBnsiQNGYqxUGt3pRjYBD/QL4c26eJQUW6e/
i3MP43YlDcHy+Al4LSyL7IsXGhAad+qIxt+5DFYpqzhhg943fQDgvj/5c1asHN+wSR/MGdrglxgm+N8SN0lTgVhP/Q4x1Es4bYOsWQcX965RZ33G4Yi/EEnCuDXzQ88IBcUbVsSuTT46nlBdg9Fll0CmPfkKZNx67H56LRwZa2hWUX4v4swEyluy38mzNldC1eQo82v5SMnwOL1Ub1LuCeEiv9cULIQ0dlatetWZjXIFMGd46Iaw0RzyfRCIMbo5fiwQh3GTW/13Zd/8ANRjjwoDE6JaoHLitNnKRSYZkHo/nUudsAVxphoczs4S9hZHw8/r1oWTerDTibq1JnEm3Vfh4QSyMsbhtGxhO/qzJePfLy+VLD59xpiInOtPqshqVngWlyWLJYEgg3gJ/YcshwbCzlaN43Jxa5n1gwuu343TBWc5rzDPmZ0S4c1HopRoDXuT58a/bwavirL8ao/ffQUSwi2lSQDa9mp3kMbZIi1mxZgwMHBZtQErlYztxdy17tcuUTbU2QmyJxnyEW/jBOZ5iS3iyRmOLOHv6OWfJY0B2e+3Cn2lo/FuVs2EUYhUK4GfVRn3Sb3rkIImwOKwajz+ELzsFBrYvD0rqJYHbOfTDE52DaJTErWEsF2QQqWDmk5dVk+o49cBpcA84tloYEl+f49+xaFvVvY28f7jPE01eLFIydQ2JgtZKxp6w2cZJevFlrjPpXYIT7Odlvq30hZAdP/BxvChtYVTrlhKj9Yb6e+PFod7faX6ACYYXp7YfR3UM2hKZzae3snLdgiC+wzxiLLezsDpdq74hYVaX5D6VrKd2ZRjehw9g/J5zOgZsCeV6iTy+Am+uyMrUc59EZ/5AqFUvp49ZDQMUbdWO18SKMcGwX5rCbCA9DCQYdT1eU5r2DkYf1zRGhbppvfSrV7vwvZjtHHUiGrcgavx0Z8OTe8wM7ce7b1HK4cvgr+Cdt2rpfeHFPPMcjb/a6/deS91P26PmnIKJhEl5Qo2EM02OIxP3Vy6Ygoiby4r4RMTPk+ofFGXQuPYlZvEnFGzV2RhipfRi9iJ1pl3TVsrpi9JtXuWzy29KNsHg+ff2FD/XOFo/qZvhXz0gYIyyUm5vzNtwUYvOH3kJXCV8x8mL+6RIYJQSLyqVT3sEze+X+UTL7NEaK3ZA9cn5U+xPcNSL2IflKTTniTrnJwytNwK6xqfAP/8UEpeMkiBO+H0b4esc7bjxW6ekUjf11S8nkzxzr38e9VFuGhEDUG1EsK0hreynNGuhochXHdC0ouwn3dp70/sSmJK/Wj+4eJpJhqdICAWRQG8J1ug4vw98Bf0fD3DCoycOLYWELkUx9dZchLljTm3iD9dwGoVRlmlj8WRWKJBZ1yIE6ESPiiG5apPLiQdmFadxDaeneeZuenijitJ0vU8suR6fH05CcaTSG2TBqdF6loD0Kv2FB+X8wqPifoPXRXGRsAaI18qNhkehts655pD3f0XA7M/hkceKMI/oypns0dkpFyeQPrPbnLkMsguQNLjEuE2fRDcnJdNsPPjv34d7cqP8I/i0b/IhBHwG/xuiejDSa+/0zU+zNXBW0+yS9OKOsJ5Jvwp8r2V8Mfz51yzzMVHa5JIW+SW2xOWqHoc/Ab+0PuOZtum7XX/iOvmaezEGVJWMw6DFf3OUj5uwY86KboGT0ttuMsNi6bOgNTzpmhBmtZ0z7dWXplAJlhE08MzJJxLZoRvIjHUQu3p+qL5IpaqLy+qYkbzvcBbchCigHRvJju/XE6PtXXK/+u9V+XGaIDcmGmL9pFRC76Zcb27DYwkfY3Q98ZDpO45qNRbjBVaWTV9ven+ogOALpOfeiojx4ZRRXORsXReuka4rcwmuPytKGIcf2g3YrD2M8AXkpLL0o3eWaKChDYu8oMn4dhLB2WmAV+6DrsbmQOXbhMGrwv4/e7Z0iiVEwaVcrAxyb+3xQr1NXI6exvgYvYHmLR4ywCTO1G80eXHlQf+pCSASyRxVdg/WZ+YgFTAtJGEUlRt8VnrY0cOuT+RVm2LhsRCz56B9PhmtWlrtNKm7DGvxP4abYaoSxIPdC+/YZQ5URNvP4O0Qj4n4Zf0xqbxzPEa+/RCrPJGEmTt7werB1n7FNdqmMUXFnfQ9/2Cx/9xhi31qcISZzRx0W6lwUGlVfrd+DN/DRZm9MJHR4Cz98hpY1euPj412yDTQSLRK0jSd1FiJWGqRqx3GskyoRIbClJL+cpaTCb0z/jYiBiUZYB7ooa/T8CSZI4UV0S6nTu8oVhbtmNCwO+0Q0U55c/VpyY7Nw2OfEkpIxiRsS1lLlePo+c/B30kfFDDlZJGyvjScYZcpa9dy1myk17QxsYJIbMttMSNyeuWYytbnHEHOjfTP5o//IcECoSwpOXEZgf/T70YOrE8iE8vvqZflYBFTF1QhoKXIXijjPpukfwv+sSqQIBIxxSurpthljzjvtqaOwu+7cY4iJyzXEnH6K9ObIbIf98LlYGDhJJs/mvDTGx8Pn5dwZXs07V5+tISAOJWAk92AC3nCmNSEUdUsEhDFO1zC7YPRdyzoZ35FK9vrOY4t/FYqXewwxl2yIie0MpbgTdSKhD/xEtuWRFRnSEB+8yAldVB+SEGD0pCROjWwMdppUfknK7MeS/E0asUuw2F0jHQLOU40G4+5QfN1jiEmTOyImirkh/n4b/x0Mca9QNyDSOiwyvHzdwMnKHREpgLFqp6U/i0U7rNtKKoz6SeKU9GxErDEiKSbaAQRyKY/JHrWgT2u83WOIGXVoTciIrjMjpobYhwRGcElMjUj2MI3gz/r8kIwOl/t8SACjSnwhMHPQJthhEUsuqfBeVLxaXnyyJKnilU3V0vyn8fuyvDMurL4cyV7JaNUeuMgQ83ZhlbFEEFvXxIOfFo2CIT7GksjmiH9BmNrFXz39ux3myBWV+xBg/5Imk4gn/gHGWBVpCAwY2Pd2zFqk74aEr/jK1k5Fd48hNliqNCQFI7HFN4YFYSu4mfIL8kaMqyydbFu4jXyJFceDEdDeO/haFFca9L5RtFZNWyCwwneG35PCJuGyv0VVdF/hK66v9V8XjIl7DLFsJznCRoIp7MS1QE5UzofK7kvsmsOOuRdl81X8HEZAkxxaqUlOluUwHG7srmLx5I9FxkLZsmHH3dWn+5YftLvWPYaY0x6pSjOKmSEmXb7DX6zm4gw5cVaXKvGOwMwhWzBjq5emBuedpfFSjPYhkKZl/Rm/u2/2XZDwAYv3XT/79IuLW7JyjyGWPSI2+KEtlXXi+94jtkXyc7kFO3S2PJe/US5TxS0mCDSeByjzZBLZEUcxgcVtnf5YMgahbJrIYyy1GIZxUGSGewyxlhgjYmPPrtGYfkj9YeCt/G27DulzpT4NillsEeBc4oYjJvV5iy0w7upduAIRRbFWplQ4KPLsbpcVH3BqiHsMsUFyowA4i4lrAiujo2XeNMGLM+0mlchHNqox5sekPu/KENt4O2EkZ0plz7mnod44wD3hHkNMhty8qsyhc6qa3SFxThaSsJzd7FLUHxGq9mH10sn/FzUjxcBlCDCZK
/JyY/BdhlSsxdm6dPIy+PQ/kykHQltHNufnHkPs0UwlUG4ufOjPbGDoevm17JeGC7BlKk0qZ0YLpPJTzNyBACd54ZWc2rhDqcSUAq4J/KzZbMnanRUYuO1l6h5DnJItd0TMeSbNWB0y0YZkYOFC4P8rlSemr542bZ+RylMxcwcCTOKBosyuzH7ugMoNUmRonUoxO5W2WzcwYNvpP7VJN/cYYl/PWmQ/snTyaZMSrf41+LBW6+yo4HS6TLYaZ09tfXLcbpk8FS+3IMA6SpOEsy40vfwIafwUo4MQEBEUWIR//qCKaC4Y/LSm5u4wxL6VHahg5d+IU0aTYFL+GkaOFD4mmGSNXXg0btRhJkhNkzDmLTZNrAjjCwFO4kQaSYUfTrrxGRWU3URLuEcSU8WmBQIexqTOTuEndokhFqcLFKy8BonnvoQRvgV6y32IuIMjYr++D9QW9y+yr4zeryid+ElkjVUrVyMgnnsm96WN3087LBTPozXlq2n6KscGIK7GWbJwp2mZr+PGSVzL4oOzc5cEcuzEbkRcuLIf3uAr8QA9Arzs2RnE2GDnRggyT58OPEFS376Sn0nFLhoE/rz2MDz36dGwaL0tH4TR8QcY4DxIU1fLc3+03mHS1IhjyDDrfVmiwl6mVx8v+DlviJEeEgb4NjLYh+jf3jc3523oozUBRSWCF5QV4oel9qNpqa8F7UhdjH8EaoxetirB8RvjNIVI/xwG+Rxb+0oy5hrJTdhkaDRAQOisIS5Y24Nqyt/BFOov2KYgN8yrtQeC71+ZbI0k2uviJA7wkJYBS+xvryyZ8FW0cqn2LkVAM451RjKOA3mRcnNq2TR1yKgcxL0pnvflcGrkgoCXQJitc4a4cBXObasvhwE+UaYiYXlxti9EJCxthAQ//gwjzLkwxnIKZ2/KYaS4uBIBg5/gmFxidEx8JhWWP09iUVyVqBDY/NyEzxHd9XNUTJo1hqvDQUNcWHYVceNNGKvsZjI49fFkuzvSDR6YXkjrh7E10ngpRu5DgPHBjgvFEeNey1bRHeX9He87gToUmzuwYPeBLJUQOdFT8LJ3RCz8wVPL5pLBH4cRTpUlvCU+wvjPWGPrVBD7bnpYkikMMfdqyhCHwShuq0V4GafjYiI/50dTg7GSppWPiUn/CdMp/1yiKoeJY9XsM8S+9e2otux5TItulyh0ZKx0m/3EjLpHJliwVoy3z0hZF6xGXUsABNaWHQMt5MbLW4FFhLnp/DkqLMuz0kzRNkdA29T8W5SfvQ+uf7yzPYZ46uojqXbne3jzXxylkHKaG2SrnxiHAkozxIz4TyrTmpzb7kouXHPOP9wqAJhdG1SkjHGrAIWswAxYpiGGmazpLt8QCyNMujDCsZl+BYOQ0SnBLku7xuWNiBEG9700uRQj9yHAY+AfDoqCMsZBYTFz0cOkGmKExHWSa4h9q5HsWP833BGHm9HHMRocT0LT1nWxrT9GmbJ4I3RNGWJZYLqRDyNHE1GFhmCfMb42NJ2qbY6AZuibm3+P9rNBRoY8QxzwCeuvwAgLH5gLCiYQjN5DHtF8ymjXlWYN3GqbUJzJ9Pltt01OxTj2CKTnjCZNG4X43tfwbBqxFwjGmNNCKlwp/UCD2OtmjwR6hgdHKMkrTNcyDjpNNCL2vvWp8Ak/jxvqbLazYMKKBM6cniaW+gzNOWFjMBLZ1xALKM0QIz3eHtnyKX4uQsAXSAi/DBItI7HBSaufiOd1PKKKpCaMsqRx4068J2h62UaaOXy1pbaKWAICXIIhFiFqteVP4WE6S4JEEbJgOBWXP42cQU/TnGFrI2QScTOGVXAYUDmFcWWI5SDpfi6NA4Xp5ON/orpVF8MY5+FfrLYkZ5AfeRQK1g53agDj/hsUXMI0rnlqJeb1F71EPyKuLX8QD09ucJFtvCqmdZyeQyR0CaXlvESNIw0bO2ydNQ4D9AKD1glUjUIgFALNR8nT1hxHekMhyMVvSm42wlAyNNZ1Jqp7Ci+G02P5ewovZmwp9FreUaYEzEPbozPEhWU3Y7PGZJlCheXV6FdbTMxzJ80ZKjOwOmzXrRPwHag7tPV68zXwbEvhY75HRekqBGYNEalPL8PpMjPI778dx75chXWXNMdk5HQSZrjIBUM3O9ZnnHWkM38nDAKlFZ1oe+SLdeLNbdAsadKYYcTY8zDA/WnO8MtptluMMATnTN4CG2cSE4abAVXRuBKBu4d+TXNG5FFGSi9sgP0HFvfqHJOT85to2qrzHesvzjoyEG4mU2SslkZoiO/7Mg3Tp6cce1Mz2khMOx8GeKSrDPDeu4Ht57/IuzFcGWJ5YMY/J9/gzTQ35yYoMhxRQM5l5NP1h8m3Vj2LQZ4gjF6lGmJN8/wS2Yh40zYxEg5kDQoip8RLIgSNPUDp7QfSnJxXJTKWy4qTtBExgomUa0Lu3UkMbnOHf0ydu2OTFJvnUNhbN6qtm5kY4MnVghtcqiH2apG4JgrKzoRa4g1td6nAKPhMjIJvIF9/uYeKSpdcpmuCukkXTzFMDARuPqKG5g6/BSPj02GMf7BfKTaZpq0ebH8/8dUDMrAFMqbJkvqw9lYNceNU5XHhFJUlRFA+DOn6MmgIQtFWBK132UXEEUt0TdAR3cctkrZTz2VQKXFkIDB7+DuU3m4QDPLrMti1ykPEF+u6iIqy9/feqgDurEDqymNlSYadtLvWLMxrsOaaqK2fj5tyhCwhgvJh9BR163QK+Ub8GLTenReluSaEerV7aga5U00llWsQ8PXfRj29F8AYP26vTHwE0mZeY28fccddmiFG9MUGob15Q1xYdiGM8GW2QsaYDyvFV9KNfZxbIZagkMaY1EUUDD9OkCCWYpHoCOQNbcDvZTy2TN9pq6oGzSHfeql+UVvltZF5r9zijpgBHyavC/ax4GXOEPu42LDwV3mdB+PEboQ/+M/Balx/jbOPpMpo4CReVRQCZhGYnfMn0thUs+TW6XgW1exyNlTVupCOtNit6fJGw5CYMx6wHeYMcQ2SSHN5h2O2QEzH9CoPixD3t7geN1/bexrfarIERqw4zvdTRSFgAYHZw7EJg71goYU1UkYT4aJI+gGCrmsnWwMuNLVn7yAuvCGeuhrb+eAysKUgPI3YOEyvFtrC3iGm35TkwUfMNsrrjvfIzl2Y9A+9PDyThFOnjuPwHAZGWNI1Dizc8duk8403hpz/Vp7IjBuHpH4i+IU3xEy/HgNo5BmWXrCzj67GSPgZ6ZxjwBCbOqT+ADjXJd7wGACiunQegal9d1IGOx8zzO9s6ZzxMTgJ+nBbeMcB0y5XFnWGf/jXskTFWtA3VY9eu1PwC22IfavbwCXxB1kdH8CHsbtghJ844Fo8f5HuJyZliOP5eYiV7L6cLeh6FEbG8he8OZKE1bAbYqVarPs1drOLYA9D20wLQiIocN/gLTTTGmOiLaNhESc8OOduCzK7npRrcqeEePMe3yX3oZ6uV1wJ6D4E5oxYgw0fN9sk2CREULSziber2RrMuEamgBgRv9fEr3VDLCIliN/aRCjtL2P1lMLG0xgmXBMJUzSWViZb
Gd1ouFo2T8UvSRCYPawIxvht+doiF0rdbqkGSb6M8jl2yS0eiNHwiTI5ezRt34ac1g1xbflFMMTy/UEcLom7ctbLVMgNvCpLxmNKyD6VKgtnk3vf8IpzKRClCq+YxRQBLFqQV5tkj4uC/x45i1u3HTFV3J7ODd2Qmu6XEavYsmTSPnsRCkzcRNkF0/eMYXNkc3ULP4C77w0nQya4Jzpv/+n7y2XwUjySEIG7h30BQyzfBch5L5wokjRrGEdePv9QhHddKfUJYvQGclbsy2oc3BBP//AodHqO5I795IFLIoYnaUjVJxgzj1xDLLrg3LBnsTSY/Opa4iHQS5sLF8W+kZc0BTm/WhovlzPaXcduwaCovUwx4R9+oTm/4IZYbxCHGQava97a0mftrzQrZ98qoaWmcUKcToe+KZJ4yBQXJzAd12XU/LNk8lS8kggBsQ2aU4F8jfnZ9NeP28rn6y6O3S4rzkLi9t/LlAoz5z0pmdpLzXm2Zmwln0HHdhOl27xFurlasfn8Y8kYHLPN/im7d52zv/mSzCcnG8Ok5jcn5xXov04qBpzSaVvduVJ5upBZfT2/HYMhuVEijL+8eWHeAYcEH2yIC1f3xWi4n1RMGH+E5hz3s1SermXGnpMtmghle2BdsQ0+e9mSKn6uRED4IhlcFLKL1F1msoWLnl/mqOK++O1JHQ0LqTRii1tKd7AhNvSRLYmi/I4wtbS/R8kjbpp30Tq9hp1N8l86nN8lFg3iBgglqLsQSB+GA3dx5JjMwvlvED2BMNfEK8g5DC+jUYxBaapM7cB0yxGZ2kGz5oMNMdElMjvGzV9Kc07YKJWni5mtLxlTz7i2SLaIeDNnYdEgaV5osvFLen6BRXJ2j2QcOlF92SmSebqCXefRC8bDGJ8qWxhMTR4RieBb8j3QEIsTOBgNbkkU5feE9w23xAdvvfnwFe8LTWlZH/F3zq/CAyJ7xhKxOKphvCGQ8TSey4OMQFRaGIm3FV/saDWI5kWFS7DGjOlpmhY0wdmBhrim4RSp0RJiZ4/YbplkpbJ08pd4of3bDrUNgxdn5z7W1Q7eimeCIyDWaRgtl6olZwkVT9w/d0kqdrSWwA52lIpTI7MXNpfkfR+M74GGmBmnBSOK4tqzUbSN66Ya4/faoYBwUXC99unTfcsT0jdnB2aKZzMEGC1r9k3CR34kzVjbRwIjV7DYYlTfgyiJIXYIA2M7szW+BxpizuQaYk/ai611nOjXK5dO+Rd2zqy2Q08Y4zM//eTzIjt4K54JjgBr8wI2eGDmLbH4G2S7MyUKZ55V1qiiiRgJI+2v/AJ35YuVpflrW+O83xA3rn4ObI3Q8nXGPqOZgzZZbpdIDRj92S51YIwnZI2arxJ12wVwovKdNXArVHtPqnqcx70hzsqdfzEWdWwb3HCv965QmO83xHVlR2NDrcQEM7w8VMfJUFe1NP8lvAlt85HjwZnTObdIbpRLMtyYZNeRseWSIbBlKi9ZxlbZdR698EQy6DmMhj2tEkVRARvwVPXiSatCsdhviLkmbzQsemRaq8PwUAIlXB1jt9qmE5JUGzo91TW3KMe2PhTjxEOAyz1NBgFCcTsizspdeJph6K/CL5xhy43GngJPG3ZLON77DTFxuYbY0D4M13ky1GNUvAJ6PmGXrnBRtPEb/M2sUQsusqsPxTfBEPDuPxlCimacDqUZZT2l8HKQSdboogtJGGHJCX2aq4B1osKtT+ZXNL8W7PN+Q8z5r4IRRHRNLAZkZCR0gh8ruKR40m/FAsk2K22s0OJt3paT8QIWG26y0k7RJikCdw//FlPWX6Rq79fialScPbLoam7w57FpI10qDgcwYx9UlkwOGjd8ABm+7DfEjHVvWRnxd05fkq+/1CxkEcvigoZbSq6p1Di73VZR4KbAQzUPC3hFKrTNVqQTgzmjj+UqwuPCTyzihGGEHzSIPwb97QwB9XuYd3LznMOh8N5viInkGWLGkZBaleYIVJTmPYocpO82v2bHZ4yOJ69bt+FlceKsHfwVzwRBgNNnUjWJg8iJw3OLum/Rq9+CEZ4iVfcgzLBA94+K0omfBKkKeqmZIebdglJEcpFTdSTNErmNeDN68YaEi0LuFtNgoHE6x7+HNmRjv3ywanVNIYDn8Ce5KODUDheXzFELfleri0guPsJ2MRl952nTzmeln0ZD7FvZAcmj5flKGNnmD7WinNtot5ROWo8V5r85IhfnnQzDeBSuijcPG/vQsY70qTqJHwSQBUyysK7cdp89akGfzFHzXyNuPIlwT3mDzVbAw0i4hnk9I7c+OQ452M2XRkNc31Zu6Aa3b2HKvGrupOzKsnzY71/mlHRwVZxR72/4FAt5z2TnPtzbqX5VPy5HgHO5i3VE7WneD3LtSBQQHjby4aOyRhY9ZHDjMwwy5R77FlIu7dqqxXmWI8YaDbHhlwugGhG3eqtEmswMjY3CcSlbWyWSXdG4kHeZYTR8ljmq6N7u4xZlyu5C8Ys7BPzSJd6xQ64diUDAzmOLf4VZYHE9a/hS7D4FCzsX5A6QUGM0t6p0ckT5dRoNsdeQDaDyER9wiw788mNJ/iam8VxH/MXNu+Y8BbuHbqzdtfsrxFDe0e3SR49oXq0+JxECTMOBDZJLXZ0tO9PCSSkOTMAAIz9zZFGZ3qB/hVngJDznKeHayazHGtAr1w3MnxYpz8a3haFL3NosRNHkJhWJVDsXt0NSoHeyRxXdDKDuj4GYhyCG8s66hlof/Gdv4n4t8rZp87xVv1YM5FZdykLA0BHEI7kww7HRpxj5cj+/0CDjQhyYcCoMr9STNKwgAyP8RQdNu9znizyZUiNwXKslbLaWVpgh97A9aYK5ixGyMT2QOXL+MEg1LiaSwWWBfs/CvT9L372rLmvk/OV4qF7WNO/LW0smIuhflYRFgHm8xOUPiu3AKzt3STuu/Xw0+fkxWOweAp/vbzDy7bu/LyzDxaowtp28nou/WTxpezQiNBriNP8uwvnD0orBlSE2CWb7Dhl5u3bUDoA/K6Y7k/Aoi1nRedgUcp6hN9yPF8QGfF/FmPY1Hv6vPR7+NTPSvxabU0yqpsjcjAAzDoFBc7zk5i7xLDeq5pvoGG4O1gMi9jX0qu7U9M6IgcytyYoIiV2ISr2ocvGk/7ZGY/b63qlEGnbB1ZltE56OMWWIw6MUoNj4+PjabrnFl9QZxvuYXsnbVGOy/xBkx6LuWM4xU8LD7w9MmGoJrgwDh0A1IPLDD19cAx7GBtQ3cPxlnPlxvQHk+Cyu8QYsSopFoYZ9tFKnXiGkT8IqYF5auSz/cXOq8x7m6ORSvZ1encl3w4drqrjI6raQF8/1TqZpF1SW5L3boiqir3sN8SAY4rKIGARvxLODX1dXgyEgjk/pOmrhuX7mXw6D5W7sRASGGD3zwAga7469GuEvRvUBoy2uBC43XWsiCVzc+0X9kY8Ako+bZmqwXvtululGYQhTjbB3mO3x4PmWH7ARRjK51Yx2aBo7r6Ik7wNZjBujJhpPeJU4JKY+sgRMFj6BzR5e78mYjm1MFp2VnnIRQD6T9RY
4Hm2BNjypSPR1mBY2WsrPdHcPNMJpCp+wlzznVJRMlmaERZeNhjjQOY/K2XyA/FwZ4gPwMPmlCr4mzZP+ayyYqVzOJjFTZHsRwNZ57sm0kshH7mCJ02bKGxp2+z6e7ay4vWcig6LXc/bW0jyZ7oMAHPsNMaNv5QGEaY+P7+ctj3HCc6osGb+FOqaehpHxfxJeWaWgNAQQi1ZeWTLGXMZD38dICCX7lGK+0ZQynA4zRecyIqyFfIIwk2HhTtqIVOxmxpJ9FSmTg9vhyKW6NXKnPgd3krBXqh69dmePLO03GD08mbBKKsUkI8DfMM2wpkH+b5PRRlP9czrGFJ2LiPA7fCY10/PrraVTvrFLrL2LdQH2X8rtxDgF/D6XyzN5uK1ZmIdIA34VTt74AVoXYgQjPwA/eeBMcE0RL+H1PG1aSU3vL3PbwN5+Tc2osQEjngyxn2l0G07Z+YdpbCMktGlEDGk4x/RalWgQwJuYVy/Ln+5hdD7CZX6Khpdqm7gI4A29XKwvmNaQ87NN05omZOtMkXLWbCOGqRYxIRK5YJjGzq5aOsV2IywU3G+ImSbRNRHA7tSYIJiAnVaU5r+WmqYdB8P8fwmonlIpSgSQtwcfUAkAAA2ISURBVGSBaRY+DlcnO8s0vVlCrydsxjGxQw4jtMPNsowNHaLhGT2S7knpt/e8SUfE2G+IDeFGwBRHVuH8CJqxRmwKUEUCApufzauqKs3/LR6SPLyt90hgqVgkAAJ4HrYc2cnzgmlV6sp/LX+hDtt87xoS1n+KpIwDTcsZC0LGPtM0flpV6ZQJP5ZMcDSn+n5DPHcowtf4p1L11/VLpfJTzAgPyUItRYyOyfyPT+GWsAhwzh4V6wmmFeTsfNO0ZgkZToWGGy0cOQ4qQJy8+wpmmrUaYzN6ZGqDRDKuWEi43xCL3hnJFYJzZYhtuKsVi/O+hkG+hJHnTBjkj2zoQrGMAwQwM6pKb9dmnjVR+UXW6E1Qc1pjgkqQiAV8NxVsyWeLNK82AAm4Zlp6oUnW4kBDrHlkG+KjadrqmCazkYyXq9hVLctbfv3A/CF4m18rpqiuEk4JYzsC2GZ7y6Ynrgq7m22fIIVlp2ARfcC+79I+sBXhWCECCENmdlI4Oifq8Vup0Zj2YCql9q4uzb9aDGyc6DdUHwcaYpYi1xCLng1/XigBVF10CIgcqHibP4rDCnsj1OYmtUU6OjzjpTVGw29WLJ38hCV5DbrREr0ZYiR/ok4dVoQj7Xrpgv54CXQKR2drvcgRwdgcTxvWo7J08vU/LZvwna39WWB+oCGeOWgTfsim4gEt9DGOCj+M7/3lFpSNFalI6i5Cbc70ZPbGwzYW9xEn1qqSiAggXK2OUjz5lnSbXn4E1oAusdTGDDHHcza1785wpLqfnROOxq56+IDfw79Jh6Z3OAKDlsKtT+ZX2NVXpHybb+ho4vEqPli7yU0tg/0NnA7dcB2qfMGq1TW5CJSUjNHBcYn4lz16/imGwW6E7/9ijEZS5fakuMUKAcx87rScA1fn4jfokS4zI1Nb8eGaGC297xAMYXgRBcaXMC31ycqSCYHQ3KoQ9LGuOni3VmH5aWQYK+QKxqooK70H3Xb8brl8FTczCBye+3CnGqMBo2T+O9CPQI7KA2dCZpgoGlcgAP/mU5VL88fB0ISNUtgncGFZJl7EX+K+H7rvmqwPXu0kmpnzfih23a946PDaGv/3eP4OtjehGlqqQ/wv8dWMtOcNxp6vLs2DIY6fcvCIOG3YO1Rb/hNunMTkHDyLqmumApY/xg80iSPp3pjIImhU1DX30WzdqL0ACyf4J4wyPzJxNE10Tdh/jsrUrrFkhAUknM+yxQgz2kR3D/uAZobGva6uQYyG5RphZJtjnH9EGnsb2x/eTqeUd52O/Q2ttbXa4OAUlt1HBr/BGquw1DXk1Y7B2/OHsJSKwDEEcDrIkThiox9O4ujLDXYSHog+WN/uiR9uB8eEUB2FRQDGd63IyicSQoUlbk5QsHJIYL2A25ANkbG/05zhNzfvLthnnOpSivEqNpJYKxgo6Bj4b4X8GBjC6BN9ibncF+Tx/HfAsX2+WeE7w2+No3upWzHEq04iQ39XvtjsGZo7/Ar5fBVH2QiIkTM3Gnr5Ge/KuNEFE+HOmPKlY2qcghGWFydzpMA4YLsspWBO6EV9CkYmXoy0936mvTRiSy1oxHXQ2Ts9lY2CW/iJY3nSLwukSLUiEkLGqKAcbgM+wkoz07Rez3CaOUwtCpsGrHXC4Ia48QZuxA2UPG3Fz9WjnUuzhply8LcutqpRCCgEwiJQWHYtZrYPh6WLiIB9i0FVr4iaqkYHIRB80UYsBDB64CDqqC/gDW3oj1HBJ/IXDaKWTTFQCCQQAoUr+yHV5b22aaSpXNkysQ1uiEUP6akPwRjvktlZgBen7kR7xMKRKgoBhYAdCMz9vD0ZbBlmtG3tYA+fbR2xjPn28E5Orq0bYt8JvwCSR22BhdNYKlh5jS28FVOFQLIjsG07frfczgTsT9GsgVhEU0UWAq0bYtGDl/0DSTEMWZ0dyIcVkdj7ropCQCEgD4HCMkQx2Ll5Am5LjVtMNCRPvUTlFNoQ3z0c253587YoL3Z6GbSMZpT1tIW/YqoQSDYEGpP6zLVXbf4qzR7xmb19JB/30IZY4MG9f7ZvVIyNHg38ZZWLIvkePKWxZAREvLBB/0So4MGbtKR2pc2Ryk4xCyAQ3hDPHboOcaOP2IjXsWQ0LKfAEd829qJYKwQSFYGCVSdANYSE8kNsVZFRKc3NedvWPpKUeXhDLIDxtL3DlgiKfaDz/lRbq4zxPjzUB4WASQSmlQ8irr+OkbDdIaE15Em9xaRUiswiAuYMsVgh5Wy2Rd7WyDnvB2P8DhbwjrbWUFErBJIUgallx2PDxuvQ3v48v4zNpZmDv0tSpG1X25whFmJkdMZKKfveVok4PxpukJVUsOosW/tRzBUC8Y7A9FU5UAEjYZ5puyqMvqPO3f5iez9J3IF5Q+zrWQtDfL3tWAWmWPq/qLD8Rtv7Uh0oBOINAR+S9xSUTSddfw8+4SxHxNeQhP7mI2oc6StJOwmeayIUGFPLsHedXxuKRFodY69Sesq15Bu8WRpPxUghEK8ITP/wKPLXPwXxT3ZMBcYeQIY12ZkYHRM/XjoyPyJu0iij3R8wMkZ8sQOF8/Oopn4dTVt1qQO9qS4UAu5FoKD8CtIbPoGAThrhz+CSuN29oCSOZNZHxEL3qatPJqa/Bf+UdUMeMXbsDTS9BRmfPo6YhWqoEIg3BO4o70/1/G7MQn/rqOiM1SMSI0f93pxBPTJDLGQrKJsDQyxO3XCuiO3WnC8ib/odFDjo1LmuVU8KAUcRKFzdF2Fpf0SfY50d8OzVkmm30pycexzVOYk7i9wQ+5Dwu7b8NTwkZzqOn3hbEy0m0nBCwLC1jvevOlQI2IXAjLV9yF/3R4xGL0MX8g/7NCM3YyU0O2csdtTikAxVnEAgckMspBN5hfmeMnzq44SwQftgtIK49ghlGP8k34gdQWnURYWAmxEQg5q68vMwqLkaBl
i4IGJjgAVGjK2CX/g0FSXh7AMTnSEWsooNGCL21/6dPWGQETlSOUboWgllIL7Sl7MlTANVrRCIHQK+9e2obs+ZxI3z8fsZCUE6x06Ypp7ZjzgPLodmDf2p6Yr66wwC0RtiIWdBGdwTwgjanXDEAiiMfQPq97A1+z0kyd6AETO+D99MPrvSelqQTZEmFwJTV3fEOZg9oPTxGCwMxu9EHOg5HL+ZFPcAwXaTh51Ms3I+co9MySOJHEMs8CosH4/tlkgOhOOQ3FoCK8H8O4j3Mwx0A/6Kf378MBr2fW/8vP8a0xqQ1aqRTrQR/ziOzlQl8RHA6ah4NnBYKv4hOzdOEMZno/F74BBVXBN/aS9N42eR/WxvG9YRhrc72rdzN1isAbqNpNnDX3K3nIkrnVyjObX8d8SMx/DgiYdRFYWAQsD1CMClRywXWdVedL2oCSygXEMsgCos+y1GkM9hZJyWwLgp1RQC8Y8AI6Qt0C5BmNqr8a9MfGsg3xALPKatOpsM4wVM4dvENzxKeoVAgiLA2B7S2MXwCYuNUqrEGAF7DLFQqnDVSTDGL2NkjIUKVRQCCgHXICBOZ+fab1SSd9fcESw12FmmrR6MLFGvwRg7kyXKTl0Ub4VAIiDA2A786s/DwtwHiaBOouhgb66IWUM/JOb9NW78mkQBTOmhEIhjBDaQpp2kjLD77qC9hljoO2fIV9QTxpi0v2F1VoV9ue8ZUBIlAwKMPUwZ3qE0a9inyaBuvOlor2uiJRrTys5FrPEihLd1aVmlvisEFAI2ICBcEcQmITICuVlUcSsCzhpigcK0dV1I37MIfuNz3QqKkkshkCAIlFMKu5TuHu5M/vAEAS0WajhviIWWHLvvClfdjL8z8UXFG8fizqs+ExeBQLpY+hv18sygvKFiV6gqLkcgNoa4CZSCtT2I18MYi5R/Lt4a3SSv+qsQcDsCjF4hzTNV+YLdfqMOlC+2hrhJloKVSIKCNzin05suqb8KAYWAFQTYavLw22nWiOVWWiladyDgDkPchEVh2YUwxnPhsujXdEn9VQgoBEIgILIMatp0mjl0sUrkHgInl1e5yxALsJZwD61ddQ2McQH+9XI5fko8hUBsEGDsB3jz7qGMtkXk6y9OrFEljhFwnyFuAjOwoFd2NkbIedgQcjH+qoxuTdiov0mKANJVMnoJI+CHKHXoayq3duI8Bu41xM0x9pV3pRp+DS5NwKJez+ZV6rNCIAkQ+BIJeh4m1mYRzRq4NQn0TToV48MQN90WH9eovhyZ3fh4jJAvwOX2TVXqr0IgsRBgVdiI+hIx7XEc5PlWYummtGmJQHwZ4ubS3/dlGm3adhZ2DeEfH4GqE/BXxSQ3x0h9jh8EAhnRcHAno/eJe16lIUM+oDFMjx8FlKTRIBC/hril1sWrU+hbYxCOssnB6jH+Uj881H2w4JfdklR9VwjEDgHkW2F8MwYQX+P5/BjnKZbjrLiPaNDQDcrwxu6uxLrnxDHErSHpW9mB6lhXrDB3hJ/tEKTlbBM4e4x5vMR1nDem4S/OIQv8xVljTCwK4nhzzvAZf8U5ZMbes8lEXeAzIjtUSQ4EOEaljOOcQubHs4DFsr2fxdmG2t7zDpvOPhT1zc83DJyHiHYe2oPc3L+Q1/sLdeqyWR1VnxyPjtJSIaAQUAgoBBQCCgGFgEJAIaAQMIvA/wNne9RHd0pXfgAAAABJRU5ErkJggg==", + "internal-vld": [ + { + "id": "ba1478fe-626b-11e5-998d-6cb3113b406f", + "name": "fabric", + "short-name": "fabric", + "description": "Virtual link for internal fabric", + "type": "ELAN" + } + ], + "connection-point": [ + { + "name": "ping-vnfd/cp0", + "type": "VPORT" + }, + { + "name": "ping-vnfd/cp1", + "type": "VPORT" + } + ], + "vdu": [ + { + "id": "ba14a504-626b-11e5-998d-6cb3113b406f", + "name": "iovdu", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] + }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "ba153744-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + }, + { + "id": "ba15577e-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + } + ], + "internal-interface": [ + { + "name": "eth0", + "vdu-internal-connection-point-ref": "ba153744-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vdu-internal-connection-point-ref": "ba15577e-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + } + ], + "external-interface": [ 
+ { + "name": "eth0", + "vnfd-connection-point-ref": "ping-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "ping-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + } + ] + }, + { + "id": "ba1947da-626b-11e5-998d-6cb3113b406f", + "name": "pong-vnfd", + "short-name": "pong-vnfd", + "vendor": "RIFT.io", + "description": "This is an example RIFT.ware VNF", + "version": "1.0", + "logo": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAWIAAADzCAYAAAC8ERwoAAAABGdBTUEAALGPC/xhBQAAQABJREFUeAHtXQl8FEXWf9UzuTiVJIDgASwocihyBNZbP+9VvxUIeKwoCoTgsetJArg7q3Ltrux6EYIn3hCCfuuxuquCNwkgKiK6XniAkAPlzDXd9f1rQiCEyUz3THVPz0zV7weZ6Xr16r1/97yuevXqFSNVFAJxjkCPqx9L372z7gjStLZM19uKvzqnNowZbZnB0plGDZzzWq7xWs1gdbrXsy3N4FvbdD1i61f3X1AX5+or8RMAAZYAOigVkgSBLlc+0dbYvWcIZ0ZfIoZ/hH+8Lyc6ijhpEcLwCzH2A9p/QQz/iH3OvOyzozrSujUL8xoi5KmaKQQsIaAMsSW4FLGTCHSbVNxGr9ZP9BM7HQb3DBjJYcR5ihMyMMZqOfEPGbEyxvgHqW3avrnpiauqnehb9ZF8CChDnHz33LUaCxfDzt21JzKdTheGlzOWA8Ob6gqBGRnE2WoY5Vc1TXt165K8lTDWGIyrohCIHgFliKPHUHGIAoHc3CWeFca2czH6vApsLoYvNz0Kdk42/YExWgyHyHNVJVPWONmx6ivxEFCGOPHuaVxo1GVs0QDdHzC+v+OcusaF0K0IiZHxFxi935fWiT2+eWHenlbI1GWFQKsIKEPcKjSqQjYC3S4rzmqoNy6H4b0KI+DBsvnHnB9j2yBDkaalP1BZMn5LzOVRAsQNAsoQx82til9BM0cV94WL9Q44VHOdWmyLKVqM1UPPp70pbN7WxfmfxlQW1XlcIKAMcVzcpvgUsvu4RZl1u3fPggGeEEV4WXwq3yQ1o39rmueuypK8d5suqb8KgZYIKEPcEhH1PWoEfD6uPbCueBLcDzMxMuwUNcMEYICFvaeYlnGbclkkwM20QQVliG0ANZlZdh698ESD6w8g+uGEZMYhqO6MdsAg/2nAgGMfWOE7wx+URl1MSgSUIU7K2y5f6axrHmlP2+v+zjm7BjHA6rkKBTFj6zTGr6tcOuWdUGSqLnkQUD+Y5LnXtmmaPXLBqdjtsAgGuIdtnSQgY4S9Pamlem+reHbi1gRUT6lkAQFliC2ApUgPRKD3Da+k/bJ5491YjLs5aRfjDoTE8je4Kio50y6rXjr5DcuNVYOEQUAZ4oS5lc4qkjW2eDBv0J9Cr8c623MC9saYDlfFHytK8merbdMJeH9NqKQMsQmQFMl+BLAIx7JHF93Kid3tmjwQ+8WL608YHb/Yrn3GuI2Pj/8lrhVRwltGQBliy5AlbwORF2K5Xr0AYWkTkhcFezWHMf6Gad5RlSWTPrK3J8XdTQgoQ+ymu+FiWbJzl7TjRvUSjIjPd7GYCSEa3BO1+DelcunkxxJCIaVEWAQ8YSkUQdIjcHhuUXe/XvMGRsInJz0YzgDgxQvvf9v0uzClZsPLbzrTpeollggoQxxL9OOg7+zchYMaDL4coWl94kDcRhGR6wFT/G349xMSuyNdJduOihqcxKFjCihO8vDGiS6ntuv3m4w9G15+PU7kVWJGiIByTUQIXDI06zJ2/gi9gf0bI+H2rtK30aB+hbC5T5GafZ2msXUej2eDkeqvbn/IUdvDnUMnNp9o242eOtN7IhlRT8a13gbnw3BU0iA3LkAyjd1TtTT/VlfdAyWMVASUIZYKZ+Iw65pblIOR8H8QH9zBDVphdPsJAjZe8miel9u0S/kQkQW1suUScdE7tnw/2DD4CBj5c8H/DNcYZsburS7N/4NsnRU/dyCgDLE77oOrpMgatWAo58Z/INQhMRTMj4fzNWLaS6kpqS9vfu6aH5yWpfcVT3XYXrPrfMwIfkuMX4g8yu2clqF5fxrTHqxYmneDijVujkpifFaGODHuozQtAhs1/PrrGAkfKo2pBUbw6VZwjS3MYHzBjyX5myw0tZVUGOWfa3dciTQak+EvH2BrZyGYA5/iytLJ+coYhwApDquUIY7Dm2aXyIeNfPioBmooxwiws119tMqXURnW0R7oqnVasr5kTH2rdC6oyB49/xS4SW4RkQ2xEAdumjurSqf8KRZ9qz7tQUAZYntwjTuue0d872Mk3N9R4ZGJzKOxWypKJgtXSFyVzLELh5Ffvwt+ZOFPdrBgiZJpI6tL816Q0emRl88/tLaW9TMY748R/2GIK2nfuDbAsT7AxBpBB7hlRITVTrhoduIa/uEzR1pPjX/LmOdzD6V8saXkmkoZ8iQjD2WIk/Gut9D5dN9y77p1G17GD+ucFlW2fcUUeyt+1H88Q8t6pKRkjG5bRw4wzs4tPpkb+j9grIY40F2gC+C3MyXFO/ynxRM3WOmza+6j2Q1GPV4ciBIh3o9x1h8zoMOs8AhB+wsiT77QOFuDUfubKW3brNj0xFXVIehV1V4ElCFWjwJljZpfBCMC36cDRZznxvg9rEPa7KpHrxUjq4Qoge3fRvWNMG53Acu2TigFP/GXHTRt2DcleSJOOmgRuUGyLn1oKGvQL+AssCtyGF64IpbagQIzT/QJjPObCDH812nU6c14f+naBZoyxHYhGyd8s0bPn8ANesgJcWE4viCvdnnV4rwPnegvFn0E/OysYT4M4AVO9A9MX7l+4OSLfD5mNO8ve9SCPsgRfT2cGJfGxOffXJi9nzGK/wkv4ac1SnmyonTiJ0FIkvaSMsRJe+uJuowtGqD7eTlGcBl2w4Cp6iOeNu1+v/XJcbvt7ssN/LNGFd2EWOS58B+n2C4PYzMRYzxDjH47jy4+H6GHN8CJLNwPrv19i7hwGOaH0rTMR34sGVNjO0Yu78C1N8rluMW9eF2ufKKtvmf3avx4+9qqDKOfNU2bVFkyeamt/biQeefcBb/Wdb4EBvFwe8UTi3f0IP4XMc+/srcvudxhjBGuyOfBVTU/kVxVVlFShtgqYglCnzly/iKoMs5OdfAj+1pL0c6tWJz3tZ39uJl3t8uKs+rr9KUYHZ/mZjljLhte2DBG93fQPPNC+bxjLqdNAihDbBOwbmabNXL+5TAMT9spI6aeazxt2AVbn8yvsLOfeODd4+rH
0nftrEEKUbooHuSNpYx4eW/Fs3lb9bL8J2Mph9N9K0PsNOIx7u/w3Ic71Rr1n8MoZNsnCvuP5skcWVkyZpd9fcQXZxEi+Oknnz8O18EV8SV5bKTFIuTbHi9dt3Vx/qexkcDZXpUhdhbvmPeGULWHYYSvtUsQ/ICePSpTu2rNwrwGu/qIV76BxbRRCx5ANMOUeNXBYbn9yDw376hO2oxEf56UIXb4yYpld2JrrmGwt+xaTYc74kVs0LhExYqGvsuZo4rEeX/TQ1Op2iYEYKRWeTwpY7eWTPy26Vqi/RXbFlVJAgSGTCpO+XkPvQQjbE8eCeSKSMv0XFT25FWuzhPhhlstTt1o2/9Cho0Vp7tBnjiQoTtmEVe373/Rf3d/9pKlnYRxoFtARId22MQLHIkr5/fVRj5GYf3s0BDuiC/TUj0Xbl6Yt8cO/onIE4nefTgxZFEi6maLTpx31A2jFNE+94ldjLb0EUOmyhDHEHynuj48d0kG/MIFdvQnVrk9xM/b/GxelR38E5lnj0xtIl5iKxJZRxt0u2G5UfW8eKZt4B0zlsoQxwx65zqu5VV5EhO77Bcc5wwha8GlW0unfLP/ovpkFgGxAJWaquXiZfaT2TaKDs41hAHWGFWvi6xxiYKHMsSJcidb0SMwcuA0tZXq6C5zNgdT7BXRMUnu1mImgZ2HVyO1JMJnVTGNAKcTd9exd7tf8ZDNuxZNSxQVIRYkVYkZAtPWdSG95hhiBrYZs16QoyPe9ziOhzXmg2XiaB5xcCdDNi++G6kDRJrBXzAk+AUHEc+iuUPXhZMdxx7djNwD94Sjs1yPxbmBA489eYXvDL/ltqrBQQggN8U8hLfddFCFuhAGAbaRpaaeVPXctZvDELq6WhliJ25P8eoU+oaGE/OfAqN6LAzpMfiLfxyGN5LCdlNWehe67fiQCXQaNxFs+A5DrW6R9NJaG0yld3oYH6RcEq0hZP363t136zHtFi9kVSwhwD5tm85P/f6ZKT9bauYiYmWI7bgZS7iHPi4/AR7UMwP/GJ0C49tGWleMPUFzhl8Vjl/2yAVjDTKeC0dntR5B9pPhkii22k7Rh0YAs5eLMHv5Z2gqVRsMASx6vpeuZZ4dr5nclCEOdlcjueZbn0q1u36DUe4VcCGcFflo10TnzHM2zRn2ejhKTHffxXT3pHB0luoZrT9TyzpebdqwhJppYtyzl3HPLjDdwC5CxrYh0PlHzKZ24Xnejc06daIrfE/B9V8h3VtPDC5cFUYGY/zSgIF9L4lHd5nXrvuYNHynrxxBOl1JNTsvhc6dGvXG42pXYbSJ0oe+GY5957ELjtcbDLlGGJ1qjG5TRjgc+pHXw+3zB2QRPgtGLjVyLtZawsgiJwh7B1b2DZ7iebujwf8bLgNa/9wlqdW0rbfBqS8noy92bA5ljJ8H10rMwsrwArvw03Ub5kL7W6whEHtqNSKO5B40LrJNxEI3jlfnR0fCIvI2bB7NHR72QUNOiWL8KCZF3k+Qloxery6dcnaQGnVJIgLYAn0vniscu2RfwejxcxjfZzXO3zgiSyuXkcshO3dJO65XXYyNKmMg/3kYjqTZp0HrnJmH/reqZEpcuXiUIW79fh5cM738CNL57ZigTcDoIf1gAgeuaJ6Tafaw90L11Cu3uOMOQ98EQyzv7DTEDHu82uCKxZM/DtW3qoseAXHAp1+v+xqx34iYkVkCCeT/jfPj/lGxJO81GGPbpm7iVPDtNTtHYnR/I0aqJ8jUIiwv5DZO5akn/LRswndhaV1CoAyxmRtRsKY3cb/YmYZE6g4cfdOqTGwLZeR0pxbnk7UkxyLdtVike7jl9ai+M1qG0fCoqHioxqYRyBpZ9CcYYp/pBiEIYXBrMXBYhFOf77V66nMItqarcHzTOQY37oVBtvc0mOYSIbyyR6bnFBkj/eZs7fqsNnSEQraw7GgqKHuGeAOmcRypI2NphCEooxfCGWGhDk7rlW4w4Rv+RyioVJ1cBA7JaP93TPG3RcsVRvgNzasNqFqWPzkWRljIX7E0799dtMzj4Yu+Ezo5kxSK0/Dvqow7osXPqfZqRBwM6Xk/ZFDFZpGm8DYnF02CiXLANY/nHJo17D8HXGvxRbglthtGhUy58QNaU1U6ZWiLrtRXmxHIHD1/KsIf50TYTTVenrdUlk5ZFGF7W5p1H7cos273nr9jdHylLR00YwrjVkcp3uOqFk/6b7PLrvyoRsQtb8vU8ouoYtNnMGTTZRqzlt1E8L2GUtu8Fa7ddoNfJFtuTpoaDYcD3oZ6b0a7BxBFUWGVNUbBz6R40o91mxEWemx64qrqqtL8cYxpF9udY0MsFvIG/QGr+MWCXhniJtQL1vaAGwIrrQio59Sj6bJ7/rL3ydc/7LSOcY5YZnkFP5atPTLZYnkcFSezCGx9ctxu0vhss/S4Vzux2eYyGLortpRcU2m2XSzoqkonv9gmnfeHv83m07352WJjUyx0tNKnMsQCrakrryNeJ0bBF1kBz2HasLHDAXkYP12mXBhdLY2XBQ+ZeruFV7t2GQvgV90UTh4Y4Q+ZJ2UwdjxK30kZru9I68WWZBjkMZD9rkh5mGmHOOd5IrTODG2saJLbEE9d3ZGmlpUAfDF9iVkguqmbr9HycHRZufOPQcha13B0Vuo5Z6Wm6H3cCyxfooJVvUzRKyJTCGx8fHwtCGeGIb6/iyfz15UlE74KQ+e6arzoORYS/4gMdLbFTcNF0c3Qq6e4TvlmAiWvIZ5eNpSY/iFiDEY3w8OdH8XOp7ScVWGFM+i0sDQWCLBIV3mG59C3TTWpXYVIDeEW0VfDIOOvKrIQQAJ5hCKyjQfxQ7wsMc8l1cum3Li+ZExYt9VB7V10oXLp5PsRFXS9XSJh4e5mNyeTT05DXFh+I7YlvwdXRLyM3j5G2Jo//EPKTg5PY4niefPbmY0bApw5iWTdL1LByjvJx5Pz+bIEcXhi4RpCBESL6TtbKTYtVJfmvRCeQ3xQIE79QbhhCvHSwSBWbkFMdpc6vm2CXK7yuCXXD+W+L9OwILeEDENsIXVsL3/0t4utM8MDbgmpO5gwIjb3Iy9YdQIWOJvltcB+Kk53UE35K+Rb2cGM7IomNAKna5mLMI3/UhgpLMj9ZeBxfU+Jp51jobXbX1tdmi/C9UwvUO5vGf6TYfDbRY6M8JTOUySPIRb+4M3bXoMBznUe5ih75PzTcBx63/AK9vVL3LmELc3tmef9cP0G6pnY7BKs8HOplq0g38f2nBwdrMsEvSZmJpyx2zSN/wYLclPjMcOY2Vtzpifzj3jprDBLb56OH17Bt11hnt45yuQwxNNWHwZ/8FswwlJ9qA7eprAj4l8qfugPeeRl0+Pss3AZuPbpz43WfcIiz0BNzbskwgNViQqB6qWT/69y6ZR/RcUkDhqLlw7T0i/DjGyLbHGxkWS8bJ4y+CW+IZ6xtg/puvAHHy8DMId56FjA+Jky2oYdEbMG4ziZsmEx29xouHBlPxNx130QHvgeTVs1QKaMilfiIlBZMn4LJ0++bA3hvju5S+5DPWXzjZZfYhtiERnhr0e
mMu464BtvHKvC4sQ78PsVk6b9nsQWZo9nIHm1I6nTIR1wCkcKzRnRCRs5toW70Qaj3uForNTDE/mBOXp2oTk6HNdk6G/TtNWDTdIrsiRHQCxEwkUheQbAmW7otm+vtnrr5E1lrfZsN/3U1QPJryMvAz/E7q4s8K+A4X0DMr1O3Ps6Dv/8PmRbC2cvIzynp8ylZo+HlYeUramS8/ObPob9KyIqDP1VKixDKs/hrt//H1YfRWA7AkhYdIPRoK/Hs401EFnFEIb4TlncZPBJzBFxwB/pf9UVRpjRRhjfP2PUOwgj3K74dzlGuY+GNcIW7y6mXBJH/Yynt037xpQInIaYomsi4jwbbqJ/0/SPujddUn8VAq0hULE472scC3Nva/WRXMdvpXeXUcXDI2lrV5vEGxEXfogfev1rAKybXaCF54tTlhlhR5r2GM0e+hYMceNgdW74lpFSYETcQ9aIGLFnP+3d0RVanOkfHgXXT/vQREFqOaFd3WvkW3+qGbdLEA7mLmFbIP3li3a0o7YDFms7IJNZCmn+neT17KROHXfSjX0C57CZY6aoYoUAY+l/J6r5vcxRscH1s6FPWax0atkvfr8JVHzr21HtzhVYPLI2SpMGgfD58nk4u+NB8o3YIY1tGEbiKPadO2r3YAYg5X6CybtVy6acEqZb5OgQO+j4S2HpWiVgKymj8xnk6ym28UZWAqemkIgYwT/eD//E58PxDCB+mSG/QChMWAPqd4J+I2g/x+cvyKN9jlNYvqDunT5XhhrIuKQgUf4CbMrIkyUOIjKWI7XrmbL4RcsncUbEgVOUdz4fEyPMaCtGwH+jTulFdNvxu6O9KVbb76o1OoU2ONY4YuRhzi3BaADwjqLwEVS7tQgMxptmUri6L3EdfmmcdIzk3+Q3QozIwwkXSPQP7MShr3xwQAbdaBRlc3UtXjRY6KU3SfO8QScMWU1jmN5Yqf53GgEtRfur7tcn4p7LcadyOlEMYEzN/BxQNnEMce3Ov+MmneUAZvu7YIQjaLSZ1Pmwe+jmI2r2Vzj7iTU0tA1ncixJxNgPpug5P8YUXSgiTldjO/QH8JsvDEpWvDqFNnJMI2F4DfHP3zMoneyLgTMJ+f+A7f+gz5n0YfkOZOl7HW6mpyi93ctmUpLKFimZ+QlfMQ7EfQvP+RkycACftJ27a08EL3NZDWV0GoJHYhjigrJcLAA5m12JMSwGatfR3GHmRo8hbkLUVZrWlppGclEzwySdkYm8FqIjhrwSEl4BnN1H08s+pJnDV+8T31felWoxFf1Gn4w+uu67HqsPnMPVQSPxnI2kmp3b8PJ4DqGGi2jmMHPRJbGSO6H6Zc/iWZBiiAOwGCRcE8oQS3lGxMGe5Ed2KocKYz+hp98j+qHEoR7DdsN0vW1YInsIOsphy9PIj3SbhWXCPXA0bPsNVCOy4sX4jMDWlesEGacgPHIKRsnr8OaaTYNzlijXReuAyahJ11JKa4yGB/EyTJHBD4vSUjdBRSOTHH9LNBJE01Yk8SH/EtwYMVqxvzD2b2Ipx7vJCAeUFiPiWBQmE3d+JKIavob74X3cz8tcbIRbIj0Q8j5Da8o2UGH5eBKuFFVsQeDHkgnbcAIN4vBlFdZXFqdo+cS3Id60DX5h5DKwuzBmENP+SOk559PswZV2d2eVv0FGbDJKBSITrEobip5LGmGH6sO2uj7I6vcofev/Eot842zrJdkZazgyTFJB3omeQyYVu+LFGb+GuLDstxg15Uu6J6HYVODcsLNoTs5dZo6yD8XIrjqNE8KwYlAYtYlBr+7uUsRIE1+EdKtv0R3l/d0tbPxJhy3PqyRK7d1YTb+SyC9iVvFpiEW8sEH3R6y16YbsW7giTqJZI5abbhIDQuxbiE3EBrbtx0Dd+OiS81OpwfgIBvkv9NePY+M6ig+kLEmZmpEh0xBjvVlC5I8lDYITx6chrt3pw6jj8OAqSbrK6BOsip9Ec4Z8JYmjbWywZQGbOSQWw2ysJlc700LBzpGWlPPbqLpmPRYifx2KVNWZQ2DTE1dVI6pHWqSSxjlmMLEv8WeIRTIfEbVgb3mX0tNOo1lDRYSE64tXk2yIiYXYJNEMDsY2N/umPraGgHBXcP42Fa68FX9Za2TqujkEEDApzRCjR3PPujnRIqaKL0MceIj98xE6ZF/8M0P6xwzvueQ74ZeIUXW4IfK2SnVN4Phxk4tm/FuHVY3f7sQza9BfEVnxT+TYELv5VIkYAbY94qYHNeTKEB+ESbgL01ZdDZKTw5FFUb8Bu6YuJN9QuVP9KAQy0zSjrfdnM3SmaRgzFw7oSfUhhrbaNF9FCI8av5Bqdq1VSfKjehjkGWJucvYXlbjhG8fPiFgseHBuX/4yRptw0tB5tmYDC38/IqLAfvlfYBClRU4gVjPLlCAzB3+H3YWXglYt2pkCrIlIxEwjSX7hqpOarqi/5hGAb0eaIYajSI2IzUMPym21eTDE2ZbamCbGVEfznCc7R7Dp7iUQItFmlQQ2e1kw82FXc4a9jnyx0+T1nSScGpPk/4emll+UJBpLUxOHqEpMrKVcE+ZvjO/bdPiFbzXfwCKlxifQrGGfWmzlLnLGK2UJhHSDWdm5j5nP7zB7+F8QB7RUVv9JxCeDyHieCsqvTiKdo1YV26uQ3lRWYfatN1kQMT5cE7WV12A0fJgFvSyQsiKaPSIRjIhUXy3j9db24We0Gw9jvN4C8Iq0EQEPnu1HEW+Mbd2qmEHA/GKyGW5mQzVN8YqYyP2GWOzd53xqxBqGasjYx0gAflMoErfXZf2u+LCsUUVPIKDkdJmy6qSfYImfr/8u7EAcA2MszVdtqf+4JoanktMiLOCdHddqOCQ84ogPldUVjk06H7+f+b2veMrcArWsjlvwcb8h/tZ/JZaaj2whd/RfGduDZO5j4vUUht43vJKWOaqogGr0/2LPvMBIanwqTnG2vgFh9ojPgOm86G9OMnJARjFDXwY3xbBk1N6izkdYpA9BzrE2zfN/rt35WVbu/ItDENpaJfXHK11SETdcUPYF+PaRzpvRNCQjny2drwMMu+YW5fgNWoQHyLbsUYzY1qpl+eb9xE16i+iW6toNmMVI/LE0MbfyN3AMktgVuRERJZUYcWKkzjV8PgSfD8ELA6d7c8TzsiMgq4sGJDhuKyX1RLr7hC+taJtMtJmj5m/DPZQ2Km6OHUbbJVpqyg0Vz07c2vy63Z/dbYgLy05BWsS35YPAvqCMdsfF2ykL/XOXpG7Rq/8EIzIVxsMjH5cDOWpMO7qydLJ1g1BYPhKZyEoP5Gb7t80wsm9hYlBGXqOMumSuNTXbEXlLGnYfhwC8QcSNQeDxP8C2l+3Shu5gHXXpPjyWp76EFi92tTje6JCdO2rkxs23VIfRz3hj31pZmv9oyyq7vrtixbBV5TiNa7UumgqmXR9vRjh71II+W4xqGDeO/LfRKG+hLadTQW3dEM/OWYaE6f9C2/Mt9GadNBD7jdOymWcJtqO/DyNqHRnh2yYSqRX3p1ecWnY8QvLEy+QSXB9oXbCoWwykis33gcvEqDklGIPdu+
sH2K4SRtsG8UdwYOkVzJN+RWXJ+C129+neEbEIWaut2IrRiWQnOltCc4ePtRtYmfwzc4vOxMxgqV3TsVZlZWxRdWn+1a3Wh6qYsfpX5Pd/CpnTQ5FZrmOsHm2exaygmGblrIzI+FrpdHrZUJzqjBwRNBrNbJ+FHCgauwLP6jMHXkvub9mjF9xgGIZ4STlTGPveQ96LKkonfmJnhy7yjbVQs77qt9KNsEjwnpI6o0VPrv6aNbooD4bgNceNMFDBUr4YEUdW7h76NRj8LbLGwVqxOvC7l9LZUTgh5WqaPfwD242wEEOcozdnxKWUwvpglHw/ZKgNJp0t1xgvphmrjrGFd5wyhRG2Fs0TrZ6cH2lQw3v4HV4YLatQ7d1riHVdvluC8yXxsgiSm7vEg6iIe7nBF+AGxsSFhNCent2veOjwUA9QyLo0JLnBnsiQNGYqxUGt3pRjYBD/QL4c26eJQUW6e/i3MP43YlDcHy+Al4LSyL7IsXGhAad+qIxt+5DFYpqzhhg943fQDgvj/5c1asHN+wSR/MGdrglxgm+N8SN0lTgVhP/Q4x1Es4bYOsWQcX965RZ33G4Yi/EEnCuDXzQ88IBcUbVsSuTT46nlBdg9Fll0CmPfkKZNx67H56LRwZa2hWUX4v4swEyluy38mzNldC1eQo82v5SMnwOL1Ub1LuCeEiv9cULIQ0dlatetWZjXIFMGd46Iaw0RzyfRCIMbo5fiwQh3GTW/13Zd/8ANRjjwoDE6JaoHLitNnKRSYZkHo/nUudsAVxphoczs4S9hZHw8/r1oWTerDTibq1JnEm3Vfh4QSyMsbhtGxhO/qzJePfLy+VLD59xpiInOtPqshqVngWlyWLJYEgg3gJ/YcshwbCzlaN43Jxa5n1gwuu343TBWc5rzDPmZ0S4c1HopRoDXuT58a/bwavirL8ao/ffQUSwi2lSQDa9mp3kMbZIi1mxZgwMHBZtQErlYztxdy17tcuUTbU2QmyJxnyEW/jBOZ5iS3iyRmOLOHv6OWfJY0B2e+3Cn2lo/FuVs2EUYhUK4GfVRn3Sb3rkIImwOKwajz+ELzsFBrYvD0rqJYHbOfTDE52DaJTErWEsF2QQqWDmk5dVk+o49cBpcA84tloYEl+f49+xaFvVvY28f7jPE01eLFIydQ2JgtZKxp6w2cZJevFlrjPpXYIT7Odlvq30hZAdP/BxvChtYVTrlhKj9Yb6e+PFod7faX6ACYYXp7YfR3UM2hKZzae3snLdgiC+wzxiLLezsDpdq74hYVaX5D6VrKd2ZRjehw9g/J5zOgZsCeV6iTy+Am+uyMrUc59EZ/5AqFUvp49ZDQMUbdWO18SKMcGwX5rCbCA9DCQYdT1eU5r2DkYf1zRGhbppvfSrV7vwvZjtHHUiGrcgavx0Z8OTe8wM7ce7b1HK4cvgr+Cdt2rpfeHFPPMcjb/a6/deS91P26PmnIKJhEl5Qo2EM02OIxP3Vy6Ygoiby4r4RMTPk+ofFGXQuPYlZvEnFGzV2RhipfRi9iJ1pl3TVsrpi9JtXuWzy29KNsHg+ff2FD/XOFo/qZvhXz0gYIyyUm5vzNtwUYvOH3kJXCV8x8mL+6RIYJQSLyqVT3sEze+X+UTL7NEaK3ZA9cn5U+xPcNSL2IflKTTniTrnJwytNwK6xqfAP/8UEpeMkiBO+H0b4esc7bjxW6ekUjf11S8nkzxzr38e9VFuGhEDUG1EsK0hreynNGuhochXHdC0ouwn3dp70/sSmJK/Wj+4eJpJhqdICAWRQG8J1ug4vw98Bf0fD3DCoycOLYWELkUx9dZchLljTm3iD9dwGoVRlmlj8WRWKJBZ1yIE6ESPiiG5apPLiQdmFadxDaeneeZuenijitJ0vU8suR6fH05CcaTSG2TBqdF6loD0Kv2FB+X8wqPifoPXRXGRsAaI18qNhkehts655pD3f0XA7M/hkceKMI/oypns0dkpFyeQPrPbnLkMsguQNLjEuE2fRDcnJdNsPPjv34d7cqP8I/i0b/IhBHwG/xuiejDSa+/0zU+zNXBW0+yS9OKOsJ5Jvwp8r2V8Mfz51yzzMVHa5JIW+SW2xOWqHoc/Ab+0PuOZtum7XX/iOvmaezEGVJWMw6DFf3OUj5uwY86KboGT0ttuMsNi6bOgNTzpmhBmtZ0z7dWXplAJlhE08MzJJxLZoRvIjHUQu3p+qL5IpaqLy+qYkbzvcBbchCigHRvJju/XE6PtXXK/+u9V+XGaIDcmGmL9pFRC76Zcb27DYwkfY3Q98ZDpO45qNRbjBVaWTV9ven+ogOALpOfeiojx4ZRRXORsXReuka4rcwmuPytKGIcf2g3YrD2M8AXkpLL0o3eWaKChDYu8oMn4dhLB2WmAV+6DrsbmQOXbhMGrwv4/e7Z0iiVEwaVcrAxyb+3xQr1NXI6exvgYvYHmLR4ywCTO1G80eXHlQf+pCSASyRxVdg/WZ+YgFTAtJGEUlRt8VnrY0cOuT+RVm2LhsRCz56B9PhmtWlrtNKm7DGvxP4abYaoSxIPdC+/YZQ5URNvP4O0Qj4n4Zf0xqbxzPEa+/RCrPJGEmTt7werB1n7FNdqmMUXFnfQ9/2Cx/9xhi31qcISZzRx0W6lwUGlVfrd+DN/DRZm9MJHR4Cz98hpY1euPj412yDTQSLRK0jSd1FiJWGqRqx3GskyoRIbClJL+cpaTCb0z/jYiBiUZYB7ooa/T8CSZI4UV0S6nTu8oVhbtmNCwO+0Q0U55c/VpyY7Nw2OfEkpIxiRsS1lLlePo+c/B30kfFDDlZJGyvjScYZcpa9dy1myk17QxsYJIbMttMSNyeuWYytbnHEHOjfTP5o//IcECoSwpOXEZgf/T70YOrE8iE8vvqZflYBFTF1QhoKXIXijjPpukfwv+sSqQIBIxxSurpthljzjvtqaOwu+7cY4iJyzXEnH6K9ObIbIf98LlYGDhJJs/mvDTGx8Pn5dwZXs07V5+tISAOJWAk92AC3nCmNSEUdUsEhDFO1zC7YPRdyzoZ35FK9vrOY4t/FYqXewwxl2yIie0MpbgTdSKhD/xEtuWRFRnSEB+8yAldVB+SEGD0pCROjWwMdppUfknK7MeS/E0asUuw2F0jHQLOU40G4+5QfN1jiEmTOyImirkh/n4b/x0Mca9QNyDSOiwyvHzdwMnKHREpgLFqp6U/i0U7rNtKKoz6SeKU9GxErDEiKSbaAQRyKY/JHrWgT2u83WOIGXVoTciIrjMjpobYhwRGcElMjUj2MI3gz/r8kIwOl/t8SACjSnwhMHPQJthhEUsuqfBeVLxaXnyyJKnilU3V0vyn8fuyvDMurL4cyV7JaNUeuMgQ83ZhlbFEEFvXxIOfFo2CIT7GksjmiH9BmNrFXz39ux3myBWV+xBg/5Imk4gn/gHGWBVpCAwY2Pd2zFqk74aEr/jK1k5Fd48hNliqNCQFI7HFN4YFYSu4mfIL8kaMqyydbFu4jXyJFceDEdDeO/haFFca9L5RtFZNWyCwwneG35PCJuGyv0VVdF/hK66v9V8XjIl7DLFsJznCR
oIp7MS1QE5UzofK7kvsmsOOuRdl81X8HEZAkxxaqUlOluUwHG7srmLx5I9FxkLZsmHH3dWn+5YftLvWPYaY0x6pSjOKmSEmXb7DX6zm4gw5cVaXKvGOwMwhWzBjq5emBuedpfFSjPYhkKZl/Rm/u2/2XZDwAYv3XT/79IuLW7JyjyGWPSI2+KEtlXXi+94jtkXyc7kFO3S2PJe/US5TxS0mCDSeByjzZBLZEUcxgcVtnf5YMgahbJrIYyy1GIZxUGSGewyxlhgjYmPPrtGYfkj9YeCt/G27DulzpT4NillsEeBc4oYjJvV5iy0w7upduAIRRbFWplQ4KPLsbpcVH3BqiHsMsUFyowA4i4lrAiujo2XeNMGLM+0mlchHNqox5sekPu/KENt4O2EkZ0plz7mnod44wD3hHkNMhty8qsyhc6qa3SFxThaSsJzd7FLUHxGq9mH10sn/FzUjxcBlCDCZK/JyY/BdhlSsxdm6dPIy+PQ/kykHQltHNufnHkPs0UwlUG4ufOjPbGDoevm17JeGC7BlKk0qZ0YLpPJTzNyBACd54ZWc2rhDqcSUAq4J/KzZbMnanRUYuO1l6h5DnJItd0TMeSbNWB0y0YZkYOFC4P8rlSemr542bZ+RylMxcwcCTOKBosyuzH7ugMoNUmRonUoxO5W2WzcwYNvpP7VJN/cYYl/PWmQ/snTyaZMSrf41+LBW6+yo4HS6TLYaZ09tfXLcbpk8FS+3IMA6SpOEsy40vfwIafwUo4MQEBEUWIR//qCKaC4Y/LSm5u4wxL6VHahg5d+IU0aTYFL+GkaOFD4mmGSNXXg0btRhJkhNkzDmLTZNrAjjCwFO4kQaSYUfTrrxGRWU3URLuEcSU8WmBQIexqTOTuEndokhFqcLFKy8BonnvoQRvgV6y32IuIMjYr++D9QW9y+yr4zeryid+ElkjVUrVyMgnnsm96WN3087LBTPozXlq2n6KscGIK7GWbJwp2mZr+PGSVzL4oOzc5cEcuzEbkRcuLIf3uAr8QA9Arzs2RnE2GDnRggyT58OPEFS376Sn0nFLhoE/rz2MDz36dGwaL0tH4TR8QcY4DxIU1fLc3+03mHS1IhjyDDrfVmiwl6mVx8v+DlviJEeEgb4NjLYh+jf3jc3523oozUBRSWCF5QV4oel9qNpqa8F7UhdjH8EaoxetirB8RvjNIVI/xwG+Rxb+0oy5hrJTdhkaDRAQOisIS5Y24Nqyt/BFOov2KYgN8yrtQeC71+ZbI0k2uviJA7wkJYBS+xvryyZ8FW0cqn2LkVAM451RjKOA3mRcnNq2TR1yKgcxL0pnvflcGrkgoCXQJitc4a4cBXObasvhwE+UaYiYXlxti9EJCxthAQ//gwjzLkwxnIKZ2/KYaS4uBIBg5/gmFxidEx8JhWWP09iUVyVqBDY/NyEzxHd9XNUTJo1hqvDQUNcWHYVceNNGKvsZjI49fFkuzvSDR6YXkjrh7E10ngpRu5DgPHBjgvFEeNey1bRHeX9He87gToUmzuwYPeBLJUQOdFT8LJ3RCz8wVPL5pLBH4cRTpUlvCU+wvjPWGPrVBD7bnpYkikMMfdqyhCHwShuq0V4GafjYiI/50dTg7GSppWPiUn/CdMp/1yiKoeJY9XsM8S+9e2otux5TItulyh0ZKx0m/3EjLpHJliwVoy3z0hZF6xGXUsABNaWHQMt5MbLW4FFhLnp/DkqLMuz0kzRNkdA29T8W5SfvQ+uf7yzPYZ46uojqXbne3jzXxylkHKaG2SrnxiHAkozxIz4TyrTmpzb7kouXHPOP9wqAJhdG1SkjHGrAIWswAxYpiGGmazpLt8QCyNMujDCsZl+BYOQ0SnBLku7xuWNiBEG9700uRQj9yHAY+AfDoqCMsZBYTFz0cOkGmKExHWSa4h9q5HsWP833BGHm9HHMRocT0LT1nWxrT9GmbJ4I3RNGWJZYLqRDyNHE1GFhmCfMb42NJ2qbY6AZuibm3+P9rNBRoY8QxzwCeuvwAgLH5gLCiYQjN5DHtF8ymjXlWYN3GqbUJzJ9Pltt01OxTj2CKTnjCZNG4X43tfwbBqxFwjGmNNCKlwp/UCD2OtmjwR6hgdHKMkrTNcyDjpNNCL2vvWp8Ak/jxvqbLazYMKKBM6cniaW+gzNOWFjMBLZ1xALKM0QIz3eHtnyKX4uQsAXSAi/DBItI7HBSaufiOd1PKKKpCaMsqRx4068J2h62UaaOXy1pbaKWAICXIIhFiFqteVP4WE6S4JEEbJgOBWXP42cQU/TnGFrI2QScTOGVXAYUDmFcWWI5SDpfi6NA4Xp5ON/orpVF8MY5+FfrLYkZ5AfeRQK1g53agDj/hsUXMI0rnlqJeb1F71EPyKuLX8QD09ucJFtvCqmdZyeQyR0CaXlvESNIw0bO2ydNQ4D9AKD1glUjUIgFALNR8nT1hxHekMhyMVvSm42wlAyNNZ1Jqp7Ci+G02P5ewovZmwp9FreUaYEzEPbozPEhWU3Y7PGZJlCheXV6FdbTMxzJ80ZKjOwOmzXrRPwHag7tPV68zXwbEvhY75HRekqBGYNEalPL8PpMjPI778dx75chXWXNMdk5HQSZrjIBUM3O9ZnnHWkM38nDAKlFZ1oe+SLdeLNbdAsadKYYcTY8zDA/WnO8MtptluMMATnTN4CG2cSE4abAVXRuBKBu4d+TXNG5FFGSi9sgP0HFvfqHJOT85to2qrzHesvzjoyEG4mU2SslkZoiO/7Mg3Tp6cce1Mz2khMOx8GeKSrDPDeu4Ht57/IuzFcGWJ5YMY/J9/gzTQ35yYoMhxRQM5l5NP1h8m3Vj2LQZ4gjF6lGmJN8/wS2Yh40zYxEg5kDQoip8RLIgSNPUDp7QfSnJxXJTKWy4qTtBExgomUa0Lu3UkMbnOHf0ydu2OTFJvnUNhbN6qtm5kY4MnVghtcqiH2apG4JgrKzoRa4g1td6nAKPhMjIJvIF9/uYeKSpdcpmuCukkXTzFMDARuPqKG5g6/BSPj02GMf7BfKTaZpq0ebH8/8dUDMrAFMqbJkvqw9lYNceNU5XHhFJUlRFA+DOn6MmgIQtFWBK132UXEEUt0TdAR3cctkrZTz2VQKXFkIDB7+DuU3m4QDPLrMti1ykPEF+u6iIqy9/feqgDurEDqymNlSYadtLvWLMxrsOaaqK2fj5tyhCwhgvJh9BR163QK+Ub8GLTenReluSaEerV7aga5U00llWsQ8PXfRj29F8AYP26vTHwE0mZeY28fccddmiFG9MUGob15Q1xYdiGM8GW2QsaYDyvFV9KNfZxbIZagkMaY1EUUDD9OkCCWYpHoCOQNbcDvZTy2TN9pq6oGzSHfeql+UVvltZF5r9zijpgBHyavC/ax4GXOEPu42LDwV3mdB+PEboQ/+M/Balx/jbOPpMpo4CReVRQCZhGYnfMn0thUs+TW6XgW1exyNlTVupCOtNit6fJGw5CYMx6wHeYMcQ2SSHN5h2O2QEzH9CoPixD3t7geN1/bexrfarIERqw4zvdTRSFgAYHZw7EJg71goYU1UkYT4aJI
+gGCrmsnWwMuNLVn7yAuvCGeuhrb+eAysKUgPI3YOEyvFtrC3iGm35TkwUfMNsrrjvfIzl2Y9A+9PDyThFOnjuPwHAZGWNI1Dizc8duk8403hpz/Vp7IjBuHpH4i+IU3xEy/HgNo5BmWXrCzj67GSPgZ6ZxjwBCbOqT+ADjXJd7wGACiunQegal9d1IGOx8zzO9s6ZzxMTgJ+nBbeMcB0y5XFnWGf/jXskTFWtA3VY9eu1PwC22IfavbwCXxB1kdH8CHsbtghJ844Fo8f5HuJyZliOP5eYiV7L6cLeh6FEbG8he8OZKE1bAbYqVarPs1drOLYA9D20wLQiIocN/gLTTTGmOiLaNhESc8OOduCzK7npRrcqeEePMe3yX3oZ6uV1wJ6D4E5oxYgw0fN9sk2CREULSziber2RrMuEamgBgRv9fEr3VDLCIliN/aRCjtL2P1lMLG0xgmXBMJUzSWViZbGd1ouFo2T8UvSRCYPawIxvht+doiF0rdbqkGSb6M8jl2yS0eiNHwiTI5ezRt34ac1g1xbflFMMTy/UEcLom7ctbLVMgNvCpLxmNKyD6VKgtnk3vf8IpzKRClCq+YxRQBLFqQV5tkj4uC/x45i1u3HTFV3J7ODd2Qmu6XEavYsmTSPnsRCkzcRNkF0/eMYXNkc3ULP4C77w0nQya4Jzpv/+n7y2XwUjySEIG7h30BQyzfBch5L5wokjRrGEdePv9QhHddKfUJYvQGclbsy2oc3BBP//AodHqO5I795IFLIoYnaUjVJxgzj1xDLLrg3LBnsTSY/Opa4iHQS5sLF8W+kZc0BTm/WhovlzPaXcduwaCovUwx4R9+oTm/4IZYbxCHGQava97a0mftrzQrZ98qoaWmcUKcToe+KZJ4yBQXJzAd12XU/LNk8lS8kggBsQ2aU4F8jfnZ9NeP28rn6y6O3S4rzkLi9t/LlAoz5z0pmdpLzXm2Zmwln0HHdhOl27xFurlasfn8Y8kYHLPN/im7d52zv/mSzCcnG8Ok5jcn5xXov04qBpzSaVvduVJ5upBZfT2/HYMhuVEijL+8eWHeAYcEH2yIC1f3xWi4n1RMGH+E5hz3s1SermXGnpMtmghle2BdsQ0+e9mSKn6uRED4IhlcFLKL1F1msoWLnl/mqOK++O1JHQ0LqTRii1tKd7AhNvSRLYmi/I4wtbS/R8kjbpp30Tq9hp1N8l86nN8lFg3iBgglqLsQSB+GA3dx5JjMwvlvED2BMNfEK8g5DC+jUYxBaapM7cB0yxGZ2kGz5oMNMdElMjvGzV9Kc07YKJWni5mtLxlTz7i2SLaIeDNnYdEgaV5osvFLen6BRXJ2j2QcOlF92SmSebqCXefRC8bDGJ8qWxhMTR4RieBb8j3QEIsTOBgNbkkU5feE9w23xAdvvfnwFe8LTWlZH/F3zq/CAyJ7xhKxOKphvCGQ8TSey4OMQFRaGIm3FV/saDWI5kWFS7DGjOlpmhY0wdmBhrim4RSp0RJiZ4/YbplkpbJ08pd4of3bDrUNgxdn5z7W1Q7eimeCIyDWaRgtl6olZwkVT9w/d0kqdrSWwA52lIpTI7MXNpfkfR+M74GGmBmnBSOK4tqzUbSN66Ya4/faoYBwUXC99unTfcsT0jdnB2aKZzMEGC1r9k3CR34kzVjbRwIjV7DYYlTfgyiJIXYIA2M7szW+BxpizuQaYk/ai611nOjXK5dO+Rd2zqy2Q08Y4zM//eTzIjt4K54JjgBr8wI2eGDmLbH4G2S7MyUKZ55V1qiiiRgJI+2v/AJ35YuVpflrW+O83xA3rn4ObI3Q8nXGPqOZgzZZbpdIDRj92S51YIwnZI2arxJ12wVwovKdNXArVHtPqnqcx70hzsqdfzEWdWwb3HCv965QmO83xHVlR2NDrcQEM7w8VMfJUFe1NP8lvAlt85HjwZnTObdIbpRLMtyYZNeRseWSIbBlKi9ZxlbZdR698EQy6DmMhj2tEkVRARvwVPXiSatCsdhviLkmbzQsemRaq8PwUAIlXB1jt9qmE5JUGzo91TW3KMe2PhTjxEOAyz1NBgFCcTsizspdeJph6K/CL5xhy43GngJPG3ZLON77DTFxuYbY0D4M13ky1GNUvAJ6PmGXrnBRtPEb/M2sUQsusqsPxTfBEPDuPxlCimacDqUZZT2l8HKQSdboogtJGGHJCX2aq4B1osKtT+ZXNL8W7PN+Q8z5r4IRRHRNLAZkZCR0gh8ruKR40m/FAsk2K22s0OJt3paT8QIWG26y0k7RJikCdw//FlPWX6Rq79fialScPbLoam7w57FpI10qDgcwYx9UlkwOGjd8ABm+7DfEjHVvWRnxd05fkq+/1CxkEcvigoZbSq6p1Di73VZR4KbAQzUPC3hFKrTNVqQTgzmjj+UqwuPCTyzihGGEHzSIPwb97QwB9XuYd3LznMOh8N5viInkGWLGkZBaleYIVJTmPYocpO82v2bHZ4yOJ69bt+FlceKsHfwVzwRBgNNnUjWJg8iJw3OLum/Rq9+CEZ4iVfcgzLBA94+K0omfBKkKeqmZIebdglJEcpFTdSTNErmNeDN68YaEi0LuFtNgoHE6x7+HNmRjv3ywanVNIYDn8Ce5KODUDheXzFELfleri0guPsJ2MRl952nTzmeln0ZD7FvZAcmj5flKGNnmD7WinNtot5ROWo8V5r85IhfnnQzDeBSuijcPG/vQsY70qTqJHwSQBUyysK7cdp89akGfzFHzXyNuPIlwT3mDzVbAw0i4hnk9I7c+OQ452M2XRkNc31Zu6Aa3b2HKvGrupOzKsnzY71/mlHRwVZxR72/4FAt5z2TnPtzbqX5VPy5HgHO5i3VE7WneD3LtSBQQHjby4aOyRhY9ZHDjMwwy5R77FlIu7dqqxXmWI8YaDbHhlwugGhG3eqtEmswMjY3CcSlbWyWSXdG4kHeZYTR8ljmq6N7u4xZlyu5C8Ys7BPzSJd6xQ64diUDAzmOLf4VZYHE9a/hS7D4FCzsX5A6QUGM0t6p0ckT5dRoNsdeQDaDyER9wiw788mNJ/iam8VxH/MXNu+Y8BbuHbqzdtfsrxFDe0e3SR49oXq0+JxECTMOBDZJLXZ0tO9PCSSkOTMAAIz9zZFGZ3qB/hVngJDznKeHayazHGtAr1w3MnxYpz8a3haFL3NosRNHkJhWJVDsXt0NSoHeyRxXdDKDuj4GYhyCG8s66hlof/Gdv4n4t8rZp87xVv1YM5FZdykLA0BHEI7kww7HRpxj5cj+/0CDjQhyYcCoMr9STNKwgAyP8RQdNu9znizyZUiNwXKslbLaWVpgh97A9aYK5ixGyMT2QOXL+MEg1LiaSwWWBfs/CvT9L372rLmvk/OV4qF7WNO/LW0smIuhflYRFgHm8xOUPiu3AKzt3STuu/Xw0+fkxWOweAp/vbzDy7bu/LyzDxaowtp28nou/WTxpezQiNBriNP8uwvnD0orBlSE2CWb7Dhl5u3bUDoA/K6Y7k/Aoi1nRedgUcp6hN9yPF8QGfF/FmPY1Hv6vPR7+NTPSvxabU0yqpsjcjAA
zDoFBc7zk5i7xLDeq5pvoGG4O1gMi9jX0qu7U9M6IgcytyYoIiV2ISr2ocvGk/7ZGY/b63qlEGnbB1ZltE56OMWWIw6MUoNj4+PjabrnFl9QZxvuYXsnbVGOy/xBkx6LuWM4xU8LD7w9MmGoJrgwDh0A1IPLDD19cAx7GBtQ3cPxlnPlxvQHk+Cyu8QYsSopFoYZ9tFKnXiGkT8IqYF5auSz/cXOq8x7m6ORSvZ1encl3w4drqrjI6raQF8/1TqZpF1SW5L3boiqir3sN8SAY4rKIGARvxLODX1dXgyEgjk/pOmrhuX7mXw6D5W7sRASGGD3zwAga7469GuEvRvUBoy2uBC43XWsiCVzc+0X9kY8Ako+bZmqwXvtululGYQhTjbB3mO3x4PmWH7ARRjK51Yx2aBo7r6Ik7wNZjBujJhpPeJU4JKY+sgRMFj6BzR5e78mYjm1MFp2VnnIRQD6T9RY4Hm2BNjypSPR1mBY2WsrPdHcPNMJpCp+wlzznVJRMlmaERZeNhjjQOY/K2XyA/FwZ4gPwMPmlCr4mzZP+ayyYqVzOJjFTZHsRwNZ57sm0kshH7mCJ02bKGxp2+z6e7ay4vWcig6LXc/bW0jyZ7oMAHPsNMaNv5QGEaY+P7+ctj3HCc6osGb+FOqaehpHxfxJeWaWgNAQQi1ZeWTLGXMZD38dICCX7lGK+0ZQynA4zRecyIqyFfIIwk2HhTtqIVOxmxpJ9FSmTg9vhyKW6NXKnPgd3krBXqh69dmePLO03GD08mbBKKsUkI8DfMM2wpkH+b5PRRlP9czrGFJ2LiPA7fCY10/PrraVTvrFLrL2LdQH2X8rtxDgF/D6XyzN5uK1ZmIdIA34VTt74AVoXYgQjPwA/eeBMcE0RL+H1PG1aSU3vL3PbwN5+Tc2osQEjngyxn2l0G07Z+YdpbCMktGlEDGk4x/RalWgQwJuYVy/Ln+5hdD7CZX6Khpdqm7gI4A29XKwvmNaQ87NN05omZOtMkXLWbCOGqRYxIRK5YJjGzq5aOsV2IywU3G+ImSbRNRHA7tSYIJiAnVaU5r+WmqYdB8P8fwmonlIpSgSQtwcfUAkAAA2ISURBVGSBaRY+DlcnO8s0vVlCrydsxjGxQw4jtMPNsowNHaLhGT2S7knpt/e8SUfE2G+IDeFGwBRHVuH8CJqxRmwKUEUCApufzauqKs3/LR6SPLyt90hgqVgkAAJ4HrYc2cnzgmlV6sp/LX+hDtt87xoS1n+KpIwDTcsZC0LGPtM0flpV6ZQJP5ZMcDSn+n5DPHcowtf4p1L11/VLpfJTzAgPyUItRYyOyfyPT+GWsAhwzh4V6wmmFeTsfNO0ZgkZToWGGy0cOQ4qQJy8+wpmmrUaYzN6ZGqDRDKuWEi43xCL3hnJFYJzZYhtuKsVi/O+hkG+hJHnTBjkj2zoQrGMAwQwM6pKb9dmnjVR+UXW6E1Qc1pjgkqQiAV8NxVsyWeLNK82AAm4Zlp6oUnW4kBDrHlkG+KjadrqmCazkYyXq9hVLctbfv3A/CF4m18rpqiuEk4JYzsC2GZ7y6Ynrgq7m22fIIVlp2ARfcC+79I+sBXhWCECCENmdlI4Oifq8Vup0Zj2YCql9q4uzb9aDGyc6DdUHwcaYpYi1xCLng1/XigBVF10CIgcqHibP4rDCnsj1OYmtUU6OjzjpTVGw29WLJ38hCV5DbrREr0ZYiR/ok4dVoQj7Xrpgv54CXQKR2drvcgRwdgcTxvWo7J08vU/LZvwna39WWB+oCGeOWgTfsim4gEt9DGOCj+M7/3lFpSNFalI6i5Cbc70ZPbGwzYW9xEn1qqSiAggXK2OUjz5lnSbXn4E1oAusdTGDDHHcza1785wpLqfnROOxq56+IDfw79Jh6Z3OAKDlsKtT+ZX2NVXpHybb+ho4vEqPli7yU0tg/0NnA7dcB2qfMGq1TW5CJSUjNHBcYn4lz16/imGwW6E7/9ijEZS5fakuMUKAcx87rScA1fn4jfokS4zI1Nb8eGaGC297xAMYXgRBcaXMC31ycqSCYHQ3KoQ9LGuOni3VmH5aWQYK+QKxqooK70H3Xb8brl8FTczCBye+3CnGqMBo2T+O9CPQI7KA2dCZpgoGlcgAP/mU5VL88fB0ISNUtgncGFZJl7EX+K+H7rvmqwPXu0kmpnzfih23a946PDaGv/3eP4OtjehGlqqQ/wv8dWMtOcNxp6vLs2DIY6fcvCIOG3YO1Rb/hNunMTkHDyLqmumApY/xg80iSPp3pjIImhU1DX30WzdqL0ACyf4J4wyPzJxNE10Tdh/jsrUrrFkhAUknM+yxQgz2kR3D/uAZobGva6uQYyG5RphZJtjnH9EGnsb2x/eTqeUd52O/Q2ttbXa4OAUlt1HBr/BGquw1DXk1Y7B2/OHsJSKwDEEcDrIkThiox9O4ujLDXYSHog+WN/uiR9uB8eEUB2FRQDGd63IyicSQoUlbk5QsHJIYL2A25ANkbG/05zhNzfvLthnnOpSivEqNpJYKxgo6Bj4b4X8GBjC6BN9ibncF+Tx/HfAsX2+WeE7w2+No3upWzHEq04iQ39XvtjsGZo7/Ar5fBVH2QiIkTM3Gnr5Ge/KuNEFE+HOmPKlY2qcghGWFydzpMA4YLsspWBO6EV9CkYmXoy0936mvTRiSy1oxHXQ2Ts9lY2CW/iJY3nSLwukSLUiEkLGqKAcbgM+wkoz07Rez3CaOUwtCpsGrHXC4Ia48QZuxA2UPG3Fz9WjnUuzhply8LcutqpRCCgEwiJQWHYtZrYPh6WLiIB9i0FVr4iaqkYHIRB80UYsBDB64CDqqC/gDW3oj1HBJ/IXDaKWTTFQCCQQAoUr+yHV5b22aaSpXNkysQ1uiEUP6akPwRjvktlZgBen7kR7xMKRKgoBhYAdCMz9vD0ZbBlmtG3tYA+fbR2xjPn28E5Orq0bYt8JvwCSR22BhdNYKlh5jS28FVOFQLIjsG07frfczgTsT9GsgVhEU0UWAq0bYtGDl/0DSTEMWZ0dyIcVkdj7ropCQCEgD4HCMkQx2Ll5Am5LjVtMNCRPvUTlFNoQ3z0c253587YoL3Z6GbSMZpT1tIW/YqoQSDYEGpP6zLVXbf4qzR7xmb19JB/30IZY4MG9f7ZvVIyNHg38ZZWLIvkePKWxZAREvLBB/0So4MGbtKR2pc2Ryk4xCyAQ3hDPHboOcaOP2IjXsWQ0LKfAEd829qJYKwQSFYGCVSdANYSE8kNsVZFRKc3NedvWPpKUeXhDLIDxtL3DlgiKfaDz/lRbq4zxPjzUB4WASQSmlQ8irr+OkbDdIaE15Em9xaRUiswiAuYMsVgh5Wy2Rd7WyDnvB2P8DhbwjrbWUFErBJIUgallx2PDxuvQ3v48v4zNpZmDv0tSpG1X25whFmJkdMZKKfveVok4PxpukJVUsOosW/tRzBUC8Y7A9FU5UAEjYZ5puyqMvqPO3f5iez9J3IF5Q+zrWQtDfL3tWAWmWPq/qLD8Rtv7Uh0oBOINAR+S9xSUTSddfw8+4SxHxNeQhP7mI2oc6StJOwmeay
IUGFPLsHedXxuKRFodY69Sesq15Bu8WRpPxUghEK8ITP/wKPLXPwXxT3ZMBcYeQIY12ZkYHRM/XjoyPyJu0iij3R8wMkZ8sQOF8/Oopn4dTVt1qQO9qS4UAu5FoKD8CtIbPoGAThrhz+CSuN29oCSOZNZHxEL3qatPJqa/Bf+UdUMeMXbsDTS9BRmfPo6YhWqoEIg3BO4o70/1/G7MQn/rqOiM1SMSI0f93pxBPTJDLGQrKJsDQyxO3XCuiO3WnC8ib/odFDjo1LmuVU8KAUcRKFzdF2Fpf0SfY50d8OzVkmm30pycexzVOYk7i9wQ+5Dwu7b8NTwkZzqOn3hbEy0m0nBCwLC1jvevOlQI2IXAjLV9yF/3R4xGL0MX8g/7NCM3YyU0O2csdtTikAxVnEAgckMspBN5hfmeMnzq44SwQftgtIK49ghlGP8k34gdQWnURYWAmxEQg5q68vMwqLkaBli4IGJjgAVGjK2CX/g0FSXh7AMTnSEWsooNGCL21/6dPWGQETlSOUboWgllIL7Sl7MlTANVrRCIHQK+9e2obs+ZxI3z8fsZCUE6x06Ypp7ZjzgPLodmDf2p6Yr66wwC0RtiIWdBGdwTwgjanXDEAiiMfQPq97A1+z0kyd6AETO+D99MPrvSelqQTZEmFwJTV3fEOZg9oPTxGCwMxu9EHOg5HL+ZFPcAwXaTh51Ms3I+co9MySOJHEMs8CosH4/tlkgOhOOQ3FoCK8H8O4j3Mwx0A/6Kf378MBr2fW/8vP8a0xqQ1aqRTrQR/ziOzlQl8RHA6ah4NnBYKv4hOzdOEMZno/F74BBVXBN/aS9N42eR/WxvG9YRhrc72rdzN1isAbqNpNnDX3K3nIkrnVyjObX8d8SMx/DgiYdRFYWAQsD1CMClRywXWdVedL2oCSygXEMsgCos+y1GkM9hZJyWwLgp1RQC8Y8AI6Qt0C5BmNqr8a9MfGsg3xALPKatOpsM4wVM4dvENzxKeoVAgiLA2B7S2MXwCYuNUqrEGAF7DLFQqnDVSTDGL2NkjIUKVRQCCgHXICBOZ+fab1SSd9fcESw12FmmrR6MLFGvwRg7kyXKTl0Ub4VAIiDA2A786s/DwtwHiaBOouhgb66IWUM/JOb9NW78mkQBTOmhEIhjBDaQpp2kjLD77qC9hljoO2fIV9QTxpi0v2F1VoV9ue8ZUBIlAwKMPUwZ3qE0a9inyaBuvOlor2uiJRrTys5FrPEihLd1aVmlvisEFAI2ICBcEcQmITICuVlUcSsCzhpigcK0dV1I37MIfuNz3QqKkkshkCAIlFMKu5TuHu5M/vAEAS0WajhviIWWHLvvClfdjL8z8UXFG8fizqs+ExeBQLpY+hv18sygvKFiV6gqLkcgNoa4CZSCtT2I18MYi5R/Lt4a3SSv+qsQcDsCjF4hzTNV+YLdfqMOlC+2hrhJloKVSIKCNzin05suqb8KAYWAFQTYavLw22nWiOVWWiladyDgDkPchEVh2YUwxnPhsujXdEn9VQgoBEIgILIMatp0mjl0sUrkHgInl1e5yxALsJZwD61ddQ2McQH+9XI5fko8hUBsEGDsB3jz7qGMtkXk6y9OrFEljhFwnyFuAjOwoFd2NkbIedgQcjH+qoxuTdiov0mKANJVMnoJI+CHKHXoayq3duI8Bu41xM0x9pV3pRp+DS5NwKJez+ZV6rNCIAkQ+BIJeh4m1mYRzRq4NQn0TToV48MQN90WH9eovhyZ3fh4jJAvwOX2TVXqr0IgsRBgVdiI+hIx7XEc5PlWYummtGmJQHwZ4ubS3/dlGm3adhZ2DeEfH4GqE/BXxSQ3x0h9jh8EAhnRcHAno/eJe16lIUM+oDFMjx8FlKTRIBC/hril1sWrU+hbYxCOssnB6jH+Uj881H2w4JfdklR9VwjEDgHkW2F8MwYQX+P5/BjnKZbjrLiPaNDQDcrwxu6uxLrnxDHErSHpW9mB6lhXrDB3hJ/tEKTlbBM4e4x5vMR1nDem4S/OIQv8xVljTCwK4nhzzvAZf8U5ZMbes8lEXeAzIjtUSQ4EOEaljOOcQubHs4DFsr2fxdmG2t7zDpvOPhT1zc83DJyHiHYe2oPc3L+Q1/sLdeqyWR1VnxyPjtJSIaAQUAgoBBQCCgGFgEJAIaAQMIvA/wNne9RHd0pXfgAAAABJRU5ErkJggg==", + "internal-vld": [ + { + "id": "ba195068-626b-11e5-998d-6cb3113b406f", + "name": "fabric", + "short-name": "fabric", + "description": "Virtual link for internal fabric", + "type": "ELAN", + "internal-connection-point-ref": [ + "ba197a98-626b-11e5-998d-6cb3113b406f", + "ba198696-626b-11e5-998d-6cb3113b406f" + ] + } + ], + "connection-point": [ + { + "name": "pong-vnfd/cp0", + "type": "VPORT" + }, + { + "name": "pong-vnfd/cp1", + "type": "VPORT" + } + ], + "vdu": [ + { + "id": "1-626b-11e5-998d-6cb3113b406f", + "name": "iovdu", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] 
+ }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "ba197a98-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + }, + { + "id": "ba198696-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + } + ], + "internal-interface": [ + { + "name": "eth0", + "vdu-internal-connection-point-ref": "ba197a98-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vdu-internal-connection-point-ref": "ba198696-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + } + ], + "external-interface": [ + { + "name": "eth0", + "vnfd-connection-point-ref": "pong-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "pong-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + }, + { + "id": "2-626b-11e5-998d-6cb3113b406f", + "name": "iovdu", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] + }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "21-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + }, + { + "id": "22-626b-11e5-998d-6cb3113b406f", + "type": "VPORT" + } + ], + "internal-interface": [ + { + "name": "eth0", + "vdu-internal-connection-point-ref": "21-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vdu-internal-connection-point-ref": "22-626b-11e5-998d-6cb3113b406f", + "virtual-interface": { + "type": "VIRTIO" + } + } + ], + "external-interface": [ + { + "name": "eth0", + "vnfd-connection-point-ref": "pong-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "pong-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + } + ] + }, + { + "id": "c55e8e6d-ed13-465f-809d-838878c70d1d", + "name": "vnfd-2", + "short-name": "SF Aware", + "vendor": "", + "logo": "", + "description": "A simple VNF descriptor w/ one VDU", + "version": "0.1.0", + "service-function-chain": "SF", + "service-function-type": "", + "meta": "{\"containerPositionMap\":{}}", + "mgmt-interface": { + "port": "", + "dashboard-params": { + "path": "", + "https": "", + "port": "" + } + }, + "connection-point": [ + { + "name": "ping-vnfd/cp0", + "type": "VPORT" + }, + { + "name": "ping-vnfd/cp1", + "type": "VPORT" + } + ], + "vdu": [ + { + "id": "7ea4bb66-82ff-453c-a0db-96099a797d25", + "name": "vdu-2", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 
0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] + }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "009a6284-523b-4630-a050-dcf4937c0658", + "type": "VPORT" + }, + { + "id": "99d5fd31-de1f-457d-ac39-80dfb4c33b2f", + "type": "VPORT" + } + ], + "external-interface": [ + { + "name": "eth0", + "vnfd-connection-point-ref": "ping-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "ping-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + } + ] + }, + { + "id": "776c0ec7-ce00-465d-a837-667975353183", + "name": "vnfd-3", + "short-name": "Classifier", + "vendor": "", + "logo": "", + "description": "A simple VNF descriptor w/ one VDU", + "version": "0.1.0", + "service-function-chain": "CLASSIFIER", + "service-function-type": "", + "meta": "{\"containerPositionMap\":{}}", + "mgmt-interface": { + "port": "", + "dashboard-params": { + "path": "", + "https": "", + "port": "" + } + }, + "connection-point": [ + { + "name": "ping-vnfd/cp0", + "type": "VPORT" + }, + { + "name": "ping-vnfd/cp1", + "type": "VPORT" + } + ], + "vdu": [ + { + "id": "7f9d96e7-37b5-46d2-91b1-8d670db72d0b", + "name": "vdu-3", + "count": 2, + "vm-flavor": { + "vcpu-count": 4, + "memory-mb": 16384, + "storage-gb": 16 + }, + "guest-epa": { + "trusted-execution": true, + "mempage-size": "PREFER_LARGE", + "cpu-pinning-policy": "DEDICATED", + "cpu-thread-pinning-policy": "AVOID", + "numa-node-policy": { + "node-cnt": 2, + "mem-policy": "PREFERRED", + "node": [ + { + "id": 0, + "vcpu": [ + "0", + "1" + ], + "memory-mb": 8192 + }, + { + "id": 1, + "vcpu": [ + "2", + "3" + ], + "memory-mb": 8192 + } + ] + } + }, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "host-epa": { + "cpu-model": "PREFER_SANDYBRIDGE", + "cpu-arch": "PREFER_X86_64", + "cpu-vendor": "PREFER_INTEL", + "cpu-socket-count": "PREFER_TWO", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ] + }, + "image": "rw_openstack.qcow2", + "internal-connection-point": [ + { + "id": "38ef6b13-f4cc-4153-a662-dc7b804ef202", + "type": "VPORT" + }, + { + "id": "5c07545e-94d0-4551-a704-a8e47ab28fa5", + "type": "VPORT" + } + ], + "external-interface": [ + { + "name": "eth0", + "vnfd-connection-point-ref": "ping-vnfd/cp0", + "virtual-interface": { + "type": "VIRTIO" + } + }, + { + "name": "eth1", + "vnfd-connection-point-ref": "ping-vnfd/cp1", + "virtual-interface": { + "type": "VIRTIO" + } + } + ] + } + ] + } + ] + }, + { + "id": "GUID-3", + "name": "RIFT.ware™ PNF Descriptors Catalog", + "short-name": "rift.ware-pnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "pnfd", + "descriptors": [] + } +] \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/ping-vrouter-pong-catalog.json 
b/modules/ui/composer/webapp/src/assets/ping-vrouter-pong-catalog.json new file mode 100644 index 0000000..136a571 --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/ping-vrouter-pong-catalog.json @@ -0,0 +1,504 @@ +[ + { + "id": "GUID-1", + "name": "RIFT.ware™ NS Descriptors Catalog", + "short-name": "rift.ware-nsd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "nsd", + "descriptors": [ + { + "description": "Toy NS Ping-Vrouter-Pong", + "constituent-vnfd": [ + { + "member-vnf-index": 1, + "vnfd-id-ref": "7cae6ff6-9263-11e5-b7df-001b21b98a89" + }, + { + "member-vnf-index": 2, + "vnfd-id-ref": "a4dea522-9263-11e5-ad06-001b21b98a89" + }, + { + "member-vnf-index": 3, + "vnfd-id-ref": "7cb0375a-9263-11e5-b7df-001b21b98a89" + } + ], + "version": 1, + "vld": [ + { + "description": "Link from ping to vrouter", + "type": "ELAN", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 1, + "vnfd-connection-point-ref": "ping_vnfd/cp0", + "vnfd-id-ref": "7cae6ff6-9263-11e5-b7df-001b21b98a89" + }, + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "vrouter_vnfd/cp0", + "vnfd-id-ref": "a4dea522-9263-11e5-ad06-001b21b98a89" + } + ], + "version": 1, + "vendor": "RIFT.io", + "name": "Link1", + "short-name": "Link1", + "id": "23afa0c8-92d1-11e5-9a9f-001b21b98a89" + }, + { + "description": "Link from vrouter to pong", + "type": "ELAN", + "vnfd-connection-point-ref": [ + { + "member-vnf-index-ref": 2, + "vnfd-connection-point-ref": "vrouter_vnfd/cp1", + "vnfd-id-ref": "a4dea522-9263-11e5-ad06-001b21b98a89" + }, + { + "member-vnf-index-ref": 3, + "vnfd-connection-point-ref": "pong_vnfd/cp0", + "vnfd-id-ref": "7cb0375a-9263-11e5-b7df-001b21b98a89" + } + ], + "version": 1, + "vendor": "RIFT.io", + "name": "Link2", + "short-name": "Link2", + "id": "23afe542-92d1-11e5-9a9f-001b21b98a89" + } + ], + "vendor": "RIFT.io", + "name": "ping_vrouter_pong_nsd", + "short-name": "ping_vrouter_pong_nsd", + "id": "23a28e4c-92d1-11e5-9a9f-001b21b98a89" + } + ] + }, + { + "id": "GUID-2", + "name": "RIFT.ware™ VNF Descriptors Catalog", + "short-name": "rift.ware-vnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "vnfd", + "descriptors": [ + { + "vdu": [ + { + "image": "Fedora-x86_64-20-20131211.1-sda-ping.qcow2", + "host-epa": { + "cpu-socket-count": "PREFER_TWO", + "cpu-vendor": "PREFER_INTEL", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ], + "cpu-arch": "PREFER_X86_64", + "cpu-model": "PREFER_SANDYBRIDGE" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "external-interface": [ + { + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth0", + "vnfd-connection-point-ref": "ping_vnfd/cp0" + } + ], + "count": 2, + "internal-interface": [ + { + "vdu-internal-connection-point-ref": "7cafe53e-9263-11e5-b7df-001b21b98a89", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth0" + } + ], + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, ping.service ]\n - [ systemctl, start, --no-block, ping.service ]\n - [ 
ifup, eth1 ]\n - [ ifup, eth2 ]\n", + "internal-connection-point": [ + { + "type": "VPORT", + "id": "7cafe53e-9263-11e5-b7df-001b21b98a89" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "vcpu-count": 2, + "storage-gb": 40 + }, + "guest-epa": { + "mempage-size": "PREFER_LARGE", + "cpu-thread-pinning-policy": "AVOID", + "trusted-execution": "true", + "numa-node-policy": { + "node": [ + { + "vcpu": [ + 0, + 1 + ], + "memory-mb": 8192, + "id": 0 + }, + { + "vcpu": [ + 2, + 3 + ], + "memory-mb": 8192, + "id": 1 + } + ], + "node-cnt": 2, + "mem-policy": "PREFERRED" + }, + "cpu-pinning-policy": "DEDICATED" + }, + "name": "iovdu", + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "id": "7caf476e-9263-11e5-b7df-001b21b98a89" + } + ], + "description": "This is an example RIFT.ware VNF", + "connection-point": [ + { + "type": "VPORT", + "name": "ping_vnfd/cp0" + } + ], + "monitoring-param": [ + { + "units": "packets", + "description": "no of ping requests", + "current-value": 0, + "min-value": 0, + "path": "api/v1/ping/stats", + "id": 1, + "widget-type": "COUNTER", + "name": "ping-request-tx-count", + "max-value": 4292967295, + "group-tag": "Group1" + }, + { + "units": "packets", + "description": "no of ping responses", + "current-value": 0, + "min-value": 0, + "path": "api/v1/ping/stats", + "id": 2, + "widget-type": "COUNTER", + "name": "ping-response-rx-count", + "max-value": 4292967295, + "group-tag": "Group1" + } + ], + "mgmt-interface": { + "vdu-id": "7caf476e-9263-11e5-b7df-001b21b98a89", + "port": 18888 + }, + "version": 1, + "internal-vld": [ + { + "description": "Virtual link for internal fabric", + "type": "ELAN", + "internal-connection-point-ref": [ + "7cafe53e-9263-11e5-b7df-001b21b98a89" + ], + "name": "fabric", + "short-name": "fabric", + "id": "7caead0e-9263-11e5-b7df-001b21b98a89" + } + ], + "vendor": "RIFT.io", + "name": "ping_vnfd", + "short-name": "ping_vnfd", + "id": "7cae6ff6-9263-11e5-b7df-001b21b98a89" + }, + { + "vdu": [ + { + "image": "Fedora-x86_64-20-20131211.1-sda-pong.qcow2", + "host-epa": { + "cpu-socket-count": "PREFER_TWO", + "cpu-vendor": "PREFER_INTEL", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ], + "cpu-arch": "PREFER_X86_64", + "cpu-model": "PREFER_SANDYBRIDGE" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "external-interface": [ + { + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth0", + "vnfd-connection-point-ref": "pong_vnfd/cp0" + } + ], + "count": 2, + "internal-interface": [ + { + "vdu-internal-connection-point-ref": "7cb0ab18-9263-11e5-b7df-001b21b98a89", + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth0" + } + ], + "cloud-init": "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, pong.service ]\n - [ systemctl, start, --no-block, pong.service ]\n - [ ifup, eth1 ]\n - [ ifup, eth2 ]\n", + "internal-connection-point": [ + { + "type": "VPORT", + "id": "7cb0ab18-9263-11e5-b7df-001b21b98a89" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "vcpu-count": 2, + "storage-gb": 40 + }, + "guest-epa": { + "mempage-size": "PREFER_LARGE", + "cpu-thread-pinning-policy": "AVOID", + "trusted-execution": "true", + "numa-node-policy": { + "node": [ + { + "vcpu": [ + 0, + 1 + ], + "memory-mb": 8192, + "id": 0 + }, + { + "vcpu": [ + 2, + 3 + ], + "memory-mb": 8192, + "id": 1 + } + ], + "node-cnt": 2, + "mem-policy": "PREFERRED" + }, + "cpu-pinning-policy": "DEDICATED" + }, + "name": "iovdu", + 
"vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "id": "7cb0832c-9263-11e5-b7df-001b21b98a89" + } + ], + "description": "This is an example RIFT.ware VNF", + "connection-point": [ + { + "type": "VPORT", + "name": "pong_vnfd/cp0" + } + ], + "monitoring-param": [ + { + "units": "packets", + "description": "no of ping requests", + "current-value": 0, + "min-value": 0, + "path": "api/v1/pong/stats", + "id": 1, + "widget-type": "COUNTER", + "name": "ping-request-rx-count", + "max-value": 4292967295, + "group-tag": "Group1" + }, + { + "units": "packets", + "description": "no of ping responses", + "current-value": 0, + "min-value": 0, + "path": "api/v1/pong/stats", + "id": 2, + "widget-type": "COUNTER", + "name": "ping-response-tx-count", + "max-value": 4292967295, + "group-tag": "Group1" + } + ], + "mgmt-interface": { + "vdu-id": "7cb0832c-9263-11e5-b7df-001b21b98a89", + "port": 18889 + }, + "version": 1, + "internal-vld": [ + { + "description": "Virtual link for internal fabric", + "type": "ELAN", + "internal-connection-point-ref": [ + "7cb0ab18-9263-11e5-b7df-001b21b98a89" + ], + "name": "fabric", + "short-name": "fabric", + "id": "7cb048bc-9263-11e5-b7df-001b21b98a89" + } + ], + "vendor": "RIFT.io", + "name": "pong_vnfd", + "short-name": "pong_vnfd", + "id": "7cb0375a-9263-11e5-b7df-001b21b98a89" + }, + { + "vdu": [ + { + "image": "vyatta_snp.qcow2", + "host-epa": { + "cpu-socket-count": "PREFER_TWO", + "cpu-vendor": "PREFER_INTEL", + "cpu-feature": [ + "PREFER_AES", + "PREFER_CAT" + ], + "cpu-arch": "PREFER_X86_64", + "cpu-model": "PREFER_SANDYBRIDGE" + }, + "hypervisor-epa": { + "type": "PREFER_KVM" + }, + "cloud-init": "#cloud-config\n", + "external-interface": [ + { + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth0", + "vnfd-connection-point-ref": "vrouter_vnfd/cp0" + }, + { + "virtual-interface": { + "type": "VIRTIO" + }, + "name": "eth1", + "vnfd-connection-point-ref": "vrouter_vnfd/cp1" + } + ], + "vm-flavor": { + "memory-mb": 4096, + "vcpu-count": 2, + "storage-gb": 40 + }, + "guest-epa": { + "mempage-size": "PREFER_LARGE", + "cpu-thread-pinning-policy": "AVOID", + "trusted-execution": "true", + "numa-node-policy": { + "node": [ + { + "vcpu": [ + 0, + 1 + ], + "memory-mb": 8192, + "id": 0 + }, + { + "vcpu": [ + 2, + 3 + ], + "memory-mb": 8192, + "id": 1 + } + ], + "node-cnt": 2, + "mem-policy": "PREFERRED" + }, + "cpu-pinning-policy": "DEDICATED" + }, + "name": "iovdu", + "count": 2, + "vswitch-epa": { + "ovs-acceleration": "DISABLED", + "ovs-offload": "DISABLED" + }, + "id": "a4df6142-9263-11e5-ad06-001b21b98a89" + } + ], + "description": "This is a Brocade vRouter VNF", + "connection-point": [ + { + "type": "VPORT", + "name": "vrouter_vnfd/cp0" + }, + { + "type": "VPORT", + "name": "vrouter_vnfd/cp1" + } + ], + "monitoring-param": [ + { + "units": "packets", + "description": "no of tx pkts", + "current-value": 0, + "min-value": 0, + "path": "api/v1/ping/stats", + "id": 1, + "widget-type": "COUNTER", + "name": "tx-count", + "max-value": 4292967295, + "group-tag": "Group1" + }, + { + "units": "packets", + "description": "no of rx packets", + "current-value": 0, + "min-value": 0, + "path": "api/v1/ping/stats", + "id": 2, + "widget-type": "COUNTER", + "name": "rx-count", + "max-value": 4292967295, + "group-tag": "Group1" + } + ], + "mgmt-interface": { + "vdu-id": "a4df6142-9263-11e5-ad06-001b21b98a89", + "port": 18888 + }, + "version": 1, + "vendor": "Brocade", + "name": "vrouter_vnfd", + "short-name": "vrouter_vnfd", + "id": 
"a4dea522-9263-11e5-ad06-001b21b98a89" + } + ] + }, + { + "id": "GUID-3", + "name": "RIFT.ware™ PNF Descriptors Catalog", + "short-name": "rift.ware-pnfd-cat", + "description": "RIFT.ware™, an open source NFV development and deployment software platform that makes it simple to create, deploy and manage hyper-scale Virtual network functions and applications.", + "vendor": "RIFT.io", + "version": "", + "created-on": "", + "type": "pnfd", + "descriptors": [] + } +] \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/assets/rift.ware-color-theme.json b/modules/ui/composer/webapp/src/assets/rift.ware-color-theme.json new file mode 100644 index 0000000..deadd18 --- /dev/null +++ b/modules/ui/composer/webapp/src/assets/rift.ware-color-theme.json @@ -0,0 +1,37 @@ +{ + "nsd": { + "primary": "#6c71c4", + "secondary": "#6c71c4" + }, + "vld": { + "primary": "#073642", + "secondary": "#073642" + }, + "internal-vld": { + "primary": "#073642", + "secondary": "#073642" + }, + "vdu": { + "primary": "#859900", + "secondary": "#859900" + }, + "vnfd": { + "primary": "#268bd2", + "secondary": "#268bd2" + }, + "constituent-vnfd": { + "primary": "#268bd2", + "secondary": "#268bd2" + }, + "vnffgd": { + "primary": "#2aa198", + "secondary": "#2aa198" + }, + "common": { + "primary": "black", + "secondary": "white", + "background": "white", + "foreground": "black" + } + +} \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/components/Button.js b/modules/ui/composer/webapp/src/components/Button.js new file mode 100644 index 0000000..4a34199 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/Button.js @@ -0,0 +1,72 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 12/1/15. + */ +'use strict'; + +import guid from '../libraries/guid' +import React from 'react' +import ClassNames from 'classnames' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import SelectionManager from '../libraries/SelectionManager' + +import '../styles/Button.scss' + +const Button = React.createClass({ + mixins: [PureRenderMixin], + getInitialState: function () { + return {}; + }, + getDefaultProps: function () { + return { + className: '', + label: null, + title: null, + src: null, + onClick: () => {} + }; + }, + componentWillMount: function () { + }, + componentDidMount: function () { + }, + componentDidUpdate: function () { + }, + componentWillUnmount: function () { + }, + render() { + const src = this.props.src; + const label = this.props.label; + const title = this.props.title; + const draggable = this.props.draggable; + const className = ClassNames(this.props.className, 'Button'); + return ( +
+ + {label} +
+ ); + } +}); + +export default Button; diff --git a/modules/ui/composer/webapp/src/components/CanvasPanel.js b/modules/ui/composer/webapp/src/components/CanvasPanel.js new file mode 100644 index 0000000..7807613 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CanvasPanel.js @@ -0,0 +1,198 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import _ from 'lodash' +import cc from 'change-case' +import React from 'react' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import utils from '../libraries/utils' +import messages from './messages' +import DescriptorModelFactory from '../libraries/model/DescriptorModelFactory' +import CatalogItemCanvasEditor from './CatalogItemCanvasEditor' +import CatalogItemsActions from '../actions/CatalogItemsActions' +import CanvasEditorActions from '../actions/CanvasEditorActions' +import ComposerAppActions from '../actions/ComposerAppActions' +import CanvasZoom from './CanvasZoom' +import CanvasPanelTray from './CanvasPanelTray' +import EditForwardingGraphPaths from './EditorForwardingGraph/EditForwardingGraphPaths' +import SelectionManager from '../libraries/SelectionManager' +import DescriptorModelIconFactory from '../libraries/model/IconFactory' + +import '../styles/CanvasPanel.scss' + +const CanvasPanel = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return {}; + }, + getDefaultProps() { + return { + title: '', + layout: { + left: 300, + right: 300 + }, + showMore: false, + containers: [] + }; + }, + componentWillMount() { + }, + componentDidMount() { + }, + componentDidUpdate() { + SelectionManager.refreshOutline(); + }, + componentWillUnmount() { + }, + render() { + var style = { + left: this.props.layout.left + }; + const hasItem = this.props.containers.length !== 0; + const isEditingNSD = DescriptorModelFactory.isNetworkService(this.props.containers[0]); + const hasNoCatalogs = this.props.hasNoCatalogs; + const bodyComponent = hasItem ? : messages.canvasWelcome(); + return ( +
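+// A sketch of the panel markup this render() plausibly returns, assembled
+// from the locals above (style, hasItem, isEditingNSD, hasNoCatalogs,
+// bodyComponent) and the imported components; the nesting and prop wiring
+// are assumptions, not the verbatim original:
+//   <div className="CanvasPanel" style={style}
+//        onDragOver={this.onDragOver} onDrop={this.onDrop}>
+//     <div className="CanvasPanelHeader"
+//          onDoubleClick={this.onDblClickOpenFullScreen}>
+//       <h1>{this.props.title}</h1>
+//     </div>
+//     <div className="CanvasPanelBody">
+//       {hasNoCatalogs ? null : bodyComponent}
+//     </div>
+//     <CanvasZoom zoom={this.props.zoom} />
+//     <CanvasPanelTray layout={this.props.layout} show={isEditingNSD}>
+//       <EditForwardingGraphPaths containers={this.props.containers} />
+//     </CanvasPanelTray>
+//   </div>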
+
+

+ {hasItem ? : null} + {this.props.title} +

+
+
+ {hasNoCatalogs ? null : bodyComponent} +
+ + + + +
+ ); + }, + onDragOver(event) { + const isDraggingFiles = _.contains(event.dataTransfer.types, 'Files'); + if (!isDraggingFiles) { + event.preventDefault(); + event.dataTransfer.dropEffect = 'copy'; + } + }, + onDrop(event) { + // given a drop event determine which action to take in the canvas: + // open item or add item to an existing, already opened nsd + // note: nsd is the only editable container + const data = utils.parseJSONIgnoreErrors(event.dataTransfer.getData('text')); + if (data.type === 'catalog-item') { + this.handleDropCatalogItem(event, data); + } else if (data.type === 'action') { + this.handleDropCanvasAction(event, data); + } + }, + handleDropCanvasAction(event, data) { + const action = cc.camel('on-' + data.action); + if (typeof this[action] === 'function') { + if (this[action]({clientX: event.clientX, clientY: event.clientY})) { + event.preventDefault(); + } + } else { + console.warn(`no action defined for drop event ${data.action}. Did you forget to add CanvasPanel.${action}() event handler?`); + } + }, + handleDropCatalogItem(event, data) { + let openItem = null; + const currentItem = this.props.containers[0]; + if (data.item.uiState.type === 'nsd') { + // if item is an nsd then open the descriptor in the canvas + openItem = data.item; + // if item is a vnfd or pnfd then check if the current item is an nsd + } else if (DescriptorModelFactory.isNetworkService(currentItem)) { + // so add the item to the nsd and re-render the canvas + switch (data.item.uiState.type) { + case 'vnfd': + this.onAddVnfd(data.item, {clientX: event.clientX, clientY: event.clientY}); + break; + case 'pnfd': + this.onAddPnfd(data.item, {clientX: event.clientX, clientY: event.clientY}); + break; + default: + console.warn(`Unknown catalog-item type. Expect type "nsd", "vnfd" or "pnfd" but got ${data.item.uiState.type}.`); + } + } else { + // otherwise the default action is to open the item + openItem = data.item; + } + if (openItem) { + event.preventDefault(); + CatalogItemsActions.editCatalogItem(openItem); + } + }, + onAddVdu(dropCoordinates) { + const currentItem = this.props.containers[0]; + if (DescriptorModelFactory.isVirtualNetworkFunction(currentItem)) { + const vdu = currentItem.createVdu(); + vdu.uiState.dropCoordinates = dropCoordinates; + CatalogItemsActions.catalogItemDescriptorChanged(currentItem); + } + }, + onAddVld(dropCoordinates) { + const currentItem = this.props.containers[0]; + if (DescriptorModelFactory.isNetworkService(currentItem) || DescriptorModelFactory.isVirtualNetworkFunction(currentItem)) { + const vld = currentItem.createVld(); + vld.uiState.dropCoordinates = dropCoordinates; + CatalogItemsActions.catalogItemDescriptorChanged(currentItem); + } + }, + onAddVnffgd(dropCoordinates) { + const currentItem = this.props.containers[0]; + if (DescriptorModelFactory.isNetworkService(currentItem)) { + const vld = currentItem.createVnffgd(); + vld.uiState.dropCoordinates = dropCoordinates; + CatalogItemsActions.catalogItemDescriptorChanged(currentItem); + } + }, + onAddVnfd(model, dropCoordinates) { + const currentItem = this.props.containers[0]; + if (DescriptorModelFactory.isNetworkService(currentItem) || DescriptorModelFactory.isVirtualNetworkFunction(currentItem)) { + const vnfd = DescriptorModelFactory.newVirtualNetworkFunction(model); + const cvnfd = currentItem.createConstituentVnfdForVnfd(vnfd); + cvnfd.uiState.dropCoordinates = dropCoordinates; + CatalogItemsActions.catalogItemDescriptorChanged(currentItem); + } + }, + onAddPnfd(model, dropCoordinates) { + const 
currentItem = this.props.containers[0]; + if (DescriptorModelFactory.isNetworkService(currentItem)) { + const pnfd = DescriptorModelFactory.newPhysicalNetworkFunction(model); + pnfd.uiState.dropCoordinates = dropCoordinates; + currentItem.createPnfd(pnfd); + CatalogItemsActions.catalogItemDescriptorChanged(currentItem); + } + }, + onDblClickOpenFullScreen(event) { + event.stopPropagation(); + ComposerAppActions.enterFullScreenMode(); + } +}); + +export default CanvasPanel; diff --git a/modules/ui/composer/webapp/src/components/CanvasPanelTray.js b/modules/ui/composer/webapp/src/components/CanvasPanelTray.js new file mode 100644 index 0000000..de86eff --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CanvasPanelTray.js @@ -0,0 +1,62 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Created by onvelocity on 2/4/16. + */ +'use strict'; +import React from 'react' +import ClassNames from 'classnames' +import ResizableManager from '../libraries/ResizableManager' +import CanvasPanelTrayActions from '../actions/CanvasPanelTrayActions' +import '../styles/CanvasPanelTray.scss' +const uiTransient = { + isResizing: false +}; +export default function (props) { + const style = { + height: Math.max(0, props.layout.bottom), + right: props.layout.right, + display: props.show ? false : 'none' + }; + const classNames = ClassNames('CanvasPanelTray', {'-with-transitions': !document.body.classList.contains('resizing')}); + function onClickToggleOpenClose(event) { + if (event.defaultPrevented) return; + event.preventDefault(); + // don't toggle if the user was resizing + if (!uiTransient.isResizing) { + CanvasPanelTrayActions.toggleOpenClose(); + } + event.target.removeEventListener('mousemove', onMouseMove, true); + } + function onMouseDown(event) { + uiTransient.isResizing = false; + event.target.addEventListener('mousemove', onMouseMove, true); + } + function onMouseMove() { + uiTransient.isResizing = ResizableManager.isResizing(); + } + // note 25px is the height of the h1 + const isOpen = style.height > 25; + return ( +
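+// A sketch of what this stateless component plausibly returns, based on the
+// locals above (style, classNames, isOpen) and the handlers defined here,
+// plus the "Forwarding Graphs" heading that survives below; the exact
+// structure is an assumption:
+//   <div className={classNames} style={style}
+//        onMouseDown={onMouseDown} onClick={onClickToggleOpenClose}>
+//     <h1>Forwarding Graphs</h1>
+//     <div className="tray-body">{props.children}</div>
+//   </div>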
+

Forwarding Graphs

+
+ {props.children} +
+
+ ); +} \ No newline at end of file diff --git a/modules/ui/composer/webapp/src/components/CanvasZoom.js b/modules/ui/composer/webapp/src/components/CanvasZoom.js new file mode 100644 index 0000000..6bee5fe --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CanvasZoom.js @@ -0,0 +1,77 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import React from 'react' +import Range from './Range' +import numeral from 'numeral' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import CanvasEditorActions from '../actions/CanvasEditorActions' +import SelectionManager from '../libraries/SelectionManager' + +import '../styles/CanvasZoom.scss' + +const CanvasZoom = React.createClass({ + mixins: [PureRenderMixin], + getInitialState: function () { + return {}; + }, + getDefaultProps: function () { + return { + min: 25, + max: 200, + zoom: 100, + defaultZoom: 100 + }; + }, + componentWillMount: function () { + }, + componentDidMount: function () { + }, + componentDidUpdate: function () { + }, + componentWillUnmount: function () { + }, + render() { + const zoom = this.props.zoom || this.props.defaultZoom + const displayValue = numeral(zoom).format('0.0') + '%'; + return ( +
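+// A sketch of the markup this render() plausibly returns, pairing the
+// imported Range slider with the handlers below; the prop names passed to
+// Range are assumptions:
+//   <div className="CanvasZoom" onDoubleClick={this.onDblClick}
+//        onContextMenu={event => event.preventDefault()}>
+//     <Range min={this.props.min} max={this.props.max} value={zoom}
+//            onChange={this.onChange} />
+//     <span>{displayValue}</span>
+//   </div>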
event.preventDefault()}> + + {displayValue} +
+ ); + }, + onChange(event) { + const zoom = event.target.value; + CanvasEditorActions.setCanvasZoom(zoom); + }, + onDblClick() { + const zoom = this.props.defaultZoom; + CanvasEditorActions.setCanvasZoom(zoom); + } +}); + +export default CanvasZoom; diff --git a/modules/ui/composer/webapp/src/components/CatalogFilter.js b/modules/ui/composer/webapp/src/components/CatalogFilter.js new file mode 100644 index 0000000..759c17a --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogFilter.js @@ -0,0 +1,67 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +/** + * Created by onvelocity on 12/2/15. + */ +'use strict'; + +import React from 'react' +import ClassNames from 'classnames' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import CatalogFilterActions from '../actions/CatalogFilterActions' + +import '../styles/CatalogFilter.scss' + +const CatalogFilter = React.createClass({ + mixins: [PureRenderMixin], + getInitialState: function () { + return {}; + }, + getDefaultProps: function () { + return {filterByType: 'nsd'}; + }, + componentWillMount: function () { + }, + componentDidMount: function () { + }, + componentDidUpdate: function () { + }, + componentWillUnmount: function () { + }, + render() { + const clickFilterByType = function (event) { + CatalogFilterActions.filterByType(event.target.value); + }; + return ( +
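+// A sketch of the filter controls this render() plausibly returns; three
+// buttons (nsd/vnfd/pnfd) wired to clickFilterByType are inferred from the
+// handler above and the filterByType default prop, not verbatim:
+//   <div className="CatalogFilter">
+//     <button value="nsd" onClick={clickFilterByType}>NSD</button>
+//     <button value="vnfd" onClick={clickFilterByType}>VNFD</button>
+//     <button value="pnfd" onClick={clickFilterByType}>PNFD</button>
+//   </div>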
+ + +
+ ); + } +}); + +export default CatalogFilter; diff --git a/modules/ui/composer/webapp/src/components/CatalogItemCanvasEditor.js b/modules/ui/composer/webapp/src/components/CatalogItemCanvasEditor.js new file mode 100644 index 0000000..f4fd730 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogItemCanvasEditor.js @@ -0,0 +1,86 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import React from 'react' +import ReactDOM from 'react-dom' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import DescriptorGraph from '../libraries/graph/DescriptorGraph' +import ComposerAppStore from '../stores/ComposerAppStore' + +import '../styles/CatalogItemCanvasEditor.scss' +import '../styles/DescriptorGraph.scss' + +const CatalogItemCanvasEditor = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return { + graph: null + }; + }, + getDefaultProps() { + return { + zoom: 100, + containers: [], + isShowingMoreInfo: false + }; + }, + componentWillMount() { + }, + componentDidMount() { + const element = ReactDOM.findDOMNode(this.refs.descriptorGraph); + const options = { + zoom: this.props.zoom + }; + const graph = new DescriptorGraph(element, options); + graph.containers = this.props.containers; + this.setState({graph: graph}); + }, + componentDidUpdate() { + this.state.graph.containers = this.props.containers; + const isNSD = this.props.containers[0] && this.props.containers[0].uiState.type === 'nsd'; + if (isNSD) { + this.state.graph.showMoreInfo = this.props.isShowingMoreInfo; + } else { + this.state.graph.showMoreInfo = true; + } + this.state.graph.update(); + }, + componentWillUnmount() { + this.state.graph.destroy(); + }, + render() { + const graph = this.state.graph; + if (graph) { + graph.zoom(this.props.zoom); + } + return ( +
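+// A sketch of what this render() plausibly returns; the "descriptorGraph"
+// ref is required by componentDidMount() above, the wrapper markup is an
+// assumption:
+//   <div className="CatalogItemCanvasEditor">
+//     <div className="DescriptorGraph" ref="descriptorGraph" />
+//   </div>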
+
+
+
+
+ ); + } +}); + +export default CatalogItemCanvasEditor; diff --git a/modules/ui/composer/webapp/src/components/CatalogItemDetailsEditor.js b/modules/ui/composer/webapp/src/components/CatalogItemDetailsEditor.js new file mode 100644 index 0000000..613b93b --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogItemDetailsEditor.js @@ -0,0 +1,66 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * + * + */ +'use strict'; + +import React from 'react' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import EditDescriptorModelProperties from './EditDescriptorModelProperties' + +const CatalogItemDetailsEditor = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return {}; + }, + getDefaultProps() { + return { + container: null, + width: 0 + }; + }, + componentWillMount() { + }, + componentDidMount() { + }, + componentDidUpdate() { + }, + componentWillUnmount() { + }, + render() { + + const container = this.props.container || {model: {}, uiState: {}}; + if (!(container && container.model && container.uiState)) { + return null; + } + + return ( +
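+// A sketch of what this render() plausibly returns, based on the
+// EditDescriptorModelProperties import and the container guard above; the
+// wrapper markup is an assumption:
+//   <div className="CatalogItemDetailsEditor">
+//     <EditDescriptorModelProperties container={container} />
+//   </div>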
+
+
+ +
+
+
+ ); + + } +}); + +export default CatalogItemDetailsEditor; diff --git a/modules/ui/composer/webapp/src/components/CatalogItems.js b/modules/ui/composer/webapp/src/components/CatalogItems.js new file mode 100644 index 0000000..ac7c544 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogItems.js @@ -0,0 +1,132 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import UID from '../libraries/UniqueId' +import React from 'react' +import messages from './messages' +import ClassNames from 'classnames' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import CatalogDataStore from '../stores/CatalogDataStore' +import CatalogItemsActions from '../actions/CatalogItemsActions' +import ComposerAppActions from '../actions/ComposerAppActions' +import SelectionManager from '../libraries/SelectionManager' + +import '../styles/CatalogItems.scss' +import imgFile from 'file!../images/vendor-riftio.png' + +const CatalogItems = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return CatalogDataStore.getState(); + }, + getDefaultProps() { + return { + filterByType: 'nsd' + }; + }, + componentWillMount() { + CatalogDataStore.listen(this.onChange); + }, + componentDidMount() { + // async actions creator will dispatch loadCatalogsSuccess and loadCatalogsError messages + CatalogDataStore.loadCatalogs().catch(e => console.warn('unable to load catalogs', e)); + }, + componentWillUnmount() { + CatalogDataStore.unlisten(this.onChange); + }, + onChange(state) { + this.setState(state); + }, + render() { + const onDragStart = function(event) { + const data = {type: 'catalog-item', item: this}; + event.dataTransfer.effectAllowed = 'copy'; + event.dataTransfer.setData('text', JSON.stringify(data)); + ComposerAppActions.setDragState(data); + }; + const onDblClickCatalogItem = function () { + CatalogItemsActions.editCatalogItem(this); + }; + const onClickCatalogItem = function () { + // single clicking an item is handled by ComposerApp::onClick handler + //CatalogItemsActions.selectCatalogItem(this); + }; + const cleanDataURI = this.cleanDataURI; + const items = this.getCatalogItems().map(function (d) { + const isNSD = d.uiState.type === 'nsd'; + const isDeleted = d.uiState.deleted; + const isModified = d.uiState.modified; + const isSelected = SelectionManager.isSelected(d); + const isOpenForEdit = d.uiState.isOpenForEdit; + const spanClassNames = ClassNames({'-is-selected': isSelected, '-is-open-for-edit': isOpenForEdit}); + const sectionClassNames = ClassNames('catalog-item', {'-is-modified': isModified, '-is-deleted': isDeleted}); + const instanceCount = d.uiState['instance-ref-count']; + const instanceCountLabel = isNSD && instanceCount ? ({instanceCount}) : null; + return ( +
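+// A sketch of the per-item markup built by this map() callback; the brace
+// expressions below survive in the original, while the surrounding elements
+// and attributes are assumptions:
+//   <li key={d.id} className={sectionClassNames} draggable="true"
+//       onDragStart={onDragStart.bind(d)}
+//       onClick={onClickCatalogItem.bind(d)}
+//       onDoubleClick={onDblClickCatalogItem.bind(d)}>
+//     <span className={spanClassNames}>
+//       <img src={d.logo ? cleanDataURI(d.logo) : imgFile} />
+//       <h2>{d.name} {instanceCountLabel}</h2>
+//       <h3>{d['short-name']}</h3>
+//       <p>{d.description}</p>
+//       <small>{d.vendor || d.provider} {d.version}</small>
+//     </span>
+//   </li>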
+
+
+ {isModified ?
: null} +
+
{d.name} {instanceCountLabel}
+
+ +
+
{d['short-name']}
+
{d.description}
+
{d.vendor || d.provider} {d.version}
+
+
+
+ {isOpenForEdit ?
: null} +
+
+
+ {items.length ? items : messages.catalogWelcome} +
+
    + ); + }, + cleanDataURI(imageString) { + if (/\bbase64\b/g.test(imageString)) { + return imageString; + } else if (/<\?xml\b/g.test(imageString)) { + const imgStr = imageString.substring(imageString.indexOf(' {return d.type === this.props.filterByType}; + return this.state.catalogs.filter(catalogFilter).reduce((result, catalog) => { + return result.concat(catalog.descriptors); + }, []); + } +}); + +export default CatalogItems; diff --git a/modules/ui/composer/webapp/src/components/CatalogPackageManager.js b/modules/ui/composer/webapp/src/components/CatalogPackageManager.js new file mode 100644 index 0000000..960fa85 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogPackageManager.js @@ -0,0 +1,125 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import React from 'react' +import moment from 'moment' +import ClassNames from 'classnames' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import CatalogPackageManagerStore from '../stores/CatalogPackageManagerStore' +import CatalogPackageManagerActions from '../actions/CatalogPackageManagerActions' +import Button from './Button' + +import '../styles/CatalogPackageManager.scss' +// import imgVideoFileType from '../images/video167.svg' +// import imgZipFileType from '../images/zipped2.svg' +// import imgUnknownFileType from '../images/file87.svg' +import imgCancel from '../../node_modules/open-iconic/svg/circle-x.svg' + +const CatalogPackageManager = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return CatalogPackageManagerStore.getState(); + }, + getDefaultProps() { + return {}; + }, + componentWillMount() { + CatalogPackageManagerStore.listen(this.onChange); + }, + componentDidMount() { + }, + onChange(state) { + this.setState(state); + }, + componentDidUpdate() { + }, + componentWillUnmount() { + CatalogPackageManagerStore.unlisten(this.onChange); + }, + render() { + function getIndicator(download) { + if (download.pending) { + return ( +
+
+
+ ); + } + } + function getDownloadLink(download) { + if (download.success) { + const now = moment(); + const duration = moment.duration(moment(download.urlValidUntil).diff(now)); + return ( +
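+// A sketch of the link returned here; the label text and the humanized
+// expiry survive on the next line, while the anchor element and its
+// attributes are assumptions:
+//   <a className="download-link" href={download.url}>
+//     Download Package
+//     <span className="expires">expires {duration.humanize(true)}</span>
+//   </a>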
+ Download Package + expires {duration.humanize(true)} + +
+ ); + } + } + + function getMessage(catalogPackage) { + if (catalogPackage.success && catalogPackage.url) { + return getDownloadLink(catalogPackage); + } + return ( +
{catalogPackage.message}
+ ); + } + + var createItem = function (catalogPackage) { + const onClickRemove = function () { + CatalogPackageManagerActions.removeCatalogPackage(catalogPackage); + }; + const classNames = ClassNames('item', {'-error': catalogPackage.error, '-success': catalogPackage.success}); + return ( +
+
+
+
{catalogPackage.name}
+
+ + {getIndicator(catalogPackage)} + {getMessage(catalogPackage)} +
+
+
+
+ ); + }; + + const packages = this.state.packages || []; + return ( +
+
+ {packages.map(createItem)} +
+
    + ); + + } + +}); + +export default CatalogPackageManager; diff --git a/modules/ui/composer/webapp/src/components/CatalogPanel.js b/modules/ui/composer/webapp/src/components/CatalogPanel.js new file mode 100644 index 0000000..8f832aa --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogPanel.js @@ -0,0 +1,204 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import _ from 'lodash' +import React from 'react' +import ReactDOM from 'react-dom' +import messages from './messages' +import ClassNames from 'classnames' +import UploadDropZone from '../libraries/CatalogPackageManagerUploadDropZone' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import DropTarget from './DropTarget' +import DropZonePanel from './DropZonePanel' +import CatalogItems from './CatalogItems' +import CatalogFilter from './CatalogFilter' +import CatalogPanelTray from './CatalogPanelTray' +import CatalogPanelToolbar from './CatalogPanelToolbar' +import CatalogPackageManager from './CatalogPackageManager' +import CatalogItemsActions from '../actions/CatalogItemsActions' +import CatalogPanelTrayActions from '../actions/CatalogPanelTrayActions' +import ComposerAppActions from '../actions/ComposerAppActions' +import ComposerAppStore from '../stores/ComposerAppStore' +import CatalogPanelStore from '../stores/CatalogPanelStore' +import LoadingIndicator from './LoadingIndicator' +import SelectionManager from '../libraries/SelectionManager' + +import '../styles/CatalogPanel.scss' + +const createDropZone = function (action, clickable, dropTarget) { + const dropZone = new UploadDropZone(ReactDOM.findDOMNode(dropTarget), [clickable], action); + dropZone.on('dragover', this.onDragOver); + dropZone.on('dragend', this.onDragEnd); + dropZone.on('addedfile', this.onFileAdded); + return dropZone; +}; + +const uiTransientState = { + isDrop: false, + isDragging: false, + isDraggingFiles: false, + timeoutId: 0 +}; + +const CatalogPanel = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return CatalogPanelStore.getState(); + }, + getDefaultProps() { + }, + componentWillMount() { + }, + componentDidMount() { + CatalogPanelStore.listen(this.onChange); + document.body.addEventListener('dragover', this.onDragOver); + document.body.addEventListener('dragend', this.onDragEnd); + window.addEventListener('dragend', this.onDragEnd); + }, + componentDidUpdate() { + }, + componentWillUnmount() { + CatalogPanelStore.unlisten(this.onChange); + document.body.removeEventListener('dragover', this.onDragOver); + document.body.removeEventListener('dragend', this.onDragEnd); + window.removeEventListener('dragend', this.onDragEnd); + }, + render() { + + const onDropCatalogItem = e => { + e.preventDefault(); + clearTimeout(uiTransientState.timeoutId); + uiTransientState.isDragging = false; + uiTransientState.isDrop = true; + const item = JSON.parse(e.dataTransfer.getData('text')).item; + 
CatalogItemsActions.exportSelectedCatalogItems(item); + CatalogPanelTrayActions.open(); + }; + + const onDropUpdatePackage = e => { + e.preventDefault(); + clearTimeout(uiTransientState.timeoutId); + uiTransientState.isDragging = false; + uiTransientState.isDrop = true; + CatalogPanelTrayActions.open(); + }; + + const onDropOnboardPackage = e => { + e.preventDefault(); + clearTimeout(uiTransientState.timeoutId); + uiTransientState.isDragging = false; + uiTransientState.isDrop = true; + CatalogPanelTrayActions.open(); + }; + + const isDraggingItem = uiTransientState.isDragging && !uiTransientState.isDraggingFiles; + const isDraggingFiles = uiTransientState.isDragging && uiTransientState.isDraggingFiles; + const updateDropZone = createDropZone.bind(this, UploadDropZone.ACTIONS.update, '.action-update-catalog-package'); + const onboardDropZone = createDropZone.bind(this, UploadDropZone.ACTIONS.onboard, '.action-onboard-catalog-package'); + const className = ClassNames('CatalogPanel', {'-is-tray-open': this.state.isTrayOpen}); + const hasNoCatalogs = this.props.hasNoCatalogs; + const isLoading = this.props.isLoading; + return ( +
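+// A sketch of the panel markup this render() plausibly returns, assembled
+// from the imported components, the IIFE above (loading / welcome / items),
+// and the drop-target captions that survive below; exact nesting and props
+// are assumptions:
+//   <div className={className}>
+//     <CatalogPanelToolbar />
+//     <div className="CatalogPanelBody">
+//       {isLoading ? <LoadingIndicator />
+//         : hasNoCatalogs ? messages.catalogWelcome
+//         : [<CatalogFilter />, <CatalogItems />]}
+//     </div>
+//     <CatalogPanelTray show={this.state.isTrayOpen}>
+//       <DropTarget onDrop={onDropCatalogItem}>Export catalog item.</DropTarget>
+//       <DropTarget dropZone={onboardDropZone} onDrop={onDropOnboardPackage}>
+//         On-board new catalog package.</DropTarget>
+//       <DropTarget dropZone={updateDropZone} onDrop={onDropUpdatePackage}>
+//         Update existing catalog package.</DropTarget>
+//       <CatalogPackageManager />
+//     </CatalogPanelTray>
+//   </div>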
+ +
+ {(() => { + if (isLoading) { + return ( +
+ +
+ ) + } + if (hasNoCatalogs) { + return messages.catalogWelcome; + } + return ( +
+ + +
+ ); + })()} +
+ + + + Export catalog item. + + + + + On-board new catalog package. + + + Update existing catalog package. + + + + +
    + ); + + }, + onChange(state) { + this.setState(state); + }, + onDragOver(e) { + // NOTE do not call preventDefault here - see DropTarget + if (!uiTransientState.isDragging) { + uiTransientState.isDrop = false; + uiTransientState.isDragging = true; + uiTransientState.wasTrayOpen = this.state.isTrayOpen; + uiTransientState.isDraggingFiles = _.contains(e.dataTransfer.types, 'Files'); + const dragState = ComposerAppStore.getState().drag || {}; + if (uiTransientState.isDraggingFiles || (dragState.type === 'catalog-item')) { + CatalogPanelTrayActions.open(); + } + } + e.dataTransfer.dropEffect = 'none'; + // the drag-end event does not fire on drag events that originate + // outside of the browser, e.g. dragging files from desktop, so + // we use this debounced callback to simulate a drag-end event + clearTimeout(uiTransientState.timeoutId); + uiTransientState.timeoutId = setTimeout(() => { + this.onDragEnd(); + }, 400); + }, + onDragEnd() { + clearTimeout(uiTransientState.timeoutId); + if (uiTransientState.isDragging) { + uiTransientState.isDragging = false; + if (uiTransientState.isDrop || uiTransientState.wasTrayOpen) { + CatalogPanelTrayActions.open(); + } else { + CatalogPanelTrayActions.close(); + } + } + }, + onFileAdded() { + CatalogPanelTrayActions.open(); + } +}); + +export default CatalogPanel; diff --git a/modules/ui/composer/webapp/src/components/CatalogPanelToolbar.js b/modules/ui/composer/webapp/src/components/CatalogPanelToolbar.js new file mode 100644 index 0000000..b93bbae --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogPanelToolbar.js @@ -0,0 +1,108 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import React from 'react' +import Button from './Button' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import CatalogPanelTrayActions from '../actions/CatalogPanelTrayActions' +import CatalogItemsActions from '../actions/CatalogItemsActions' + +import '../styles/CatalogPanelToolbar.scss' + +import imgAdd from '../../node_modules/open-iconic/svg/plus.svg' +import imgCopy from '../../node_modules/open-iconic/svg/layers.svg' +import imgOnboard from '../../node_modules/open-iconic/svg/cloud-upload.svg' +import imgUpdate from '../../node_modules/open-iconic/svg/rain.svg' +import imgDownload from '../../node_modules/open-iconic/svg/cloud-download.svg' +import imgDelete from '../../node_modules/open-iconic/svg/trash.svg' + +const CatalogHeader = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return {}; + }, + getDefaultProps() { + }, + componentWillMount() { + }, + componentDidMount() { + }, + componentDidUpdate() { + }, + componentWillUnmount() { + }, + render() { + return ( +
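+// A sketch of the toolbar this render() plausibly returns; the heading text
+// survives below, and each Button pairs an imported icon with one of the
+// handlers that follow (ordering and titles are assumptions; the
+// action-onboard/action-update class names come from the drop zones wired
+// up in CatalogPanel.js):
+//   <div className="CatalogPanelToolbar">
+//     <h1>Descriptor Catalogs</h1>
+//     <Button src={imgAdd} title="Add new catalog item"
+//             onClick={() => this.onClickCreateCatalogItem('nsd')} />
+//     <Button src={imgCopy} title="Copy catalog item"
+//             onClick={this.onClickDuplicateCatalogItem} />
+//     <Button src={imgOnboard} className="action-onboard-catalog-package"
+//             title="On-board catalog package" onClick={this.onClickOnBoardCatalog} />
+//     <Button src={imgUpdate} className="action-update-catalog-package"
+//             title="Update catalog package" onClick={this.onClickUpdateCatalog} />
+//     <Button src={imgDownload} title="Export catalog item(s)"
+//             onClick={this.onClickExportCatalogItems} />
+//     <Button src={imgDelete} title="Delete catalog item"
+//             onClick={this.onClickDeleteCatalogItem} />
+//   </div>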
+
+
Descriptor Catalogs
+
+
+
+
+
+
+
+
+
+
+
+
    + + ); + }, + onClickUpdateCatalog() { + //CatalogPanelTrayActions.open(); + // note CatalogPackageManagerUploadDropZone wired our btn + // click event to the DropZone.js configuration and will + // open the tray when/if files are added to the drop zone + }, + onClickOnBoardCatalog() { + //CatalogPanelTrayActions.open(); + // note CatalogPackageManagerUploadDropZone wired our btn + // click event to the DropZone.js configuration and will + // open the tray when/if files are added to the drop zone + }, + onClickDeleteCatalogItem() { + CatalogItemsActions.deleteSelectedCatalogItem(); + }, + onClickCreateCatalogItem(type) { + CatalogItemsActions.createCatalogItem(type); + }, + onClickDuplicateCatalogItem() { + CatalogItemsActions.duplicateSelectedCatalogItem(); + }, + onClickExportCatalogItems() { + CatalogPanelTrayActions.open(); + CatalogItemsActions.exportSelectedCatalogItems(); + } +}); + +export default CatalogHeader; diff --git a/modules/ui/composer/webapp/src/components/CatalogPanelTray.js b/modules/ui/composer/webapp/src/components/CatalogPanelTray.js new file mode 100644 index 0000000..cafd5bb --- /dev/null +++ b/modules/ui/composer/webapp/src/components/CatalogPanelTray.js @@ -0,0 +1,75 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * + * + */ +'use strict'; + +import React from 'react'; +import ClassNames from 'classnames' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import CatalogPanelTrayActions from '../actions/CatalogPanelTrayActions' + +import '../styles/CatalogPanelTray.scss' + +const CatalogPanelTray = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return { + isDragging: false + }; + }, + getDefaultProps() { + return {show: false}; + }, + preventResizeCursor(e) { + e.stopPropagation(); + document.body.style.cursor = ''; + }, + componentWillMount() { + }, + componentDidMount() { + }, + componentDidUpdate() { + }, + componentWillUnmount() { + }, + onDragOver() { + this.setState({isDragging: true}); + }, + onDragLeave() { + this.setState({isDragging: false}); + }, + render() { + const classNames = ClassNames('CatalogPanelTray', { + '-close': !this.props.show, + '-is-dragging': this.state.isDragging + }); + return ( +
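+// A sketch of what this render() plausibly returns; the heading text
+// survives below, the wrapper and handler wiring are assumptions:
+//   <div className={classNames} onDragOver={this.onDragOver}
+//        onDragLeave={this.onDragLeave} onMouseOver={this.preventResizeCursor}>
+//     <h1>Catalog Package Manager</h1>
+//     <div className="tray-body">{this.props.children}</div>
+//   </div>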
+
+
Catalog Package Manager
+
+
+ {this.props.children} +
+
    + ); + } +}); + +export default CatalogPanelTray; diff --git a/modules/ui/composer/webapp/src/components/ComposerApp.js b/modules/ui/composer/webapp/src/components/ComposerApp.js new file mode 100644 index 0000000..a3f92a1 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/ComposerApp.js @@ -0,0 +1,240 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * + * + */ +'use strict'; + +window['RIFT_wareLaunchpadComposerVersion'] = `semver 0.0.79`; + +import 'es5-shim' +import 'babel-polyfill' +import alt from '../alt' +import UID from '../libraries/UniqueId' +import utils from '../libraries/utils' +import React from 'react' +import ReactDOM from 'react-dom' +import Crouton from 'react-crouton' +import ClassNames from 'classnames' +import PureRenderMixin from 'react-addons-pure-render-mixin' +import DeletionManager from '../libraries/DeletionManager' +import SelectionManager from '../libraries/SelectionManager' +import ResizableManager from '../libraries/ResizableManager' +import DescriptorModelFactory from '../libraries/model/DescriptorModelFactory' +import RiftHeader from './RiftHeader' +import CanvasPanel from './CanvasPanel' +import CatalogPanel from './CatalogPanel' +import DetailsPanel from './DetailsPanel' +import ModalOverlay from './ModalOverlay' +import ComposerAppToolbar from './ComposerAppToolbar' +import PanelResizeAction from '../actions/PanelResizeAction' +import ComposerAppActions from '../actions/ComposerAppActions' +import ComposerAppStore from '../stores/ComposerAppStore' +import CatalogDataStore from '../stores/CatalogDataStore' +import TooltipManager from '../libraries/TooltipManager' +import CatalogItemsActions from '../actions/CatalogItemsActions' + +import 'normalize.css' +import '../styles/AppRoot.scss' + +const resizeManager = new ResizableManager(window); + +const clearLocalStorage = utils.getSearchParams(window.location).hasOwnProperty('clearLocalStorage'); + +const preventDefault = e => e.preventDefault(); +const clearDragState = () => ComposerAppActions.setDragState(null); + +const ComposerApp = React.createClass({ + mixins: [PureRenderMixin], + getInitialState() { + return ComposerAppStore.getState(); + }, + getDefaultProps() { + return {}; + }, + componentWillMount() { + if (clearLocalStorage) { + window.localStorage.clear(); + } + this.state.isLoading = CatalogDataStore.getState().isLoading; + ComposerAppStore.listen(this.onChange); + CatalogDataStore.listen(this.onCatalogDataChanged); + window.addEventListener('resize', this.resize); + window.onbeforeunload = this.onBeforeUnload; + // prevent browser from downloading any drop outside of our specific drop zones + window.addEventListener('dragover', preventDefault); + window.addEventListener('drop', preventDefault); + // ensure drags initiated in the app clear the state on drop + window.addEventListener('drop', clearDragState); + DeletionManager.addEventListeners(); + }, + componentWillUnmount() { + window.removeEventListener('resize', 
this.resize); + window.removeEventListener('dragover', preventDefault); + window.removeEventListener('drop', preventDefault); + window.removeEventListener('drop', clearDragState); + // resizeManager automatically registered its event handlers + resizeManager.removeAllEventListeners(); + ComposerAppStore.unlisten(this.onChange); + CatalogDataStore.unlisten(this.onCatalogDataChanged); + DeletionManager.removeEventListeners(); + TooltipManager.removeEventListeners(); + }, + componentDidMount() { + resizeManager.addAllEventListeners(); + const snapshot = window.localStorage.getItem('composer'); + if (snapshot) { + alt.bootstrap(snapshot); + } + document.body.addEventListener('keydown', (event) => { + // prevent details editor form from blowing up the app + const ENTER_KEY = 13; + if (event.which === ENTER_KEY) { + event.preventDefault(); + return false; + } + }); + const appRootElement = ReactDOM.findDOMNode(this.refs.appRoot); + TooltipManager.addEventListeners(appRootElement); + SelectionManager.onClearSelection = () => { + if (this.state.item) { + CatalogItemsActions.catalogItemMetaDataChanged.defer(this.state.item); + } + }; + }, + componentDidUpdate() { + if (this.state.fullScreenMode) { + document.body.classList.add('-is-full-screen'); + } else { + document.body.classList.remove('-is-full-screen'); + } + SelectionManager.refreshOutline(); + }, + resize(e) { + PanelResizeAction.resize(e); + }, + render() { + + function onClickUpdateSelection(event) { + if (event.defaultPrevented) { + return + } + const element = SelectionManager.getClosestElementWithUID(event.target); + if (element) { + SelectionManager.select(element); + SelectionManager.refreshOutline(); + event.preventDefault(); + } else { + SelectionManager.clearSelectionAndRemoveOutline(); + } + } + + let cpNumber = 0; + const classNames = ClassNames('ComposerApp'); + const isNew = this.state.item && this.state.item.uiState.isNew; + const hasItem = this.state.item && this.state.item.uiState; + const isModified = this.state.item && this.state.item.uiState.modified; + const isEditingNSD = this.state.item && this.state.item.uiState && /nsd/.test(this.state.item.uiState.type); + const isEditingVNFD = this.state.item && this.state.item.uiState && /vnfd/.test(this.state.item.uiState.type); + const containers = [this.state.item].reduce(DescriptorModelFactory.buildCatalogItemFactory(CatalogDataStore.getState().catalogs), []); + + containers.filter(d => DescriptorModelFactory.isConnectionPoint(d)).forEach(d => { + d.cpNumber = ++cpNumber; + containers.filter(d => DescriptorModelFactory.isVnfdConnectionPointRef(d)).filter(ref => ref.key === d.key).forEach(ref => ref.cpNumber = d.cpNumber); + }); + const canvasTitle = containers.length ? containers[0].model.name : ''; + const hasNoCatalogs = CatalogDataStore.getState().catalogs.length === 0; + const isLoading = this.state.isLoading; + + return ( +
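+// A sketch of the application shell this render() plausibly returns, based
+// on the imported top-level components and the locals above; nesting and
+// props are assumptions, not the verbatim original:
+//   <div ref="appRoot" className={classNames} onClick={onClickUpdateSelection}>
+//     <RiftHeader />
+//     <ComposerAppToolbar layout={this.state.layout}
+//       isModified={isModified} isNew={isNew}
+//       isEditingNSD={isEditingNSD} isEditingVNFD={isEditingVNFD}
+//       showJSONViewer={this.state.showJSONViewer} disabled={!hasItem} />
+//     <CatalogPanel layout={this.state.layout}
+//       hasNoCatalogs={hasNoCatalogs} isLoading={isLoading} />
+//     <CanvasPanel layout={this.state.layout} title={canvasTitle}
+//       containers={containers} hasNoCatalogs={hasNoCatalogs} />
+//     <DetailsPanel layout={this.state.layout} containers={containers}
+//       showJSONViewer={this.state.showJSONViewer} />
+//     <ModalOverlay />
+//   </div>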
+ + + + +
+ +
+ +
+
+ + + + event.stopPropagation()}/> +
+
+ +
    + ); + + }, + onChange(state) { + this.setState(state); + }, + onCatalogDataChanged(catalogDataState) { + const catalogs = catalogDataState.catalogs; + const unsavedChanges = catalogs.reduce((result, catalog) => { + if (result) { + return result; + } + return catalog.descriptors.reduce((result, descriptor) => { + if (result) { + return result; + } + return descriptor.uiState.modified; + }, false); + }, false); + this.setState({ + unsavedChanges: unsavedChanges, + isLoading: catalogDataState.isLoading + }); + }, + onBeforeUnload() { + // https://trello.com/c/c8v321Xx/160-prompt-user-to-save-changes + //const snapshot = alt.takeSnapshot(); + //window.localStorage.setItem('composer', snapshot); + if (this.state.unsavedChanges) { + return 'You have unsaved changes. If you do not onboard (or update) your changes they will be lost.'; + } + } + +}); + +ReactDOM.render(, document.getElementById('content')); + +export default ComposerApp; diff --git a/modules/ui/composer/webapp/src/components/ComposerAppToolbar.js b/modules/ui/composer/webapp/src/components/ComposerAppToolbar.js new file mode 100644 index 0000000..9da1506 --- /dev/null +++ b/modules/ui/composer/webapp/src/components/ComposerAppToolbar.js @@ -0,0 +1,189 @@ +/* + * Copyright 2016 RIFT.IO Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+/*
+ *
+ *
+ */
+'use strict';
+
+import React from 'react'
+import messages from './messages'
+import ClassNames from 'classnames'
+import PureRenderMixin from 'react-addons-pure-render-mixin'
+import Button from './Button'
+import CatalogItemsActions from '../actions/CatalogItemsActions'
+import CanvasEditorActions from '../actions/CanvasEditorActions'
+import ComposerAppActions from '../actions/ComposerAppActions'
+import SelectionManager from '../libraries/SelectionManager'
+import DeletionManager from '../libraries/DeletionManager'
+
+import '../styles/ComposerAppToolbar.scss'
+
+import imgSave from '../../node_modules/open-iconic/svg/data-transfer-upload.svg'
+import imgCancel from '../../node_modules/open-iconic/svg/circle-x.svg'
+import imgLayout from '../../node_modules/open-iconic/svg/grid-three-up.svg'
+import imgVLD from '../../node_modules/open-iconic/svg/link-intact.svg'
+import imgJSONViewer from '../../node_modules/open-iconic/svg/code.svg'
+import imgFG from '../../node_modules/open-iconic/svg/infinity.svg'
+import imgDelete from '../../node_modules/open-iconic/svg/trash.svg'
+import imgVDU from '../../node_modules/open-iconic/svg/laptop.svg'
+
+const ComposerAppToolbar = React.createClass({
+	mixins: [PureRenderMixin],
+	getInitialState() {
+		return {};
+	},
+	getDefaultProps() {
+		return {
+			disabled: true,
+			showMore: false,
+			layout: {left: 300},
+			isModified: false,
+			isEditingNSD: false,
+			isEditingVNFD: false,
+			showJSONViewer: false,
+			isNew: false
+		};
+	},
+	componentWillMount() {
+	},
+	componentDidMount() {
+	},
+	componentDidUpdate() {
+	},
+	componentWillUnmount() {
+	},
+	// React passes the synthetic event as the first argument to these handlers
+	onClickSave(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CatalogItemsActions.saveCatalogItem();
+	},
+	onClickCancel(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CatalogItemsActions.cancelCatalogItemChanges();
+	},
+	onClickToggleShowMoreInfo(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CanvasEditorActions.toggleShowMoreInfo();
+	},
+	onClickAutoLayout(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CanvasEditorActions.applyDefaultLayout();
+	},
+	onClickAddVld(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CanvasEditorActions.addVirtualLinkDescriptor();
+	},
+	onClickAddVnffg(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CanvasEditorActions.addForwardingGraphDescriptor();
+	},
+	onClickAddVdu(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		CanvasEditorActions.addVirtualDeploymentDescriptor();
+	},
+	onDragStartAddVdu(event) {
+		const data = {type: 'action', action: 'add-vdu'};
+		event.dataTransfer.effectAllowed = 'copy';
+		event.dataTransfer.setData('text', JSON.stringify(data));
+		ComposerAppActions.setDragState(data);
+	},
+	onDragStartAddVld(event) {
+		const data = {type: 'action', action: 'add-vld'};
+		event.dataTransfer.effectAllowed = 'copy';
+		event.dataTransfer.setData('text', JSON.stringify(data));
+		ComposerAppActions.setDragState(data);
+	},
+	onDragStartAddVnffg(event) {
+		const data = {type: 'action', action: 'add-vnffgd'};
+		event.dataTransfer.effectAllowed = 'copy';
+		event.dataTransfer.setData('text', JSON.stringify(data));
+		ComposerAppActions.setDragState(data);
+	},
+	onClickDeleteSelected(event) {
+		event.preventDefault();
+		event.stopPropagation();
+		DeletionManager.deleteSelected(event);
+	},
+	toggleJSONViewer(event) {
+		event.preventDefault();
+		if (this.props.showJSONViewer) {
+			ComposerAppActions.closeJsonViewer();
+		} else {
+            ComposerAppActions.closeJsonViewer();
+            ComposerAppActions.showJsonViewer.defer();
+        }
+    },
+    render() {
+        const style = {left: this.props.layout.left};
+        const saveClasses = ClassNames('ComposerAppSave', {'primary-action': this.props.isModified || this.props.isNew});
+        const cancelClasses = ClassNames('ComposerAppCancel', {'secondary-action': this.props.isModified});
+        if (this.props.disabled) {
+            return (
+
+            );
+        }
+        const hasSelection = SelectionManager.getSelections().length > 0;
+        return (
+
+                {(()=>{
+                    if (this.props.isEditingNSD || this.props.isEditingVNFD) {
+                        return (
+
    +
+                        );
+                    }
+                })()}
+
    +
    +
+        );
+    }
+});
+
+export default ComposerAppToolbar;
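The onDragStartAdd* handlers above serialize an action descriptor onto the native dataTransfer object. A sketch of the consuming side, under the assumption that a canvas drop handler reads back the same 'text' key (the handler below is illustrative, not part of this file):

    // Hypothetical drop handler matching the payload set in onDragStartAddVdu.
    function onCanvasDrop(event) {
        event.preventDefault();
        const data = JSON.parse(event.dataTransfer.getData('text'));
        if (data.type === 'action' && data.action === 'add-vdu') {
            CanvasEditorActions.addVirtualDeploymentDescriptor();
        }
    }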
diff --git a/modules/ui/composer/webapp/src/components/ContentEditableDiv.js b/modules/ui/composer/webapp/src/components/ContentEditableDiv.js
new file mode 100644
index 0000000..c9f9de1
--- /dev/null
+++ b/modules/ui/composer/webapp/src/components/ContentEditableDiv.js
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Created by onvelocity on 2/13/16.
+ */
+'use strict';
+import React from 'react'
+import ClassNames from 'classnames'
+export default function ContentEditableDiv (props) {
+
+    const fontWidth = parseFloat(props.fontWidth) || 15;
+    const size = props.autoPadRight ? Math.max(50, String(props.name).length * fontWidth) : 0;
+    const style = {borderColor: 'transparent', background: 'transparent'};
+    if (size) {
+        style.paddingRight = size;
+    }
+
+    return (
+
+    );
+
+}
\ No newline at end of file
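A usage sketch for ContentEditableDiv (prop values are illustrative): with autoPadRight set, the right padding is one fontWidth per character of name, floored at 50px -- here 18 characters * 15px = 270px:

    // Hypothetical usage; 'my-network-service' is 18 characters long.
    <ContentEditableDiv name="my-network-service" autoPadRight="true" fontWidth="15" />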
diff --git a/modules/ui/composer/webapp/src/components/DetailsPanel.js b/modules/ui/composer/webapp/src/components/DetailsPanel.js
new file mode 100644
index 0000000..2c287d2
--- /dev/null
+++ b/modules/ui/composer/webapp/src/components/DetailsPanel.js
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ *
+ *
+ */
+'use strict';
+
+import _ from 'lodash'
+import React from 'react';
+import PureRenderMixin from 'react-addons-pure-render-mixin'
+import messages from './messages'
+import serializers from '../libraries/model/DescriptorModelSerializer'
+import JSONViewer from './JSONViewer'
+import PopupWindow from './PopupWindow'
+import CatalogItemDetailsEditor from './CatalogItemDetailsEditor'
+import SelectionManager from '../libraries/SelectionManager'
+
+import '../styles/DetailsPanel.scss'
+
+const DetailsPanel = React.createClass({
+    mixins: [PureRenderMixin, SelectionManager.reactPauseResumeMixin],
+    getInitialState() {
+        return {};
+    },
+    getDefaultProps() {
+        return {
+            containers: [],
+            showJSONViewer: false
+        };
+    },
+    componentWillMount() {
+    },
+    componentDidMount() {
+    },
+    componentDidUpdate() {
+        SelectionManager.refreshOutline();
+    },
+    componentWillUnmount() {
+    },
+    render() {
+        let json = '{}';
+        let bodyComponent = messages.detailsWelcome();
+        const selected = this.props.containers.filter(d => SelectionManager.isSelected(d));
+        const selectedContainer = selected[0];
+        if (selectedContainer) {
+            bodyComponent = <CatalogItemDetailsEditor container={selectedContainer} />;
+            const edit = _.cloneDeep(selectedContainer.model);
+            json = serializers.serialize(edit) || edit;
+        }
+        const jsonViewerTitle = selectedContainer ? selectedContainer.model.name : 'nothing selected';
+        const hasNoCatalogs = this.props.hasNoCatalogs;
+        return (
+            <div className="DetailsPanel" onDragOver={event => event.preventDefault()}>
+
+                {hasNoCatalogs ? null : bodyComponent}
+
+
+
+        );
+    }
+});
+
+export default DetailsPanel;
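A usage sketch for DetailsPanel (prop shapes inferred from getDefaultProps and the render method; canvasContainers is a hypothetical array of descriptor containers):

    // The panel shows a welcome message until one of the supplied
    // containers is selected via SelectionManager.
    <DetailsPanel containers={canvasContainers} hasNoCatalogs={false} showJSONViewer={false} />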
diff --git a/modules/ui/composer/webapp/src/components/DropTarget.js b/modules/ui/composer/webapp/src/components/DropTarget.js
new file mode 100644
index 0000000..fbabe4d
--- /dev/null
+++ b/modules/ui/composer/webapp/src/components/DropTarget.js
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ *
+ *
+ */
+/**
+ * Created by onvelocity on 11/6/15.
+ */
+'use strict';
+
+import React from 'react'
+import PureRenderMixin from 'react-addons-pure-render-mixin'
+import ClassNames from 'classnames'
+
+const DropTarget = React.createClass({
+    mixins: [PureRenderMixin],
+    getInitialState() {
+        return {isDragHover: false};
+    },
+    getDefaultProps() {
+        return {onDrop: () => {}, dropZone: null, className: 'DropTarget'};
+    },
+    componentWillMount() {
+    },
+    componentDidMount() {
+        if (this.props.dropZone) {
+            const dropTarget = this;
+            const dropZone = this.props.dropZone(dropTarget);
+            dropZone.on('dragover', this.onDragOver);
+            dropZone.on('dragleave', this.onDragLeave);
+            dropZone.on('dragend', this.onDragEnd);
+            dropZone.on('drop', this.onDrop);
+        }
+    },
+    componentDidUpdate() {
+    },
+    componentWillUnmount() {
+    },
+    render() {
+        const classNames = ClassNames(this.props.className, 'dnd-target', {'-drag-hover': this.state.isDragHover});
+        return (
+            <div className={classNames}>
+                {this.props.children}
+            </div>
+        );
+    },
+    onDragOver(e) {
+        // NOTE calling preventDefault makes this a valid drop target
+        e.preventDefault();
+        e.dataTransfer.dropEffect = 'copy';
+        this.setState({isDragHover: true});
+    },
+    onDragLeave() {
+        this.setState({isDragHover: false});
+    },
+    onDragEnd() {
+        this.setState({isDragHover: false});
+    },
+    onDrop(e) {
+        this.setState({isDragHover: false});
+        this.props.onDrop(e);
+    }
+});
+
+export default DropTarget;
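The dropZone prop consumed in componentDidMount above is a factory: it receives the mounted DropTarget and must return an object exposing on(eventName, handler). A sketch satisfying that inferred contract (makeDropZone and its wiring are hypothetical):

    // Hypothetical dropZone factory; how the stored handlers get attached
    // to a real DOM element is left to the caller.
    function makeDropZone(dropTarget) {
        const handlers = {};
        return {
            on(name, handler) { handlers[name] = handler; }
        };
    }

    <DropTarget dropZone={makeDropZone} onDrop={e => console.log('dropped', e)} />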
diff --git a/modules/ui/composer/webapp/src/components/DropZonePanel.js b/modules/ui/composer/webapp/src/components/DropZonePanel.js
new file mode 100644
index 0000000..9fbc3b0
--- /dev/null
+++ b/modules/ui/composer/webapp/src/components/DropZonePanel.js
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ *
+ *
+ */
+/**
+ * Created by onvelocity on 11/6/15.
+ */
+'use strict';
+
+import React from 'react'
+import PureRenderMixin from 'react-addons-pure-render-mixin'
+import ClassNames from 'classnames'
+
+import '../styles/DropZonePanel.scss'
+
+const DropZonePanel = React.createClass({
+    mixins: [PureRenderMixin],
+    getInitialState() {
+        return {};
+    },
+    getDefaultProps() {
+        return {show: false, className: 'DropZonePanel', title: 'Drop files to upload.'};
+    },
+    componentWillMount() {
+    },
+    componentDidMount() {
+    },
+    componentDidUpdate() {
+    },
+    componentWillUnmount() {
+    },
+    render() {
+        const classNames = ClassNames(this.props.className, {'-close': !this.props.show});
+        return (
+
+                {this.props.title}
+
+                {this.props.children}
+
+        );
+    }
+});
+
+export default DropZonePanel;
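A composition sketch for the two drag-and-drop helpers above (the isDragging state and onUpload handler are hypothetical):

    // The panel slides open while a drag is in progress and hosts a
    // DropTarget that receives the dropped files.
    <DropZonePanel show={this.state.isDragging} title="Drop files to upload.">
        <DropTarget onDrop={e => this.onUpload(e.dataTransfer.files)} />
    </DropZonePanel>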
diff --git a/modules/ui/composer/webapp/src/components/EditDescriptorModelProperties.js b/modules/ui/composer/webapp/src/components/EditDescriptorModelProperties.js
new file mode 100644
index 0000000..dbc7f0a
--- /dev/null
+++ b/modules/ui/composer/webapp/src/components/EditDescriptorModelProperties.js
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2016 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ *
+ *
+ */
+/**
+ * Created by onvelocity on 1/18/16.
+ *
+ * This class generates the form fields used to edit the CONFD JSON model.
+ */
+'use strict';
+
+import _ from 'lodash'
+import utils from '../libraries/utils'
+import React from 'react'
+import ClassNames from 'classnames'
+import changeCase from 'change-case'
+import toggle from '../libraries/ToggleElementHandler'
+import Button from './Button'
+import Property from '../libraries/model/DescriptorModelMetaProperty'
+import ComposerAppActions from '../actions/ComposerAppActions'
+import CatalogItemsActions from '../actions/CatalogItemsActions'
+import DESCRIPTOR_MODEL_FIELDS from '../libraries/model/DescriptorModelFields'
+import DescriptorModelFactory from '../libraries/model/DescriptorModelFactory'
+import DescriptorModelMetaFactory from '../libraries/model/DescriptorModelMetaFactory'
+import SelectionManager from '../libraries/SelectionManager'
+import DeletionManager from '../libraries/DeletionManager'
+import DescriptorModelIconFactory from '../libraries/model/IconFactory'
+import getEventPath from '../libraries/getEventPath'
+
+import imgAdd from '../../node_modules/open-iconic/svg/plus.svg'
+import imgRemove from '../../node_modules/open-iconic/svg/trash.svg'
+
+import '../styles/EditDescriptorModelProperties.scss'
+
+function getDescriptorMetaBasicForType(type) {
+    const basicPropertiesFilter = d => _.contains(DESCRIPTOR_MODEL_FIELDS[type], d.name);
+    return DescriptorModelMetaFactory.getModelMetaForType(type, basicPropertiesFilter) || {properties: []};
+}
+
+function getDescriptorMetaAdvancedForType(type) {
+    const advPropertiesFilter = d => !_.contains(DESCRIPTOR_MODEL_FIELDS[type], d.name);
+    return DescriptorModelMetaFactory.getModelMetaForType(type, advPropertiesFilter) || {properties: []};
+}
+
+function getTitle(model = {}) {
+    if (typeof model['short-name'] === 'string' && model['short-name']) {
+        return model['short-name'];
+    }
+    if (typeof model.name === 'string' && model.name) {
+        return model.name;
+    }
+    if (model.uiState && typeof model.uiState.displayName === 'string' && model.uiState.displayName) {
+        return model.uiState.displayName;
+    }
+    if (typeof model.id === 'string') {
+        return model.id;
+    }
+}
+
+export default function EditDescriptorModelProperties(props) {
+
+    const container = props.container;
+
+    if (!(DescriptorModelFactory.isContainer(container))) {
+        return null;
+    }
+
+    function startEditing() {
+        DeletionManager.removeEventListeners();
+    }
+
+    function endEditing() {
+        DeletionManager.addEventListeners();
+    }
+
+    function onClickSelectItem(property, path, value, event) {
+        event.preventDefault();
+        const root = this.getRoot();
+        if (SelectionManager.select(value)) {
+            CatalogItemsActions.catalogItemMetaDataChanged(root.model);
+        }
+    }
+
+    function onFocusPropertyFormInputElement(property, path, value, event) {
+
+        event.preventDefault();
+        startEditing();
+
+        function removeIsFocusedClass(event) {
+            event.target.removeEventListener('blur', removeIsFocusedClass);
+            Array.from(document.querySelectorAll('.-is-focused')).forEach(d => d.classList.remove('-is-focused'));
+        }
+
+        removeIsFocusedClass(event);
+
+        const propertyWrapper = getEventPath(event).reduce((parent, element) => {
+            if (parent) {
+                return parent;
+            }
+            if (!element.classList) {
+                return false;
+            }
+            if (element.classList.contains('property')) {
+                return element;
+            }
+            return false;
+        }, false);
+
+        if (propertyWrapper) {
+            propertyWrapper.classList.add('-is-focused');
+            event.target.addEventListener('blur', removeIsFocusedClass);
+        }
+
+    }
+
+    function buildAddPropertyAction(container, property, path) {
+        function onClickAddProperty(property, path, event) {
+            event.preventDefault();
+            //SelectionManager.resume();
+            const create = Property.getContainerCreateMethod(property, this);
+            if (create) {
+                const model = null;
+                create(model, path, property);
+            } else {
+                const name = path.join('.');
+                const value = Property.createModelInstance(property);
+                utils.assignPathValue(this.model, name, value);
+            }
+            CatalogItemsActions.catalogItemDescriptorChanged(this.getRoot());
+        }
+        return (
+