Merge from OSM SO master
Signed-off-by: Philip Joseph <philip.joseph@riftio.com>
diff --git a/BUILD.sh b/BUILD.sh
index 40c5990..a9c0195 100755
--- a/BUILD.sh
+++ b/BUILD.sh
@@ -114,10 +114,10 @@
if [[ $PLATFORM == ub16 ]]; then
PLATFORM_REPOSITORY=${1:-OSM}
- PLATFORM_VERSION=${2:-4.4.0.0.57702}
+ PLATFORM_VERSION=${2:-4.99.1.1.58887}
elif [[ $PLATFORM == fc20 ]]; then
PLATFORM_REPOSITORY=${1:-OSM} # change to OSM when published
- PLATFORM_VERSION=${2:-4.3.1.0.53705}
+ PLATFORM_VERSION=${2:-4.99.1.1.58887}
else
echo "Internal error: unknown platform $PLATFORM"
exit 1
@@ -130,11 +130,11 @@
DAILY_TIMER='apt-daily.timer'
DAILY_SERVICE='apt-daily.service'
-if [ $(systemctl is-active $DAILY_TIMER) = "active" ]
+if [ $(sudo systemctl is-active $DAILY_TIMER) = "active" ]
then
- systemctl stop $DAILY_TIMER
- systemctl disable $DAILY_TIMER
- systemctl disable $DAILY_SERVICE
+ sudo systemctl stop $DAILY_TIMER
+ sudo systemctl disable $DAILY_TIMER
+ sudo systemctl disable $DAILY_SERVICE
fi
# must be run from the top of a workspace
@@ -175,7 +175,7 @@
if $runMkcontainer; then
sudo apt-get install -y libxml2-dev libxslt-dev
sudo /usr/rift/container_tools/mkcontainer --modes build --modes ext --repo ${PLATFORM_REPOSITORY}
- sudo pip3 install lxml==3.4.0
+ sudo -H pip3 install lxml==3.4.0
fi
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ae622a3..ddc62d9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -54,6 +54,7 @@
rwmon
rwcm
rwlaunchpad
+ rwprojectmano
)
if (NOT RIFT_AGENT_BUILD STREQUAL "XML_ONLY")
diff --git a/charms/layers/.gitignore b/charms/layers/.gitignore
deleted file mode 100644
index 7da5881..0000000
--- a/charms/layers/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-deps/
-builds/
-
diff --git a/charms/layers/sshproxy/.gitignore b/charms/layers/sshproxy/.gitignore
deleted file mode 100644
index b8e7ba3..0000000
--- a/charms/layers/sshproxy/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-trusty/
diff --git a/charms/layers/sshproxy/README.md b/charms/layers/sshproxy/README.md
deleted file mode 100644
index c66ff75..0000000
--- a/charms/layers/sshproxy/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# Overview
-
-This is a [Juju] layer intended to ease the development of charms that need
-to execute commands over SSH, such as proxy charms.
-
-# What is a proxy charm?
-
-A proxy charm is a limited type of charm that does not interact with software running on the same host, such as controlling and configuring a remote device (a static VM image, a router/switch, etc.). It cannot take advantage of some of Juju's key features, such as [scaling], [relations], and [leadership].
-
-Proxy charms are primarily a stop-gap, intended to prototype quickly, with the end goal being to develop it into a full-featured charm, which installs and executes code on the same machine as the charm is running.
-
-# Integration
-
-After you've [created your charm], open `interfaces.yaml` and add
-`layer:sshproxy` to the includes stanza, as shown below:
-```
-includes: ['layer:basic', 'layer:sshproxy']
-```
-
-## Reactive states
-
-This layer will set the following states:
-
-- `sshproxy.configured` This state is set when SSH credentials have been supplied to the charm.
-
-
-## Example
-In `reactive/mycharm.py`, you can add logic to execute commands over SSH. This
-example is run via a `start` action, and starts a service running on a remote
-host.
-```
-...
-import charms.sshproxy
-
-
-@when('sshproxy.configured')
-@when('actions.start')
-def start():
- """ Execute's the command, via the start action` using the
- configured SSH credentials
- """
- sshproxy.ssh("service myservice start")
-
-```
-
-## Actions
-This layer includes a built-in `run` action useful for debugging or running arbitrary commands:
-
-```
-$ juju run-action mycharm/0 run command=hostname
-Action queued with id: 014b72f3-bc02-4ecb-8d38-72bce03bbb63
-
-$ juju show-action-output 014b72f3-bc02-4ecb-8d38-72bce03bbb63
-results:
- output: juju-66a5f3-11
-status: completed
-timing:
- completed: 2016-10-27 19:53:49 +0000 UTC
- enqueued: 2016-10-27 19:53:44 +0000 UTC
- started: 2016-10-27 19:53:48 +0000 UTC
-
-```
-## Known Limitations and Issues
-
-### Security issues
-
-- Password and key-based authentications are supported, with the caveat that
-both (password and private key) are stored plaintext within the Juju controller.
-
-# Configuration and Usage
-
-This layer adds the following configuration options:
-- ssh-hostname
-- ssh-username
-- ssh-password
-- ssh-private-key
-
-Once [configure] those values at any time. Once they are set, the `sshproxy.configured` state flag will be toggled:
-
-```
-juju deploy mycharm ssh-hostname=10.10.10.10 ssh-username=ubuntu ssh-password=yourpassword
-```
-or
-```
-juju deploy mycharm ssh-hostname=10.10.10.10 ssh-username=ubuntu ssh-private-key="`cat ~/.ssh/id_rsa`"
-```
-
-
-# Contact Information
-Homepage: https://github.com/AdamIsrael/layer-sshproxy
-
-[Juju]: https://jujucharms.com/about
-[configure]: https://jujucharms.com/docs/2.0/charms-config
-[scaling]: https://jujucharms.com/docs/2.0/charms-scaling
-[relations]: https://jujucharms.com/docs/2.0/charms-relations
-[leadership]: https://jujucharms.com/docs/2.0/developer-leadership
-[created your charm]: https://jujucharms.com/docs/2.0/developer-getting-started
diff --git a/charms/layers/sshproxy/actions.yaml b/charms/layers/sshproxy/actions.yaml
deleted file mode 100644
index 501b2f9..0000000
--- a/charms/layers/sshproxy/actions.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-run:
- description: "Run an arbitrary command"
- params:
- command:
- description: "The command to execute."
- type: string
- default: ""
- required:
- - command
diff --git a/charms/layers/sshproxy/actions/run b/charms/layers/sshproxy/actions/run
deleted file mode 100755
index d85d3fa..0000000
--- a/charms/layers/sshproxy/actions/run
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python3
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-import sys
-sys.path.append('lib')
-
-from charms.reactive import main
-from charms.reactive import set_state
-from charmhelpers.core.hookenv import action_fail, action_name
-
-"""
-`set_state` only works here because it's flushed to disk inside the `main()`
-loop. remove_state will need to be called inside the action method.
-"""
-set_state('actions.{}'.format(action_name()))
-
-try:
- main()
-except Exception as e:
- action_fail(repr(e))
diff --git a/charms/layers/sshproxy/config.yaml b/charms/layers/sshproxy/config.yaml
deleted file mode 100644
index 07f3756..0000000
--- a/charms/layers/sshproxy/config.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-options:
- ssh-hostname:
- type: string
- default: ""
- description: "The hostname or IP address of the machine to"
- ssh-username:
- type: string
- default: ""
- description: "The username to login as."
- ssh-password:
- type: string
- default: ""
- description: "The password used to authenticate."
- ssh-private-key:
- type: string
- default: ""
- description: "The private ssh key to be used to authenticate."
diff --git a/charms/layers/sshproxy/layer.yaml b/charms/layers/sshproxy/layer.yaml
deleted file mode 100644
index 4f095a6..0000000
--- a/charms/layers/sshproxy/layer.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-includes: ['layer:basic'] # if you use any interfaces, add them here
diff --git a/charms/layers/sshproxy/lib/charms/sshproxy.py b/charms/layers/sshproxy/lib/charms/sshproxy.py
deleted file mode 100644
index b247bcf..0000000
--- a/charms/layers/sshproxy/lib/charms/sshproxy.py
+++ /dev/null
@@ -1,124 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-from charmhelpers.core.hookenv import (
- config,
-)
-import io
-import paramiko
-
-from subprocess import (
- Popen,
- CalledProcessError,
- PIPE,
-)
-
-
-def _run(cmd, env=None):
- """ Run a command, either on the local machine or remotely via SSH. """
- if isinstance(cmd, str):
- cmd = cmd.split(' ') if ' ' in cmd else [cmd]
-
- cfg = config()
- if all(k in cfg for k in ['ssh-hostname', 'ssh-username',
- 'ssh-password', 'ssh-private-key']):
- host = cfg['ssh-hostname']
- user = cfg['ssh-username']
- passwd = cfg['ssh-password']
- key = cfg['ssh-private-key']
-
- if host and user and (passwd or key):
- return ssh(cmd, host, user, passwd, key)
-
- p = Popen(cmd,
- env=env,
- shell=True,
- stdout=PIPE,
- stderr=PIPE)
- stdout, stderr = p.communicate()
- retcode = p.poll()
- if retcode > 0:
- raise CalledProcessError(returncode=retcode,
- cmd=cmd,
- output=stderr.decode("utf-8").strip())
- return (stdout.decode('utf-8').strip(), stderr.decode('utf-8').strip())
-
-
-def get_ssh_client(host, user, password=None, key=None):
- """Return a connected Paramiko ssh object"""
-
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
- pkey = None
- if key:
- f = io.StringIO(key)
- pkey = paramiko.RSAKey.from_private_key(f)
-
- ###########################################################################
- # There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL 5) where #
- # the server may not send the SSH_MSG_USERAUTH_BANNER message except when #
- # responding to an auth_none request. For example, paramiko will attempt #
- # to use password authentication when a password is set, but the server #
- # could deny that, instead requesting keyboard-interactive. The hack to #
- # workaround this is to attempt a reconnect, which will receive the right #
- # banner, and authentication can proceed. See the following for more info #
- # https://github.com/paramiko/paramiko/issues/432 #
- # https://github.com/paramiko/paramiko/pull/438 #
- ###########################################################################
-
- try:
- client.connect(host, port=22, username=user,
- password=password, pkey=pkey)
- except paramiko.ssh_exception.SSHException as e:
- if 'Error reading SSH protocol banner' == str(e):
- # Once more, with feeling
- client.connect(host, port=22, username=user,
- password=password, pkey=pkey)
- pass
- else:
- raise paramiko.ssh_exception.SSHException(e)
-
- return client
-
-
-def sftp(local_file, remote_file, host, user, password=None, key=None):
- """Copy a local file to a remote host"""
- client = get_ssh_client(host, user, password, key)
-
- # Create an sftp connection from the underlying transport
- sftp = paramiko.SFTPClient.from_transport(client.get_transport())
- sftp.put(local_file, remote_file)
- client.close()
-
-
-def ssh(cmd, host, user, password=None, key=None):
- """ Run an arbitrary command over SSH. """
- client = get_ssh_client(host, user, password, key)
-
- cmds = ' '.join(cmd)
- stdin, stdout, stderr = client.exec_command(cmds, get_pty=True)
- retcode = stdout.channel.recv_exit_status()
- client.close() # @TODO re-use connections
- if retcode > 0:
- output = stderr.read().strip()
- raise CalledProcessError(returncode=retcode, cmd=cmd,
- output=output)
- return (
- stdout.read().decode('utf-8').strip(),
- stderr.read().decode('utf-8').strip()
- )
diff --git a/charms/layers/sshproxy/metadata.yaml b/charms/layers/sshproxy/metadata.yaml
deleted file mode 100644
index effeb6a..0000000
--- a/charms/layers/sshproxy/metadata.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-name: sshproxy
-summary: Layer to copy files to or run commands on a remote host over ssh
-maintainer: Adam Israel <adam.israel@canonical.com>
-description: |
- This layer is intended to provide common ssh functionality, such as
- running a command on a remote host.
-series:
- - trusty
- - xenial
-tags:
- # Replace "misc" with one or more whitelisted tags from this list:
- # https://jujucharms.com/docs/stable/authors-charm-metadata
- - misc
-subordinate: false
diff --git a/charms/layers/sshproxy/reactive/sshproxy.py b/charms/layers/sshproxy/reactive/sshproxy.py
deleted file mode 100644
index e1508ca..0000000
--- a/charms/layers/sshproxy/reactive/sshproxy.py
+++ /dev/null
@@ -1,73 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-from charmhelpers.core.hookenv import (
- action_fail,
- action_get,
- action_set,
- config,
-)
-from charms.reactive import (
- remove_state,
- set_state,
- when,
-)
-import charms.sshproxy
-import subprocess
-
-
-@when('config.changed')
-def ssh_configured():
- """ Checks to see if the charm is configured with SSH credentials. If so,
- set a state flag that can be used to execute ssh-only actions.
-
- For example:
-
- @when('sshproxy.configured')
- def run_remote_command(cmd):
- ...
-
- @when_not('sshproxy.configured')
- def run_local_command(cmd):
- ...
- """
- cfg = config()
- if all(k in cfg for k in ['ssh-hostname', 'ssh-username',
- 'ssh-password', 'ssh-private-key']):
- set_state('sshproxy.configured')
- else:
- remove_state('sshproxy.configured')
-
-
-@when('actions.run')
-def run_command():
- """
- Run an arbitrary command, either locally or over SSH with the configured
- credentials.
- """
- try:
- cmd = action_get('command')
- output, err = charms.sshproxy._run(cmd)
- if len(err):
- action_fail("Command '{}' returned error code {}".format(cmd, err))
- else:
- action_set({'output': output})
- except subprocess.CalledProcessError as e:
- action_fail('Command failed: %s (%s)' %
- (' '.join(e.cmd), str(e.output)))
- finally:
- remove_state('actions.run')
diff --git a/charms/layers/sshproxy/tests/00-setup b/charms/layers/sshproxy/tests/00-setup
deleted file mode 100755
index 1e5f91b..0000000
--- a/charms/layers/sshproxy/tests/00-setup
+++ /dev/null
@@ -1,22 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-#!/bin/bash
-
-sudo add-apt-repository ppa:juju/stable -y
-sudo apt-get update
-sudo apt-get install amulet python-requests -y
diff --git a/charms/layers/sshproxy/tests/10-deploy b/charms/layers/sshproxy/tests/10-deploy
deleted file mode 100755
index f5d2f5d..0000000
--- a/charms/layers/sshproxy/tests/10-deploy
+++ /dev/null
@@ -1,76 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-#!/usr/bin/python3
-
-import amulet
-import requests
-import unittest
-import string
-import random
-
-
-class TestCharm(unittest.TestCase):
- user = None
- passwd = None
-
- def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
- return ''.join(random.choice(chars) for _ in range(size))
-
- def setUp(self):
-
- # Setup random user/password
- self.user = self.id_generator()
- self.passwd = self.id_generator()
-
- self.d = amulet.Deployment()
-
- self.d.add('sshproxy')
- self.d.add('ubuntu')
-
- self.d.expose('sshproxy')
-
- self.d.setup(timeout=900)
- self.d.sentry.wait()
-
- # Add
- ubuntu_0 = d.sentry['ubuntu'][0]
- ubuntu_0.ssh("sudo adduser {}".format(self.user))
- ubuntu_0.ssh("echo '{}' | sudo passwd {} --stdin".format(self.passwd, self.user))
-
- self.unit = self.d.sentry['sshproxy'][0]
-
- def test_service(self):
-
- # Configure the unit
-
- # Run a command
-
- # Verify the output
-
- # test we can access over http
- # page = requests.get('http://{}'.format(self.unit.info['public-address']))
- # self.assertEqual(page.status_code, 200)
- # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
- # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
- # - .info - An array of the information of that unit from Juju
- # - .file(PATH) - Get the details of a file on that unit
- # - .file_contents(PATH) - Get plain text output of PATH file from that unit
- # - .directory(PATH) - Get details of directory
- # - .directory_contents(PATH) - List files and folders in PATH on that unit
- # - .relation(relation, service:rel) - Get relation data from return service
- pass
diff --git a/charms/layers/sshproxy/wheelhouse.txt b/charms/layers/sshproxy/wheelhouse.txt
deleted file mode 100644
index ab6de07..0000000
--- a/charms/layers/sshproxy/wheelhouse.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-paramiko>=1.16.0,<1.17
diff --git a/charms/layers/vnfproxy/README.md b/charms/layers/vnfproxy/README.md
deleted file mode 100644
index 8379e50..0000000
--- a/charms/layers/vnfproxy/README.md
+++ /dev/null
@@ -1,172 +0,0 @@
-# vnfproxy
-
-## Overview
-
-This charm layer is intended for use by vendors who wish to integrate with
-OSM. The current release of OSM only supports a lightweight version of Juju
-charms, which we will refer to as "proxy charms". Consider the diagram below:
-
-```
-+---------------------+ +---------------------+
-| <----+ |
-| Resource | | Service |
-| Orchestrator (RO) +----> Orchestrator (SO) |
-| | | |
-+------------------+--+ +-------+----^--------+
- | | |
- | | |
- | | |
- +-----v-----+ +-v----+--+
- | <-------+ |
- | Virtual | | Proxy |
- | Machine | | Charm |
- | +-------> |
- +-----------+ +---------+
-```
-
-The Virtual Machine (VM) is created by the Resource Orchestrator (RO), at the
-request of the Service Orchestrator (SO). Once the VM has been created, a
-"proxy charm" is deployed in order to facilitate operations between the SO and
-your service running within the VM.
-
-As such, a proxy charm will expose a number of "actions" that are run via the
-SO. By default, the following actions are exposed:
-
-```bash
-actions
-├── reboot
-├── restart
-├── run
-├── start
-└── stop
-```
-
-Some actions, such as `run` and `reboot`, do not require any additional configuration. `start`, `stop` and `restart`, however, will require you to
-implement the command(s) required to interact with your service.
-
-## Usage
-
-Create the framework for your proxy charm:
-
-```bash
-$ charm create pingpong
-$ cd pingpong
-```
-
-Modify `layer.yaml` to the following:
-```yaml
-includes:
- - layer:basic
- - layer:vnfproxy
-```
-
-The `metadata.yaml` describes your service. It should look similar to the following:
-
-```yaml
-name: vnfproxy
-summary: A layer for developing OSM "proxy" charms.
-maintainer: Adam Israel <adam.israel@canonical.com>
-description: |
- VNF "proxy" charms are a lightweight version of a charm that, rather than
- installing software on the same machine, execute commands over an ssh channel.
-series:
- - trusty
- - xenial
-tags:
- - osm
- - vnf
-subordinate: false
-```
-
-Implement the default action(s) you wish to support by adding the following code to reactive/pingpong.py and fill in the cmd to be run:
-
-```python
-@when('actions.start')
-def start():
- err = ''
- try:
- cmd = ""
- result, err = charms.sshproxy._run(cmd)
- except:
- action_fail('command failed:' + err)
- else:
- action_set({'outout': result})
- finally:
- remove_flag('actions.start')
-
-
-@when('actions.stop')
-def stop():
- err = ''
- try:
- # Enter the command to stop your service(s)
- cmd = "service myname stop"
- result, err = charms.sshproxy._run(cmd)
- except:
- action_fail('command failed:' + err)
- else:
- action_set({'outout': result})
- finally:
- remove_flag('actions.stop')
-
-
-@when('actions.restart')
-def restart():
- err = ''
- try:
- # Enter the command to restart your service(s)
- cmd = "service myname restart"
- result, err = charms.sshproxy._run(cmd)
- except:
- action_fail('command failed:' + err)
- else:
- action_set({'outout': result})
- finally:
- remove_flag('actions.restart')
-```
-
-Rename `README.ex` to `README.md` and describe your application and its usage.
-
--- fix this. there are cases where the config is useful -- Delete `config.yaml`, since the charm's configuration will be driven by the SO.
-
-Create the `actions.yaml` file; this will describe the additional operations you would like to perform on or against your service.
-
-```yaml
-set-server:
- description: "Set the target IP address and port"
- params:
- server-ip:
- description: "IP on which the target service is listening."
- type: string
- default: ""
- server-port:
- description: "Port on which the target service is listening."
- type: integer
- default: 5555
- required:
- - server-ip
-set-rate:
- description: "Set the rate of packet generation."
- params:
- rate:
- description: "Packet rate."
- type: integer
- default: 5
-get-stats:
- description: "Get the stats."
-get-state:
- description: "Get the admin state of the target service."
-get-rate:
- description: "Get the rate set on the target service."
-get-server:
- description: "Get the target server and IP set"
-```
-
-
-Once you've implemented your actions, you need to compile the various charm layers:
-```bash
-$ charm build
-
-```
-
-## Contact
diff --git a/charms/layers/vnfproxy/actions.yaml b/charms/layers/vnfproxy/actions.yaml
deleted file mode 100644
index a9ee2ba..0000000
--- a/charms/layers/vnfproxy/actions.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-start:
- description: "Stop the service"
-stop:
- description: "Stop the service"
-restart:
- description: "Stop the service"
-reboot:
- description: "Reboot the machine"
diff --git a/charms/layers/vnfproxy/actions/reboot b/charms/layers/vnfproxy/actions/reboot
deleted file mode 100755
index 751c88c..0000000
--- a/charms/layers/vnfproxy/actions/reboot
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python3
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-import sys
-sys.path.append('lib')
-
-from charms.reactive import main
-from charms.reactive import set_state
-from charmhelpers.core.hookenv import action_fail
-
-"""
-`set_state` only works here because it's flushed to disk inside the `main()`
-loop. remove_state will need to be called inside the action method.
-"""
-set_state('actions.reboot')
-
-try:
- main()
-except Exception as e:
- action_fail(repr(e))
diff --git a/charms/layers/vnfproxy/actions/restart b/charms/layers/vnfproxy/actions/restart
deleted file mode 100755
index f62929b..0000000
--- a/charms/layers/vnfproxy/actions/restart
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-import sys
-sys.path.append('lib')
-
-from charms.reactive import main
-from charms.reactive import set_state
-from charmhelpers.core.hookenv import action_fail
-
-"""
-`set_state` only works here because it's flushed to disk inside the `main()`
-loop. remove_state will need to be called inside the action method.
-"""
-set_state('actions.restart')
-
-try:
- main()
-except Exception as e:
- action_fail(repr(e))
diff --git a/charms/layers/vnfproxy/actions/start b/charms/layers/vnfproxy/actions/start
deleted file mode 100755
index a9db3d0..0000000
--- a/charms/layers/vnfproxy/actions/start
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-import sys
-sys.path.append('lib')
-
-from charms.reactive import main
-from charms.reactive import set_state
-from charmhelpers.core.hookenv import action_fail
-
-"""
-`set_state` only works here because it's flushed to disk inside the `main()`
-loop. remove_state will need to be called inside the action method.
-"""
-set_state('actions.start')
-
-try:
- main()
-except Exception as e:
- action_fail(repr(e))
diff --git a/charms/layers/vnfproxy/actions/stop b/charms/layers/vnfproxy/actions/stop
deleted file mode 100755
index 3890486..0000000
--- a/charms/layers/vnfproxy/actions/stop
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-import sys
-sys.path.append('lib')
-
-from charms.reactive import main
-from charms.reactive import set_state
-from charmhelpers.core.hookenv import action_fail
-
-"""
-`set_state` only works here because it's flushed to disk inside the `main()`
-loop. remove_state will need to be called inside the action method.
-"""
-set_state('actions.stop')
-
-try:
- main()
-except Exception as e:
- action_fail(repr(e))
diff --git a/charms/layers/vnfproxy/icon.svg b/charms/layers/vnfproxy/icon.svg
deleted file mode 100644
index e092eef..0000000
--- a/charms/layers/vnfproxy/icon.svg
+++ /dev/null
@@ -1,279 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.48+devel r12274"
- sodipodi:docname="Juju_charm_icon_template.svg">
- <defs
- id="defs6519">
- <linearGradient
- inkscape:collect="always"
- xlink:href="#Background"
- id="linearGradient6461"
- gradientUnits="userSpaceOnUse"
- x1="0"
- y1="970.29498"
- x2="144"
- y2="970.29498"
- gradientTransform="matrix(0,-0.66666669,0.6660448,0,-866.25992,731.29077)" />
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="18.514671"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1029"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:url(#linearGradient6461);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline" />
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 C 669.9821,591.68426 670.20862,595.55064 669.8173,595.77657 Z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/layers/vnfproxy/layer.yaml b/charms/layers/vnfproxy/layer.yaml
deleted file mode 100644
index 89bf245..0000000
--- a/charms/layers/vnfproxy/layer.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-includes:
- - layer:basic
- - layer:sshproxy
diff --git a/charms/layers/vnfproxy/metadata.yaml b/charms/layers/vnfproxy/metadata.yaml
deleted file mode 100644
index 909643d..0000000
--- a/charms/layers/vnfproxy/metadata.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-name: vnfproxy
-summary: A layer for developing OSM "proxy" charms.
-maintainer: Adam Israel <adam.israel@canonical.com>
-description: |
- VNF "proxy" charms are a lightweight version of a charm that, rather than
- installing software on the same machine, execute commands over an ssh channel.
-series:
- - trusty
- - xenial
-tags:
- - osm
- - vnf
-subordinate: false
diff --git a/charms/layers/vnfproxy/reactive/vnfproxy.py b/charms/layers/vnfproxy/reactive/vnfproxy.py
deleted file mode 100644
index 2b26212..0000000
--- a/charms/layers/vnfproxy/reactive/vnfproxy.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from charmhelpers.core.hookenv import (
- action_fail,
- action_set,
-)
-
-from charms.reactive import (
- when,
- remove_state as remove_flag,
-)
-import charms.sshproxy
-
-
-@when('actions.reboot')
-def reboot():
- err = ''
- try:
- result, err = charms.sshproxy._run("reboot")
- except:
- action_fail('command failed:' + err)
- else:
- action_set({'outout': result})
- finally:
- remove_flag('actions.reboot')
-
-
-###############################################################################
-# Below is an example implementation of the start/stop/restart actions. #
-# To use this, copy the below code into your layer and add the appropriate #
-# command(s) necessary to perform the action. #
-###############################################################################
-
-# @when('actions.start')
-# def start():
-# err = ''
-# try:
-# cmd = "service myname start"
-# result, err = charms.sshproxy._run(cmd)
-# except:
-# action_fail('command failed:' + err)
-# else:
-# action_set({'outout': result})
-# finally:
-# remove_flag('actions.start')
-#
-#
-# @when('actions.stop')
-# def stop():
-# err = ''
-# try:
-# # Enter the command to stop your service(s)
-# cmd = "service myname stop"
-# result, err = charms.sshproxy._run(cmd)
-# except:
-# action_fail('command failed:' + err)
-# else:
-# action_set({'outout': result})
-# finally:
-# remove_flag('actions.stop')
-#
-#
-# @when('actions.restart')
-# def restart():
-# err = ''
-# try:
-# # Enter the command to restart your service(s)
-# cmd = "service myname restart"
-# result, err = charms.sshproxy._run(cmd)
-# except:
-# action_fail('command failed:' + err)
-# else:
-# action_set({'outout': result})
-# finally:
-# remove_flag('actions.restart')
-#
-#
diff --git a/charms/layers/vnfproxy/tests/00-setup b/charms/layers/vnfproxy/tests/00-setup
deleted file mode 100755
index 1e5f91b..0000000
--- a/charms/layers/vnfproxy/tests/00-setup
+++ /dev/null
@@ -1,22 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-#!/bin/bash
-
-sudo add-apt-repository ppa:juju/stable -y
-sudo apt-get update
-sudo apt-get install amulet python-requests -y
diff --git a/charms/layers/vnfproxy/tests/10-deploy b/charms/layers/vnfproxy/tests/10-deploy
deleted file mode 100755
index 5bb9044..0000000
--- a/charms/layers/vnfproxy/tests/10-deploy
+++ /dev/null
@@ -1,52 +0,0 @@
-##
-# Copyright 2016 Canonical Ltd.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-##
-
-#!/usr/bin/python3
-
-import amulet
-import requests
-import unittest
-
-
-class TestCharm(unittest.TestCase):
- def setUp(self):
- self.d = amulet.Deployment()
-
- self.d.add('vnfproxy')
- self.d.expose('vnfproxy')
-
- self.d.setup(timeout=900)
- self.d.sentry.wait()
-
- self.unit = self.d.sentry['vnfproxy'][0]
-
- def test_service(self):
- # test we can access over http
- page = requests.get('http://{}'.format(self.unit.info['public-address']))
- self.assertEqual(page.status_code, 200)
- # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
- # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
- # - .info - An array of the information of that unit from Juju
- # - .file(PATH) - Get the details of a file on that unit
- # - .file_contents(PATH) - Get plain text output of PATH file from that unit
- # - .directory(PATH) - Get details of directory
- # - .directory_contents(PATH) - List files and folders in PATH on that unit
- # - .relation(relation, service:rel) - Get relation data from return service
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/common/plugins/yang/CMakeLists.txt b/common/plugins/yang/CMakeLists.txt
index ed7d7b3..840997a 100644
--- a/common/plugins/yang/CMakeLists.txt
+++ b/common/plugins/yang/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@
##
# Yang targets
##
+
rift_add_yang_target(
TARGET rwsdn_yang
YANG_FILES rw-sdn.yang
@@ -28,6 +29,8 @@
rwsdnal_yang_gen
DEPENDS
rwsdnal_yang
+ ASSOCIATED_FILES
+ rw-sdn.role.xml
)
rift_add_yang_target(
@@ -37,9 +40,13 @@
LIBRARIES
rwsdn_yang_gen
rwcal_yang_gen
+ rwprojectmano_yang_gen
+ mano-types_yang_gen
DEPENDS
rwcal_yang
rwsdnal_yang
+ ASSOCIATED_FILES
+ rw-cloud.role.xml
)
rift_add_yang_target(
@@ -48,6 +55,10 @@
COMPONENT ${PKG_LONG_NAME}
LIBRARIES
rwcal_yang_gen
+ rwprojectmano_yang_gen
DEPENDS
rwcal_yang
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ rw-config-agent.role.xml
)
diff --git a/common/plugins/yang/rw-cloud.role.xml b/common/plugins/yang/rw-cloud.role.xml
new file mode 100644
index 0000000..6fb2486
--- /dev/null
+++ b/common/plugins/yang/rw-cloud.role.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-cloud-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-cloud:update-cloud-status/rw-cloud:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-cloud:cloud</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-cloud:cloud</path>
+ <path>/rw-cloud:update-cloud-status</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-cloud:cloud</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/common/plugins/yang/rw-cloud.tailf.yang b/common/plugins/yang/rw-cloud.tailf.yang
index 2d65325..0f21670 100644
--- a/common/plugins/yang/rw-cloud.tailf.yang
+++ b/common/plugins/yang/rw-cloud.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,11 +27,15 @@
prefix rw-cloud;
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
import tailf-common {
prefix tailf-common;
}
- tailf-common:annotate "/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" {
+ tailf-common:annotate "/rw-project:project/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" {
tailf-common:callpoint rw_callpoint;
}
diff --git a/common/plugins/yang/rw-cloud.yang b/common/plugins/yang/rw-cloud.yang
index 2c7ce6f..4287110 100644
--- a/common/plugins/yang/rw-cloud.yang
+++ b/common/plugins/yang/rw-cloud.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,36 +36,51 @@
prefix "rw-sdn";
}
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-14 {
description
"Initial revision.";
}
- container cloud {
- rw-pb-ext:msg-new CloudConfig;
- list account {
- rw-pb-ext:msg-new CloudAccount;
- description "Configure Cloud Accounts";
+ augment "/rw-project:project" {
+ container cloud {
+ rw-pb-ext:msg-new CloudConfig;
+ list account {
+ rw-pb-ext:msg-new CloudAcc;
+ description "Configure Cloud Accounts";
- max-elements 16;
- key "name";
+ max-elements 16;
+ key "name";
- leaf name {
- mandatory true;
- type string {
+ leaf name {
+ mandatory true;
+ type string {
length "1..255";
+ }
}
- }
- leaf sdn-account {
- description "Configured SDN account associated with this cloud account";
- type leafref {
- path "/rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
+ leaf sdn-account {
+ description "Configured SDN account associated with this cloud account";
+ type leafref {
+ path "../../../rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
+ }
}
- }
- uses rwcal:provider-auth;
- uses rwcal:connection-status;
+ uses rwcal:provider-auth;
+ uses rwcal:connection-status;
+ }
}
}
@@ -78,6 +93,8 @@
"The cloud account name to update connection status for";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
diff --git a/common/plugins/yang/rw-config-agent.role.xml b/common/plugins/yang/rw-config-agent.role.xml
new file mode 100644
index 0000000..2951e5a
--- /dev/null
+++ b/common/plugins/yang/rw-config-agent.role.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-config-agent-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-config-agent:update-cfg-agent-status/rw-config-agent:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-config-agent:config-agent</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-config-agent:config-agent</path>
+ <path>/rw-config-agent:update-cfg-agent-status</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-config-agent:config-agent</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/common/plugins/yang/rw-config-agent.tailf.yang b/common/plugins/yang/rw-config-agent.tailf.yang
index 07f0d74..33e3a40 100644
--- a/common/plugins/yang/rw-config-agent.tailf.yang
+++ b/common/plugins/yang/rw-config-agent.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,12 @@
prefix tailf-common;
}
- tailf-common:annotate "/rw-config-agent:config-agent/rw-config-agent:account/rw-config-agent:connection-status" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-config-agent:config-agent" +
+ "/rw-config-agent:account/rw-config-agent:connection-status" {
tailf-common:callpoint rw_callpoint;
}
diff --git a/common/plugins/yang/rw-config-agent.yang b/common/plugins/yang/rw-config-agent.yang
index 1740af3..4f69b8f 100644
--- a/common/plugins/yang/rw-config-agent.yang
+++ b/common/plugins/yang/rw-config-agent.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,6 +35,19 @@
prefix "rwcal";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-02-04 {
description
"Initial revision.";
@@ -48,57 +61,59 @@
}
}
- container config-agent {
- rwpb:msg-new ConfigAgent;
+ augment "/rw-project:project" {
+ container config-agent {
+ rwpb:msg-new ConfigAgent;
- list account {
- rwpb:msg-new ConfigAgentAccount;
- key "name";
+ list account {
+ rwpb:msg-new ConfigAgentAccount;
+ key "name";
- description "List of configuration agent accounts";
+ description "List of configuration agent accounts";
- leaf name {
- description "Name of this config agent account";
- type string;
- }
+ leaf name {
+ description "Name of this config agent account";
+ type string;
+ }
- leaf account-type {
- description
- "Default account type is Rift Configuration Agent (RiftCA)";
- type config-agent-account-type;
- default "riftca";
- }
-
- choice config-agent-account-type {
- case juju {
+ leaf account-type {
description
- "Configure the VNF through Juju.";
- container juju {
- leaf ip-address {
+ "Default account type is Rift Configuration Agent (RiftCA)";
+ type config-agent-account-type;
+ default "riftca";
+ }
+
+ choice config-agent-account-type {
+ case juju {
+ description
+ "Configure the VNF through Juju.";
+ container juju {
+ leaf ip-address {
description "Juju host IP address.";
type inet:ip-address;
- }
- leaf port {
+ }
+ leaf port {
description
- "Juju host port number. Default 17070.";
+ "Juju host port number. Default 17070.";
type inet:port-number;
default 17070;
- }
- leaf user {
+ }
+ leaf user {
description
- "User name to connect to Juju host. Default user-admin.";
+ "User name to connect to Juju host. Default user-admin.";
type string;
default "user-admin" ;
- }
- leaf secret {
+ }
+ leaf secret {
description
- "Admin secret or password for Juju host.";
+ "Admin secret or password for Juju host.";
type string;
+ }
}
}
}
+ uses rwcal:connection-status;
}
- uses rwcal:connection-status;
}
}
@@ -111,6 +126,8 @@
"The config agent account name to update connection status for";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
}
diff --git a/common/plugins/yang/rw-sdn.role.xml b/common/plugins/yang/rw-sdn.role.xml
new file mode 100644
index 0000000..62944f8
--- /dev/null
+++ b/common/plugins/yang/rw-sdn.role.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-sdn-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-sdn:update-sdn-status/rw-sdn:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-sdn:sdn</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-sdn:sdn</path>
+ <path>/rw-sdn:update-sdn-status</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-sdn:sdn</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/common/plugins/yang/rw-sdn.tailf.yang b/common/plugins/yang/rw-sdn.tailf.yang
index 3f63883..b7ddaab 100644
--- a/common/plugins/yang/rw-sdn.tailf.yang
+++ b/common/plugins/yang/rw-sdn.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,11 +26,16 @@
import rw-sdn {
prefix rw-sdn;
}
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
import tailf-common {
prefix tailf-common;
}
- tailf:annotate "/rw-sdn:sdn/rw-sdn:account/rw-sdn:connection-status" {
+ tailf:annotate "/rw-project:project/rw-sdn:sdn/rw-sdn:account/rw-sdn:connection-status" {
tailf-common:callpoint rw_callpoint;
}
diff --git a/common/plugins/yang/rw-sdn.yang b/common/plugins/yang/rw-sdn.yang
index 0441452..97b0441 100644
--- a/common/plugins/yang/rw-sdn.yang
+++ b/common/plugins/yang/rw-sdn.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,26 +28,41 @@
prefix "rwpb";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
import rwsdnal {
prefix "rwsdnal";
}
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-14 {
description
"Initial revision.";
}
+augment "/rw-project:project" {
container sdn {
- rwpb:msg-new SDNAccountConfig;
- list account {
- rwpb:msg-new SDNAccount;
- key "name";
- leaf name {
- type string;
- }
+ rwpb:msg-new SDNAccountConfig;
+ list account {
+ rwpb:msg-new SDNAccount;
+ key "name";
+ leaf name {
+ type string;
+ }
- uses rwsdnal:sdn-provider-auth;
- uses rwsdnal:connection-status;
+ uses rwsdnal:sdn-provider-auth;
+ uses rwsdnal:connection-status;
+ }
}
}
@@ -60,6 +75,8 @@
"The sdn account name to update connection status for";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
}
diff --git a/common/python/CMakeLists.txt b/common/python/CMakeLists.txt
index 8fba078..f6e3baf 100644
--- a/common/python/CMakeLists.txt
+++ b/common/python/CMakeLists.txt
@@ -85,6 +85,7 @@
FILES
rift/downloader/__init__.py
rift/downloader/base.py
+ rift/downloader/local_file.py
rift/downloader/url.py
COMPONENT ${PKG_LONG_NAME}
PYTHON3_ONLY
@@ -143,6 +144,7 @@
rift/mano/utils/__init.py__
rift/mano/utils/compare_desc.py
rift/mano/utils/juju_api.py
+ rift/mano/utils/project.py
rift/mano/utils/short_name.py
COMPONENT ${PKG_LONG_NAME}
PYTHON3_ONLY
diff --git a/common/python/rift/downloader/local_file.py b/common/python/rift/downloader/local_file.py
new file mode 100644
index 0000000..c6e9c5e
--- /dev/null
+++ b/common/python/rift/downloader/local_file.py
@@ -0,0 +1,84 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Taken from http://stackoverflow.com/a/27786580
+
+
+import logging
+import requests
+import os
+from urllib.parse import urlparse
+
+
+class LocalFileAdapter(requests.adapters.BaseAdapter):
+ """Protocol Adapter to allow Requests to GET file:// URLs
+
+ @todo: Properly handle non-empty hostname portions.
+ """
+
+ @staticmethod
+ def _chkpath(method, path):
+ """Return an HTTP status for the given filesystem path."""
+ if method.lower() in ('put', 'delete'):
+ return 501, "Not Implemented" # TODO
+ elif method.lower() not in ('get', 'head'):
+ return 405, "Method Not Allowed"
+ elif os.path.isdir(path):
+ return 400, "Path Not A File"
+ elif not os.path.isfile(path):
+ return 404, "File Not Found"
+ elif not os.access(path, os.R_OK):
+ return 403, "Access Denied"
+ else:
+ return 200, "OK"
+
+ def send(self, req, **kwargs): # pylint: disable=unused-argument
+ """Return the file specified by the given request
+
+ @type req: C{PreparedRequest}
+ @todo: Should I bother filling `response.headers` and processing
+ If-Modified-Since and friends using `os.stat`?
+ """
+
+ log = logging.getLogger('rw-mano-log')
+ log.debug("Request: {}".format(req))
+
+ url = urlparse(req.path_url)
+ path = os.path.normcase(os.path.normpath(url.path))
+ response = requests.Response()
+
+ response.status_code, response.reason = self._chkpath(req.method, path)
+ log.debug("Response {}: {}".format(response.status_code, response.reason))
+ if response.status_code == 200 and req.method.lower() != 'head':
+ try:
+ response.raw = open(path, 'rb')
+ except (OSError, IOError) as err:
+ response.status_code = 500
+ response.reason = str(err)
+
+ if isinstance(req.url, bytes):
+ response.url = req.url.decode('utf-8')
+ else:
+ response.url = req.url
+
+ response.request = req
+ response.connection = self
+
+
+ log.debug("Response {}: {}".format(response.status_code, response))
+ return response
+
+ def close(self):
+ pass
diff --git a/common/python/rift/downloader/url.py b/common/python/rift/downloader/url.py
index 2768894..0cdd8d4 100644
--- a/common/python/rift/downloader/url.py
+++ b/common/python/rift/downloader/url.py
@@ -39,6 +39,7 @@
from gi.repository import RwPkgMgmtYang
from . import base
+from .local_file import LocalFileAdapter as LocalFileAdapter
class UrlDownloader(base.AbstractDownloader):
@@ -109,6 +110,7 @@
retries = Retry(total=5, backoff_factor=1)
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
+ session.mount("file://", LocalFileAdapter())
return session
diff --git a/common/python/rift/mano/cloud/accounts.py b/common/python/rift/mano/cloud/accounts.py
index d3aa860..6563512 100644
--- a/common/python/rift/mano/cloud/accounts.py
+++ b/common/python/rift/mano/cloud/accounts.py
@@ -53,7 +53,7 @@
self._cal = self.plugin.get_interface("Cloud")
self._cal.init(rwlog_hdl)
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@@ -151,7 +151,7 @@
@asyncio.coroutine
def validate_cloud_account_credentials(self, loop):
self._log.debug("Validating Cloud Account credentials %s", self._account_msg)
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus(
status="validating",
details="Cloud account connection validation in progress"
)
@@ -161,9 +161,9 @@
self.cal_account_msg,
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus(
status="failure",
details="Error when calling CAL validate cloud creds"
)
diff --git a/common/python/rift/mano/cloud/config.py b/common/python/rift/mano/cloud/config.py
index 1b1847c..a5ed7b5 100644
--- a/common/python/rift/mano/cloud/config.py
+++ b/common/python/rift/mano/cloud/config.py
@@ -1,6 +1,6 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,11 +21,13 @@
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
RwDts as rwdts,
ProtobufC,
+ RwCloudYang,
)
from . import accounts
@@ -38,32 +40,6 @@
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class CloudAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
@@ -103,10 +79,11 @@
class CloudAccountConfigSubscriber(object):
XPATH = "C,/rw-cloud:cloud/rw-cloud:account"
- def __init__(self, dts, log, rwlog_hdl, cloud_callbacks):
+ def __init__(self, dts, log, rwlog_hdl, project, cloud_callbacks):
self._dts = dts
self._log = log
self._rwlog_hdl = rwlog_hdl
+ self._project = project
self._reg = None
self.accounts = {}
@@ -144,9 +121,17 @@
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("Project {}: De-register cloud account handler".
+ format(self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ @asyncio.coroutine
def register(self):
@asyncio.coroutine
- def apply_config(dts, acg, xact, action, _):
+ def apply_config(dts, acg, xact, action, scratch):
self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
if xact.xact is None:
@@ -188,12 +173,16 @@
""" Prepare callback from DTS for Cloud Account """
action = xact_info.query_action
+
+ xpath = ks_path.to_xpath(RwCloudYang.get_schema())
+
self._log.debug("Cloud account on_prepare config received (action: %s): %s",
xact_info.query_action, msg)
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
if msg.name in self.accounts:
- self._log.debug("Cloud account already exists. Invoking update request")
+ self._log.debug("Cloud account {} already exists. " \
+ "Invoking update request".format(msg.name))
# Since updates are handled by a delete followed by an add, invoke the
# delete prepare callbacks to give clients an opportunity to reject.
@@ -241,9 +230,10 @@
on_apply=apply_config,
)
+ xpath = self._project.add_project(CloudAccountConfigSubscriber.XPATH)
with self._dts.appconf_group_create(acg_handler) as acg:
self._reg = acg.register(
- xpath=CloudAccountConfigSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare,
)
diff --git a/common/python/rift/mano/cloud/operdata.py b/common/python/rift/mano/cloud/operdata.py
index 4878691..59084ef 100644
--- a/common/python/rift/mano/cloud/operdata.py
+++ b/common/python/rift/mano/cloud/operdata.py
@@ -1,6 +1,6 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
from gi.repository import(
RwCloudYang,
RwDts as rwdts,
+ RwTypes,
)
class CloudAccountNotFound(Exception):
@@ -28,11 +29,14 @@
class CloudAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
+ self._regh = None
+ self._rpc = None
self.cloud_accounts = {}
def add_cloud_account(self, account):
@@ -69,26 +73,29 @@
self._log.info("Notification called by creating dts query: %s", ac_status)
+ @asyncio.coroutine
def _register_show_status(self):
def get_xpath(cloud_name=None):
return "D,/rw-cloud:cloud/account{}/connection-status".format(
- "[name='%s']" % cloud_name if cloud_name is not None else ''
- )
+ "[name='%s']" % cloud_name if cloud_name is not None else ''
+ )
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path)
+ path_entry = RwCloudYang.CloudAcc.schema().keyspec_to_entry(ks_path)
cloud_account_name = path_entry.key00.name
- self._log.debug("Got show cloud connection status request: %s", ks_path.create_string())
+ self._log.debug("Got show cloud connection status request (action: %s): %s",
+ xact_info.query_action, ks_path.create_string())
try:
saved_accounts = self.get_saved_cloud_accounts(cloud_account_name)
for account in saved_accounts:
connection_status = account.connection_status
self._log.debug("Responding to cloud connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
@@ -98,13 +105,15 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
+ xpath = self._project.add_project(get_xpath())
+ self._regh = yield from self._dts.register(
+ xpath=xpath,
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
flags=rwdts.Flag.PUBLISHER,
)
+ @asyncio.coroutine
def _register_validate_rpc(self):
def get_xpath():
return "/rw-cloud:update-cloud-status"
@@ -113,12 +122,20 @@
def on_prepare(xact_info, action, ks_path, msg):
if not msg.has_field("cloud_account"):
raise CloudAccountNotFound("Cloud account name not provided")
-
cloud_account_name = msg.cloud_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cloud_accounts[cloud_account_name]
except KeyError:
- raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name)
+ errmsg = "Cloud account name {} not found in project {}". \
+ format(cloud_account_name, self._project.name)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ get_xpath(),
+ errmsg)
+ raise CloudAccountNotFound(errmsg)
account.start_validate_credentials(self._loop)
@@ -126,7 +143,7 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._rpc = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare
@@ -136,5 +153,11 @@
@asyncio.coroutine
def register(self):
+ self._log.debug("Register cloud account for project %s", self._project.name)
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+ self._log.debug("De-register cloud account for project %s", self._project.name)
+ self._rpc.deregister()
+ self._regh.deregister()
diff --git a/common/python/rift/mano/config_agent/config.py b/common/python/rift/mano/config_agent/config.py
index 7500bac..daee792 100644
--- a/common/python/rift/mano/config_agent/config.py
+++ b/common/python/rift/mano/config_agent/config.py
@@ -21,6 +21,7 @@
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
@@ -36,32 +37,6 @@
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class ConfigAgentCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
@@ -101,9 +76,10 @@
class ConfigAgentSubscriber(object):
XPATH = "C,/rw-config-agent:config-agent/account"
- def __init__(self, dts, log, config_callbacks):
+ def __init__(self, dts, log, project, config_callbacks):
self._dts = dts
self._log = log
+ self._project = project
self._reg = None
self.accounts = {}
@@ -139,6 +115,13 @@
self.delete_account(account_msg)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("De-register config agent handler for project {}".
+ format(self._project.name))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
def apply_config(dts, acg, xact, action, _):
self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action)
@@ -212,17 +195,17 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Config Account config using xpath: %s",
- ConfigAgentSubscriber.XPATH,
- )
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
+ xpath = self._project.add_project(ConfigAgentSubscriber.XPATH)
+ self._log.debug("Registering for Config Account config using xpath: %s",
+ xpath)
self._reg = acg.register(
- xpath=ConfigAgentSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
on_prepare=on_prepare,
)
diff --git a/common/python/rift/mano/config_agent/operdata.py b/common/python/rift/mano/config_agent/operdata.py
index fbf3c43..5cbd351 100644
--- a/common/python/rift/mano/config_agent/operdata.py
+++ b/common/python/rift/mano/config_agent/operdata.py
@@ -27,7 +27,6 @@
RwDts as rwdts)
import rift.tasklets
-
import rift.mano.utils.juju_api as juju
@@ -153,12 +152,15 @@
)
class CfgAgentDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self.cfg_agent_accounts = {}
+ self._show_reg = None
+ self._rpc_reg = None
def add_cfg_agent_account(self, account_msg):
account = ConfigAgentAccount(self._log, account_msg)
@@ -205,9 +207,10 @@
for account in saved_accounts:
connection_status = account.connection_status
self._log.debug("Responding to config agent connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
@@ -217,12 +220,13 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare),
- flags=rwdts.Flag.PUBLISHER,
- )
+ xpath = self._project.add_project(get_xpath())
+ self._show_reg = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
def _register_validate_rpc(self):
def get_xpath():
@@ -234,6 +238,10 @@
raise ConfigAgentAccountNotFound("Config Agent account name not provided")
cfg_agent_account_name = msg.cfg_agent_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cfg_agent_accounts[cfg_agent_account_name]
except KeyError:
@@ -243,24 +251,29 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc_reg = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+ def deregister(self):
+ self._show_reg.deregister()
+ self._rpc_reg.deregister()
+
+
class ConfigAgentJob(object):
"""A wrapper over the config agent job object, providing some
convenience functions.
- YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains
+ YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob contains
||
==> VNFRS
||
@@ -274,17 +287,19 @@
"running" : "pending",
"failed" : "failure"}
- def __init__(self, nsr_id, job, tasks=None):
+ def __init__(self, nsr_id, job, project, tasks=None):
"""
Args:
nsr_id (uuid): ID of NSR record
- job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
+ job (YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
tasks: List of asyncio.tasks. If provided the job monitor will
use it to monitor the tasks instead of the execution IDs
"""
self._job = job
self.nsr_id = nsr_id
self.tasks = tasks
+ self._project = project
+
self._regh = None
@property
@@ -315,10 +330,10 @@
@property
def xpath(self):
"""Xpath of the job"""
- return ("D,/nsr:ns-instance-opdata" +
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
"/nsr:nsr[nsr:ns-instance-config-ref='{}']" +
"/nsr:config-agent-job[nsr:job-id='{}']"
- ).format(self.nsr_id, self.id)
+ ).format(self.nsr_id, self.id))
@property
def regh(self):
@@ -333,7 +348,7 @@
@staticmethod
def convert_rpc_input_to_job(nsr_id, rpc_output, tasks):
"""A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive
- to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
+ to YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
Args:
nsr_id (uuid): NSR ID
@@ -344,10 +359,10 @@
ConfigAgentJob
"""
# Shortcuts to prevent the HUUGE names.
- CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob
- CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
- CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
- CfgAgentPrimitiveParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
+ CfgAgentJob = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob
+ CfgAgentVnfr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
+ CfgAgentPrimitive = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
+ CfgAgentPrimitiveParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
job = CfgAgentJob.from_dict({
"job_id": rpc_output.job_id,
@@ -385,7 +400,7 @@
job.vnfr.append(vnfr_job)
- return ConfigAgentJob(nsr_id, job, tasks)
+ return ConfigAgentJob(nsr_id, job, project, tasks)
class ConfigAgentJobMonitor(object):
@@ -669,6 +684,7 @@
self._regh = None
self._nsr_regh = None
+ self._project = cfgm.project
@property
def regh(self):
@@ -687,9 +703,9 @@
@staticmethod
def cfg_job_xpath(nsr_id, job_id):
- return ("D,/nsr:ns-instance-opdata" +
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
"/nsr:nsr[nsr:ns-instance-config-ref = '{}']" +
- "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id)
+ "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id))
@asyncio.coroutine
def register(self):
@@ -700,7 +716,7 @@
""" prepare callback from dts """
xpath = ks_path.to_xpath(RwNsrYang.get_schema())
if action == rwdts.QueryAction.READ:
- schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema()
+ schema = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
nsr_id = path_entry.key00.ns_instance_config_ref
@@ -731,7 +747,8 @@
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._project.add_project(
+ CfgAgentJobDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
@@ -752,7 +769,7 @@
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
@asyncio.coroutine
def register_for_nsr(self):
@@ -787,6 +804,17 @@
except Exception as e:
self._log.error("Failed to register for NSR changes as %s", str(e))
+ def deregister(self):
+ self._log.debug("De-register config agent job for project".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+ if self._nsr_regh:
+ self._nsr_regh.deregister()
+ self._nsr_regh = None
+
class ConfigAgentJobManager(object):
"""A central class that manager all the Config Agent related data,
@@ -794,7 +822,7 @@
TODO: Needs to support multiple config agents.
"""
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
"""
Args:
dts : Dts handle
@@ -807,11 +835,12 @@
self.log = log
self.loop = loop
self.nsm = nsm
+ self.project = project
self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self)
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def add_job(self, rpc_output, tasks=None):
- """Once an RPC is trigger add a now job
+ """Once an RPC is triggered, add a new job
Args:
rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output
@@ -821,7 +850,8 @@
"""
nsr_id = rpc_output.nsr_id_ref
- job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks)
+ job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output,
+ tasks, self.project)
self.log.debug("Creating a job monitor for Job id: {}".format(
rpc_output.job_id))
@@ -867,3 +897,6 @@
def register(self):
yield from self.handler.register()
yield from self.handler.register_for_nsr()
+
+ def deregister(self):
+ yield from self.handler.deregister()
diff --git a/common/python/rift/mano/config_data/config.py b/common/python/rift/mano/config_data/config.py
index 63a2e48..e04475a 100644
--- a/common/python/rift/mano/config_data/config.py
+++ b/common/python/rift/mano/config_data/config.py
@@ -20,8 +20,8 @@
import os
import yaml
-from gi.repository import NsdYang
-from gi.repository import VnfdYang
+from gi.repository import ProjectNsdYang as NsdYang
+from gi.repository import ProjectVnfdYang as VnfdYang
class InitialConfigReadError(Exception):
diff --git a/common/python/rift/mano/config_data/test/test_converter.py b/common/python/rift/mano/config_data/test/test_converter.py
index 1bfd7d7..0354c2b 100644
--- a/common/python/rift/mano/config_data/test/test_converter.py
+++ b/common/python/rift/mano/config_data/test/test_converter.py
@@ -17,20 +17,23 @@
import pytest
import uuid
-from gi.repository import NsdYang, VnfdYang
+from gi.repository import (
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
+ )
from ..config import ConfigPrimitiveConvertor
import yaml
@pytest.fixture(scope="function")
def nsd():
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
nsd = catalog.nsd.add()
nsd.id = str(uuid.uuid1())
return nsd
@pytest.fixture(scope="function")
def vnfd():
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = catalog.vnfd.add()
vnfd.id = str(uuid.uuid1())
return vnfd
@@ -287,7 +290,7 @@
cidr: 10.10.10.2/30
"""
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
expected_vnfd = catalog.vnfd.add()
vnf_config = expected_vnfd.vnf_configuration
expected_vnfd.id = vnfd.id
@@ -374,7 +377,7 @@
Vlan ID: '3000'
"""
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
expected_nsd = catalog.nsd.add()
expected_nsd.id = nsd.id
expected_nsd.service_primitive.add().from_dict(
diff --git a/common/python/rift/mano/dts/core.py b/common/python/rift/mano/dts/core.py
index 4894e16..3a04945 100644
--- a/common/python/rift/mano/dts/core.py
+++ b/common/python/rift/mano/dts/core.py
@@ -25,7 +25,7 @@
"""A common class to hold the barebone objects to build a publisher or
subscriber
"""
- def __init__(self, log, dts, loop):
+ def __init__(self, log, dts, loop, project):
"""Constructor
Args:
@@ -34,7 +34,39 @@
loop : Asyncio event loop.
"""
# Reg handle
- self.reg = None
- self.log = log
- self.dts = dts
- self.loop = loop
+ self._reg = None
+ self._log = log
+ self._dts = dts
+ self._loop = loop
+ self._project = project
+
+ @property
+ def reg(self):
+ return self._reg
+
+ @reg.setter
+ def reg(self, val):
+ self._reg = val
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @property
+ def project(self):
+ return self._project
+
+ def deregister(self):
+ self._log.debug("De-registering DTS handler ({}) for project {}".
+ format(self.__class__.__name__, self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
diff --git a/common/python/rift/mano/dts/rpc/core.py b/common/python/rift/mano/dts/rpc/core.py
index dfa08bb..72016f1 100644
--- a/common/python/rift/mano/dts/rpc/core.py
+++ b/common/python/rift/mano/dts/rpc/core.py
@@ -36,8 +36,8 @@
class AbstractRpcHandler(DtsHandler):
"""Base class to simplify RPC implementation
"""
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project=None):
+ super().__init__(log, dts, loop, project)
if not asyncio.iscoroutinefunction(self.callback):
raise ValueError('%s has to be a coroutine' % (self.callback))
@@ -61,6 +61,9 @@
def on_prepare(self, xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+ if self.project and not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
rpc_op = yield from self.callback(ks_path, msg)
xact_info.respond_xpath(
@@ -76,6 +79,11 @@
@asyncio.coroutine
def register(self):
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
reg_event = asyncio.Event(loop=self.loop)
@asyncio.coroutine
@@ -94,6 +102,10 @@
yield from reg_event.wait()
+ def deregister(self):
+ self.reg.deregister()
+ self.reg = None
+
@abc.abstractmethod
@asyncio.coroutine
def callback(self, ks_path, msg):
diff --git a/common/python/rift/mano/dts/subscriber/core.py b/common/python/rift/mano/dts/subscriber/core.py
index dd2513e..a2181e8 100644
--- a/common/python/rift/mano/dts/subscriber/core.py
+++ b/common/python/rift/mano/dts/subscriber/core.py
@@ -1,6 +1,6 @@
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -27,6 +27,9 @@
from gi.repository import (RwDts as rwdts, ProtobufC)
import rift.tasklets
+from rift.mano.utils.project import (
+ get_add_delete_update_cfgs,
+ )
from ..core import DtsHandler
@@ -35,11 +38,11 @@
"""A common class for all subscribers.
"""
@classmethod
- def from_tasklet(cls, tasklet, callback=None):
+ def from_project(cls, proj, callback=None):
"""Convenience method to build the object from tasklet
Args:
- tasklet (rift.tasklets.Tasklet): Tasklet
+ proj (rift.mano.utils.project.ManoProject): Project
callback (None, optional): Callable, which will be invoked on
subscriber changes.
@@ -48,15 +51,15 @@
msg: The Gi Object msg from DTS
action(rwdts.QueryAction): Action type
"""
- return cls(tasklet.log, tasklet.dts, tasklet.loop, callback=callback)
+ return cls(proj.log, proj.dts, proj.loop, proj, callback=callback)
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
self.callback = callback
def get_reg_flags(self):
"""Default set of REG flags, can be over-ridden by sub classes.
-
+
Returns:
Set of rwdts.Flag types.
"""
@@ -70,7 +73,7 @@
Opdata subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
@abc.abstractmethod
def get_xpath(self):
@@ -84,6 +87,12 @@
def register(self):
"""Triggers the registration
"""
+
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
xacts = {}
def on_commit(xact_info):
@@ -121,7 +130,7 @@
)
self.reg = yield from self.dts.register(
- xpath=self.get_xpath(),
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
handler=handler)
@@ -129,9 +138,6 @@
assert self.reg is not None
- def deregister(self):
- self.reg.deregister()
-
class AbstractConfigSubscriber(SubscriberDtsHandler):
"""Abstract class that simplifies the process of creating subscribers
@@ -139,7 +145,7 @@
Config subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
KEY = "msgs"
@@ -151,31 +157,6 @@
def key_name(self):
pass
- def get_add_delete_update_cfgs(self, dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
@@ -185,7 +166,7 @@
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
- add_cfgs, delete_cfgs, update_cfgs = self.get_add_delete_update_cfgs(
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
dts_member_reg=self.reg,
xact=xact,
key_name=self.key_name())
@@ -202,14 +183,13 @@
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
""" on prepare callback """
+ self._log.debug("Subscriber DTS prepare for project %s: %s",
+ self.project, xact_info.query_action)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self.dts.appconf_group_create(handler=acg_hdl) as acg:
self.reg = acg.register(
- xpath=self.get_xpath(),
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
on_prepare=on_prepare)
-
- def deregister(self):
- self.reg.deregister()
diff --git a/common/python/rift/mano/dts/subscriber/ns_subscriber.py b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
index c16f771..4258afa 100644
--- a/common/python/rift/mano/dts/subscriber/ns_subscriber.py
+++ b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
@@ -39,7 +39,7 @@
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
class NsdCatalogSubscriber(core.AbstractConfigSubscriber):
@@ -49,7 +49,7 @@
return "id"
def get_xpath(self):
- return "C,/nsd:nsd-catalog/nsd:nsd"
+ return self._project.add_project("C,/project-nsd:nsd-catalog/project-nsd:nsd")
class NsInstanceConfigSubscriber(core.AbstractConfigSubscriber):
@@ -59,4 +59,4 @@
return "id"
def get_xpath(self):
- return "C,/nsr:ns-instance-config/nsr:nsr"
+ return self._project.add_project("C,/nsr:ns-instance-config/nsr:nsr")
diff --git a/common/python/rift/mano/dts/subscriber/ro_account.py b/common/python/rift/mano/dts/subscriber/ro_account.py
index 575d649..3091d15 100644
--- a/common/python/rift/mano/dts/subscriber/ro_account.py
+++ b/common/python/rift/mano/dts/subscriber/ro_account.py
@@ -33,4 +33,4 @@
return "name"
def get_xpath(self):
- return("C,/rw-launchpad:resource-orchestrator")
\ No newline at end of file
+ return self._project.add_project("C,/rw-launchpad:resource-orchestrator")
diff --git a/common/python/rift/mano/dts/subscriber/store.py b/common/python/rift/mano/dts/subscriber/store.py
index 88cb79a..222d444 100644
--- a/common/python/rift/mano/dts/subscriber/store.py
+++ b/common/python/rift/mano/dts/subscriber/store.py
@@ -33,10 +33,10 @@
"""
KEY = enum.Enum('KEY', 'NSR NSD VNFD VNFR')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
- params = (self.log, self.dts, self.loop)
+ params = (self.log, self.dts, self.loop, self.project)
self._nsr_sub = ns_subscriber.NsrCatalogSubscriber(*params, callback=self.on_nsr_change)
self._nsrs = {}
@@ -92,6 +92,14 @@
yield from self._vnfr_sub.register()
yield from self._nsr_sub.register()
+ def deregister(self):
+ self._log.debug("De-register store for project {}".
+ format(self._project))
+ self._vnfd_sub.deregister()
+ self._nsd_sub.deregister()
+ self._vnfr_sub.deregister()
+ self._nsr_sub.deregister()
+
@asyncio.coroutine
def refresh_store(self, subsriber, store):
itr = yield from self.dts.query_read(subsriber.get_xpath())
diff --git a/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
index a69a00f..9479b16 100644
--- a/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
+++ b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
@@ -30,10 +30,10 @@
from gi.repository import (
RwLaunchpadYang as launchpadyang,
RwDts as rwdts,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
RwNsrYang,
- RwNsdYang,
+ RwProjectNsdYang as RwNsdYang,
VnfrYang
)
@@ -107,11 +107,11 @@
def test_vnfd_handler(self):
yield from self.store.register()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
mock_vnfd.id = str(uuid.uuid1())
- w_xpath = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
- xpath = "{}[vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
+ w_xpath = "C,/rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ xpath = "{}[project-vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
yield from self.publisher.publish(w_xpath, xpath, mock_vnfd)
yield from asyncio.sleep(5, loop=self.loop)
@@ -128,10 +128,10 @@
def test_vnfr_handler(self):
yield from self.store.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
@@ -151,11 +151,11 @@
def test_nsr_handler(self):
yield from self.store.register()
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
mock_nsr.ns_instance_config_ref = str(uuid.uuid1())
mock_nsr.name_ref = "Foo"
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
xpath = "{}[nsr:ns-instance-config-ref='{}']".format(w_xpath, mock_nsr.ns_instance_config_ref)
yield from self.publisher.publish(w_xpath, xpath, mock_nsr)
@@ -175,11 +175,11 @@
def test_nsd_handler(self):
yield from self.store.register()
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
mock_nsd.id = str(uuid.uuid1())
- w_xpath = "C,/nsd:nsd-catalog/nsd:nsd"
- xpath = "{}[nsd:id='{}']".format(w_xpath, mock_nsd.id)
+ w_xpath = "C,/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd"
+ xpath = "{}[project-nsd:id='{}']".format(w_xpath, mock_nsd.id)
yield from self.publisher.publish(w_xpath, xpath, mock_nsd)
yield from asyncio.sleep(2, loop=self.loop)
@@ -206,22 +206,22 @@
# publish
yield from vnf_handler.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
def mon_xpath(param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
+ return("D,/rw-project:project/vnfr:vnfr-catalog" +
"/vnfr:vnfr[vnfr:id='{}']".format(mock_vnfr.id) +
"/vnfr:monitoring-param" +
("[vnfr:id='{}']".format(param_id) if param_id else ""))
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
- mock_param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ mock_param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
"id": "1"
})
mock_vnfr.monitoring_param.append(mock_param)
@@ -238,4 +238,4 @@
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/common/python/rift/mano/dts/subscriber/vnf_subscriber.py b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
index 76a58ab..524fb41 100644
--- a/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
+++ b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
@@ -38,7 +38,7 @@
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ return self.project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr")
class VnfdCatalogSubscriber(core.AbstractConfigSubscriber):
@@ -48,4 +48,4 @@
return "id"
def get_xpath(self):
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ return self.project.add_project("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd")
diff --git a/common/python/rift/mano/sdn/config.py b/common/python/rift/mano/sdn/config.py
index a9de01b..20b17cc 100644
--- a/common/python/rift/mano/sdn/config.py
+++ b/common/python/rift/mano/sdn/config.py
@@ -26,6 +26,8 @@
ProtobufC,
)
+from rift.mano.utils.project import get_add_delete_update_cfgs
+
from . import accounts
@@ -37,32 +39,6 @@
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class SDNAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
@@ -143,6 +119,11 @@
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
@asyncio.coroutine
def apply_config(dts, acg, xact, action, _):
diff --git a/common/python/rift/mano/sdn/operdata.py b/common/python/rift/mano/sdn/operdata.py
index b29f100..094d804 100644
--- a/common/python/rift/mano/sdn/operdata.py
+++ b/common/python/rift/mano/sdn/operdata.py
@@ -35,6 +35,8 @@
self._loop = loop
self.sdn_accounts = {}
+ self._oper = None
+ self._rpc = None
def add_sdn_account(self, account):
self.sdn_accounts[account.name] = account
@@ -88,7 +90,7 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._oper = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
@@ -101,6 +103,9 @@
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+ if self._project and not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
if not msg.has_field("sdn_account"):
raise SDNAccountNotFound("SDN account name not provided")
@@ -114,15 +119,24 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+ if self._oper:
+ self._oper.deregister()
+ self._oper = None
+
+ if self._rpc:
+ self._rpc.deregister()
+ self._rpc = None
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
index aa6b83b..83e054f 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
@@ -42,7 +42,7 @@
# TODO(Philip): Harcoding for now, need to make this generic
def get_xpath(self):
- xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
+ xpath = '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:' + self.name
return xpath
def get_dict_output(self):
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
index d263e6f..5300eba 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
@@ -24,11 +24,11 @@
try:
import gi
gi.require_version('RwYang', '1.0')
- gi.require_version('RwNsdYang', '1.0')
- gi.require_version('NsdYang', '1.0')
+ gi.require_version('RwProjectNsdYang', '1.0')
+ gi.require_version('ProjectNsdYang', '1.0')
- from gi.repository import NsdYang
- from gi.repository import RwNsdYang
+ from gi.repository import ProjectNsdYang as NsdYang
+ from gi.repository import RwProjectNsdYang as RwNsdYang
from gi.repository import RwYang
except ImportError:
pass
@@ -61,7 +61,7 @@
if use_gi:
try:
- nsd_cat = RwNsdYang.YangData_Nsd_NsdCatalog()
+ nsd_cat = RwNsdYang.YangData_RwProject_Project_NsdCatalog()
nsd = nsd_cat.nsd.add()
nsd.id = nsd_id
nsd.name = self.metadata['name']
@@ -111,7 +111,7 @@
if use_gi:
for param in self.parameters:
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath=param.get_xpath(),
)
)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
index e4e045e..3c662dd 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
@@ -24,9 +24,9 @@
try:
import gi
- gi.require_version('RwVnfdYang', '1.0')
+ gi.require_version('RwProjectVnfdYang', '1.0')
- from gi.repository import RwVnfdYang
+ from gi.repository import RwProjectVnfdYang as RwVnfdYang
except ImportError:
pass
except ValueError:
@@ -225,7 +225,7 @@
raise ValidationError(message=err_msg)
def generate_yang_model_gi(self, nsd, vnfds):
- vnfd_cat = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+ vnfd_cat = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = vnfd_cat.vnfd.add()
props = convert_keys_to_python(self.properties)
for key in ToscaNfvVnf.IGNORE_PROPS:
diff --git a/common/python/rift/mano/utils/project.py b/common/python/rift/mano/utils/project.py
new file mode 100644
index 0000000..9157f89
--- /dev/null
+++ b/common/python/rift/mano/utils/project.py
@@ -0,0 +1,669 @@
+#!/usr/bin/env python3
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import asyncio
+import logging
+
+import gi
+gi.require_version('RwProjectManoYang', '1.0')
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+ RwProjectManoYang,
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+)
+
+import rift.tasklets
+
+
+class ManoProjectError(Exception):
+ pass
+
+
+class ManoProjNameSetErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathNoProjErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathKeyErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathNotRootErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathPresentErr(ManoProjectError):
+ pass
+
+
+NS = 'rw-project'
+PROJECT = 'project'
+NS_PROJECT = '{}:{}'.format(NS, PROJECT)
+XPATH = '/{}'.format(NS_PROJECT)
+XPATH_LEN = len(XPATH)
+
+NAME = 'name'
+NAME_LEN = len(NAME)
+NS_NAME = '{}:{}'.format(NS, NAME)
+
+DEFAULT_PROJECT = 'default'
+DEFAULT_PREFIX = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ DEFAULT_PROJECT)
+
+
+class ManoProject(object):
+ '''Class to handle the project name'''
+
+ log = None
+
+ @classmethod
+ def instance_from_xpath(cls, xpath, log):
+ name = cls.from_xpath(xpath, log)
+ if name is None:
+ return None
+
+ proj = ManoProject(log, name=name)
+ return proj
+
+ @classmethod
+ def from_xpath(cls, xpath, log):
+ log.debug("Get project name from {}".format(xpath));
+
+ if XPATH in xpath:
+ idx = xpath.find(XPATH)
+ if idx == -1:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ sub = xpath[idx+XPATH_LEN:].strip()
+ if (len(sub) < NAME_LEN) or (sub[0] != '['):
+ msg = "Project name not found in XPath: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ sub = sub[1:].strip()
+ idx = sub.find(NS_NAME)
+ if idx == -1:
+ idx = sub.find(NAME)
+ if idx != 0:
+ msg = "Project name not found in XPath: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ idx = sub.find(']')
+ if idx == -1:
+ msg = "XPath is invalid: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ sub = sub[:idx].strip()
+ try:
+ log.debug("Key and value found: {}".format(sub))
+ k, n = sub.split("=", 2)
+ name = n.strip(' \'"')
+ if name is None:
+ msg = "Project name is empty in XPath".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr (msg)
+
+ log.debug("Found project name {} from XPath {}".
+ format(name, xpath))
+ return name
+
+ except ValueError as e:
+ msg = "Project name not found in XPath: {}, exception: {}" \
+ .format(xpath, e)
+ log.exception(msg)
+ raise ManoProjXpathKeyErr(msg)
+ else:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ @classmethod
+ def get_log(cls):
+ if not cls.log:
+ cls.log = logging.getLogger('rw-mano-log.rw-project')
+ cls.log.setLevel(logging.ERROR)
+
+ @classmethod
+ def prefix_project(cls, xpath, project=None, log=None):
+ if log is None:
+ log = cls.get_log()
+
+ if project is None:
+ project = DEFAULT_PROJECT
+ proj_prefix = DEFAULT_PREFIX
+ else:
+ proj_prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ project)
+
+ log.debug("Add project {} to {}".format(project, xpath))
+
+ prefix = ''
+ suffix = xpath
+ idx = xpath.find('C,/')
+ if idx == -1:
+ idx = xpath.find('D,/')
+
+ suffix = xpath
+ if idx != -1:
+ prefix = xpath[:2]
+ suffix = xpath[2:]
+
+ if suffix[0] != '/':
+ msg = "Non-rooted xpath provided: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNotRootErr(msg)
+
+ idx = suffix.find(XPATH)
+ if idx == 0:
+ name = cls.from_xpath(xpath, log)
+ if name == project:
+ log.debug("Project already in the XPATH: {}".format(xpath))
+ return xpath
+
+ else:
+ msg = "Different project {} already in XPATH {}". \
+ format(name, xpath)
+ log.error(msg)
+ raise ManoProjXpathPresentErr(msg)
+
+ ret = prefix + proj_prefix + suffix
+ log.debug("XPath with project: {}".format(ret))
+ return ret
+
+
+ def __init__(self, log, name=None, tasklet=None):
+ self._log = log
+ self._name = None
+ self._prefix = None
+ self._pbcm = None
+ self._tasklet = None
+ self._dts = None
+ self._loop = None
+ self._log_hdl = None
+
+ # Track if the apply config was received
+ self._apply = False
+
+ if name:
+ self.name = name
+
+ def update(self, tasklet):
+ # Store the commonly used properties from a tasklet
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def pbcm(self):
+ return self._pbcm
+
+ @property
+ def config(self):
+ return self._pbcm.project_config
+
+ @property
+ def tasklet(self):
+ return self._tasklet
+
+ @property
+ def log_hdl(self):
+ return self._log_hdl
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @name.setter
+ def name(self, value):
+ if self._name is None:
+ self._name = value
+ self._prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ self._name)
+ self._pbcm = RwProjectManoYang.YangData_RwProject_Project(
+ name=self._name)
+
+ elif self._name == value:
+ self._log.debug("Setting the same name again for project {}".
+ format(value))
+ else:
+ msg = "Project name already set to {}".format(self._name)
+ self._log.error(msg)
+ raise ManoProjNameSetErr(msg)
+
+ def set_from_xpath(self, xpath):
+ self.name = ManoProject.from_xpath(xpath, self._log)
+
+ def add_project(self, xpath):
+ return ManoProject.prefix_project(xpath, log=self._log, project=self._name)
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def delete_prepare(self):
+ self._log.debug("Delete prepare for project {}".format(self._name))
+ return True
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def register(self):
+ msg = "Register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ @abc.abstractmethod
+ def deregister(self):
+ msg = "De-register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ def rpc_check(self, msg, xact_info=None):
+ '''Check if the rpc is for this project'''
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ project = DEFAULT_PROJECT
+
+ if project != self.name:
+ self._log.debug("Project {}: RPC is for different project {}".
+ format(self.name, project))
+ if xact_info:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return False
+
+ return True
+
+ @asyncio.coroutine
+ def create_project(self, dts):
+ proj_xpath = "C,{}/config".format(self.prefix)
+ self._log.info("Creating project: {} with {}".
+ format(proj_xpath, self.config.as_dict()))
+
+ yield from dts.query_create(proj_xpath,
+ rwdts.XactFlag.ADVISE,
+ self.config)
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+ #TODO: Check why this is getting called during project delete
+ if not dts_member_reg:
+ return [], [], []
+
+ # Unforunately, it is currently difficult to figure out what has exactly
+ # changed in this xact without Pbdelta support (RIFT-4916)
+ # As a workaround, we can fetch the pre and post xact elements and
+ # perform a comparison to figure out adds/deletes/updates
+ xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+ curr_cfgs = list(dts_member_reg.elements)
+
+ xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+ curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+ # Find Adds
+ added_keys = set(xact_key_map) - set(curr_key_map)
+ added_cfgs = [xact_key_map[key] for key in added_keys]
+
+ # Find Deletes
+ deleted_keys = set(curr_key_map) - set(xact_key_map)
+ deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+ # Find Updates
+ updated_keys = set(curr_key_map) & set(xact_key_map)
+ updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+ return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ProjectConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_add_prepare=None,
+ on_delete_apply=None, on_delete_prepare=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_add_prepare = on_add_prepare
+ self.on_delete_apply = on_delete_apply
+ self.on_delete_prepare = on_delete_prepare
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+ for f in ('on_add_prepare', 'on_delete_prepare'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, prepare_noop)
+ continue
+
+ if not asyncio.iscoroutinefunction(ref):
+ raise ValueError("%s must be a coroutine" % f)
+
+
+class ProjectDtsHandler(object):
+ XPATH = "C,{}/project-config".format(XPATH)
+
+ def __init__(self, dts, log, callbacks, sub_config=True):
+ self._dts = dts
+ self._log = log
+ self._callbacks = callbacks
+
+ if sub_config:
+ self.xpath = ProjectDtsHandler.XPATH
+ self._key = 'name_ref'
+ else:
+ self.xpath = "C,{}".format(XPATH)
+ self._key = 'name'
+
+ self.reg = None
+ self.projects = []
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def add_project(self, name):
+ self.log.info("Adding project: {}".format(name))
+
+ if name not in self.projects:
+ self._callbacks.on_add_apply(name)
+ self.projects.append(name)
+ else:
+ self.log.error("Project already present: {}".
+ format(name))
+
+ def delete_project(self, name):
+ self._log.info("Deleting project: {}".format(name))
+ if name in self.projects:
+ self._callbacks.on_delete_apply(name)
+ self.projects.remove(name)
+ else:
+ self.log.error("Unrecognized project: {}".
+ format(name))
+
+ def update_project(self, name):
+ """ Update an existing project
+
+ Currently, we do not take any action on MANO for this,
+ so no callbacks are defined
+
+ Arguments:
+ msg - The project config message
+ """
+ self._log.info("Updating project: {}".format(name))
+ if name in self.projects:
+ pass
+ else:
+ self.add_project(name)
+
+ def register(self):
+ def on_init(acg, xact, scratch):
+ self._log.debug("on_init")
+ scratch["projects"] = {
+ "added": [],
+ "deleted": [],
+ "updated": [],
+ }
+ return scratch
+
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got project apply config (xact: %s) (action: %s): %s",
+ xact, action, scratch)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("Project being re-added after restart.")
+ self.add_project(cfg.name_ref)
+ else:
+ # When RIFT first comes up, an INSTALL is called with the current config
+ # Since confd doesn't actally persist data this never has any data so
+ # skip this for now.
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ try:
+ add_cfgs = scratch["projects"]["added"]
+ except KeyError:
+ add_cfgs = []
+
+ try:
+ del_cfgs = scratch["projects"]["deleted"]
+ except KeyError:
+ del_cfgs = []
+
+ try:
+ update_cfgs = scratch["projects"]["updated"]
+ except KeyError:
+ update_cfgs = []
+
+
+ # Handle Deletes
+ for name in del_cfgs:
+ self.delete_project(name)
+
+ # Handle Adds
+ for name, msg in add_cfgs:
+ self.add_project(name)
+
+ # Handle Updates
+ for name, msg in update_cfgs:
+ self.update_project(name)
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+ xpath = ks_path.to_xpath(RwProjectManoYang.get_schema())
+ self._log.debug("Project xpath: {}".format(xpath))
+ name = ManoProject.from_xpath(xpath, self._log)
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ name, xact_info.query_action, msg)
+
+ if action == rwdts.QueryAction.CREATE:
+ if name in self.projects:
+ self._log.debug("Project {} already exists. Ignore request".
+ format(name))
+ else:
+ yield from self._callbacks.on_add_prepare(name)
+ scratch["projects"]["added"].append((name, msg))
+
+ elif action == rwdts.QueryAction.UPDATE:
+ if name in self.projects:
+ scratch["projects"]["updated"].append((name, msg))
+ else:
+ self._log.debug("Project {}: Invoking on_prepare add request".
+ format(name))
+ yield from self._callbacks.on_add_prepare(name)
+ scratch["projects"]["added"].append((name, msg))
+
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire project got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if name in self.projects:
+ rc = yield from self._callbacks.on_delete_prepare(name)
+ if not rc:
+ self._log.error("Project {} should not be deleted".
+ format(name))
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ scratch["projects"]["deleted"].append(name)
+ else:
+ self._log.warning("Delete on unknown project: {}".
+ format(name))
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for project config using xpath: %s",
+ ProjectDtsHandler.XPATH,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ on_init=on_init)
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=ProjectDtsHandler.XPATH,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
+
+
+class ProjectHandler(object):
+ def __init__(self, tasklet, project_class, **kw):
+ self._tasklet = tasklet
+ self._log = tasklet.log
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+ self._class = project_class
+ self._kw = kw
+
+ self._log.debug("Creating project config handler")
+ self.project_cfg_handler = ProjectDtsHandler(
+ self._dts, self._log,
+ ProjectConfigCallbacks(
+ on_add_apply=self.on_project_added,
+ on_add_prepare=self.on_add_prepare,
+ on_delete_apply=self.on_project_deleted,
+ on_delete_prepare=self.on_delete_prepare,
+ )
+ )
+
+ def _get_tasklet_name(self):
+ return self._tasklet.tasklet_info.instance_name
+
+ def _get_project(self, name):
+ try:
+ proj = self._tasklet.projects[name]
+ except Exception as e:
+ self._log.exception("Project {} ({})not found for tasklet {}: {}".
+ format(name, list(self._tasklet.projects.keys()),
+ self._get_tasklet_name(), e))
+ raise e
+
+ return proj
+
+ def on_project_deleted(self, name):
+ self._log.debug("Project {} deleted".format(name))
+ try:
+ self._get_project(name).deregister()
+ except Exception as e:
+ self._log.exception("Project {} deregister for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ proj = self._tasklet.projects.pop(name)
+ del proj
+ except Exception as e:
+ self._log.exception("Project {} delete for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ def on_project_added(self, name):
+ self._log.debug("Project {} added to tasklet {}".
+ format(name, self._get_tasklet_name()))
+ self._get_project(name)._apply = True
+
+ @asyncio.coroutine
+ def on_add_prepare(self, name):
+ self._log.debug("Project {} to be added to {}".
+ format(name, self._get_tasklet_name()))
+
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet, **(self._kw))
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ yield from self._get_project(name).register()
+ except Exception as e:
+ self._log.exception("Project {} register for tasklet {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ @asyncio.coroutine
+ def on_delete_prepare(self, name):
+ self._log.debug("Project {} being deleted for tasklet {}".
+ format(name, self._get_tasklet_name()))
+ rc = yield from self._get_project(name).delete_prepare()
+ return rc
+
+ def register(self):
+ self.project_cfg_handler.register()
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
index d28b3e1..9353454 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
@@ -85,7 +85,7 @@
self.inputs.append({
self.NAME:
self.map_yang_name_to_tosca(
- val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
+ val.replace('/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:', ''))})
if len(param):
self.log.warn(_("{0}, Did not process the following for "
"input param {1}: {2}").
diff --git a/common/python/test/utest_config_data.py b/common/python/test/utest_config_data.py
index 8287c11..020a2b4 100644
--- a/common/python/test/utest_config_data.py
+++ b/common/python/test/utest_config_data.py
@@ -31,11 +31,11 @@
from rift.mano.config_data import config
import gi
-gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
gi.require_version('RwYang', '1.0')
from gi.repository import (
- VnfdYang,
+ ProjectVnfdYang,
RwYang,
)
diff --git a/common/python/test/utest_project.py b/common/python/test/utest_project.py
new file mode 100644
index 0000000..01f627c
--- /dev/null
+++ b/common/python/test/utest_project.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python3
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import unittest
+import xmlrunner
+
+from rift.mano.utils import project
+
+NAME = 'test'
+XPATH = "/rw-project:project[rw-project:name='{}']".format(NAME)
+
+class TestCase(unittest.TestCase):
+ log = None
+
+ @classmethod
+ def set_logger(cls, log):
+ cls.log = log
+
+ def setUp(self):
+ if not TestCase.log:
+ log = logging.getLogger()
+ log.setLevel( logging.ERROR)
+
+ def test_create_from_xpath(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject.create_from_xpath(XPATH, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log, name=NAME)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create_update(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. Set project name later
+ 3. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log)
+ assert proj
+ assert None == proj.name
+
+ proj.name = NAME
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ try:
+ proj.name = 'testing'
+ except project.ManoProjNameSetErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_update_from_xpath(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. Update from XPATH
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log)
+ assert proj
+ assert proj.name is None
+
+ proj.set_from_xpath(XPATH)
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ try:
+ proj.set_from_xpath(XPATH)
+ except project.ManoProjNameSetErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create_from_xpath1(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ xpath = XPATH + '/rw:project/rw-project:project/rw-project:project/rw-project:project/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[id=\'1232334\']'
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ def test_create_from_xpath2(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ xpath = '/rw-project:project [ name = "{}" ]'.format(NAME)
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ def test_create_from_xpath_invalid(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/'
+ try:
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathNoProjErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def test_create_from_xpath_invalid1(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/rw-project:project/{}'.format(NAME)
+ try:
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathKeyErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def test_create_from_xpath_invalid2(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/rw-project:project[id=test]'
+ try:
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathKeyErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def tearDown(self):
+ pass
+
+
+def main(argv=sys.argv[1:]):
+ logging.basicConfig(format='TEST %(message)s')
+
+ runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true')
+ parser.add_argument('-n', '--no-runner', action='store_true')
+
+ args, unknown = parser.parse_known_args(argv)
+ if args.no_runner:
+ runner = None
+
+ # Set the global logging level
+ log = logging.getLogger()
+ log.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+ TestCase.set_logger(log)
+
+ # The unittest framework requires a program name, so use the name of this
+ # file instead (we do not want to have to pass a fake program name to main
+ # when this is called from the interpreter).
+ unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+ main()
diff --git a/common/rw_gen_package.py b/common/rw_gen_package.py
index 427e717..1feb5de 100755
--- a/common/rw_gen_package.py
+++ b/common/rw_gen_package.py
@@ -25,10 +25,10 @@
from gi.repository import (
RwYang,
- NsdYang,
- RwNsdYang,
- VnfdYang,
- RwVnfdYang,
+ ProjectNsdYang as NsdYang,
+ RwProjectNsdYang as RwNsdYang,
+ ProjectVnfdYang as VnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
RwVldYang
)
@@ -40,9 +40,9 @@
descr = None
if descr_type == "nsd":
- descr = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descr = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
else:
- descr = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descr = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
if input_format == 'json':
json_str = open(infile).read()
diff --git a/examples/ping_pong_ns/config_desc.py b/examples/ping_pong_ns/config_desc.py
index fcd1400..dae1c3b 100755
--- a/examples/ping_pong_ns/config_desc.py
+++ b/examples/ping_pong_ns/config_desc.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,7 +25,14 @@
import gi
gi.require_version('RwYang', '1.0')
-from gi.repository import NsdYang, VldYang, VnfdYang, RwYang
+# TODO (Philip): Relook at this code
+
+from gi.repository import (
+ NsdYang,
+ VldYang,
+ VnfdYang,
+ RwYang
+ )
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@@ -39,7 +46,7 @@
def configure_vld(proxy, vld_xml_hdl):
vld_xml = vld_xml_hdl.read()
logger.debug("Attempting to deserialize XML into VLD protobuf: %s", vld_xml)
- vld = VldYang.YangData_Vld_VldCatalog_Vld()
+ vld = VldYang.YangData_RwProject_Project_VldCatalog_Vld()
vld.from_xml_v2(model, vld_xml)
logger.debug("Sending VLD to netconf: %s", vld)
@@ -49,7 +56,7 @@
def configure_vnfd(proxy, vnfd_xml_hdl):
vnfd_xml = vnfd_xml_hdl.read()
logger.debug("Attempting to deserialize XML into VNFD protobuf: %s", vnfd_xml)
- vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ vnfd = VnfdYang.YangData_VnfdCatalog_Vnfd()
vnfd.from_xml_v2(model, vnfd_xml)
logger.debug("Sending VNFD to netconf: %s", vnfd)
@@ -59,7 +66,7 @@
def configure_nsd(proxy, nsd_xml_hdl):
nsd_xml = nsd_xml_hdl.read()
logger.debug("Attempting to deserialize XML into NSD protobuf: %s", nsd_xml)
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = NsdYang.YangData_NsdCatalog_Nsd()
nsd.from_xml_v2(model, nsd_xml)
logger.debug("Sending NSD to netconf: %s", nsd)
@@ -86,7 +93,9 @@
action="append",
default=[],
type=argparse.FileType(),
- help="VLD XML File Path",
+ #help="VLD XML File Path",
+ # We do not support uploading VLD separately
+ help=argparse.SUPRESS,
)
parser.add_argument(
diff --git a/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py b/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py
index ef2dd90..62513d6 100755
--- a/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py
+++ b/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,24 +18,27 @@
import argparse
+import simplejson
import os
+import yaml
import shutil
import sys
import uuid
+from xml.dom.minidom import parseString
+
import gi
gi.require_version('RwYang', '1.0')
gi.require_version('RwVnfdYang', '1.0')
gi.require_version('VnfdYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
gi.require_version('RwNsdYang', '1.0')
-gi.require_version('NsdYang', '1.0')
-
from gi.repository import (
- RwNsdYang,
- NsdYang,
- RwVnfdYang,
- VnfdYang,
+ RwNsdYang as RwNsdYang,
+ NsdYang as NsdYang,
+ RwVnfdYang as RwVnfdYang,
+ VnfdYang as VnfdYang,
RwYang,
)
@@ -261,7 +264,7 @@
internal_cp.name = cp_name + "/icp{}".format(i)
internal_cp.id = cp_name + "/icp{}".format(i)
internal_cp.type_yang = 'VPORT'
- ivld_cp = internal_vlds[i].internal_connection_point_ref.add()
+ ivld_cp = internal_vlds[i].internal_connection_point.add()
ivld_cp.id_ref = internal_cp.id
internal_interface = vdu.internal_interface.add()
@@ -675,7 +678,7 @@
nsd.version = '1.0'
nsd.input_parameter_xpath.append(
NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:vendor",
+ xpath="/nsd-catalog/nsd/vendor",
)
)
@@ -758,7 +761,7 @@
member.vnfd_id_ref = member_vnfd.descriptor.vnfd[0].id
member.member_vnf_index_ref = vnfd_index_map[member_vnfd]
- # self.create_mon_params(vnfd_list)
+ self.create_mon_params(vnfd_list)
def write_config(self, outdir, vnfds):
diff --git a/models/openmano/python/rift/openmano/rift2openmano.py b/models/openmano/python/rift/openmano/rift2openmano.py
index e7d2e35..92296ff 100755
--- a/models/openmano/python/rift/openmano/rift2openmano.py
+++ b/models/openmano/python/rift/openmano/rift2openmano.py
@@ -27,13 +27,13 @@
import gi
gi.require_version('RwYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
-gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
from gi.repository import (
RwYang,
- RwVnfdYang,
- RwNsdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
)
import rift.package.store
@@ -88,20 +88,20 @@
@classmethod
def from_xml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
descriptor.from_xml_v2(RiftNSD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_yaml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
descriptor.from_yaml(RiftNSD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_dict(cls, nsd_dict):
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(nsd_dict)
+ descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict(nsd_dict)
return cls(descriptor)
@@ -143,20 +143,20 @@
@classmethod
def from_xml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
descriptor.from_xml_v2(RiftVNFD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_yaml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
descriptor.from_yaml(RiftVNFD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_dict(cls, vnfd_dict):
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(vnfd_dict)
+ descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict(vnfd_dict)
return cls(descriptor)
diff --git a/models/openmano/src/openmano2rift.py b/models/openmano/src/openmano2rift.py
index 503ad89..750115c 100755
--- a/models/openmano/src/openmano2rift.py
+++ b/models/openmano/src/openmano2rift.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,12 +28,14 @@
import gi
gi.require_version('RwYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
-gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
from gi.repository import (
RwYang,
- RwVnfdYang,
- RwNsdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
+ RwProjectYang,
)
logging.basicConfig(level=logging.WARNING)
@@ -115,7 +117,7 @@
return vnf_name
def openmano2rift(self, vnf_list):
- self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog()
+ self.descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog()
openmano_nsd = self.openmano.dictionary
self.name = openmano_nsd['name']
nsd = self.descriptor.nsd.add()
@@ -203,7 +205,7 @@
return None
def openmano2rift(self):
- self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+ self.descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = self.descriptor.vnfd.add()
self.vnfd = vnfd
vnfd.id = str(uuid.uuid1())
@@ -467,8 +469,9 @@
vnf_list = create_vnfs_from_yaml_files(args.yaml_file_hdls)
ns_list = create_ns_from_yaml_files(args.yaml_file_hdls, vnf_list)
+ # TODO (Philip): Relook at the model generation
writer = DescriptorFileWriter(
- module_list=['nsd', 'rw-nsd', 'vnfd', 'rw-vnfd'],
+ module_list=['rw-project', 'project-nsd', 'rw-project-nsd', 'project-vnfd', 'rw-project-vnfd'],
output_dir=args.outdir,
output_format=args.format,
)
diff --git a/models/plugins/yang/CMakeLists.txt b/models/plugins/yang/CMakeLists.txt
index 2f6e964..cc2c32e 100644
--- a/models/plugins/yang/CMakeLists.txt
+++ b/models/plugins/yang/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,13 +22,17 @@
ietf-l2-topology.yang
ietf-network-topology.yang
ietf-network.yang
+ nsd-base.yang rw-nsd-base.yang
nsd.yang rw-nsd.yang
+ project-nsd.yang rw-project-nsd.yang
nsr.yang rw-nsr.yang
pnfd.yang
rw-topology.yang
vld.yang rw-vld.yang
vlr.yang rw-vlr.yang
+ vnfd-base.yang rw-vnfd-base.yang
vnfd.yang rw-vnfd.yang
+ project-vnfd.yang rw-project-vnfd.yang
vnfr.yang rw-vnfr.yang
vnffgd.yang
)
@@ -38,6 +42,8 @@
YANG_FILES
mano-types.yang
COMPONENT ${PKG_LONG_NAME}
+ LIBRARIES
+ rwprojectmano_yang_gen
)
rift_add_yang_target(
@@ -51,9 +57,19 @@
rwcloud_yang_gen
rwconfig_agent_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
DEPENDS
rwcloud_yang
+ rwsdn_yang
rwconfig_agent_yang
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ project-vnfd.role.xml
+ project-nsd.role.xml
+ vnfr.role.xml
+ rw-vnfr.role.xml
+ vlr.role.xml
+ nsr.role.xml
)
#rift_gen_yang_tree(mano-pyang-trees
diff --git a/models/plugins/yang/mano-types.yang b/models/plugins/yang/mano-types.yang
index ab234be..01e80d5 100644
--- a/models/plugins/yang/mano-types.yang
+++ b/models/plugins/yang/mano-types.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,6 +31,15 @@
prefix "rwpb";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-04-23 {
description
"Initial revision. This YANG file defines
@@ -1123,9 +1132,11 @@
}
leaf http-endpoint-ref {
- type leafref {
- path "../../http-endpoint/path";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../http-endpoint/path";
+ // }
+ type string;
}
leaf json-query-method {
@@ -2060,4 +2071,15 @@
}
}
+
+ grouping rpc-project-name {
+ leaf project-name {
+ mandatory true;
+ description
+ "Project to which this belongs";
+ type leafref {
+ path "/rw-project:project/rw-project:name";
+ }
+ }
+ }
}
diff --git a/models/plugins/yang/nsd-base.yang b/models/plugins/yang/nsd-base.yang
new file mode 100644
index 0000000..b279398
--- /dev/null
+++ b/models/plugins/yang/nsd-base.yang
@@ -0,0 +1,789 @@
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module nsd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/nsd-base";
+ prefix "nsd-base";
+
+ import rw-pb-ext {
+ prefix "rwpb";
+ }
+
+ import vld {
+ prefix "vld";
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the Network Service Descriptor (NSD)
+ common groupings";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ typedef scaling-trigger {
+ type enumeration {
+ enum pre-scale-in {
+ value 1;
+ }
+ enum post-scale-in {
+ value 2;
+ }
+ enum pre-scale-out {
+ value 3;
+ }
+ enum post-scale-out {
+ value 4;
+ }
+ }
+ }
+
+ typedef scaling-policy-type {
+ type enumeration {
+ enum manual {
+ value 1;
+ }
+ enum automatic {
+ value 2;
+ }
+ }
+ }
+
+ typedef scaling-criteria-operation {
+ type enumeration {
+ enum AND {
+ value 1;
+ }
+ enum OR {
+ value 2;
+ }
+ }
+ }
+
+ grouping primitive-parameter {
+ leaf name {
+ description
+ "Name of the parameter.";
+ type string;
+ }
+
+ leaf data-type {
+ description
+ "Data type associated with the name.";
+ type manotypes:parameter-data-type;
+ }
+
+ leaf mandatory {
+ description "Is this field mandatory";
+ type boolean;
+ default false;
+ }
+
+ leaf default-value {
+ description "The default value for this field";
+ type string;
+ }
+
+ leaf parameter-pool {
+ description "NSD Parameter pool name to use for this paramter";
+ type string;
+ }
+ }
+
+ grouping nsd-descriptor-common {
+ leaf id {
+ description "Identifier for the NSD.";
+ type string;
+ }
+
+ leaf name {
+ description "NSD name.";
+ mandatory true;
+ type string;
+ }
+
+ leaf short-name {
+ description "NSD short name.";
+ type string;
+ }
+
+ leaf vendor {
+ description "Vendor of the NSD.";
+ type string;
+ }
+
+ leaf logo {
+ description
+ "File path for the vendor specific logo. For example icons/mylogo.png.
+ The logo should be part of the network service";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the NSD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the NSD";
+ type string;
+ }
+
+ list connection-point {
+ description
+ "List for external connection points.
+ Each NS has one or more external connection
+ points. As the name implies that external
+ connection points are used for connecting
+ the NS to other NS or to external networks.
+ Each NS exposes these connection points to
+ the orchestrator. The orchestrator can
+ construct network service chains by
+ connecting the connection points between
+ different NS.";
+
+ key "name";
+ leaf name {
+ description
+ "Name of the NS connection point.";
+ type string;
+ }
+
+ leaf type {
+ description
+ "Type of the connection point.";
+ type manotypes:connection-point-type;
+ }
+ }
+
+ list scaling-group-descriptor {
+ description
+ "scaling group descriptor within this network service.
+ The scaling group defines a group of VNFs,
+ and the ratio of VNFs in the network service
+ that is used as target for scaling action";
+
+ key "name";
+
+ leaf name {
+ description "Name of this scaling group.";
+ type string;
+ }
+
+ list scaling-policy {
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the scaling policy";
+ type string;
+ }
+
+ leaf scaling-type {
+ description
+ "Type of scaling";
+ type scaling-policy-type;
+ }
+
+ leaf enabled {
+ description
+ "Specifies if the scaling policy can be applied";
+ type boolean;
+ default true;
+ }
+
+ leaf scale-in-operation-type {
+ description
+ "Operation to be applied to check between scaling criterias to
+ check if the scale in threshold condition has been met.
+ Defaults to AND";
+ type scaling-criteria-operation;
+ default AND;
+ }
+
+ leaf scale-out-operation-type {
+ description
+ "Operation to be applied to check between scaling criterias to
+ check if the scale out threshold condition has been met.
+ Defauls to OR";
+ type scaling-criteria-operation;
+ default OR;
+ }
+
+ leaf threshold-time {
+ description
+ "The duration for which the criteria must hold true";
+ type uint32;
+ mandatory true;
+ }
+
+ leaf cooldown-time {
+ description
+ "The duration after a scaling-in/scaling-out action has been
+ triggered, for which there will be no further optional";
+ type uint32;
+ mandatory true;
+ }
+
+ list scaling-criteria {
+ description
+ "list of conditions to be met for generating scaling
+ requests";
+ key "name";
+
+ leaf name {
+ type string;
+ }
+
+ leaf scale-in-threshold {
+ description
+ "Value below which scale-in requests are generated";
+ type uint64;
+ }
+
+ leaf scale-out-threshold {
+ description
+ "Value above which scale-out requests are generated";
+ type uint64;
+ }
+
+ leaf ns-monitoring-param-ref {
+ description
+ "Reference to the NS level monitoring parameter
+ that is aggregated";
+ type leafref {
+ path "../../../../monitoring-param/id";
+ }
+ }
+ }
+ }
+
+ list vnfd-member {
+ description "List of VNFs in this scaling group";
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "member VNF index of this member VNF";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf count {
+ description
+ "count of this member VNF within this scaling group.
+ The count allows to define the number of instances
+ when a scaling action targets this scaling group";
+ type uint32;
+ default 1;
+ }
+ }
+
+ leaf min-instance-count {
+ description
+ "Minimum instances of the scaling group which are allowed.
+ These instances are created by default when the network service
+ is instantiated.";
+ type uint32;
+ default 0;
+ }
+
+ leaf max-instance-count {
+ description
+ "Maximum instances of this scaling group that are allowed
+ in a single network service. The network service scaling
+ will fail, when the number of service group instances
+ exceed the max-instance-count specified.";
+ type uint32;
+ default 10;
+ }
+
+ list scaling-config-action {
+ description "List of scaling config actions";
+ key "trigger";
+
+ leaf trigger {
+ description "scaling trigger";
+ type scaling-trigger;
+ }
+
+ leaf ns-config-primitive-name-ref {
+ description "Reference to the NS config name primitive";
+ type leafref {
+ path "../../../service-primitive/name";
+ }
+ }
+ }
+ }
+
+
+ list vnffgd {
+ description
+ "List of VNF Forwarding Graph Descriptors (VNFFGD).";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the VNFFGD.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "VNFFGD name.";
+ type string;
+ }
+
+ leaf short-name {
+ description
+ "Short name for VNFFGD for UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Provider of the VNFFGD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VNFFGD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VNFFGD";
+ type string;
+ }
+
+ list rsp {
+ description
+ "List of Rendered Service Paths (RSP).";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the RSP.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "RSP name.";
+ type string;
+ }
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf order {
+ type uint8;
+ description
+ "A number that denotes the order of a VNF in a chain";
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ type leafref {
+ path "../../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" +
+ "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-connection-point-ref {
+ description
+ "A reference to a connection point name
+ in a vnfd. This is a leafref to path:
+ /vnfd:vnfd-catalog/vnfd:vnfd
+ + [vnfd:id = current()/../nsd:vnfd-id-ref]
+ + /vnfd:connection-point/vnfd:name
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ // TODO: Keeping as string as this needs to be
+ // diffenent lvel based of if it is nsd-catalog or
+ // in nsr.
+ // type leafref {
+ // path "../../../../../../vnfd:vnfd-catalog/vnfd:vnfd" +
+ // "[vnfd:id = current()/../vnfd-id-ref]/" +
+ // "vnfd:connection-point/vnfd:name";
+ // }
+ type string;
+ }
+ }
+ } //rsp
+
+ list classifier {
+ description
+ "List of classifier rules.";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the classifier rule.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "Name of the classifier.";
+ type string;
+ }
+
+ leaf rsp-id-ref {
+ description
+ "A reference to the RSP.";
+ type leafref {
+ path "../../rsp/id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" +
+ "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-connection-point-ref {
+ description
+ "A reference to a connection point name
+ in a vnfd. This is a leafref to path:
+ /vnfd:vnfd-catalog/vnfd:vnfd
+ + [vnfd:id = current()/../nsd:vnfd-id-ref]
+ + /vnfd:connection-point/vnfd:name
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ // TODO: Keeping as string as this needs to be
+ // diffenent lvel based of if it is nsd-catalog or
+ // in nsr.
+ // type leafref {
+ // path "../../../../../vnfd:vnfd-catalog/vnfd:vnfd" +
+ // "[vnfd:id = current()/../vnfd-id-ref]/" +
+ // "vnfd:connection-point/vnfd:name";
+ // }
+ type string;
+ }
+
+ list match-attributes {
+ description
+ "List of match attributes.";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the classifier match attribute rule.";
+ type string;
+ }
+
+ leaf ip-proto {
+ description
+ "IP Protocol.";
+ type uint8;
+ }
+
+ leaf source-ip-address {
+ description
+ "Source IP address.";
+ type inet:ip-address;
+ }
+
+ leaf destination-ip-address {
+ description
+ "Destination IP address.";
+ type inet:ip-address;
+ }
+
+ leaf source-port {
+ description
+ "Source port number.";
+ type inet:port-number;
+ }
+
+ leaf destination-port {
+ description
+ "Destination port number.";
+ type inet:port-number;
+ }
+ //TODO: Add more match criteria
+ } //match-attributes
+ } // classifier
+ } // vnffgd
+
+ uses manotypes:ip-profile-list;
+
+ list initial-config-primitive {
+ rwpb:msg-new NsdInitialConfigPrimitive;
+ description
+ "Initial set of configuration primitives for NSD.";
+ key "seq";
+
+ uses manotypes:initial-config;
+ }
+
+ uses manotypes:input-parameter-xpath;
+
+ list parameter-pool {
+ description
+ "Pool of parameter values which must be
+ pulled from during configuration";
+ key "name";
+
+ leaf name {
+ description
+ "Name of the configuration value pool";
+ type string;
+ }
+
+ container range {
+ description
+ "Create a range of values to populate the pool with";
+
+ leaf start-value {
+ description
+ "Generated pool values start at this value";
+ type uint32;
+ mandatory true;
+ }
+
+ leaf end-value {
+ description
+ "Generated pool values stop at this value";
+ type uint32;
+ mandatory true;
+ }
+ }
+ }
+
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+
+ list user {
+ key "name";
+ description "List of users to be added through cloud-config";
+
+ leaf name {
+ description "Name of the user ";
+ type string;
+ }
+
+ leaf user-info {
+ description "The user name's real name";
+ type string;
+ }
+
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+ }
+ }
+
+ grouping nsd-vld-common {
+ /* Still having issues modelling this,
+ see the comments under vnfd-connection-point-ref
+ */
+ description
+ "List of Virtual Link Descriptors.";
+
+ leaf id {
+ description
+ "Identifier for the VLD.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "Virtual Link Descriptor (VLD) name.";
+ type string;
+ }
+
+ leaf short-name {
+ description
+ "Short name for VLD for UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Provider of the VLD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VLD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VLD";
+ type string;
+ }
+
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
+
+ leaf root-bandwidth {
+ description
+ "For ELAN this is the aggregate bandwidth.";
+ type uint64;
+ }
+
+ leaf leaf-bandwidth {
+ description
+ "For ELAN this is the bandwidth of branches.";
+ type uint64;
+ }
+
+ // replicate for pnfd container here
+ uses manotypes:provider-network;
+
+ leaf mgmt-network {
+ description "Flag indicating whether this network is a VIM management network";
+ type boolean;
+ default false;
+ }
+
+ choice init-params {
+ description "Extra parameters for VLD instantiation";
+
+ case vim-network-ref {
+ leaf vim-network-name {
+ description
+ "Name of network in VIM account. This is used to indicate
+ pre-provisioned network name in cloud account.";
+ type string;
+ }
+ }
+
+ case vim-network-profile {
+ leaf ip-profile-ref {
+ description "Named reference to IP-profile object";
+ type string;
+ }
+ }
+
+ }
+ }
+
+ grouping monitoring-param-common {
+ description
+ "List of monitoring parameters from VNF's that should be
+ propogated up into NSR";
+
+ leaf id {
+ type string;
+ }
+
+ leaf name {
+ type string;
+ }
+
+ uses manotypes:monitoring-param-value;
+ uses manotypes:monitoring-param-ui-data;
+ uses manotypes:monitoring-param-aggregation;
+ }
+}
diff --git a/models/plugins/yang/nsd.yang b/models/plugins/yang/nsd.yang
index 15cd2e3..ddd4d92 100644
--- a/models/plugins/yang/nsd.yang
+++ b/models/plugins/yang/nsd.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,30 +23,27 @@
namespace "urn:ietf:params:xml:ns:yang:nfvo:nsd";
prefix "nsd";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
- import vld {
- prefix "vld";
+ import ietf-yang-types {
+ prefix "yang";
}
import vnfd {
prefix "vnfd";
}
- import ietf-inet-types {
- prefix "inet";
- }
-
- import ietf-yang-types {
- prefix "yang";
+ import nsd-base {
+ prefix "nsd-base";
}
import mano-types {
prefix "manotypes";
}
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2014-10-27 {
description
"Initial revision. This YANG file defines
@@ -55,198 +52,45 @@
"Derived from earlier versions of base YANG files";
}
- typedef scaling-trigger {
- type enumeration {
- enum pre-scale-in {
- value 1;
- }
- enum post-scale-in {
- value 2;
- }
- enum pre-scale-out {
- value 3;
- }
- enum post-scale-out {
- value 4;
- }
- }
- }
-
- typedef scaling-policy-type {
- type enumeration {
- enum manual {
- value 1;
- }
- enum automatic {
- value 2;
- }
- }
- }
-
- typedef scaling-criteria-operation {
- type enumeration {
- enum AND {
- value 1;
- }
- enum OR {
- value 2;
- }
- }
- }
-
- grouping primitive-parameter {
- leaf name {
+ grouping nsd-constituent-vnfd {
+ list constituent-vnfd {
description
- "Name of the parameter.";
- type string;
- }
+ "List of VNFDs that are part of this
+ network service.";
- leaf data-type {
- description
- "Data type associated with the name.";
- type manotypes:parameter-data-type;
- }
+ key "member-vnf-index";
- leaf mandatory {
- description "Is this field mandatory";
- type boolean;
- default false;
- }
-
- leaf default-value {
- description "The default value for this field";
- type string;
- }
-
- leaf parameter-pool {
- description "NSD parameter pool name to use for this parameter";
- type string;
- }
- }
-
- grouping nsd-descriptor {
- leaf id {
- description "Identifier for the NSD.";
- type string;
- }
-
- leaf name {
- description "NSD name.";
- mandatory true;
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Vendor of the NSD.";
- type string;
- }
-
- leaf logo {
- description
- "File path for the vendor specific logo. For example icons/mylogo.png.
- The logo should be part of the network service";
- type string;
- }
-
- leaf description {
- description "Description of the NSD.";
- type string;
- }
-
- leaf version {
- description "Version of the NSD";
- type string;
- }
-
- list connection-point {
- description
- "List for external connection points.
- Each NS has one or more external connection
- points. As the name implies that external
- connection points are used for connecting
- the NS to other NS or to external networks.
- Each NS exposes these connection points to
- the orchestrator. The orchestrator can
- construct network service chains by
- connecting the connection points between
- different NS.";
-
- key "name";
- leaf name {
+ leaf member-vnf-index {
description
- "Name of the NS connection point.";
- type string;
+ "Identifier/index for the VNFD. This separate id
+ is required to ensure that multiple VNFs can be
+ part of single NS";
+ type uint64;
}
- leaf type {
+ leaf vnfd-id-ref {
description
- "Type of the connection point.";
- type manotypes:connection-point-type;
+ "Identifier for the VNFD.";
+ type leafref {
+ path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ }
+ }
+
+ leaf start-by-default {
+ description
+ "VNFD is started as part of the NS instantiation";
+ type boolean;
+ default true;
}
}
+ }
- /* Still having issues modelling this,
- see the comments under vnfd-connection-point-ref
- */
+ grouping nsd-vld {
list vld {
- description
- "List of Virtual Link Descriptors.";
key "id";
- leaf id {
- description
- "Identifier for the VLD.";
- type string;
- }
-
- leaf name {
- description
- "Virtual Link Descriptor (VLD) name.";
- type string;
- }
-
- leaf short-name {
- description
- "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Provider of the VLD.";
- type string;
- }
-
- leaf description {
- description "Description of the VLD.";
- type string;
- }
-
- leaf version {
- description "Version of the VLD";
- type string;
- }
-
- leaf type {
- type manotypes:virtual-link-type;
- }
-
- leaf root-bandwidth {
- description
- "For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
-
- leaf leaf-bandwidth {
- description
- "For ELAN this is the bandwidth of branches.";
- type uint64;
- }
+ uses nsd-base:nsd-vld-common;
list vnfd-connection-point-ref {
description
@@ -279,230 +123,30 @@
}
}
}
-
- // replicate for pnfd container here
- uses manotypes:provider-network;
-
- leaf mgmt-network {
- description "Flag indicating whether this network is a VIM management network";
- type boolean;
- default false;
- }
-
- choice init-params {
- description "Extra parameters for VLD instantiation";
-
- case vim-network-ref {
- leaf vim-network-name {
- description
- "Name of network in VIM account. This is used to indicate
- pre-provisioned network name in cloud account.";
- type string;
- }
- }
-
- case vim-network-profile {
- leaf ip-profile-ref {
- description "Named reference to IP-profile object";
- type string;
- }
- }
-
- }
}
+ }
- list constituent-vnfd {
+ grouping nsd-vnf-dependency {
+ list vnf-dependency {
description
- "List of VNFDs that are part of this
- network service.";
-
- key "member-vnf-index";
-
- leaf member-vnf-index {
- description
- "Identifier/index for the VNFD. This separate id
- is required to ensure that multiple VNFs can be
- part of single NS";
- type uint64;
- }
-
- leaf vnfd-id-ref {
- description
- "Identifier for the VNFD.";
+ "List of VNF dependencies.";
+ key vnf-source-ref;
+ leaf vnf-source-ref {
type leafref {
path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
}
}
-
- leaf start-by-default {
+ leaf vnf-depends-on-ref {
description
- "VNFD is started as part of the NS instantiation";
- type boolean;
- default true;
- }
- }
-
- list scaling-group-descriptor {
- description
- "scaling group descriptor within this network service.
- The scaling group defines a group of VNFs,
- and the ratio of VNFs in the network service
- that is used as target for scaling action";
-
- key "name";
-
- leaf name {
- description "Name of this scaling group.";
- type string;
- }
-
- list scaling-policy {
-
- key "name";
-
- leaf name {
- description
- "Name of the scaling policy";
- type string;
- }
-
- leaf scaling-type {
- description
- "Type of scaling";
- type scaling-policy-type;
- }
-
- leaf enabled {
- description
- "Specifies if the scaling policy can be applied";
- type boolean;
- default true;
- }
-
- leaf scale-in-operation-type {
- description
- "Operation to be applied to check between scaling criterias to
- check if the scale in threshold condition has been met.
- Defaults to AND";
- type scaling-criteria-operation;
- default AND;
- }
-
- leaf scale-out-operation-type {
- description
- "Operation to be applied to check between scaling criterias to
- check if the scale out threshold condition has been met.
- Defauls to OR";
- type scaling-criteria-operation;
- default OR;
- }
-
- leaf threshold-time {
- description
- "The duration for which the criteria must hold true";
- type uint32;
- mandatory true;
- }
-
- leaf cooldown-time {
- description
- "The duration after a scaling-in/scaling-out action has been
- triggered, for which there will be no further optional";
- type uint32;
- mandatory true;
- }
-
- list scaling-criteria {
- description
- "list of conditions to be met for generating scaling
- requests";
- key "name";
-
- leaf name {
- type string;
- }
-
- leaf scale-in-threshold {
- description
- "Value below which scale-in requests are generated";
- type uint64;
- }
-
- leaf scale-out-threshold {
- description
- "Value above which scale-out requests are generated";
- type uint64;
- }
-
- leaf ns-monitoring-param-ref {
- description
- "Reference to the NS level monitoring parameter
- that is aggregated";
- type leafref {
- path "../../../../monitoring-param/id";
- }
- }
- }
- }
-
- list vnfd-member {
- description "List of VNFs in this scaling group";
- key "member-vnf-index-ref";
-
- leaf member-vnf-index-ref {
- description "member VNF index of this member VNF";
- type leafref {
- path "../../../constituent-vnfd/member-vnf-index";
- }
- }
-
- leaf count {
- description
- "count of this member VNF within this scaling group.
- The count allows to define the number of instances
- when a scaling action targets this scaling group";
- type uint32;
- default 1;
- }
- }
-
- leaf min-instance-count {
- description
- "Minimum instances of the scaling group which are allowed.
- These instances are created by default when the network service
- is instantiated.";
- type uint32;
- default 0;
- }
-
- leaf max-instance-count {
- description
- "Maximum instances of this scaling group that are allowed
- in a single network service. The network service scaling
- will fail, when the number of service group instances
- exceed the max-instance-count specified.";
- type uint32;
- default 10;
- }
-
- list scaling-config-action {
- description "List of scaling config actions";
- key "trigger";
-
- leaf trigger {
- description "scaling trigger";
- type scaling-trigger;
- }
-
- leaf ns-config-primitive-name-ref {
- description "Reference to the NS config name primitive";
- type leafref {
- path "../../../service-primitive/name";
- }
+ "Reference to VNF that sorce VNF depends.";
+ type leafref {
+ path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
}
}
}
+ }
+ grouping nsd-placement-groups {
list placement-groups {
description "List of placement groups at NS level";
@@ -526,251 +170,23 @@
description
"Identifier for the VNFD.";
type leafref {
- path "../../../constituent-vnfd" +
- "[member-vnf-index = current()/../member-vnf-index-ref]" +
- "/vnfd-id-ref";
+ path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
}
}
}
}
+ }
- uses manotypes:ip-profile-list;
-
- list vnf-dependency {
- description
- "List of VNF dependencies.";
- key vnf-source-ref;
- leaf vnf-source-ref {
- type leafref {
- path "../../constituent-vnfd/vnfd-id-ref";
- }
- }
- leaf vnf-depends-on-ref {
- description
- "Reference to VNF that sorce VNF depends.";
- type leafref {
- path "../../constituent-vnfd/vnfd-id-ref";
- }
- }
- }
-
- list vnffgd {
- description
- "List of VNF Forwarding Graph Descriptors (VNFFGD).";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the VNFFGD.";
- type string;
- }
-
- leaf name {
- description
- "VNFFGD name.";
- type string;
- }
-
- leaf short-name {
- description
- "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Provider of the VNFFGD.";
- type string;
- }
-
- leaf description {
- description "Description of the VNFFGD.";
- type string;
- }
-
- leaf version {
- description "Version of the VNFFGD";
- type string;
- }
-
- list rsp {
- description
- "List of Rendered Service Paths (RSP).";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the RSP.";
- type string;
- }
-
- leaf name {
- description
- "RSP name.";
- type string;
- }
-
- list vnfd-connection-point-ref {
- description
- "A list of references to connection points.";
- key "member-vnf-index-ref";
-
- leaf member-vnf-index-ref {
- description "Reference to member-vnf within constituent-vnfds";
- type leafref {
- path "../../../../constituent-vnfd/member-vnf-index";
- }
- }
-
- leaf order {
- type uint8;
- description
- "A number that denotes the order of a VNF in a chain";
- }
-
- leaf vnfd-id-ref {
- description
- "A reference to a vnfd";
- type leafref {
- path "../../../../constituent-vnfd" +
- "[member-vnf-index = current()/../member-vnf-index-ref]" +
- "/vnfd-id-ref";
- }
- }
-
- leaf vnfd-connection-point-ref {
- description
- "A reference to a connection point name";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd" +
- "[vnfd:id = current()/../vnfd-id-ref]/" +
- "vnfd:connection-point/vnfd:name";
- }
- }
- }
- } //rsp
-
- list classifier {
- description
- "List of classifier rules.";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the classifier rule.";
- type string;
- }
-
- leaf name {
- description
- "Name of the classifier.";
- type string;
- }
-
- leaf rsp-id-ref {
- description
- "A reference to the RSP.";
- type leafref {
- path "../../rsp/id";
- }
- }
-
- leaf member-vnf-index-ref {
- description "Reference to member-vnf within constituent-vnfds";
- type leafref {
- path "../../../constituent-vnfd/member-vnf-index";
- }
- }
-
- leaf vnfd-id-ref {
- description
- "A reference to a vnfd";
- type leafref {
- path "../../../constituent-vnfd" +
- "[member-vnf-index = current()/../member-vnf-index-ref]" +
- "/vnfd-id-ref";
- }
- }
-
- leaf vnfd-connection-point-ref {
- description
- "A reference to a connection point name";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd" +
- "[vnfd:id = current()/../vnfd-id-ref]/" +
- "vnfd:connection-point/vnfd:name";
- }
- }
-
- list match-attributes {
- description
- "List of match attributes.";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the classifier match attribute rule.";
- type string;
- }
-
- leaf ip-proto {
- description
- "IP Protocol.";
- type uint8;
- }
-
- leaf source-ip-address {
- description
- "Source IP address.";
- type inet:ip-address;
- }
-
- leaf destination-ip-address {
- description
- "Destination IP address.";
- type inet:ip-address;
- }
-
- leaf source-port {
- description
- "Source port number.";
- type inet:port-number;
- }
-
- leaf destination-port {
- description
- "Destination port number.";
- type inet:port-number;
- }
- //TODO: Add more match criteria
- } //match-attributes
- } // classifier
- } // vnffgd
+ grouping nsd-monitoring-param {
list monitoring-param {
- description
- "List of monitoring parameters from VNF's that should be
- propogated up into NSR";
- key "id";
+ key id;
- leaf id {
- type string;
- }
-
- leaf name {
- type string;
- }
-
- uses manotypes:monitoring-param-value;
- uses manotypes:monitoring-param-ui-data;
- uses manotypes:monitoring-param-aggregation;
+ uses nsd-base:monitoring-param-common;
list vnfd-monitoring-param {
description "A list of VNFD monitoring params";
- key "member-vnf-index-ref vnfd-monitoring-param-ref";
+ key "vnfd-id-ref vnfd-monitoring-param-ref";
leaf vnfd-id-ref {
description
@@ -794,49 +210,17 @@
leaf member-vnf-index-ref {
description
- "Mandatory reference to member-vnf within constituent-vnfds";
+ "Optional reference to member-vnf within constituent-vnfds";
type leafref {
path "../../../constituent-vnfd/member-vnf-index";
}
}
}
}
+ }
- uses manotypes:input-parameter-xpath;
-
- list parameter-pool {
- description
- "Pool of parameter values which must be
- pulled from during configuration";
- key "name";
-
- leaf name {
- description
- "Name of the configuration value pool";
- type string;
- }
-
- container range {
- description
- "Create a range of values to populate the pool with";
-
- leaf start-value {
- description
- "Generated pool values start at this value";
- type uint32;
- mandatory true;
- }
-
- leaf end-value {
- description
- "Generated pool values stop at this value";
- type uint32;
- mandatory true;
- }
- }
- }
-
- list service-primitive {
+ grouping nsd-service-primitive {
+ list service-primitive {
description
"Network service level service primitives.";
@@ -912,72 +296,26 @@
type string;
}
}
-
- list initial-config-primitive {
- rwpb:msg-new NsdInitialConfigPrimitive;
- description
- "Initial set of configuration primitives for NSD.";
- key "seq";
-
- uses manotypes:initial-config;
- }
-
- list key-pair {
- key "name";
- description "Used to configure the list of public keys to be injected as part
- of ns instantiation";
-
- leaf name {
- description "Name of this key pair";
- type string;
- }
-
- leaf key {
- description "Key associated with this key pair";
- type string;
- }
- }
-
- list user {
- key "name";
- description "List of users to be added through cloud-config";
-
- leaf name {
- description "Name of the user ";
- type string;
- }
-
- leaf user-info {
- description "The user name's real name";
- type string;
- }
-
- list key-pair {
- key "name";
- description "Used to configure the list of public keys to be injected as part
- of ns instantiation";
-
- leaf name {
- description "Name of this key pair";
- type string;
- }
-
- leaf key {
- description "Key associated with this key pair";
- type string;
- }
- }
- }
}
-
container nsd-catalog {
list nsd {
- key "id";
+ key id;
- uses nsd-descriptor;
+ uses nsd-base:nsd-descriptor-common;
+
+ uses nsd-vld;
+
+ uses nsd-constituent-vnfd;
+
+ uses nsd-placement-groups;
+
+ uses nsd-vnf-dependency;
+
+ uses nsd-monitoring-param;
+
+ uses nsd-service-primitive;
}
}
-
}
diff --git a/models/plugins/yang/nsr.role.xml b/models/plugins/yang/nsr.role.xml
new file mode 100644
index 0000000..4353911
--- /dev/null
+++ b/models/plugins/yang/nsr.role.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:nsr-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/nsr:exec-scale-out/nsr:project-name</path>
+ <path>/nsr:exec-scale-in/nsr:project-name</path>
+ <path>/nsr:exec-ns-service-primitive/nsr:project-name</path>
+ <path>/nsr:get-ns-service-primitive-values/nsr:project-name</path>
+ <path>/nsr:start-network-service/nsr:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:nsr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/nsr:ns-instance-config</path>
+ <path>/rw-project:project/nsr:ns-instance-opdata</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:nsr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/nsr:ns-instance-config</path>
+ <path>/rw-project:project/nsr:ns-instance-opdata</path>
+ <path>/nsr:exec-scale-out</path>
+ <path>/nsr:exec-scale-in</path>
+ <path>/nsr:exec-ns-service-primitive</path>
+ <path>/nsr:get-ns-service-primitive-values</path>
+ <path>/nsr:start-network-service</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/models/plugins/yang/nsr.tailf.yang b/models/plugins/yang/nsr.tailf.yang
index b68872e..331cef7 100644
--- a/models/plugins/yang/nsr.tailf.yang
+++ b/models/plugins/yang/nsr.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix nsr;
}
- tailf:annotate "/nsr:ns-instance-opdata" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/nsr:ns-instance-opdata" {
tailf:callpoint rw_callpoint;
}
tailf:annotate "/nsr:exec-ns-service-primitive" {
diff --git a/models/plugins/yang/nsr.yang b/models/plugins/yang/nsr.yang
index b081f6a..50bbc0d 100644
--- a/models/plugins/yang/nsr.yang
+++ b/models/plugins/yang/nsr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,12 +35,16 @@
prefix "vld";
}
- import nsd {
- prefix "nsd";
+ import nsd-base {
+ prefix "nsd-base";
}
- import vnfd {
- prefix "vnfd";
+ import project-nsd {
+ prefix "project-nsd";
+ }
+
+ import project-vnfd {
+ prefix "project-vnfd";
}
import vnfr {
@@ -60,7 +64,16 @@
}
import rw-sdn {
- prefix "rwsdn";
+ prefix "rw-sdn";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
@@ -146,80 +159,84 @@
}
}
- rpc start-network-service {
- description "Start the network service";
- input {
+ augment "/rw-project:project" {
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
leaf name {
- mandatory true;
- description "Name of the Network Service";
+ description "Name of this key pair";
type string;
}
- leaf nsd-ref {
- description "Reference to NSR ID ref";
- mandatory true;
- type leafref {
- path "/nsd:nsd-catalog/nsd:nsd/nsd:id";
- }
- }
- uses ns-instance-config-params;
- }
- output {
- leaf nsr-id {
- description "Automatically generated parameter";
- type yang:uuid;
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
}
}
}
+ augment "/rw-project:project" {
+ container ns-instance-config {
+ list nsr {
+ key "id";
+ unique "name";
- container ns-instance-config {
-
- list nsr {
- key "id";
- unique "name";
-
- leaf id {
- description "Identifier for the NSR.";
- type yang:uuid;
- }
-
- leaf name {
- description "NSR name.";
- type string;
- }
-
- leaf short-name {
- description "NSR short name.";
- type string;
- }
-
- leaf description {
- description "NSR description.";
- type string;
- }
-
- leaf admin-status {
- description
- "This is the administrative status of the NS instance";
-
- type enumeration {
- enum ENABLED;
- enum DISABLED;
+ leaf id {
+ description "Identifier for the NSR.";
+ type yang:uuid;
}
- }
- container nsd {
- description "NS descriptor used to instantiate this NS";
- uses nsd:nsd-descriptor;
- }
+ leaf name {
+ description "NSR name.";
+ type string;
+ }
- uses ns-instance-config-params;
+ leaf short-name {
+ description "NSR short name.";
+ type string;
+ }
+
+ leaf description {
+ description "NSR description.";
+ type string;
+ }
+
+ leaf admin-status {
+ description
+ "This is the administrative status of the NS instance";
+
+ type enumeration {
+ enum ENABLED;
+ enum DISABLED;
+ }
+ }
+
+ container nsd {
+ description "NS descriptor used to instantiate this NS";
+
+ uses nsd-base:nsd-descriptor-common;
+
+ uses project-nsd:nsr-nsd-vld;
+
+ uses project-nsd:nsr-nsd-constituent-vnfd;
+
+ uses project-nsd:nsr-nsd-placement-groups;
+
+ uses project-nsd:nsr-nsd-vnf-dependency;
+
+ uses project-nsd:nsr-nsd-monitoring-param;
+
+ uses project-nsd:nsr-nsd-service-primitive;
+ }
+
+ uses ns-instance-config-params;
+ }
}
}
- grouping ns-instance-config-params {
+ grouping ns-instance-config-params-common {
uses manotypes:input-parameter;
list scaling-group {
@@ -256,41 +273,46 @@
}
uses manotypes:placement-group-input;
}
+ }
- list vnfd-placement-group-maps {
+ grouping ns-instance-config-params {
+ uses ns-instance-config-params-common;
+
+ list vnfd-placement-group-maps {
description
- "Mapping from mano-placement groups construct from VNFD to cloud
+ "Mapping from mano-placement groups construct from VNFD to cloud
platform placement group construct";
- key "placement-group-ref vnfd-id-ref";
+ key "placement-group-ref vnfd-id-ref";
- leaf vnfd-id-ref {
- description
+ leaf vnfd-id-ref {
+ description
"A reference to a vnfd. This is a
leafref to path:
- ../../../../nsd:constituent-vnfd
- + [nsr:id = current()/../nsd:id-ref]
- + /nsd:vnfd-id-ref
+ ../../../../project-nsd:constituent-vnfd
+ + [id = current()/../id-ref]
+ + /project-nsd:vnfd-id-ref
NOTE: An issue with confd is preventing the
use of xpath. Seems to be an issue with leafref
to leafref, whose target is in a different module.
Once that is resolved this will switched to use
leafref";
- type yang:uuid;
- }
-
- leaf placement-group-ref {
- description
- "A reference to VNFD placement group";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = current()/" +
- "../nsr:vnfd-id-ref]/vnfd:placement-groups/vnfd:name";
+ type yang:uuid;
}
+
+ leaf placement-group-ref {
+ description
+ "A reference to VNFD placement group";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = " +
+ "current()/../vnfd-id-ref]/project-vnfd:placement-groups/project-vnfd:name";
+ }
+ }
+
+ uses manotypes:placement-group-input;
}
- uses manotypes:placement-group-input;
- }
- uses cloud-config;
+ uses cloud-config;
}
grouping vnffgr {
@@ -306,18 +328,21 @@
leaf vnffgd-id-ref {
description "VNFFG descriptor id reference";
type leafref {
- path "/nsr:ns-instance-config/nsr:nsr"
- + "[nsr:id=current()/../../ns-instance-config-ref]"
- + "/nsr:nsd/nsr:vnffgd/nsr:id";
+ // TODO: Fix leafref
+ path "../../../../ns-instance-config/nsr"
+ // + "[id=current()/../../ns-instance-config-ref]"
+ + "/nsd/vnffgd/id";
}
}
leaf vnffgd-name-ref {
description "VNFFG descriptor name reference";
+ // TODO: Fix leafref
type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../../ns-instance-config-ref]"
- + "/nsd/vnffgd[nsr:id = current()/../vnffgd-id-ref]"
+ path "../../../../ns-instance-config/nsr"
+ // + "[id=current()/../../ns-instance-config-ref]"
+ + "/nsd/vnffgd"
+ // + "[id=current()/../vnffgd-id-ref]"
+ "/name";
}
}
@@ -327,7 +352,7 @@
"The SDN account to use when requesting resources for
this vnffgr";
type leafref {
- path "/rwsdn:sdn/rwsdn:account/rwsdn:name";
+ path "../../../../rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
}
}
@@ -369,11 +394,12 @@
leaf vnffgd-rsp-id-ref {
description
"Identifier for the VNFFG Descriptor RSP reference";
+ // TODO: Fix leafref
type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../../../ns-instance-config-ref]"
+ path "../../../../../ns-instance-config/nsr"
+ // + "[id=current()/../../../ns-instance-config-ref]"
+ "/nsd/vnffgd"
- + "[id=current()/../../vnffgd-id-ref]"
+ // + "[id=current()/../../vnffgd-id-ref]"
+ "/rsp/id";
}
}
@@ -381,12 +407,14 @@
leaf vnffgd-rsp-name-ref {
description
"Name for the VNFFG Descriptor RSP reference";
+ // TODO: Fix leafref
type leafref {
- path "/ns-instance-config/nsr:nsr"
- + "[id=current()/../../../ns-instance-config-ref]"
+ path "../../../../../ns-instance-config/nsr"
+ // + "[id=current()/../../../ns-instance-config-ref]"
+ "/nsd/vnffgd"
- + "[id=current()/../../vnffgd-id-ref]"
- + "/rsp[id=current()/../vnffgd-rsp-id-ref]"
+ // + "[id=current()/../../vnffgd-id-ref]"
+ + "/rsp"
+ // + "[id=current()/../vnffgd-rsp-id-ref]"
+ "/name";
}
}
@@ -430,22 +458,22 @@
description
"A reference to a vnfr id";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ path "../../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
}
}
leaf vnfr-name-ref {
description
"A reference to a vnfr name";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
+ path "../../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
}
}
leaf vnfr-connection-point-ref {
description
"A reference to a vnfr connection point.";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+ path "../../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id-ref]"
+ "/vnfr:connection-point/vnfr:name";
}
}
@@ -516,7 +544,7 @@
description
"A reference to the RSP.";
type leafref {
- path "../../nsr:rsp/nsr:id";
+ path "../../rsp/id";
}
}
leaf rsp-name {
@@ -528,22 +556,22 @@
description
"A reference to a vnfr id";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
}
}
leaf vnfr-name-ref {
description
"A reference to a vnfr name";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
}
}
leaf vnfr-connection-point-ref {
description
"A reference to a vnfr connection point.";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id-ref]"
+ "/vnfr:connection-point/vnfr:name";
}
}
@@ -567,62 +595,69 @@
}
}
- container ns-instance-opdata {
- config false;
+ augment "/rw-project:project" {
+ container ns-instance-opdata {
+ config false;
- list nsr {
- key "ns-instance-config-ref";
+ list nsr {
+ key "ns-instance-config-ref";
- leaf ns-instance-config-ref {
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+ leaf ns-instance-config-ref {
+ type leafref {
+ path "../../../ns-instance-config/nsr/id";
+ }
+ // type yang:uuid;
}
- }
- leaf name-ref {
- description "Network service name reference";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:name";
+ leaf name-ref {
+ description "Network service name reference";
+ type leafref {
+ // TODO: Fix leafref
+ path "../../../ns-instance-config/nsr" +
+ // "[id=current()/../ns-instance-config-ref]" +
+ "/name";
+ }
}
- }
- leaf nsd-ref {
- description "Network service descriptor id reference";
- type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../ns-instance-config-ref]"
- + "/nsd/id";
+ leaf nsd-ref {
+ description "Network service descriptor id reference";
+ type leafref {
+ // TODO: fix leafref
+ path "../../../ns-instance-config/nsr"
+ // + "[id=current()/../ns-instance-config-ref]"
+ + "/nsd/id";
+ }
}
- }
- leaf nsd-name-ref {
- description "Network service descriptor name reference";
- type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../ns-instance-config-ref]"
- + "/nsd/name";
+ leaf nsd-name-ref {
+ description "Network service descriptor name reference";
+ // TODO: Fix leafref
+ type leafref {
+ path "../../../ns-instance-config/nsr"
+ // + "[id=current()/../ns-instance-config-ref]"
+ + "/nsd/name";
+ }
}
- }
- leaf create-time {
- description
- "Creation timestamp of this Network Service.
+ leaf create-time {
+ description
+ "Creation timestamp of this Network Service.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
+ type uint32;
+ }
- leaf uptime {
- description
- "Active period of this Network Service.
+ leaf uptime {
+ description
+ "Active period of this Network Service.
Uptime is expressed in seconds";
- type uint32;
- }
+ type uint32;
+ }
- list connection-point {
- description
+ list connection-point {
+ description
"List for external connection points.
Each NS has one or more external connection points.
As the name implies that external connection points
@@ -632,101 +667,101 @@
construct network service chains by connecting the
connection points between different NS.";
- key "name";
- leaf name {
- description
+ key "name";
+ leaf name {
+ description
"Name of the NS connection point.";
- type string;
- }
+ type string;
+ }
- leaf type {
- description
+ leaf type {
+ description
"Type of the connection point.";
- type manotypes:connection-point-type;
- }
- }
-
- list vlr {
- key "vlr-ref";
- leaf vlr-ref {
- description
- "Reference to a VLR record in the VLR catalog";
- type leafref {
- path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+ type manotypes:connection-point-type;
}
}
+ list vlr {
+ key "vlr-ref";
+ leaf vlr-ref {
+ description
+ "Reference to a VLR record in the VLR catalog";
+ type leafref {
+ path "../../../../vlr:vlr-catalog/vlr:vlr/vlr:id";
+ }
+ }
- list vnfr-connection-point-ref {
+
+ list vnfr-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "vnfr-id";
+
+ leaf vnfr-id {
+ description "A reference to a vnfr";
+ type leafref {
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ }
+ }
+
+ leaf connection-point {
+ description
+ "A reference to a connection point name in a vnfr";
+ type leafref {
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id]"
+ + "/vnfr:connection-point/vnfr:name";
+ }
+ }
+ }
+ }
+
+ list constituent-vnfr-ref {
description
- "A list of references to connection points.";
+ "List of VNFRs that are part of this
+ network service.";
key "vnfr-id";
leaf vnfr-id {
- description "A reference to a vnfr";
- type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
- }
- }
-
- leaf connection-point {
description
- "A reference to a connection point name in a vnfr";
+ "Reference to the VNFR id
+ This should be a leafref to /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id
+ But due to confd bug (RIFT-9451), changing to string.";
+ type string;
+ }
+ }
+
+ list scaling-group-record {
+ description "List of scaling group records";
+ key "scaling-group-name-ref";
+
+ leaf scaling-group-name-ref {
+ description "name of the scaling group";
+ // TODO: Fix leafref
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id]"
- + "/vnfr:connection-point/vnfr:name";
+ path "../../../../ns-instance-config/nsr"
+ // + "[id=current()/../../ns-instance-config-ref]"
+ + "/nsd/scaling-group-descriptor/name";
}
}
- }
- }
- list constituent-vnfr-ref {
- description
- "List of VNFRs that are part of this
- network service.";
- key "vnfr-id";
+ list instance {
+ description "Reference to scaling group instance record";
+ key "instance-id";
+ leaf instance-id {
+ description "Scaling group instance id";
+ type uint16;
+ }
- leaf vnfr-id {
- description
- "Reference to the VNFR id
- This should be a leafref to /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id
- But due to confd bug (RIFT-9451), changing to string.
- ";
- type string;
- }
- }
-
- list scaling-group-record {
- description "List of scaling group records";
- key "scaling-group-name-ref";
-
- leaf scaling-group-name-ref {
- description "name of the scaling group";
- type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../../ns-instance-config-ref]"
- + "/nsd/scaling-group-descriptor/name";
- }
- }
-
- list instance {
- description "Reference to scaling group instance record";
- key "instance-id";
- leaf instance-id {
- description "Scaling group instance id";
- type uint16;
- }
-
- leaf is-default {
- description "Flag indicating whether this instance was part of
+ leaf is-default {
+ description "Flag indicating whether this instance was part of
default scaling group (and thus undeletable)";
- type boolean;
- }
+ type boolean;
+ }
- leaf op-status {
- description
- "The operational status of the NS instance
+ leaf op-status {
+ description
+ "The operational status of the NS instance
init : The scaling group has just started.
vnf-init-phase : The VNFs in the scaling group are being instantiated.
running : The scaling group is in running state.
@@ -736,58 +771,58 @@
failed : The scaling group instantiation failed.
";
- type enumeration {
- enum init;
- enum vnf-init-phase;
- enum running;
- enum terminate;
- enum vnf-terminate-phase;
- enum terminated;
- enum failed;
+ type enumeration {
+ enum init;
+ enum vnf-init-phase;
+ enum running;
+ enum terminate;
+ enum vnf-terminate-phase;
+ enum terminated;
+ enum failed;
+ }
}
- }
- leaf config-status {
- description
- "The configuration status of the scaling group instance
+ leaf config-status {
+ description
+ "The configuration status of the scaling group instance
configuring : At least one of the VNFs in this scaling group instance
is in configuring state
configured : All the VNFs in this scaling group instance are
configured or config-not-needed state
failed : Configuring this scaling group instance failed
";
- type config-states;
- }
+ type config-states;
+ }
- leaf error-msg {
- description
- "Reason for failure in configuration of this scaling instance";
- type string;
- }
+ leaf error-msg {
+ description
+ "Reason for failure in configuration of this scaling instance";
+ type string;
+ }
- leaf create-time {
- description
- "Creation timestamp of this scaling group record.
+ leaf create-time {
+ description
+ "Creation timestamp of this scaling group record.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
type uint32;
- }
+ }
- leaf-list vnfrs {
- description "Reference to VNFR within the scale instance";
- type leafref {
- path "../../../constituent-vnfr-ref/vnfr-id";
+ leaf-list vnfrs {
+ description "Reference to VNFR within the scale instance";
+ type leafref {
+ path "../../../constituent-vnfr-ref/vnfr-id";
+ }
}
}
}
- }
- uses vnffgr;
+ uses vnffgr;
- leaf operational-status {
- description
- "The operational status of the NS instance
+ leaf operational-status {
+ description
+ "The operational status of the NS instance
init : The network service has just started.
vl-init-phase : The VLs in the NS are being instantiated.
vnf-init-phase : The VNFs in the NS are being instantiated.
@@ -803,270 +838,218 @@
vl-terminate : The NS is terminating a VL
";
- type enumeration {
- enum init;
- enum vl-init-phase;
- enum vnf-init-phase;
- enum running;
- enum terminate;
- enum vnf-terminate-phase;
- enum vl-terminate-phase;
- enum terminated;
- enum failed;
- enum scaling-out;
- enum scaling-in;
- enum vl-instantiate;
- enum vl-terminate;
+ type enumeration {
+ enum init;
+ enum vl-init-phase;
+ enum vnf-init-phase;
+ enum running;
+ enum terminate;
+ enum vnf-terminate-phase;
+ enum vl-terminate-phase;
+ enum terminated;
+ enum failed;
+ enum scaling-out;
+ enum scaling-in;
+ enum vl-instantiate;
+ enum vl-terminate;
+ }
}
- }
- leaf config-status {
- description
- "The configuration status of the NS instance
+ leaf config-status {
+ description
+ "The configuration status of the NS instance
configuring: At least one of the VNFs in this instance is in configuring state
configured: All the VNFs in this NS instance are configured or config-not-needed state
";
- type config-states;
- }
+ type config-states;
+ }
- list service-primitive {
- description
- "Network service level service primitives.";
+ list service-primitive {
+ description
+ "Network service level service primitives.";
- key "name";
+ key "name";
- leaf name {
- description
- "Name of the service primitive.";
- type string;
- }
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
- list parameter {
- description
- "List of parameters for the service primitive.";
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
- key "name";
- uses manotypes:primitive-parameter;
- }
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
- uses manotypes:ui-primitive-group;
+ uses manotypes:ui-primitive-group;
- list vnf-primitive-group {
- description
- "List of service primitives grouped by VNF.";
-
- key "member-vnf-index-ref";
- leaf member-vnf-index-ref {
+ list vnf-primitive-group {
description
"Reference to member-vnf within constituent-vnfds";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type string;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+
+ type string;
+ }
+
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type string;
+ }
+
+ list primitive {
+ key "index";
+
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
+
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+
+ list initial-config-primitive {
+ rwpb:msg-new NsrInitialConfigPrimitive;
+ description
+ "Initial set of configuration primitives for NSD.";
+ key "seq";
+ leaf seq {
+ description
+ "Sequence number for the configuration primitive.";
+ type uint64;
+ }
+
+ leaf name {
+ description
+ "Name of the configuration primitive.";
+ type string;
+ mandatory "true";
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+
+ list parameter {
+ key "name";
+ leaf name {
type string;
}
- leaf vnfd-id-ref {
- description
- "A reference to a vnfd. This is a
- leafref to path:
- ../../../../nsd:constituent-vnfd
- + [nsd:id = current()/../nsd:id-ref]
- + /nsd:vnfd-id-ref
- NOTE: An issue with confd is preventing the
- use of xpath. Seems to be an issue with leafref
- to leafref, whose target is in a different module.
- Once that is resolved this will switched to use
- leafref";
-
- type string;
+ leaf value {
+ type string;
}
-
- leaf vnfd-name {
- description
- "Name of the VNFD";
- type string;
- }
-
- list primitive {
- key "index";
-
- leaf index {
- description "Index of this primitive";
- type uint32;
- }
-
- leaf name {
- description "Name of the primitive in the VNF primitive ";
- type string;
- }
- }
- }
-
- leaf user-defined-script {
- description
- "A user defined script.";
- type string;
- }
- }
-
- list initial-config-primitive {
- rwpb:msg-new NsrInitialConfigPrimitive;
- description
- "Initial set of configuration primitives for NSD.";
- key "seq";
- leaf seq {
- description
- "Sequence number for the configuration primitive.";
- type uint64;
+ }
}
- leaf name {
- description
- "Name of the configuration primitive.";
- type string;
- mandatory "true";
- }
- leaf user-defined-script {
+ list monitoring-param {
description
- "A user defined script.";
- type string;
- }
+ "List of NS level params.";
+ key "id";
- list parameter {
- key "name";
+ uses manotypes:monitoring-param-value;
+ uses manotypes:monitoring-param-ui-data;
+ uses manotypes:monitoring-param-aggregation;
+
+ leaf id {
+ type string;
+ }
+
leaf name {
type string;
}
- leaf value {
- type string;
- }
- }
- }
-
-
- list monitoring-param {
- description
- "List of NS level params.";
- key "id";
-
- uses manotypes:monitoring-param-value;
- uses manotypes:monitoring-param-ui-data;
- uses manotypes:monitoring-param-aggregation;
-
- leaf id {
- type string;
- }
-
- leaf name {
- type string;
- }
-
- leaf nsd-mon-param-ref {
- description "Reference to the NSD monitoring param descriptor
+ leaf nsd-mon-param-ref {
+ description "Reference to the NSD monitoring param descriptor
that produced this result";
- type leafref {
- path "/nsd:nsd-catalog/nsd:nsd[nsd:id = current()/" +
- "../../nsr:nsd-ref]/nsd:monitoring-param/nsd:id";
+ // TODO: Fix leafref
+ type leafref {
+ path "../../../../project-nsd:nsd-catalog/project-nsd:nsd" +
+ "[project-nsd:id = current()/../../nsd-ref]" +
+ "/project-nsd:monitoring-param/project-nsd:id";
+ }
}
- }
- list vnfr-mon-param-ref {
- description "A list of VNFR monitoring params associated with this monp";
- key "vnfr-id-ref vnfr-mon-param-ref";
+ list vnfr-mon-param-ref {
+ description "A list of VNFR monitoring params associated with this monp";
+ key "vnfr-id-ref vnfr-mon-param-ref";
- leaf vnfr-id-ref {
- description
- "A reference to a vnfr. This is a
+ leaf vnfr-id-ref {
+ description
+ "A reference to a vnfr. This is a
leafref to path:
/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
- type yang:uuid;
- }
+ type yang:uuid;
+ }
- leaf vnfr-mon-param-ref {
- description "A reference to the VNFR monitoring param";
- type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
- + "/vnfr:monitoring-param/vnfr:id";
+ leaf vnfr-mon-param-ref {
+ description "A reference to the VNFR monitoring param";
+ type leafref {
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id-ref]"
+ + "/vnfr:monitoring-param/vnfr:id";
+ }
}
}
}
- }
- list config-agent-job {
- key "job-id";
+ list config-agent-job {
+ key "job-id";
- leaf job-id {
- description "config agent job Identifier for the NS.";
- type uint64;
- }
+ leaf job-id {
+ description "config agent job Identifier for the NS.";
+ type uint64;
+ }
- leaf job-name {
- description "Config agent job name";
- type string;
- }
+ leaf job-name {
+ description "Config agent job name";
+ type string;
+ }
- leaf job-status {
- description
+ leaf job-status {
+ description
"Job status to be set based on each VNF primitive execution,
pending - if at least one VNF is in pending state
and remaining VNFs are in success state.
Success - if all VNF executions are in success state
failure - if one of the VNF executions is failure";
- type enumeration {
- enum pending;
- enum success;
- enum failure;
- }
- }
-
- leaf triggered-by {
- description "The primitive is triggered from NS or VNF level";
- type trigger-type;
- }
-
- leaf create-time {
- description
- "Creation timestamp of this Config Agent Job.
- The timestamp is expressed as seconds
- since unix epoch - 1970-01-01T00:00:00Z";
-
- type uint32;
- }
-
- leaf job-status-details {
- description "Config agent job status details, in case of errors";
- type string;
- }
-
- uses manotypes:primitive-parameter-value;
-
- list parameter-group {
- description
- "List of NS Primitive parameter groups";
- key "name";
- leaf name {
- description
- "Name of the parameter.";
- type string;
- }
-
- uses manotypes:primitive-parameter-value;
- }
-
- list vnfr {
- key "id";
- leaf id {
- description "Identifier for the VNFR.";
- type yang:uuid;
- }
- leaf vnf-job-status {
- description
- "Job status to be set based on each VNF primitive execution,
- pending - if at least one primitive is in pending state
- and remaining primitives are in success state.
- Success - if all primitive executions are in success state
- failure - if one of the primitive executions is failure";
type enumeration {
enum pending;
enum success;
@@ -1074,30 +1057,85 @@
}
}
- list primitive {
+ leaf triggered-by {
+ description "The primitive is triggered from NS or VNF level";
+ type trigger-type;
+ }
+
+ leaf create-time {
+ description
+ "Creation timestamp of this Config Agent Job.
+ The timestamp is expressed as seconds
+ since unix epoch - 1970-01-01T00:00:00Z";
+
+ type uint32;
+ }
+
+ leaf job-status-details {
+ description "Config agent job status details, in case of errors";
+ type string;
+ }
+
+ uses manotypes:primitive-parameter-value;
+
+ list parameter-group {
+ description
+ "List of NS Primitive parameter groups";
key "name";
leaf name {
- description "the name of the primitive";
+ description
+ "Name of the parameter.";
type string;
}
uses manotypes:primitive-parameter-value;
+ }
- leaf execution-id {
- description "Execution id of the primitive";
- type string;
+ list vnfr {
+ key "id";
+ leaf id {
+ description "Identifier for the VNFR.";
+ type yang:uuid;
}
- leaf execution-status {
- description "status of the Execution";
+ leaf vnf-job-status {
+ description
+ "Job status to be set based on each VNF primitive execution,
+ pending - if at least one primitive is in pending state
+ and remaining primitives are in success state.
+ Success - if all primitive executions are in success state
+ failure - if one of the primitive executions is failure";
type enumeration {
enum pending;
enum success;
enum failure;
}
}
- leaf execution-error-details {
- description "Error details if execution-status is failure";
- type string;
+
+ list primitive {
+ key "name";
+ leaf name {
+ description "the name of the primitive";
+ type string;
+ }
+
+ uses manotypes:primitive-parameter-value;
+
+ leaf execution-id {
+ description "Execution id of the primitive";
+ type string;
+ }
+ leaf execution-status {
+ description "status of the Execution";
+ type enumeration {
+ enum pending;
+ enum success;
+ enum failure;
+ }
+ }
+ leaf execution-error-details {
+ description "Error details if execution-status is failure";
+ type string;
+ }
}
}
}
@@ -1105,22 +1143,29 @@
}
}
+ grouping rpc-common {
+ uses manotypes:rpc-project-name;
+
+ leaf nsr_id_ref {
+ description "Reference to NSR ID ref";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/project-name]/ns-instance-config/nsr/id";
+ }
+ }
+ }
+
rpc get-ns-service-primitive-values {
description "Get the service primitive parameter values";
- input {
- leaf nsr_id_ref {
- description "Reference to NSR ID ref";
- mandatory true;
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ input {
leaf name {
description "Name of the NS service primitive group";
mandatory true;
type string;
}
+
+ uses rpc-common;
}
output {
@@ -1175,9 +1220,9 @@
description
"A reference to a vnfd. This is a
leafref to path:
- ../../../../nsd:constituent-vnfd
- + [nsd:id = current()/../nsd:id-ref]
- + /nsd:vnfd-id-ref
+ ../../../../project-nsd:constituent-vnfd
+ + [project-nsd:id = current()/../project-nsd:id-ref]
+ + /project-nsd:vnfd-id-ref
NOTE: An issue with confd is preventing the
use of xpath. Seems to be an issue with leafref
to leafref, whose target is in a different module.
@@ -1226,12 +1271,7 @@
type string;
}
- leaf nsr_id_ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf triggered-by {
description "The primitive is triggered from NS or VNF level";
@@ -1308,12 +1348,7 @@
type string;
}
- leaf nsr_id_ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf triggered-by {
description "The primitive is triggered from NS or VNF level";
@@ -1408,13 +1443,7 @@
description "Executes scale out request";
input {
-
- leaf nsr-id-ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf scaling-group-name-ref {
description "name of the scaling group";
@@ -1440,13 +1469,7 @@
description "Executes scale out request";
input {
-
- leaf nsr-id-ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf scaling-group-name-ref {
description "name of the scaling group";
@@ -1457,8 +1480,8 @@
description "id of the scaling group";
type uint64;
}
-
}
+
output {
leaf instance-id {
description "id of the scaling group";
@@ -1467,4 +1490,102 @@
}
}
+ rpc start-network-service {
+ description "Start the network service";
+ input {
+ leaf name {
+ mandatory true;
+ description "Name of the Network Service";
+ type string;
+ }
+
+ uses rpc-common;
+
+ uses ns-instance-config-params-common;
+
+ list vnfd-placement-group-maps {
+ description
+ "Mapping from mano-placement groups construct from VNFD to cloud
+ platform placement group construct";
+
+ key "placement-group-ref vnfd-id-ref";
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../project-nsd:constituent-vnfd
+ + [id = current()/../project-nsd:id-ref]
+ + /project-nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ type yang:uuid;
+ }
+
+ leaf placement-group-ref {
+ description
+ "A reference to VNFD placement group";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/" +
+ "../../project-name]/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = " +
+ "current()/../vnfd-id-ref]/project-vnfd:placement-groups/project-vnfd:name";
+ }
+ }
+
+ uses manotypes:placement-group-input;
+
+ list ssh-authorized-key {
+ key "key-pair-ref";
+
+ description "List of authorized ssh keys as part of cloud-config";
+
+ leaf key-pair-ref {
+ description "A reference to the key pair entry in the global key pair table";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../../../" +
+ "project-name]/key-pair/name";
+ }
+ }
+ }
+
+ list user {
+ key "name";
+
+ description "List of users to be added through cloud-config";
+ leaf name {
+ description "Name of the user ";
+ type string;
+ }
+ leaf user-info {
+ description "The user name's real name";
+ type string;
+ }
+ list ssh-authorized-key {
+ key "key-pair-ref";
+
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf key-pair-ref {
+ description "A reference to the key pair entry in the global key pair table";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/" +
+ "../../../../project-name]/key-pair/name";
+ }
+ }
+ }
+ }
+ }
+ }
+
+ output {
+ leaf nsr-id {
+ description "Automatically generated parameter";
+ type yang:uuid;
+ }
+ }
+ }
}
diff --git a/models/plugins/yang/pnfd.yang b/models/plugins/yang/pnfd.yang
index 2f9bcdf..fffa8ab 100644
--- a/models/plugins/yang/pnfd.yang
+++ b/models/plugins/yang/pnfd.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,15 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
@@ -47,56 +56,58 @@
"Derived from earlier versions of base YANG files";
}
- container pnfd-catalog {
+ augment "/rw-project:project" {
+ container pnfd-catalog {
- list pnfd {
- key "id";
-
- leaf id {
- description "Identifier for the PNFD.";
- type yang:uuid;
- }
-
- leaf name {
- description "PNFD name.";
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Vendor of the PNFD.";
- type string;
- }
-
- leaf description {
- description "Description of the PNFD.";
- type string;
- }
-
- leaf version {
- description "Version of the PNFD";
- type string;
- }
-
- list connection-point {
- description
- "List for external connection points. Each PNF has one or more external
- connection points.";
+ list pnfd {
key "id";
+
leaf id {
- description
- "Identifier for the external connection points";
- type uint64;
+ description "Identifier for the PNFD.";
+ type yang:uuid;
}
- leaf cp-type {
+ leaf name {
+ description "PNFD name.";
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Vendor of the PNFD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the PNFD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the PNFD";
+ type string;
+ }
+
+ list connection-point {
description
+ "List for external connection points. Each PNF has one or more external
+ connection points.";
+ key "id";
+ leaf id {
+ description
+ "Identifier for the external connection points";
+ type uint64;
+ }
+
+ leaf cp-type {
+ description
"Type of the connection point.";
- type manotypes:connection-point-type;
+ type manotypes:connection-point-type;
+ }
}
}
}
diff --git a/models/plugins/yang/project-nsd.role.xml b/models/plugins/yang/project-nsd.role.xml
new file mode 100644
index 0000000..afacae3
--- /dev/null
+++ b/models/plugins/yang/project-nsd.role.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:project-nsd-role</role>
+ <key-set>
+ <name>project-name</name>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:project-nsd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-nsd:nsd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:project-nsd-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/project-nsd:nsd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:project-nsd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-nsd:nsd-catalog</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/models/plugins/yang/project-nsd.yang b/models/plugins/yang/project-nsd.yang
new file mode 100644
index 0000000..9e20fd2
--- /dev/null
+++ b/models/plugins/yang/project-nsd.yang
@@ -0,0 +1,601 @@
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module project-nsd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/project-nsd";
+ prefix "project-nsd";
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ import nsd-base {
+ prefix "nsd-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the Network Service Descriptor (NSD)
+ under projects";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+
+ grouping nsd-constituent-vnfd {
+ list constituent-vnfd {
+ description
+ "List of VNFDs that are part of this
+ network service.";
+
+ key "member-vnf-index";
+
+ leaf member-vnf-index {
+ description
+ "Identifier/index for the VNFD. This separate id
+ is required to ensure that multiple VNFs can be
+ part of single NS";
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+
+ leaf start-by-default {
+ description
+ "VNFD is started as part of the NS instantiation";
+ type boolean;
+ default true;
+ }
+ }
+ }
+
+ grouping nsr-nsd-constituent-vnfd {
+ list constituent-vnfd {
+ description
+ "List of VNFDs that are part of this
+ network service.";
+
+ key "member-vnf-index";
+
+ leaf member-vnf-index {
+ description
+ "Identifier/index for the VNFD. This separate id
+ is required to ensure that multiple VNFs can be
+ part of single NS";
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+
+ leaf start-by-default {
+ description
+ "VNFD is started as part of the NS instantiation";
+ type boolean;
+ default true;
+ }
+ }
+ }
+
+ grouping nsd-vld {
+ list vld {
+
+ key "id";
+
+ uses nsd-base:nsd-vld-common;
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "member-vnf-index-ref vnfd-connection-point-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ // TODO (Philip): RIFT-15639
+ // type leafref {
+ // path "../../../constituent-vnfd/member-vnf-index";
+ // }
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../constituent-vnfd
+ + [id = current()/../id-ref]
+ + /vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ type string;
+ }
+
+ leaf vnfd-connection-point-ref {
+ description "A reference to a connection point name";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ "[project-vnfd:id = current()/../vnfd-id-ref]/" +
+ "project-vnfd:connection-point/project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-vld {
+ list vld {
+
+ key "id";
+
+ uses nsd-base:nsd-vld-common;
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "member-vnf-index-ref vnfd-connection-point-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ type string;
+ }
+
+ leaf vnfd-connection-point-ref {
+ description "A reference to a connection point name";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ "[project-vnfd:id = current()/../vnfd-id-ref]/" +
+ "project-vnfd:connection-point/project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsd-vnf-dependency {
+ list vnf-dependency {
+ description
+ "List of VNF dependencies.";
+ key vnf-source-ref;
+ leaf vnf-source-ref {
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ leaf vnf-depends-on-ref {
+ description
+ "Reference to VNF that sorce VNF depends.";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-vnf-dependency {
+ list vnf-dependency {
+ description
+ "List of VNF dependencies.";
+ key vnf-source-ref;
+ leaf vnf-source-ref {
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ leaf vnf-depends-on-ref {
+ description
+ "Reference to VNF that sorce VNF depends.";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+
+ grouping nsd-placement-groups {
+ list placement-groups {
+ description "List of placement groups at NS level";
+
+ key "name";
+ uses manotypes:placement-group-info;
+
+ list member-vnfd {
+ description
+ "List of VNFDs that are part of this placement group";
+
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "member VNF index of this member VNF";
+ // TODO (Philip): RIFT-15639
+ // type leafref {
+ // path "../../../constituent-vnfd/member-vnf-index";
+ // }
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-placement-groups {
+ list placement-groups {
+ description "List of placement groups at NS level";
+
+ key "name";
+ uses manotypes:placement-group-info;
+
+ list member-vnfd {
+ description
+ "List of VNFDs that are part of this placement group";
+
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "member VNF index of this member VNF";
+ // TODO (Philip): RIFT-15639
+ // type leafref {
+ // path "../../../constituent-vnfd/member-vnf-index";
+ // }
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsd-monitoring-param {
+
+ list monitoring-param {
+ key "id";
+
+ uses nsd-base:monitoring-param-common;
+
+ list vnfd-monitoring-param {
+ description "A list of VNFD monitoring params";
+ key "vnfd-id-ref vnfd-monitoring-param-ref";
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+
+ type yang:uuid;
+ }
+
+ leaf vnfd-monitoring-param-ref {
+ description "A reference to the VNFD monitoring param";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:monitoring-param/project-vnfd:id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description
+ "Optional reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-monitoring-param {
+ list monitoring-param {
+ key "id";
+
+ uses nsd-base:monitoring-param-common;
+
+ list vnfd-monitoring-param {
+ description "A list of VNFD monitoring params";
+ key "vnfd-id-ref vnfd-monitoring-param-ref";
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+
+ type yang:uuid;
+ }
+
+ leaf vnfd-monitoring-param-ref {
+ description "A reference to the VNFD monitoring param";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:monitoring-param/project-vnfd:id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description
+ "Optional reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsd-service-primitive {
+ list service-primitive {
+ description
+ "Network service level service primitives.";
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
+
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
+
+ uses manotypes:ui-primitive-group;
+
+ list vnf-primitive-group {
+ description
+ "List of service primitives grouped by VNF.";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a leafref";
+
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:name";
+ }
+ }
+
+ list primitive {
+ key "index";
+
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
+
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ }
+
+ grouping nsr-nsd-service-primitive {
+ list service-primitive {
+ description
+ "Network service level service primitives.";
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
+
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
+
+ uses manotypes:ui-primitive-group;
+
+ list vnf-primitive-group {
+ description
+ "List of service primitives grouped by VNF.";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a leafref";
+
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:name";
+ }
+ }
+
+ list primitive {
+ key "index";
+
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
+
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ }
+
+ grouping nsd-descriptor {
+ uses nsd-base:nsd-descriptor-common;
+
+ uses nsd-vld;
+
+ uses nsd-constituent-vnfd;
+
+ uses nsd-placement-groups;
+
+ uses nsd-vnf-dependency;
+
+ uses nsd-monitoring-param;
+
+ uses nsd-service-primitive;
+ }
+
+ augment "/rw-project:project" {
+ container nsd-catalog {
+
+ list nsd {
+ key id;
+
+ uses nsd-descriptor;
+ }
+ }
+ }
+}
diff --git a/models/plugins/yang/project-vnfd.role.xml b/models/plugins/yang/project-vnfd.role.xml
new file mode 100644
index 0000000..a32c92f
--- /dev/null
+++ b/models/plugins/yang/project-vnfd.role.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:project-vnfd-role</role>
+ <key-set>
+ <name>project-name</name>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:project-vnfd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-vnfd:vnfd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:project-vnfd-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/project-vnfd:vnfd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:project-vnfd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-vnfd:vnfd-catalog</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/models/plugins/yang/project-vnfd.yang b/models/plugins/yang/project-vnfd.yang
new file mode 100644
index 0000000..5342436
--- /dev/null
+++ b/models/plugins/yang/project-vnfd.yang
@@ -0,0 +1,57 @@
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module project-vnfd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/project-vnfd";
+ prefix "project-vnfd";
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the Virtual Network Function (VNF) descriptor
+ under a project";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ augment /rw-project:project {
+ container vnfd-catalog {
+ description
+ "Virtual Network Function Descriptor (VNFD).";
+
+ list vnfd {
+ key "id";
+
+ uses vnfd-base:vnfd-descriptor;
+ }
+ }
+ }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-nsd-base.yang b/models/plugins/yang/rw-nsd-base.yang
new file mode 100644
index 0000000..bed366d
--- /dev/null
+++ b/models/plugins/yang/rw-nsd-base.yang
@@ -0,0 +1,49 @@
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-nsd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-nsd-base";
+ prefix "rw-nsd-base";
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ grouping to extend the base MANO NSD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping rw-nsd-ext {
+ uses manotypes:control-param;
+ uses manotypes:action-param;
+ leaf meta {
+ description
+ "Any meta-data needed by the UI";
+ type string;
+ }
+ }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-nsd.yang b/models/plugins/yang/rw-nsd.yang
index 4475928..d59168d 100644
--- a/models/plugins/yang/rw-nsd.yang
+++ b/models/plugins/yang/rw-nsd.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,16 +23,17 @@
namespace "http://riftio.com/ns/riftware-1.0/rw-nsd";
prefix "rw-nsd";
+ import rw-nsd-base {
+ prefix "rw-nsd-base";
+ }
+
import nsd {
prefix "nsd";
}
- import ietf-yang-types {
- prefix "yang";
- }
-
- import mano-types {
- prefix "manotypes";
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
@@ -44,13 +45,7 @@
}
augment /nsd:nsd-catalog/nsd:nsd {
- uses manotypes:control-param;
- uses manotypes:action-param;
- leaf meta {
- description
- "Any meta-data needed by the UI";
- type string;
- }
+ uses rw-nsd-base:rw-nsd-ext;
}
}
diff --git a/models/plugins/yang/rw-nsr.tailf.yang b/models/plugins/yang/rw-nsr.tailf.yang
index 3b7588a..37e1402 100644
--- a/models/plugins/yang/rw-nsr.tailf.yang
+++ b/models/plugins/yang/rw-nsr.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,11 +35,15 @@
prefix nsr;
}
- tailf:annotate "/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" {
+ tailf:annotate "/rw-project:project/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" {
tailf:callpoint rw_callpoint;
}
}
diff --git a/models/plugins/yang/rw-nsr.yang b/models/plugins/yang/rw-nsr.yang
index 805ed00..16bd411 100644
--- a/models/plugins/yang/rw-nsr.yang
+++ b/models/plugins/yang/rw-nsr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,8 +31,8 @@
prefix "nsr";
}
- import nsd {
- prefix "nsd";
+ import project-nsd {
+ prefix "project-nsd";
}
import rw-cloud {
@@ -47,10 +47,19 @@
prefix "rw-sdn";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
import ietf-yang-types {
prefix "yang";
}
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file augments
@@ -103,7 +112,7 @@
leaf nsd-id-ref {
description "Reference to NSD";
type leafref {
- path "/nsd:nsd-catalog/nsd:nsd/nsd:id";
+ path "../../../project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:id";
}
}
leaf instance-ref-count {
@@ -125,7 +134,7 @@
All VDU's, Virtual Links, and provider networks will be requested
using the cloud-account's associated CAL instance";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
@@ -153,7 +162,7 @@
All VDU's, Virtual Links, and provider networks will be requested
using the cloud-account's associated CAL instance";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
@@ -172,7 +181,8 @@
The configuration for this VNF will be driven using the specified config
agent account";
type leafref {
- path "/rw-config-agent:config-agent/rw-config-agent:account/rw-config-agent:name";
+ path "../../../../rw-config-agent:config-agent/" +
+ "rw-config-agent:account/rw-config-agent:name";
}
}
}
@@ -196,7 +206,7 @@
All VDU's, Virtual Links, and provider networks will be requested
using the cloud-account's associated CAL instance";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
@@ -212,15 +222,112 @@
}
- augment /nsr:ns-instance-config/nsr:nsr {
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr {
uses rw-ns-instance-config;
}
augment /nsr:start-network-service/nsr:input{
- uses rw-ns-instance-config;
+ leaf cloud-account {
+ description
+ "The configured cloud account which the NSR is instantiated within.
+ All VDU's, Virtual Links, and provider networks will be requested
+ using the cloud-account's associated CAL instance";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../" +
+ "nsr:project-name]/rw-cloud:cloud/rw-cloud:account" +
+ "/rw-cloud:name";
+ }
+ }
+
+ leaf om-datacenter {
+ description
+ "Openmano datacenter name to use when instantiating
+ the network service. This is only used when openmano
+ is selected as the cloud account. This should be superceded
+ by multiple cloud accounts when that becomes available.";
+ type string;
+ }
+
+ list vnf-cloud-account-map {
+ description
+ "Mapping VNF to Cloud Account where VNF will be instantiated";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ type uint64;
+ }
+
+ leaf cloud-account {
+ description
+ "The configured cloud account where VNF is instantiated within.
+ All VDU's, Virtual Links, and provider networks will be requested
+ using the cloud-account's associated CAL instance";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../../" +
+ "nsr:project-name]/rw-cloud:cloud/rw-cloud:account/" +
+ "rw-cloud:name";
+ }
+ }
+
+ leaf om-datacenter {
+ description
+ "Openmano datacenter name to use when instantiating
+ the network service. This is only used when openmano
+ is selected as the cloud account. This should be superceded
+ by multiple cloud accounts when that becomes available.";
+ type string;
+ }
+
+ leaf config-agent-account {
+ description
+ "The configured config agent account to use for instantiating this VNF.
+ The configuration for this VNF will be driven using the specified config
+ agent account";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../.." +
+ "/nsr:project-name]/rw-config-agent:config-agent/" +
+ "rw-config-agent:account/rw-config-agent:name";
+ }
+ }
+ }
+
+ list vl-cloud-account-map {
+ description
+ "Mapping VL to Cloud Account where VL will be instantiated";
+
+ key "vld-id-ref";
+
+ leaf vld-id-ref {
+ description
+ "A reference to a vld.
+ leafref path ../../nsd/vld/id";
+ type string;
+ }
+
+ leaf-list cloud-accounts {
+ description
+ "The configured list of cloud accounts where VL is instantiated.
+ All VDU's, Virtual Links, and provider networks will be requested
+ using the cloud-account's associated CAL instance";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../../" +
+ "nsr:project-name]/rw-cloud:cloud/rw-cloud:account/" +
+ "rw-cloud:name";
+ }
+ }
+
+ leaf-list om-datacenters {
+ description
+ "Openmano datacenter names to use when instantiating
+ the VLs. This is only used when openmano
+ is selected as the cloud account. This should be superceded
+ by multiple cloud accounts when that becomes available.";
+ type string;
+ }
+ }
}
- augment /nsr:ns-instance-opdata/nsr:nsr {
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr {
uses manotypes:action-param;
uses manotypes:control-param;
@@ -229,7 +336,7 @@
"The SDN account associted with the cloud account using which an
NS was instantiated.";
type leafref {
- path "/rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
+ path "../../../rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
}
}
@@ -381,11 +488,11 @@
uses operational-events;
}
- augment /nsr:ns-instance-opdata {
+ augment /rw-project:project/nsr:ns-instance-opdata {
uses nsd-ref-count;
}
- augment /nsr:ns-instance-opdata/nsr:nsr/nsr:vlr {
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr/nsr:vlr {
leaf assigned-subnet {
description "Subnet added for the VL";
type string;
@@ -394,7 +501,7 @@
description
"The configured cloud account in which the VL is instantiated within.";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
leaf om-datacenter {
@@ -407,14 +514,14 @@
}
}
- augment /nsr:ns-instance-opdata/nsr:nsr/nsr:constituent-vnfr-ref {
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr/nsr:constituent-vnfr-ref {
leaf cloud-account {
description
"The configured cloud account in which the VNF is instantiated within.
All VDU's, Virtual Links, and provider networks will be requested
using the cloud-account's associated CAL instance";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
leaf om-datacenter {
@@ -427,7 +534,7 @@
}
}
- augment /nsr:ns-instance-config {
+ augment /rw-project:project/nsr:ns-instance-config {
leaf nfvi-polling-period {
description
"Defines the period (secons) that the NFVI metrics are polled at";
diff --git a/models/plugins/yang/rw-project-nsd.yang b/models/plugins/yang/rw-project-nsd.yang
new file mode 100644
index 0000000..2e03385
--- /dev/null
+++ b/models/plugins/yang/rw-project-nsd.yang
@@ -0,0 +1,51 @@
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-project-nsd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-nsd";
+ prefix "rw-project-nsd";
+
+ import rw-nsd-base {
+ prefix "rw-nsd-base";
+ }
+
+ import project-nsd {
+ prefix "nsd";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file augments
+ the base MANO NSD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ augment /rw-project:project/nsd:nsd-catalog/nsd:nsd {
+ uses rw-nsd-base:rw-nsd-ext;
+ }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-project-vnfd.yang b/models/plugins/yang/rw-project-vnfd.yang
new file mode 100644
index 0000000..766f525
--- /dev/null
+++ b/models/plugins/yang/rw-project-vnfd.yang
@@ -0,0 +1,54 @@
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-project-vnfd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-vnfd";
+ prefix "rw-project-vnfd";
+
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ import rw-vnfd-base {
+ prefix "rw-vnfd-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file augments
+ the base MANO VNFD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd {
+ uses rw-vnfd-base:rw-vnfd-ext;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vdu {
+ uses rw-vnfd-base:vcs-component-ref;
+ }
+}
+// vim: sw=2
diff --git a/models/plugins/yang/rw-vlr.yang b/models/plugins/yang/rw-vlr.yang
index 755bb81..88e8e43 100644
--- a/models/plugins/yang/rw-vlr.yang
+++ b/models/plugins/yang/rw-vlr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,15 @@
prefix "yang";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-30 {
description
"Initial revision. This YANG file augments
@@ -47,13 +56,13 @@
"Derived from earlier versions of base YANG files";
}
- augment /vlr:vlr-catalog/vlr:vlr {
+ augment /rw-project:project/vlr:vlr-catalog/vlr:vlr {
leaf cloud-account {
description
"The cloud account to use when requesting resources for
this vlr";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
leaf om-datacenter {
diff --git a/models/plugins/yang/rw-vnfd-base.yang b/models/plugins/yang/rw-vnfd-base.yang
new file mode 100644
index 0000000..ec39ef1
--- /dev/null
+++ b/models/plugins/yang/rw-vnfd-base.yang
@@ -0,0 +1,119 @@
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-vnfd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd-base";
+ prefix "rw-vnfd-base";
+
+ import vnfd {
+ prefix "vnfd";
+ }
+
+ import rwvcs-types {
+ prefix "rwvcstypes";
+ }
+
+ import rw-pb-ext { prefix "rwpb"; }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ common structs for extending MANO VNFD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping rw-vnfd-ext {
+ uses manotypes:control-param;
+ uses manotypes:action-param;
+
+ leaf meta {
+ description
+ "Any meta-data needed by the UI";
+ type string;
+ }
+
+ list component {
+ description
+ "This section defines the RIFT.ware
+ virtual components";
+ key "component-name";
+ rwpb:msg-new VcsComponent;
+ rwpb:application-request-point;
+
+ leaf component-name {
+ description "";
+ type string;
+ }
+
+ leaf component-type {
+ description "";
+ type rwvcstypes:component_type;
+ mandatory true;
+ }
+
+ choice component {
+ case rwvcs-rwcollection {
+ uses rwvcstypes:rwvcs-rwcollection;
+ }
+ case rwvcs-rwvm {
+ uses rwvcstypes:rwvcs-rwvm;
+ }
+ case rwvcs-rwproc {
+ uses rwvcstypes:rwvcs-rwproc;
+ }
+ case native-proc {
+ uses rwvcstypes:native-proc;
+ }
+ case rwvcs-rwtasklet {
+ uses rwvcstypes:rwvcs-rwtasklet;
+ }
+ }
+ } // list component
+ }
+
+ grouping vcs-component-ref {
+ leaf vcs-component-ref {
+ description
+ "This defines the software components using the
+ RIFT.ware Virtual Component System (VCS). This
+ also allows specifying a state machine during
+ the VM startup.
+ NOTE: This is an significant addition to MANO,
+ since MANO doesn't clearly specify a method to
+ identify various software components in a VM.
+ Also using a state machine is not something that
+ is well described in MANO.";
+ type leafref {
+ path "../../component/component-name";
+ }
+ }
+ }
+}
+// vim: sw=2
diff --git a/models/plugins/yang/rw-vnfd.yang b/models/plugins/yang/rw-vnfd.yang
index 29eb852..e7fca3b 100644
--- a/models/plugins/yang/rw-vnfd.yang
+++ b/models/plugins/yang/rw-vnfd.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,18 +27,13 @@
prefix "vnfd";
}
- import rwvcs-types {
- prefix "rwvcstypes";
+ import rw-vnfd-base {
+ prefix "rw-vnfd-base";
}
- import rw-pb-ext { prefix "rwpb"; }
-
- import ietf-yang-types {
- prefix "yang";
- }
-
- import mano-types {
- prefix "manotypes";
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
@@ -50,68 +45,11 @@
}
augment /vnfd:vnfd-catalog/vnfd:vnfd {
- uses manotypes:control-param;
- uses manotypes:action-param;
- leaf meta {
- description
- "Any meta-data needed by the UI";
- type string;
- }
- list component {
- description
- "This section defines the RIFT.ware
- virtual components";
- key "component-name";
- rwpb:msg-new VcsComponent;
- rwpb:application-request-point;
-
- leaf component-name {
- description "";
- type string;
- }
-
- leaf component-type {
- description "";
- type rwvcstypes:component_type;
- mandatory true;
- }
-
- choice component {
- case rwvcs-rwcollection {
- uses rwvcstypes:rwvcs-rwcollection;
- }
- case rwvcs-rwvm {
- uses rwvcstypes:rwvcs-rwvm;
- }
- case rwvcs-rwproc {
- uses rwvcstypes:rwvcs-rwproc;
- }
- case native-proc {
- uses rwvcstypes:native-proc;
- }
- case rwvcs-rwtasklet {
- uses rwvcstypes:rwvcs-rwtasklet;
- }
- }
- } // list component
+ uses rw-vnfd-base:rw-vnfd-ext;
}
augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu {
- leaf vcs-component-ref {
- description
- "This defines the software components using the
- RIFT.ware Virtual Component System (VCS). This
- also allows specifying a state machine during
- the VM startup.
- NOTE: This is an significant addition to MANO,
- since MANO doesn't clearly specify a method to
- identify various software components in a VM.
- Also using a state machine is not something that
- is well described in MANO.";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/rw-vnfd:component/rw-vnfd:component-name";
- }
- }
+ uses rw-vnfd-base:vcs-component-ref;
}
}
// vim: sw=2
diff --git a/models/plugins/yang/rw-vnfr.role.xml b/models/plugins/yang/rw-vnfr.role.xml
new file mode 100644
index 0000000..9178690
--- /dev/null
+++ b/models/plugins/yang/rw-vnfr.role.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-vnfr-role</role>
+ <key-set>
+ <name>project-name</name>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:rw-vnfr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-vnfr:vnfr-console</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-vnfr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-vnfr:vnfr-console</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/models/plugins/yang/rw-vnfr.tailf.yang b/models/plugins/yang/rw-vnfr.tailf.yang
index 6090fcf..da9bdbf 100644
--- a/models/plugins/yang/rw-vnfr.tailf.yang
+++ b/models/plugins/yang/rw-vnfr.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,15 +35,19 @@
prefix vnfr;
}
- tailf:annotate "/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" {
+ tailf:annotate "/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/rw-vnfr:vnfr-console" {
+ tailf:annotate "/rw-project:project/rw-vnfr:vnfr-console" {
tailf:callpoint rw_callpoint;
}
diff --git a/models/plugins/yang/rw-vnfr.yang b/models/plugins/yang/rw-vnfr.yang
index be8acb4..6614f16 100644
--- a/models/plugins/yang/rw-vnfr.yang
+++ b/models/plugins/yang/rw-vnfr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,14 +27,16 @@
prefix "manotypes";
}
- import rw-pb-ext { prefix "rwpb"; }
+ import rw-pb-ext {
+ prefix "rwpb";
+ }
import vnfr {
prefix "vnfr";
}
- import vnfd {
- prefix "vnfd";
+ import project-vnfd {
+ prefix "project-vnfd";
}
import rw-cloud {
@@ -53,6 +55,15 @@
prefix "inet";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file augments
@@ -141,7 +152,7 @@
}
}
- augment /vnfr:vnfr-catalog/vnfr:vnfr {
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr {
uses manotypes:action-param;
uses manotypes:control-param;
@@ -150,7 +161,7 @@
"The cloud account to use when requesting resources for
this vnf";
type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ path "../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
@@ -235,7 +246,7 @@
}
}
- augment /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur {
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur {
leaf vm-pool {
description
"The pool from which this vm was allocated from";
@@ -258,7 +269,7 @@
Also using a state machine is not something that
is well described in MANO.";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:component/rw-vnfr:component-name";
+ path "../../rw-vnfr:component/rw-vnfr:component-name";
}
}
@@ -270,6 +281,7 @@
type string;
}
}
+
grouping vnfd-ref-count {
list vnfd-ref-count {
key "vnfd-id-ref";
@@ -278,7 +290,7 @@
leaf vnfd-id-ref {
description "Reference to VNFD";
type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ path "../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
}
}
leaf instance-ref-count {
@@ -292,28 +304,31 @@
}
}
}
- augment /vnfr:vnfr-catalog {
+
+ augment /rw-project:project/vnfr:vnfr-catalog {
uses vnfd-ref-count;
}
- container vnfr-console {
- config false;
- list vnfr {
- key "id";
- leaf id {
- description "Identifier for the VNFR.";
- type yang:uuid;
- }
- list vdur {
- description "List of Virtual Deployment Units";
+ augment /rw-project:project {
+ container vnfr-console {
+ config false;
+ list vnfr {
key "id";
leaf id {
- description "Unique id for the VDU";
+ description "Identifier for the VNFR.";
type yang:uuid;
}
- leaf console-url {
- description "Console URL for this VDU, if available";
- type inet:uri;
+ list vdur {
+ description "List of Virtual Deployment Units";
+ key "id";
+ leaf id {
+ description "Unique id for the VDU";
+ type yang:uuid;
+ }
+ leaf console-url {
+ description "Console URL for this VDU, if available";
+ type inet:uri;
+ }
}
}
}
diff --git a/models/plugins/yang/vld.yang b/models/plugins/yang/vld.yang
index 2747887..63556d7 100644
--- a/models/plugins/yang/vld.yang
+++ b/models/plugins/yang/vld.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,8 +27,8 @@
prefix "rwpb";
}
- import vnfd {
- prefix "vnfd";
+ import project-vnfd {
+ prefix "project-vnfd";
}
import ietf-inet-types {
@@ -43,6 +43,15 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
@@ -51,91 +60,93 @@
"Derived from earlier versions of base YANG files";
}
- container vld-catalog {
+ augment "/rw-project:project" {
+ container vld-catalog {
- list vld {
- key "id";
+ list vld {
+ key "id";
- leaf id {
- description "Identifier for the VLD.";
- type yang:uuid;
- }
-
- leaf name {
- description "Virtual Link Descriptor (VLD) name.";
- type string;
- }
-
- leaf short-name {
- description "Short name for VLD for UI";
- type string;
- }
-
- leaf vendor {
- description "Provider of the VLD.";
- type string;
- }
-
- leaf description {
- description "Description of the VLD.";
- type string;
- }
-
- leaf version {
- description "Version of the VLD";
- type string;
- }
-
- leaf type {
- type manotypes:virtual-link-type;
- }
-
- leaf root-bandwidth {
- description
- "For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
-
- leaf leaf-bandwidth {
- description
- "For ELAN this is the bandwidth of branches.";
- type uint64;
- }
-
- list vnfd-connection-point-ref {
- description
- "A list of references to connection points.";
- key "vnfd-ref member-vnf-index-ref";
-
- leaf vnfd-ref {
- description "A reference to a vnfd";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
- }
+ leaf id {
+ description "Identifier for the VLD.";
+ type yang:uuid;
}
- leaf member-vnf-index-ref {
- description
+ leaf name {
+ description "Virtual Link Descriptor (VLD) name.";
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name for VLD for UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Provider of the VLD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VLD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VLD";
+ type string;
+ }
+
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
+
+ leaf root-bandwidth {
+ description
+ "For ELAN this is the aggregate bandwidth.";
+ type uint64;
+ }
+
+ leaf leaf-bandwidth {
+ description
+ "For ELAN this is the bandwidth of branches.";
+ type uint64;
+ }
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "vnfd-ref member-vnf-index-ref";
+
+ leaf vnfd-ref {
+ description "A reference to a vnfd";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description
"A reference to the consituent-vnfd id in nsd.
Should have been a leafref to:
- '/nsd:nsd-catalog:/nsd:nsd/constituent-vnfd/member-vnf-index-ref'.
+ '/rw-project:project/project-nsd:nsd-catalog:/nsd/constituent-vnfd/member-vnf-index-ref'.
Instead using direct leaf to avoid circular reference.";
- type uint64;
- }
+ type uint64;
+ }
- leaf vnfd-connection-point-ref {
- description
+ leaf vnfd-connection-point-ref {
+ description
"A reference to a connection point name in a vnfd";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd"
- + "[vnfd:id = current()/../vld:vnfd-ref]"
- + "/vnfd:connection-point/vnfd:name";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vld:vnfd-ref]"
+ + "/project-vnfd:connection-point/project-vnfd:name";
+ }
}
}
- }
- // replicate for pnfd container here
- uses manotypes:provider-network;
+ // replicate for pnfd container here
+ uses manotypes:provider-network;
+ }
}
}
}
diff --git a/models/plugins/yang/vlr.role.xml b/models/plugins/yang/vlr.role.xml
new file mode 100644
index 0000000..90350dc
--- /dev/null
+++ b/models/plugins/yang/vlr.role.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:vlr-role</role>
+ <key-set>
+ <name>project-name</name>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:vlr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/vlr:vlr-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:vlr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/vlr:vlr-catalog</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/models/plugins/yang/vlr.tailf.yang b/models/plugins/yang/vlr.tailf.yang
index 4bed1d2..f773de6 100644
--- a/models/plugins/yang/vlr.tailf.yang
+++ b/models/plugins/yang/vlr.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix vlr;
}
- tailf:annotate "/vlr:vlr-catalog" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/vlr:vlr-catalog" {
tailf:callpoint rw_callpoint;
}
}
diff --git a/models/plugins/yang/vlr.yang b/models/plugins/yang/vlr.yang
index e30aa5b..394f2ec 100644
--- a/models/plugins/yang/vlr.yang
+++ b/models/plugins/yang/vlr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,6 +43,15 @@
prefix "vld";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
@@ -51,125 +60,127 @@
"Derived from earlier versions of base YANG files";
}
- container vlr-catalog {
- config false;
+ augment "/rw-project:project" {
+ container vlr-catalog {
+ config false;
- list vlr {
- key "id";
- unique "name";
+ list vlr {
+ key "id";
+ unique "name";
- leaf id {
- description "Identifier for the VLR.";
- type yang:uuid;
- }
+ leaf id {
+ description "Identifier for the VLR.";
+ type yang:uuid;
+ }
- leaf name {
- description "VLR name.";
- type string;
- }
+ leaf name {
+ description "VLR name.";
+ type string;
+ }
- leaf nsr-id-ref {
- description
+ leaf nsr-id-ref {
+ description
"NS instance identifier.
- This is a leafref /nsr:ns-instance-config/nsr:nsr/nsr:id";
- type yang:uuid;
- }
+ This is a leafref /rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:id";
+ type yang:uuid;
+ }
- leaf vld-ref {
- description
- "Reference to VLD
- /nsr:ns-instance-config/nsr:nsr[nsr:id=../nsr-id-ref]/nsd/vld:vld/vld:id";
- type string;
- }
+ leaf vld-ref {
+ description
+ "Reference to VLD
+ /rw-project:project/nsr:ns-instance-config/nsr:nsr[nsr:id=../nsr-id-ref]
+ /nsd/vld:vld/vld:id";
+ type string;
+ }
- leaf res-id {
- description "Identifier for resmgr id mapping";
- type yang:uuid;
- }
+ leaf res-id {
+ description "Identifier for resmgr id mapping";
+ type yang:uuid;
+ }
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
- leaf vendor {
- description "Provider of the VLR.";
- type string;
- }
+ leaf vendor {
+ description "Provider of the VLR.";
+ type string;
+ }
- leaf description {
- description "Description of the VLR.";
- type string;
- }
+ leaf description {
+ description "Description of the VLR.";
+ type string;
+ }
- leaf version {
- description "Version of the VLR";
- type string;
- }
+ leaf version {
+ description "Version of the VLR";
+ type string;
+ }
- leaf type {
- type manotypes:virtual-link-type;
- }
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
- leaf root-bandwidth {
- description
+ leaf root-bandwidth {
+ description
"For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
+ type uint64;
+ }
- leaf leaf-bandwidth {
- description
+ leaf leaf-bandwidth {
+ description
"For ELAN this is the bandwidth of branches.";
- type uint64;
- }
+ type uint64;
+ }
- leaf create-time {
- description
- "Creation timestamp of this Virtual Link.
+ leaf create-time {
+ description
+ "Creation timestamp of this Virtual Link.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
+ type uint32;
+ }
- leaf uptime {
- description
- "Active period of this Virtual Link.
+ leaf uptime {
+ description
+ "Active period of this Virtual Link.
Uptime is expressed in seconds";
- type uint32;
- }
+ type uint32;
+ }
- leaf network-id {
- description
+ leaf network-id {
+ description
"Identifier for the allocated network resource.";
- type string;
- }
+ type string;
+ }
- leaf vim-network-name {
- description
+ leaf vim-network-name {
+ description
"Name of network in VIM account. This is used to indicate
pre-provisioned network name in cloud account.";
- type string;
- }
-
- // replicate for pnfd container here
-
- uses manotypes:provider-network;
- uses manotypes:ip-profile-info;
-
- leaf status {
- description
- "Status of the virtual link record.";
- type enumeration {
- enum LINK_UP;
- enum DEGRADED;
- enum LINK_DOWN;
+ type string;
}
- }
- leaf operational-status {
- description
- "The operational status of the Virtual Link
+
+ // replicate for pnfd container here
+
+ uses manotypes:provider-network;
+ uses manotypes:ip-profile-info;
+
+ leaf status {
+ description
+ "Status of the virtual link record.";
+ type enumeration {
+ enum LINK_UP;
+ enum DEGRADED;
+ enum LINK_DOWN;
+ }
+ }
+ leaf operational-status {
+ description
+ "The operational status of the Virtual Link
init : The VL is in init stat.
vl-alloc-pending : The VL alloc is pending in VIM
running : The VL is up and running in VM
@@ -178,14 +189,15 @@
failed : The VL instantiation failed in VIM.
";
- type enumeration {
- rwpb:enum-type "VlOperationalStatus";
- enum init;
- enum vl-alloc-pending;
- enum running;
- enum vl-terminate-pending;
- enum terminated;
- enum failed;
+ type enumeration {
+ rwpb:enum-type "VlOperationalStatus";
+ enum init;
+ enum vl-alloc-pending;
+ enum running;
+ enum vl-terminate-pending;
+ enum terminated;
+ enum failed;
+ }
}
}
}
diff --git a/models/plugins/yang/vnfd-base.yang b/models/plugins/yang/vnfd-base.yang
new file mode 100644
index 0000000..27f064e
--- /dev/null
+++ b/models/plugins/yang/vnfd-base.yang
@@ -0,0 +1,532 @@
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module vnfd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/vnfd-base";
+ prefix "vnfd-base";
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import rw-pb-ext {
+ prefix "rwpb";
+ }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the common types for Virtual Network Function
+ (VNF) descriptor";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping common-connection-point {
+ leaf name {
+ description "Name of the connection point";
+ type string;
+ }
+
+ leaf id {
+ description "Identifier for the internal connection points";
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name of the connection point";
+ type string;
+ }
+
+ leaf type {
+ description "Type of the connection point.";
+ type manotypes:connection-point-type;
+ }
+ leaf port-security-enabled {
+ description "Enables the port security for the port";
+ type boolean;
+ }
+ }
+
+ grouping virtual-interface {
+ container virtual-interface {
+ description
+ "Container for the virtual interface properties";
+
+ leaf type {
+ description
+ "Specifies the type of virtual interface
+ between VM and host.
+ VIRTIO : Use the traditional VIRTIO interface.
+ PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+ SR-IOV : Use SR-IOV interface.
+ E1000 : Emulate E1000 interface.
+ RTL8139 : Emulate RTL8139 interface.
+ PCNET : Emulate PCNET interface.
+ OM-MGMT : Used to specify openmano mgmt external-connection type";
+
+ type enumeration {
+ enum OM-MGMT;
+ enum PCI-PASSTHROUGH;
+ enum SR-IOV;
+ enum VIRTIO;
+ enum E1000;
+ enum RTL8139;
+ enum PCNET;
+ }
+ default "VIRTIO";
+ }
+
+ leaf vpci {
+ description
+ "Specifies the virtual PCI address. Expressed in
+ the following format dddd:dd:dd.d. For example
+ 0000:00:12.0. This information can be used to
+ pass as metadata during the VM creation.";
+ type string;
+ }
+
+ leaf bandwidth {
+ description
+ "Aggregate bandwidth of the NIC.";
+ type uint64;
+ }
+ }
+ }
+
+ grouping vnfd-descriptor {
+ leaf id {
+ description "Identifier for the VNFD.";
+ type string;
+ }
+
+ leaf name {
+ description "VNFD name.";
+ mandatory true;
+ type string;
+ }
+
+ leaf short-name {
+ description "VNFD short name.";
+ type string;
+ }
+
+ leaf vendor {
+ description "Vendor of the VNFD.";
+ type string;
+ }
+
+ leaf logo {
+ description
+ "Vendor logo for the Virtual Network Function";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VNFD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VNFD";
+ type string;
+ }
+
+ uses manotypes:vnf-configuration;
+
+ container mgmt-interface {
+ description
+ "Interface over which the VNF is managed.";
+
+ choice endpoint-type {
+ description
+ "Indicates the type of management endpoint.";
+
+ case ip {
+ description
+ "Specifies the static IP address for managing the VNF.";
+ leaf ip-address {
+ type inet:ip-address;
+ }
+ }
+
+ case vdu-id {
+ description
+ "Use the default management interface on this VDU.";
+ leaf vdu-id {
+ type leafref {
+ path "../../vdu/id";
+ }
+ }
+ }
+
+ case cp {
+ description
+ "Use the ip address associated with this connection point.";
+ leaf cp {
+ type leafref {
+ path "../../connection-point/name";
+ }
+ }
+ }
+ }
+
+ leaf port {
+ description
+ "Port for the management interface.";
+ type inet:port-number;
+ }
+
+ container dashboard-params {
+ description "Parameters for the VNF dashboard";
+
+ leaf path {
+ description "The HTTP path for the dashboard";
+ type string;
+ }
+
+ leaf https {
+ description "Pick HTTPS instead of HTTP , Default is false";
+ type boolean;
+ }
+
+ leaf port {
+ description "The HTTP port for the dashboard";
+ type inet:port-number;
+ }
+ }
+ }
+
+ list internal-vld {
+ key "id";
+ description
+ "List of Internal Virtual Link Descriptors (VLD).
+ The internal VLD describes the basic topology of
+ the connectivity (e.g. E-LAN, E-Line, E-Tree)
+ between internal VNF components of the system.";
+
+ leaf id {
+ description "Identifier for the VLD";
+ type string;
+ }
+
+ leaf name {
+ description "Name of the internal VLD";
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name of the internal VLD";
+ type string;
+ }
+
+ leaf description {
+ type string;
+ }
+
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
+
+ leaf root-bandwidth {
+ description
+ "For ELAN this is the aggregate bandwidth.";
+ type uint64;
+ }
+
+ leaf leaf-bandwidth {
+ description
+ "For ELAN this is the bandwidth of branches.";
+ type uint64;
+ }
+
+ list internal-connection-point {
+ key "id-ref";
+ description "List of internal connection points in this VLD";
+ leaf id-ref {
+ description "reference to the internal connection point id";
+ type leafref {
+ path "../../../vdu/internal-connection-point/id";
+ }
+ }
+ }
+ uses manotypes:provider-network;
+ choice init-params {
+ description "Extra parameters for VLD instantiation";
+
+ case vim-network-ref {
+ leaf vim-network-name {
+ description
+ "Name of network in VIM account. This is used to indicate
+ pre-provisioned network name in cloud account.";
+ type string;
+ }
+ }
+
+ case vim-network-profile {
+ leaf ip-profile-ref {
+ description "Named reference to IP-profile object";
+ type string;
+ }
+ }
+
+ }
+ }
+
+ uses manotypes:ip-profile-list;
+
+ list connection-point {
+ key "name";
+ description
+ "List for external connection points. Each VNF has one
+ or more external connection points. As the name
+ implies that external connection points are used for
+ connecting the VNF to other VNFs or to external networks.
+ Each VNF exposes these connection points to the
+ orchestrator. The orchestrator can construct network
+ services by connecting the connection points between
+ different VNFs. The NFVO will use VLDs and VNFFGs at
+ the network service level to construct network services.";
+
+ uses common-connection-point;
+ }
+
+ list vdu {
+ description "List of Virtual Deployment Units";
+ key "id";
+
+ leaf id {
+ description "Unique id for the VDU";
+ type string;
+ }
+
+ leaf name {
+ description "Unique name for the VDU";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VDU.";
+ type string;
+ }
+
+ leaf count {
+ description "Number of instances of VDU";
+ type uint64;
+ }
+
+ leaf mgmt-vpci {
+ description
+ "Specifies the virtual PCI address. Expressed in
+ the following format dddd:dd:dd.d. For example
+ 0000:00:12.0. This information can be used to
+ pass as metadata during the VM creation.";
+ type string;
+ }
+
+ uses manotypes:vm-flavor;
+ uses manotypes:guest-epa;
+ uses manotypes:vswitch-epa;
+ uses manotypes:hypervisor-epa;
+ uses manotypes:host-epa;
+
+ list alarm {
+ key "alarm-id";
+
+ uses manotypes:alarm;
+ }
+
+ uses manotypes:image-properties;
+
+ choice cloud-init-input {
+ description
+ "Indicates how the contents of cloud-init script are provided.
+ There are 2 choices - inline or in a file";
+
+ case inline {
+ leaf cloud-init {
+ description
+ "Contents of cloud-init script, provided inline, in cloud-config format";
+ type string;
+ }
+ }
+
+ case filename {
+ leaf cloud-init-file {
+ description
+ "Name of file with contents of cloud-init script in cloud-config format";
+ type string;
+ }
+ }
+ }
+
+ uses manotypes:supplemental-boot-data;
+
+ list internal-connection-point {
+ key "id";
+ description
+ "List for internal connection points. Each VNFC
+ has zero or more internal connection points.
+ Internal connection points are used for connecting
+ the VNF components internal to the VNF. If a VNF
+ has only one VNFC, it may not have any internal
+ connection points.";
+
+ uses common-connection-point;
+
+ leaf internal-vld-ref {
+ type leafref {
+ path "../../../internal-vld/id";
+ }
+ }
+ }
+
+ list internal-interface {
+ description
+ "List of internal interfaces for the VNF";
+ key name;
+
+ leaf name {
+ description
+ "Name of internal interface. Note that this
+ name has only local significance to the VDU.";
+ type string;
+ }
+
+ leaf vdu-internal-connection-point-ref {
+ type leafref {
+ path "../../internal-connection-point/id";
+ }
+ }
+ uses virtual-interface;
+ }
+
+ list external-interface {
+ description
+ "List of external interfaces for the VNF.
+ The external interfaces enable sending
+ traffic to and from VNF.";
+ key name;
+
+ leaf name {
+ description
+ "Name of the external interface. Note that
+ this name has only local significance.";
+ type string;
+ }
+
+ leaf vnfd-connection-point-ref {
+ description
+ "Name of the external connection point.";
+ type leafref {
+ path "../../../connection-point/name";
+ }
+ }
+ uses virtual-interface;
+ }
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
+ }
+
+ list vdu-dependency {
+ description
+ "List of VDU dependencies.";
+
+ key vdu-source-ref;
+ leaf vdu-source-ref {
+ type leafref {
+ path "../../vdu/id";
+ }
+ }
+
+ leaf vdu-depends-on-ref {
+ description
+ "Reference to the VDU that
+ source VDU depends.";
+ type leafref {
+ path "../../vdu/id";
+ }
+ }
+ }
+
+ leaf service-function-chain {
+ description "Type of node in Service Function Chaining Architecture";
+
+ type enumeration {
+ enum UNAWARE;
+ enum CLASSIFIER;
+ enum SF;
+ enum SFF;
+ }
+ default "UNAWARE";
+ }
+
+ leaf service-function-type {
+ description
+ "Type of Service Function.
+ NOTE: This needs to map with Service Function Type in ODL to
+ support VNFFG. Service Function Type is manadatory param in ODL
+ SFC. This is temporarily set to string for ease of use";
+ type string;
+ }
+
+ uses manotypes:monitoring-param;
+
+ list placement-groups {
+ description "List of placement groups at VNF level";
+
+ key "name";
+ uses manotypes:placement-group-info;
+
+ list member-vdus {
+
+ description
+ "List of VDUs that are part of this placement group";
+ key "member-vdu-ref";
+
+ leaf member-vdu-ref {
+ type leafref {
+ path "../../../vdu/id";
+ }
+ }
+ }
+ }
+ }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/vnfd.yang b/models/plugins/yang/vnfd.yang
index 1dfb459..42de19c 100644
--- a/models/plugins/yang/vnfd.yang
+++ b/models/plugins/yang/vnfd.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,20 +23,13 @@
namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfd";
prefix "vnfd";
- import mano-types {
- prefix "manotypes";
+ import vnfd-base {
+ prefix "vnfd-base";
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
- import ietf-yang-types {
- prefix "yang";
- }
-
- import ietf-inet-types {
- prefix "inet";
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
@@ -47,489 +40,15 @@
"Derived from earlier versions of base YANG files";
}
- grouping common-connection-point {
- leaf name {
- description "Name of the connection point";
- type string;
- }
-
- leaf id {
- description "Identifier for the internal connection points";
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf type {
- description "Type of the connection point.";
- type manotypes:connection-point-type;
- }
- leaf port-security-enabled {
- description "Enables the port security for the port";
- type boolean;
- }
- }
-
- grouping virtual-interface {
- container virtual-interface {
- description
- "Container for the virtual interface properties";
-
- leaf type {
- description
- "Specifies the type of virtual interface
- between VM and host.
- VIRTIO : Use the traditional VIRTIO interface.
- PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
- SR-IOV : Use SR-IOV interface.
- E1000 : Emulate E1000 interface.
- RTL8139 : Emulate RTL8139 interface.
- PCNET : Emulate PCNET interface.
- OM-MGMT : Used to specify openmano mgmt external-connection type";
-
- type enumeration {
- enum OM-MGMT;
- enum PCI-PASSTHROUGH;
- enum SR-IOV;
- enum VIRTIO;
- enum E1000;
- enum RTL8139;
- enum PCNET;
- }
- default "VIRTIO";
- }
-
- leaf vpci {
- description
- "Specifies the virtual PCI address. Expressed in
- the following format dddd:dd:dd.d. For example
- 0000:00:12.0. This information can be used to
- pass as metadata during the VM creation.";
- type string;
- }
-
- leaf bandwidth {
- description
- "Aggregate bandwidth of the NIC.";
- type uint64;
- }
- }
- }
-
- grouping vnfd-descriptor {
- leaf id {
- description "Identifier for the VNFD.";
- type string;
- }
-
- leaf name {
- description "VNFD name.";
- mandatory true;
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Vendor of the VNFD.";
- type string;
- }
-
- leaf logo {
- description
- "Vendor logo for the Virtual Network Function";
- type string;
- }
-
- leaf description {
- description "Description of the VNFD.";
- type string;
- }
-
- leaf version {
- description "Version of the VNFD";
- type string;
- }
-
- uses manotypes:vnf-configuration;
-
- container mgmt-interface {
- description
- "Interface over which the VNF is managed.";
-
- choice endpoint-type {
- description
- "Indicates the type of management endpoint.";
-
- case ip {
- description
- "Specifies the static IP address for managing the VNF.";
- leaf ip-address {
- type inet:ip-address;
- }
- }
-
- case vdu-id {
- description
- "Use the default management interface on this VDU.";
- leaf vdu-id {
- type leafref {
- path "../../vdu/id";
- }
- }
- }
-
- case cp {
- description
- "Use the ip address associated with this connection point.";
- leaf cp {
- type leafref {
- path "../../connection-point/name";
- }
- }
- }
- }
-
- leaf port {
- description
- "Port for the management interface.";
- type inet:port-number;
- }
-
- container dashboard-params {
- description "Parameters for the VNF dashboard";
-
- leaf path {
- description "The HTTP path for the dashboard";
- type string;
- }
-
- leaf https {
- description "Pick HTTPS instead of HTTP , Default is false";
- type boolean;
- }
-
- leaf port {
- description "The HTTP port for the dashboard";
- type inet:port-number;
- }
- }
- }
-
- list internal-vld {
- key "id";
- description
- "List of Internal Virtual Link Descriptors (VLD).
- The internal VLD describes the basic topology of
- the connectivity such as E-LAN, E-Line, E-Tree.
- between internal VNF components of the system.";
-
- leaf id {
- description "Identifier for the VLD";
- type string;
- }
-
- leaf name {
- description "Name of the internal VLD";
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf description {
- description "Description of internal VLD.";
- type string;
- }
-
- leaf type {
- type manotypes:virtual-link-type;
- }
-
- leaf root-bandwidth {
- description
- "For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
-
- leaf leaf-bandwidth {
- description
- "For ELAN this is the bandwidth of branches.";
- type uint64;
- }
-
- list internal-connection-point {
- key "id-ref";
- description "List of internal connection points in this VLD";
- leaf id-ref {
- description "reference to the internal connection point id";
- type leafref {
- path "../../../vdu/internal-connection-point/id";
- }
- }
- }
- uses manotypes:provider-network;
- choice init-params {
- description "Extra parameters for VLD instantiation";
-
- case vim-network-ref {
- leaf vim-network-name {
- description
- "Name of network in VIM account. This is used to indicate
- pre-provisioned network name in cloud account.";
- type string;
- }
- }
-
- case vim-network-profile {
- leaf ip-profile-ref {
- description "Named reference to IP-profile object";
- type string;
- }
- }
-
- }
- }
-
- uses manotypes:ip-profile-list;
-
- list connection-point {
- key "name";
- description
- "List for external connection points. Each VNF has one
- or more external connection points that connect the VNF
- to other VNFs or to external networks. Each VNF exposes
- connection points to the orchestrator, which can construct
- network services by connecting the connection points
- between different VNFs. The NFVO will use VLDs and VNFFGs
- at the network service level to construct network services.";
-
- uses common-connection-point;
- }
-
- list vdu {
- description "List of Virtual Deployment Units";
- key "id";
-
- leaf id {
- description "Unique id for the VDU";
- type string;
- }
-
- leaf name {
- description "Unique name for the VDU";
- type string;
- }
-
- leaf description {
- description "Description of the VDU.";
- type string;
- }
-
- leaf count {
- description "Number of instances of VDU";
- type uint64;
- }
-
- leaf mgmt-vpci {
- description
- "Specifies the virtual PCI address. Expressed in
- the following format dddd:dd:dd.d. For example
- 0000:00:12.0. This information can be used to
- pass as metadata during the VM creation.";
- type string;
- }
-
- uses manotypes:vm-flavor;
- uses manotypes:guest-epa;
- uses manotypes:vswitch-epa;
- uses manotypes:hypervisor-epa;
- uses manotypes:host-epa;
-
- list alarm {
- key "alarm-id";
-
- uses manotypes:alarm;
- }
-
- uses manotypes:image-properties;
-
- choice cloud-init-input {
- description
- "Indicates how the contents of cloud-init script are provided.
- There are 2 choices - inline or in a file";
-
- case inline {
- leaf cloud-init {
- description
- "Contents of cloud-init script, provided inline, in cloud-config format";
- type string;
- }
- }
-
- case filename {
- leaf cloud-init-file {
- description
- "Name of file with contents of cloud-init script in cloud-config format";
- type string;
- }
- }
- }
-
- uses manotypes:supplemental-boot-data;
-
- list internal-connection-point {
- key "id";
- description
- "List for internal connection points. Each VNFC
- has zero or more internal connection points.
- Internal connection points are used for connecting
- the VNF with components internal to the VNF. If a VNF
- has only one VNFC, it may not have any internal
- connection points.";
-
- uses common-connection-point;
- }
-
- list internal-interface {
- description
- "List of internal interfaces for the VNF";
- key name;
-
- leaf name {
- description
- "Name of internal interface. Note that this
- name has only local significance to the VDU.";
- type string;
- }
-
- leaf vdu-internal-connection-point-ref {
- type leafref {
- path "../../internal-connection-point/id";
- }
- }
- uses virtual-interface;
- }
-
- list external-interface {
- description
- "List of external interfaces for the VNF.
- The external interfaces enable sending
- traffic to and from VNF.";
- key name;
-
- leaf name {
- description
- "Name of the external interface. Note that
- this name has only local significance to
- the VDU.";
- type string;
- }
-
- leaf vnfd-connection-point-ref {
- description
- "Name of the external connection point.";
- type leafref {
- path "../../../connection-point/name";
- }
- }
- uses virtual-interface;
- }
-
- list volumes {
- key "name";
-
- leaf name {
- description "Name of the disk-volumes, e.g. vda, vdb etc";
- type string;
- }
-
- uses manotypes:volume-info;
- }
- }
-
- list vdu-dependency {
- description
- "List of VDU dependencies.";
-
- key vdu-source-ref;
- leaf vdu-source-ref {
- type leafref {
- path "../../vdu/id";
- }
- }
-
- leaf vdu-depends-on-ref {
- description
- "Reference to the VDU on which
- the source VDU depends.";
- type leafref {
- path "../../vdu/id";
- }
- }
- }
-
- leaf service-function-chain {
- description "Type of node in Service Function Chaining Architecture";
-
- type enumeration {
- enum UNAWARE;
- enum CLASSIFIER;
- enum SF;
- enum SFF;
- }
- default "UNAWARE";
- }
-
- leaf service-function-type {
- description
- "Type of Service Function.
- NOTE: This needs to map with Service Function Type in ODL to
- support VNFFG. Service Function Type is mandatory param in ODL
- SFC. This is temporarily set to string for ease of use";
- type string;
- }
-
- uses manotypes:monitoring-param;
-
- list placement-groups {
- description "List of placement groups at VNF level";
-
- key "name";
- uses manotypes:placement-group-info;
-
- list member-vdus {
-
- description
- "List of VDUs that are part of this placement group";
- key "member-vdu-ref";
-
- leaf member-vdu-ref {
- type leafref {
- path "../../../vdu/id";
- }
- }
- }
- }
- }
-
container vnfd-catalog {
description
- "Virtual Network Function Descriptor (VNFD).";
+ "Virtual Network Function Descriptor (VNFD).";
list vnfd {
key "id";
- uses vnfd-descriptor;
- }
+ uses vnfd-base:vnfd-descriptor;
+ }
}
}
diff --git a/models/plugins/yang/vnffgd.yang b/models/plugins/yang/vnffgd.yang
index 99347ae..3220113 100644
--- a/models/plugins/yang/vnffgd.yang
+++ b/models/plugins/yang/vnffgd.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,15 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2014-10-27 {
description
"Initial revision. This YANG file defines
@@ -47,37 +56,39 @@
"Derived from earlier versions of base YANG files";
}
- container vnffgd-catalog {
+ augment "/rw-project:project" {
+ container vnffgd-catalog {
- list vnffgd {
- key "id";
+ list vnffgd {
+ key "id";
- leaf name {
- description "VNF Forwarding Graph Descriptor name.";
- type string;
+ leaf name {
+ description "VNF Forwarding Graph Descriptor name.";
+ type string;
+ }
+
+ leaf id {
+ description "Identifier for the VNFFGD.";
+ type yang:uuid;
+ }
+
+ leaf provider {
+ description "Provider of the VNFFGD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VNFFGD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VNFFGD";
+ type string;
+ }
+
+ //TODO: Add more content here
}
-
- leaf id {
- description "Identifier for the VNFFGD.";
- type yang:uuid;
- }
-
- leaf provider {
- description "Provider of the VNFFGD.";
- type string;
- }
-
- leaf description {
- description "Description of the VNFFGD.";
- type string;
- }
-
- leaf version {
- description "Version of the VNFFGD";
- type string;
- }
-
- //TODO: Add more content here
}
}
}
diff --git a/models/plugins/yang/vnfr.role.xml b/models/plugins/yang/vnfr.role.xml
new file mode 100644
index 0000000..9dff86b
--- /dev/null
+++ b/models/plugins/yang/vnfr.role.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:vnfr-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/vnfr:create-alarm/vnfr:project-name</path>
+ <path>/vnfr:destroy-alarm/vnfr:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:vnfr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/vnfr:vnfr-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:vnfr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/vnfr:vnfr-catalog</path>
+ <path>/vnfr:create-alarm</path>
+ <path>/vnfr:destroy-alarm</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/models/plugins/yang/vnfr.tailf.yang b/models/plugins/yang/vnfr.tailf.yang
index 150dc9a..ef266a1 100644
--- a/models/plugins/yang/vnfr.tailf.yang
+++ b/models/plugins/yang/vnfr.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix vnfr;
}
- tailf:annotate "/vnfr:vnfr-catalog" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/vnfr:vnfr-catalog" {
tailf:callpoint rw_callpoint;
}
diff --git a/models/plugins/yang/vnfr.yang b/models/plugins/yang/vnfr.yang
index f228f1d..2678c50 100644
--- a/models/plugins/yang/vnfr.yang
+++ b/models/plugins/yang/vnfr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,12 +31,12 @@
prefix "rwpb";
}
- import vnfd {
- prefix "vnfd";
+ import vnfd-base {
+ prefix "vnfd-base";
}
- import nsd {
- prefix "nsd";
+ import project-nsd {
+ prefix "project-nsd";
}
import vlr {
@@ -51,6 +51,19 @@
prefix "inet";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-cloud {
+ prefix "rw-cloud";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
@@ -133,118 +146,119 @@
}
}
- container vnfr-catalog {
- config false;
- list vnfr {
- description
- "Virtual Network Function Record (VNFR).";
- key "id";
- unique "name";
-
- leaf id {
- description "Identifier for the VNFR.";
- type yang:uuid;
- }
-
- leaf nsr-id-ref {
+ augment "/rw-project:project" {
+ container vnfr-catalog {
+ config false;
+ list vnfr {
description
+ "Virtual Network Function Record (VNFR).";
+ key "id";
+ unique "name";
+
+ leaf id {
+ description "Identifier for the VNFR.";
+ type yang:uuid;
+ }
+
+ leaf nsr-id-ref {
+ description
"NS instance identifier.
This is a leafref /nsr:ns-instance-config/nsr:nsr/nsr:id";
- type yang:uuid;
- }
-
- leaf member-vnf-index-ref {
- description "Reference to member VNF index in Network service.";
- type leafref {
- path "/nsd:nsd-catalog/nsd:nsd/nsd:constituent-vnfd/nsd:member-vnf-index";
+ type yang:uuid;
}
- }
- leaf dashboard-url {
- description "Dashboard URL";
- type inet:uri;
- }
+ leaf member-vnf-index-ref {
+ description "Reference to member VNF index in Network service.";
+ type leafref {
+ path "../../../project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:constituent-vnfd/project-nsd:member-vnf-index";
+ }
+ }
- leaf name {
- description "VNFR name.";
- type string;
- }
+ leaf dashboard-url {
+ description "Dashboard URL";
+ type inet:uri;
+ }
- leaf short-name {
- description "VNFR short name.";
- type string;
- }
+ leaf name {
+ description "VNFR name.";
+ type string;
+ }
- leaf vendor {
- description "Vendor of the VNFR.";
- type string;
- }
+ leaf short-name {
+ description "VNFR short name.";
+ type string;
+ }
- leaf description {
- description "Description of the VNFR.";
- type string;
- }
+ leaf vendor {
+ description "Vendor of the VNFR.";
+ type string;
+ }
- leaf version {
- description "Version of the VNFR";
- type string;
- }
+ leaf description {
+ description "Description of the VNFR.";
+ type string;
+ }
- leaf create-time {
- description
- "Creation timestamp of this Virtual Network
+ leaf version {
+ description "Version of the VNFR";
+ type string;
+ }
+
+ leaf create-time {
+ description
+ "Creation timestamp of this Virtual Network
Function. The timestamp is expressed as
seconds since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
+ type uint32;
+ }
- leaf uptime {
- description
- "Active period of this Virtual Network Function.
+ leaf uptime {
+ description
+ "Active period of this Virtual Network Function.
Uptime is expressed in seconds";
- type uint32;
- }
-
- container vnfd {
- description "VNF descriptor used to instantiate this VNF";
- uses vnfd:vnfd-descriptor;
- }
-
- // Use parameters provided here to configure this VNF
- uses manotypes:vnf-configuration;
-
- // Mainly used by Mon-params & dashboard url
- container mgmt-interface {
- leaf ip-address {
- type inet:ip-address;
+ type uint32;
}
- leaf port {
- type inet:port-number;
+
+ container vnfd {
+ description "VNF descriptor used to instantiate this VNF";
+ uses vnfd-base:vnfd-descriptor;
}
- }
- list internal-vlr {
- key "vlr-ref";
+ // Use parameters provided here to configure this VNF
+ uses manotypes:vnf-configuration;
- leaf vlr-ref {
- description "Reference to a VLR record in the VLR catalog";
- type leafref {
- path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+ // Mainly used by Mon-params & dashboard url
+ container mgmt-interface {
+ leaf ip-address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
}
}
- leaf-list internal-connection-point-ref {
- type leafref {
- path "../../vdur/internal-connection-point/id";
+ list internal-vlr {
+ key "vlr-ref";
+
+ leaf vlr-ref {
+ description "Reference to a VLR record in the VLR catalog";
+ type leafref {
+ path "../../../../vlr:vlr-catalog/vlr:vlr/vlr:id";
+ }
+ }
+
+ leaf-list internal-connection-point-ref {
+ type leafref {
+ path "../../vdur/internal-connection-point/id";
+ }
}
}
- }
- list connection-point {
- key "name";
- description
+ list connection-point {
+ key "name";
+ description
"List for external connection points. Each VNF has one
or more external connection points. As the name
implies that external connection points are used for
@@ -255,129 +269,129 @@
different VNFs. The NFVO will use VLDs and VNFFGs at
the network service level to construct network services.";
- uses vnfd:common-connection-point;
+ uses vnfd-base:common-connection-point;
- leaf vlr-ref {
- description
+ leaf vlr-ref {
+ description
"Reference to the VLR associated with this connection point";
- type leafref {
- path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+ type leafref {
+ path "../../../../vlr:vlr-catalog/vlr:vlr/vlr:id";
+ }
}
- }
- leaf ip-address {
- description
+ leaf ip-address {
+ description
"IP address assigned to the external connection point";
- type inet:ip-address;
- }
- leaf mac-address {
- description
+ type inet:ip-address;
+ }
+ leaf mac-address {
+ description
"MAC address assigned to the external connection point";
- // type inet:mac-address;
- type string;
- }
- leaf connection-point-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
-
- list vdur {
- description "List of Virtual Deployment Units";
- key "id";
- unique "name";
-
- leaf id {
- description "Unique id for the VDU";
- type yang:uuid;
- }
-
- leaf name {
- description "name of the instantiated VDUR";
- type string;
- }
-
- leaf unique-short-name {
- description "Short Unique name of the VDU
- This will be of the format NSR name-ShortnedString-VDUname
- NSR name and VDU name shall be constrained to 10 characters";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
-
- leaf vdu-id-ref {
- type leafref {
- path "../../vnfd/vdu/id";
+ // type inet:mac-address;
+ type string;
+ }
+ leaf connection-point-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
}
}
- leaf vim-id {
- description "Allocated VM resource id";
- type string;
- }
+ list vdur {
+ description "List of Virtual Deployment Units";
+ key "id";
+ unique "name";
- leaf flavor-id {
- description "VIM assigned flavor id";
- type string;
- }
-
- leaf image-id {
- description "VIM assigned image id";
- type string;
- }
-
- leaf management-ip {
- description "Management IP address";
- type inet:ip-address;
- }
-
- leaf vm-management-ip {
- description "VM Private Management IP address";
- type inet:ip-address;
- }
-
- leaf console-url {
- description "Console URL for this VDU, if available";
- type inet:uri;
- }
-
- uses manotypes:vm-flavor;
- uses manotypes:guest-epa;
- uses manotypes:vswitch-epa;
- uses manotypes:hypervisor-epa;
- uses manotypes:host-epa;
-
- uses manotypes:supplemental-boot-data;
-
- list volumes {
- key "name";
+ leaf id {
+ description "Unique id for the VDU";
+ type yang:uuid;
+ }
leaf name {
- description "Name of the disk-volumes, e.g. vda, vdb etc";
+ description "name of the instantiated VDUR";
type string;
}
- leaf volume-id {
- description "VIM assigned volume id";
+ leaf unique-short-name {
+ description "Short Unique name of the VDU
+ This will be of the format NSR name-ShortnedString-VDUname
+ NSR name and VDU name shall be constrained to 10 characters";
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
type string;
}
- uses manotypes:volume-info;
- }
+ leaf vdu-id-ref {
+ type leafref {
+ path "../../vnfd/vdu/id";
+ }
+ }
- list alarms {
- description
+ leaf vim-id {
+ description "Allocated VM resource id";
+ type string;
+ }
+
+ leaf flavor-id {
+ description "VIM assigned flavor id";
+ type string;
+ }
+
+ leaf image-id {
+ description "VIM assigned image id";
+ type string;
+ }
+
+ leaf management-ip {
+ description "Management IP address";
+ type inet:ip-address;
+ }
+
+ leaf vm-management-ip {
+ description "VM Private Management IP address";
+ type inet:ip-address;
+ }
+
+ leaf console-url {
+ description "Console URL for this VDU, if available";
+ type inet:uri;
+ }
+
+ uses manotypes:vm-flavor;
+ uses manotypes:guest-epa;
+ uses manotypes:vswitch-epa;
+ uses manotypes:hypervisor-epa;
+ uses manotypes:host-epa;
+
+ uses manotypes:supplemental-boot-data;
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ leaf volume-id {
+ description "VIM assigned volume id";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
+
+ list alarms {
+ description
"A list of the alarms that have been created for this VDU";
- key "alarm-id";
- uses manotypes:alarm;
- }
+ key "alarm-id";
+ uses manotypes:alarm;
+ }
- list internal-connection-point {
- key "id";
- description
+ list internal-connection-point {
+ key "id";
+ description
"List for internal connection points. Each VNFC
has zero or more internal connection points.
Internal connection points are used for connecting
@@ -385,67 +399,67 @@
has only one VNFC, it may not have any internal
connection points.";
- uses vnfd:common-connection-point;
+ uses vnfd-base:common-connection-point;
- leaf ip-address {
- description
+ leaf ip-address {
+ description
"IP address assigned to the internal connection point";
- type inet:ip-address;
- }
- leaf mac-address {
- description
+ type inet:ip-address;
+ }
+ leaf mac-address {
+ description
"MAC address assigned to the internal connection point";
- // type inet:mac-address;
- type string;
- }
- }
-
- list internal-interface {
- description
- "List of internal interfaces for the VNF";
- key name;
-
- leaf name {
- description
- "Name of internal interface. Note that this
- name has only local significance to the VDU.";
- type string;
- }
-
- leaf vdur-internal-connection-point-ref {
- type leafref {
- path "../../internal-connection-point/id";
+ // type inet:mac-address;
+ type string;
}
}
- uses virtual-interface;
- }
- list external-interface {
- description
+ list internal-interface {
+ description
+ "List of internal interfaces for the VNF";
+ key name;
+
+ leaf name {
+ description
+ "Name of internal interface. Note that this
+ name has only local significance to the VDU.";
+ type string;
+ }
+
+ leaf vdur-internal-connection-point-ref {
+ type leafref {
+ path "../../internal-connection-point/id";
+ }
+ }
+ uses virtual-interface;
+ }
+
+ list external-interface {
+ description
"List of external interfaces for the VNF.
The external interfaces enable sending
traffic to and from VNF.";
- key name;
+ key name;
- leaf name {
- description
+ leaf name {
+ description
"Name of the external interface. Note that
this name has only local significance.";
- type string;
- }
-
- leaf vnfd-connection-point-ref {
- description
- "Name of the external connection point.";
- type leafref {
- path "../../../connection-point/name";
+ type string;
}
+
+ leaf vnfd-connection-point-ref {
+ description
+ "Name of the external connection point.";
+ type leafref {
+ path "../../../connection-point/name";
+ }
+ }
+ uses virtual-interface;
}
- uses virtual-interface;
- }
- leaf operational-status {
- description
- "The operational status of the VDU
+ leaf operational-status {
+ description
+ "The operational status of the VDU
init : The VDU has just started.
vm-init-phase : The VDUs in the VNF is being created in VIM.
vm-alloc-pending : The VM alloc is pending in VIM
@@ -456,26 +470,26 @@
failed : The VDU instantiation failed.
";
- type enumeration {
- rwpb:enum-type "VduOperationalStatus";
- enum init;
- enum vm-init-phase;
- enum vm-alloc-pending;
- enum running;
- enum terminate;
- enum vl-terminate-phase;
- enum terminated;
- enum failed;
+ type enumeration {
+ rwpb:enum-type "VduOperationalStatus";
+ enum init;
+ enum vm-init-phase;
+ enum vm-alloc-pending;
+ enum running;
+ enum terminate;
+ enum vl-terminate-phase;
+ enum terminated;
+ enum failed;
+ }
}
+ uses placement-group-info;
}
- uses placement-group-info;
- }
- uses manotypes:monitoring-param;
+ uses manotypes:monitoring-param;
- leaf operational-status {
- description
- "The operational status of the VNFR instance
+ leaf operational-status {
+ description
+ "The operational status of the VNFR instance
init : The VNF has just started.
vl-init-phase : The internal VLs in the VNF are being instantiated.
vm-init-phase : The VMs for VDUs in the VNF are being instantiated.
@@ -487,51 +501,57 @@
failed : The VNF instantiation failed
";
- type enumeration {
- rwpb:enum-type "VnfrOperationalStatus";
- enum init;
- enum vl-init-phase;
- enum vm-init-phase;
- enum running;
- enum terminate;
- enum vm-terminate-phase;
- enum vl-terminate-phase;
- enum terminated;
- enum failed;
+ type enumeration {
+ rwpb:enum-type "VnfrOperationalStatus";
+ enum init;
+ enum vl-init-phase;
+ enum vm-init-phase;
+ enum running;
+ enum terminate;
+ enum vm-terminate-phase;
+ enum vl-terminate-phase;
+ enum terminated;
+ enum failed;
+ }
}
- }
- leaf config-status {
- description
- "The configuration status of the NS instance
+ leaf config-status {
+ description
+ "The configuration status of the NS instance
configuring: At least one of the VNFs in this instance is in configuring state
configured: All the VNFs in this NS instance are configured or config-not-needed state
";
- type enumeration {
- enum configuring {
- value 1;
- }
- enum configured {
- value 2;
- }
- enum failed {
- value 3;
- }
- enum config-not-needed {
- value 4;
+ type enumeration {
+ enum configuring {
+ value 1;
+ }
+ enum configured {
+ value 2;
+ }
+ enum failed {
+ value 3;
+ }
+ enum config-not-needed {
+ value 4;
+ }
}
}
+ uses placement-group-info;
}
- uses placement-group-info;
}
}
rpc create-alarm {
description "Create an alert for a running VDU";
input {
+ uses manotypes:rpc-project-name;
+
leaf cloud-account {
mandatory true;
- type string;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../project-name]" +
+ "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
leaf vdur-id {
@@ -554,9 +574,14 @@
rpc destroy-alarm {
description "Destroy an alert that is associated with a running VDU";
input {
+ uses manotypes:rpc-project-name;
+
leaf cloud-account {
mandatory true;
- type string;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../project-name]" +
+ "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
leaf alarm-id {
diff --git a/rwcal/plugins/vala/CMakeLists.txt b/rwcal/plugins/vala/CMakeLists.txt
index 3482277..7ef9879 100644
--- a/rwcal/plugins/vala/CMakeLists.txt
+++ b/rwcal/plugins/vala/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
VALA_PACKAGES
rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
- rw_log-1.0
+ rw_log-1.0 rw_project_yang-1.0
VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
@@ -49,7 +49,7 @@
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwcal_yang rwlog_gi rwschema_yang rwmanifest_yang
+ DEPENDS rwcal_yang rwlog_gi rwschema_yang rwmanifest_yang rwproject_yang
)
rift_install_vala_artifacts(
diff --git a/rwcal/plugins/yang/CMakeLists.txt b/rwcal/plugins/yang/CMakeLists.txt
index a1b24fe..52ad258 100644
--- a/rwcal/plugins/yang/CMakeLists.txt
+++ b/rwcal/plugins/yang/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -37,10 +37,12 @@
COMPONENT ${PKG_LONG_NAME}
DEPENDS
mano-types_yang
+ rwprojectmano_yang
LIBRARIES
rwschema_yang_gen
rwyang
rwlog
rwlog-mgmt_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
)
diff --git a/rwcal/plugins/yang/rwcal.yang b/rwcal/plugins/yang/rwcal.yang
index 1ed2f7b..dcd653e 100644
--- a/rwcal/plugins/yang/rwcal.yang
+++ b/rwcal/plugins/yang/rwcal.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,6 +43,15 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2014-12-30 {
description
"Initial revision.";
@@ -639,149 +648,153 @@
}
}
- container cloud-accounts {
- list cloud-account-list {
- rwpb:msg-new CloudAccount;
- key "name";
+ augment "/rw-project:project" {
+ container cloud-accounts {
+ list cloud-account-list {
+ rwpb:msg-new CloudAccount;
+ key "name";
- leaf name {
- type string;
+ leaf name {
+ type string;
+ }
+ uses provider-auth;
}
- uses provider-auth;
}
}
- container vim-resources {
- rwpb:msg-new VimResources;
- config false;
-
- list vminfo-list {
- rwpb:msg-new VMInfoItem;
+ augment "/rw-project:project" {
+ container vim-resources {
+ rwpb:msg-new VimResources;
config false;
- key "vm-id";
- uses vm-info-item;
- }
+ list vminfo-list {
+ rwpb:msg-new VMInfoItem;
+ config false;
+ key "vm-id";
- list imageinfo-list {
- rwpb:msg-new ImageInfoItem;
- config false;
- key "id";
-
- uses image-info-item;
- }
-
- list tenantinfo-list {
- rwpb:msg-new TenantInfoItem;
- config false;
- key "tenant-id";
-
- leaf tenant-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ uses vm-info-item;
}
- leaf tenant-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
+ list imageinfo-list {
+ rwpb:msg-new ImageInfoItem;
+ config false;
+ key "id";
- list userinfo-list {
- rwpb:msg-new UserInfoItem;
- config false;
- key "user-id";
-
- leaf user-name{
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ uses image-info-item;
}
- leaf user-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
+ list tenantinfo-list {
+ rwpb:msg-new TenantInfoItem;
+ config false;
+ key "tenant-id";
- list roleinfo-list {
- rwpb:msg-new RoleInfoItem;
- config false;
- key "role-id";
+ leaf tenant-name {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
- leaf role-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ leaf tenant-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
}
- leaf role-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
+ list userinfo-list {
+ rwpb:msg-new UserInfoItem;
+ config false;
+ key "user-id";
- list hostinfo-list {
- rwpb:msg-new HostInfoItem;
- config false;
- key "host-id";
+ leaf user-name{
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
- leaf host-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ leaf user-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
}
- leaf host-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
+ list roleinfo-list {
+ rwpb:msg-new RoleInfoItem;
+ config false;
+ key "role-id";
- list networkinfo-list {
- rwpb:msg-new NetworkInfoItem;
- config false;
- key "network-id";
+ leaf role-name {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
- uses network-info-item;
- }
-
- list portinfo-list {
- rwpb:msg-new PortInfoItem;
- config false;
- key "port-id";
-
- uses port-info-item;
- }
-
- list flavorinfo-list {
- rwpb:msg-new FlavorInfoItem;
- config false;
- key "id";
-
- leaf id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ leaf role-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
}
- leaf name {
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
- type string;
+ list hostinfo-list {
+ rwpb:msg-new HostInfoItem;
+ config false;
+ key "host-id";
+
+ leaf host-name {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+
+ leaf host-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
}
- uses manotypes:vm-flavor;
- uses manotypes:guest-epa;
- uses manotypes:vswitch-epa;
- uses manotypes:hypervisor-epa;
- uses manotypes:host-epa;
- uses manotypes:placement-group-input;
+ list networkinfo-list {
+ rwpb:msg-new NetworkInfoItem;
+ config false;
+ key "network-id";
+
+ uses network-info-item;
+ }
+
+ list portinfo-list {
+ rwpb:msg-new PortInfoItem;
+ config false;
+ key "port-id";
+
+ uses port-info-item;
+ }
+
+ list flavorinfo-list {
+ rwpb:msg-new FlavorInfoItem;
+ config false;
+ key "id";
+
+ leaf id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+
+ leaf name {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 255;
+ type string;
+ }
+
+ uses manotypes:vm-flavor;
+ uses manotypes:guest-epa;
+ uses manotypes:vswitch-epa;
+ uses manotypes:hypervisor-epa;
+ uses manotypes:host-epa;
+ uses manotypes:placement-group-input;
+ }
}
}
@@ -814,13 +827,14 @@
}
- container virtual-link-req-params {
- description "This object defines the parameters required to create a virtual-link";
- rwpb:msg-new VirtualLinkReqParams;
- uses virtual-link-create-params;
+ augment "/rw-project:project" {
+ container virtual-link-req-params {
+ description "This object defines the parameters required to create a virtual-link";
+ rwpb:msg-new VirtualLinkReqParams;
+ uses virtual-link-create-params;
+ }
}
-
grouping connection-point-type {
leaf type {
description
@@ -976,59 +990,63 @@
}
}
- container vdu-init-params {
- description "This object defines the parameters required to create a VDU";
- rwpb:msg-new VDUInitParams;
- uses vdu-create-params;
+ augment "/rw-project:project" {
+ container vdu-init-params {
+ description "This object defines the parameters required to create a VDU";
+ rwpb:msg-new VDUInitParams;
+ uses vdu-create-params;
+ }
}
- container vdu-modify-params {
- description "This object defines the parameters required to modify VDU";
- rwpb:msg-new VDUModifyParams;
+ augment "/rw-project:project" {
+ container vdu-modify-params {
+ description "This object defines the parameters required to modify VDU";
+ rwpb:msg-new VDUModifyParams;
- leaf vdu-id {
- description "CAL assigned id for VDU to which this connection point belongs";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
-
- leaf image-id {
- description "CAL assigned image-id for the VDU image";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
-
- list connection-points-add {
- key "name";
- leaf name {
- description "Name of the connection point";
- type string;
- }
- leaf virtual-link-id {
- description "CAL assigned resource Id for the Virtual Link";
- type string;
- }
- leaf associate-public-ip {
- type boolean;
- default false;
- }
- leaf port-security-enabled {
- description "Enables the port security";
- type boolean;
- }
-
- uses connection-point-type;
- }
-
- list connection-points-remove {
- key "connection-point-id";
- leaf connection-point-id {
+ leaf vdu-id {
+ description "CAL assigned id for VDU to which this connection point belongs";
rwpb:field-inline "true";
rwpb:field-string-max 64;
type string;
}
+
+ leaf image-id {
+ description "CAL assigned image-id for the VDU image";
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+
+ list connection-points-add {
+ key "name";
+ leaf name {
+ description "Name of the connection point";
+ type string;
+ }
+ leaf virtual-link-id {
+ description "CAL assigned resource Id for the Virtual Link";
+ type string;
+ }
+ leaf associate-public-ip {
+ type boolean;
+ default false;
+ }
+ leaf port-security-enabled {
+ description "Enables the port security";
+ type boolean;
+ }
+
+ uses connection-point-type;
+ }
+
+ list connection-points-remove {
+ key "connection-point-id";
+ leaf connection-point-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ }
}
}
@@ -1221,22 +1239,24 @@
}
- container vnf-resources {
- rwpb:msg-new VNFResources;
- config false;
-
- list virtual-link-info-list {
- rwpb:msg-new VirtualLinkInfoParams;
+ augment "/rw-project:project" {
+ container vnf-resources {
+ rwpb:msg-new VNFResources;
config false;
- key virtual-link-id;
- uses virtual-link-info-params;
- }
- list vdu-info-list {
- rwpb:msg-new VDUInfoParams;
- config false;
- key vdu-id;
- uses vdu-info-params;
+ list virtual-link-info-list {
+ rwpb:msg-new VirtualLinkInfoParams;
+ config false;
+ key virtual-link-id;
+ uses virtual-link-info-params;
+ }
+
+ list vdu-info-list {
+ rwpb:msg-new VDUInfoParams;
+ config false;
+ key vdu-id;
+ uses vdu-info-params;
+ }
}
}
}
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py
index 0400a54..9d43424 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py
@@ -32,8 +32,9 @@
"""
Implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
"""
- def __init__(self, dts, log, loop, account):
- riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+ def __init__(self, dts, log, loop, project, account):
+ riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log,
+ loop, project, account)
self._name = account.name
self._type = riftcm_config_plugin.DEFAULT_CAP_TYPE
self._rift_install_dir = os.environ['RIFT_INSTALL']
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py
index 5f33024..f622d5f 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py
@@ -42,17 +42,19 @@
GET_NS_CONF_XPATH = "I,/nsr:get-ns-service-primitive-values"
GET_NS_CONF_O_XPATH = "O,/nsr:get-ns-service-primitive-values"
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsm = nsm
self._ns_regh = None
self._vnf_regh = None
self._get_ns_conf_regh = None
- self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop, nsm)
+ self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop,
+ project, nsm)
self._rift_install_dir = os.environ['RIFT_INSTALL']
self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
@@ -67,6 +69,16 @@
""" Return the NS manager instance """
return self._nsm
+ def deregister(self):
+ self._log.debug("De-register conman rpc handlers for project {}".
+ format(self._project))
+ for reg in self.reghs:
+ if reg:
+ reg.deregister()
+ reg = None
+
+ self.job_manager.deregister()
+
def prepare_meta(self, rpc_ip):
try:
@@ -264,6 +276,10 @@
def on_ns_config_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts exec-ns-service-primitive"""
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
rpc_ip = msg
rpc_op = NsrYang.YangOutput_Nsr_ExecNsServicePrimitive.from_dict({
"triggered_by": rpc_ip.triggered_by,
@@ -389,6 +405,10 @@
@asyncio.coroutine
def on_get_ns_config_values_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
nsr_id = msg.nsr_id_ref
cfg_prim_name = msg.name
try:
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py
index add6a29..d642582 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py
@@ -42,8 +42,9 @@
"""
Juju implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
"""
- def __init__(self, dts, log, loop, account):
- riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+ def __init__(self, dts, log, loop, project, account):
+ riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop,
+ project, account)
self._name = account.name
self._type = 'juju'
self._ip_address = account.juju.ip_address
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py
index 640e4b5..d771690 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py
@@ -25,9 +25,10 @@
Agent class for NSR
created for Agents to use objects from NSR
'''
- def __init__(self, nsr_dict, cfg):
+ def __init__(self, nsr_dict, cfg, project):
self._nsr = nsr_dict
self._cfg = cfg
+ self._project = project
self._vnfrs = []
self._vnfrs_msg = []
self._vnfr_ids = {}
@@ -75,7 +76,7 @@
if vnfr['id'] in self._vnfr_ids.keys():
agent_vnfr = self._vnfr_ids[vnfr['id']]
else:
- agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg)
+ agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg, self._project)
self._vnfrs.append(agent_vnfr)
self._vnfrs_msg.append(vnfr_msg)
self._vnfr_ids[agent_vnfr.id] = agent_vnfr
@@ -89,11 +90,12 @@
'''
Agent base class for VNFR processing
'''
- def __init__(self, nsr_name, vnfr_dict, vnfr_msg):
+ def __init__(self, nsr_name, vnfr_dict, vnfr_msg, project):
self._vnfr = vnfr_dict
self._vnfr_msg = vnfr_msg
self._nsr_name = nsr_name
self._configurable = False
+ self._project = project
@property
def nsr_name(self):
@@ -133,7 +135,8 @@
@property
def xpath(self):
""" VNFR xpath """
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+ return self._project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".
+ format(self.id))
def set_to_configurable(self):
self._configurable = True
@@ -152,10 +155,11 @@
There will be single instance of this plugin for each plugin type.
"""
- def __init__(self, dts, log, loop, config_agent):
+ def __init__(self, dts, log, loop, project, config_agent):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._config_agent = config_agent
@property
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py
index 5578a35..61f3a17 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py
@@ -48,16 +48,17 @@
class ConfigAccountHandler(object):
- def __init__(self, dts, log, loop, on_add_config_agent, on_delete_config_agent):
+ def __init__(self, dts, log, loop, project, on_add_config_agent, on_delete_config_agent):
self._log = log
self._dts = dts
self._loop = loop
+ self._project = project
self._on_add_config_agent = on_add_config_agent
self._on_delete_config_agent = on_delete_config_agent
self._log.debug("creating config account handler")
self.cloud_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
- self._dts, self._log,
+ self._dts, self._log, self._project,
rift.mano.config_agent.ConfigAgentCallbacks(
on_add_apply=self.on_config_account_added,
on_delete_apply=self.on_config_account_deleted,
@@ -77,6 +78,10 @@
def register(self):
self.cloud_cfg_handler.register()
+ def deregister(self):
+ self.cloud_cfg_handler.deregister()
+
+
class RiftCMConfigPlugins(object):
""" NSM Config Agent Plugins """
def __init__(self):
@@ -117,7 +122,8 @@
self._config_plugins = RiftCMConfigPlugins()
self._config_handler = ConfigAccountHandler(
- self._dts, self._log, self._loop, self._on_config_agent, self._on_config_agent_delete)
+ self._dts, self._log, self._loop, parent._project,
+ self._on_config_agent, self._on_config_agent_delete)
self._plugin_instances = {}
self._default_account_added = False
@@ -179,7 +185,8 @@
else:
# Otherwise, instantiate a new plugin using the config agent account
self._log.debug("Instantiting new config agent using class: %s", cap_inst)
- new_instance = cap_inst(self._dts, self._log, self._loop, config_agent)
+ new_instance = cap_inst(self._dts, self._log, self._loop,
+ self._ConfigManagerConfig._project, config_agent)
self._plugin_instances[cap_name] = new_instance
# TODO (pjoseph): See why this was added, as this deletes the
@@ -216,6 +223,11 @@
for account in config_agents:
self._on_config_agent(account)
+ def deregister(self):
+ self._log.debug("De-registering config agent nsm plugin manager".
+ format(self._ConfigManagerConfig._project))
+ self._config_handler.deregister()
+
def set_config_agent(self, nsr, vnfr, method):
if method == 'juju':
agent_type = 'juju'
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
index 4b010b6..d74f695 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
@@ -127,13 +127,16 @@
self._log = log
self._loop = loop
self._parent = parent
+ self._project = parent._project
+
self._nsr_dict = {}
self.pending_cfg = {}
self.terminate_cfg = {}
self.pending_tasks = [] # User for NSRid get retry
# (mainly excercised at restart case)
- self._config_xpath = "C,/cm-config"
- self._opdata_xpath = "D,/rw-conman:cm-state"
+
+ self._config_xpath = self._project.add_project("C,/rw-conman:cm-config")
+ self._opdata_xpath = self._project.add_project("D,/rw-conman:cm-state")
self.cm_config = conmanY.SoConfig()
# RO specific configuration
@@ -147,7 +150,7 @@
self.cm_state['states'] = "Initialized"
# Initialize objects to register
- self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts)
+ self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts, self._project)
self._config_agent_mgr = conagent.RiftCMConfigAgent(
self._dts,
self._log,
@@ -157,10 +160,11 @@
self.reg_handles = [
self.cmdts_obj,
self._config_agent_mgr,
- RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop,
+ RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop, self._project,
PretendNsm(
self._dts, self._log, self._loop, self)),
]
+ self._op_reg = None
def is_nsr_valid(self, nsr_id):
if nsr_id in self._nsr_dict:
@@ -241,7 +245,19 @@
# Initialize all handles that needs to be registered
for reg in self.reg_handles:
yield from reg.register()
-
+
+ def deregister(self):
+ # De-register all reg handles
+ self._log.debug("De-register ConfigManagerConfig for project {}".
+ format(self._project))
+
+ for reg in self.reg_handles:
+ reg.deregister()
+ reg = None
+
+ self._op_reg.delete_element(self._opdata_xpath)
+ self._op_reg.deregister()
+
@asyncio.coroutine
def register_cm_state_opdata(self):
@@ -285,9 +301,9 @@
try:
handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- yield from self._dts.register(xpath=self._opdata_xpath,
- handler=handler,
- flags=rwdts.Flag.PUBLISHER)
+ self._op_reg = yield from self._dts.register(xpath=self._opdata_xpath,
+ handler=handler,
+ flags=rwdts.Flag.PUBLISHER)
self._log.info("Successfully registered for opdata(%s)", self._opdata_xpath)
except Exception as e:
self._log.error("Failed to register for opdata as (%s)", e)
@@ -301,7 +317,7 @@
if method in vnf_config:
return method
return None
-
+
def get_cfg_file_extension(method, configuration_options):
ext_dict = {
"netconf" : "xml",
@@ -473,7 +489,7 @@
try:
if id not in nsr_dict:
- nsr_obj = ConfigManagerNSR(self._log, self._loop, self, id)
+ nsr_obj = ConfigManagerNSR(self._log, self._loop, self, self._project, id)
nsr_dict[id] = nsr_obj
else:
self._log.info("NSR(%s) is already initialized!", id)
@@ -486,7 +502,7 @@
if nsr_obj.cm_nsr['state'] != nsr_obj.state_to_string(conmanY.RecordState.INIT):
self._log.debug("NSR(%s) is already processed, state=%s",
nsr_obj.nsr_name, nsr_obj.cm_nsr['state'])
- yield from nsr_obj.publish_cm_state()
+ # yield from nsr_obj.publish_cm_state()
return True
cmdts_obj = self.cmdts_obj
@@ -502,7 +518,7 @@
# Create Agent NSR class
nsr_config = yield from cmdts_obj.get_nsr_config(id)
self._log.debug("NSR {} config: {}".format(id, nsr_config))
- nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config)
+ nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config, self._project)
try:
yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED)
@@ -519,7 +535,7 @@
# return
nsr_obj.set_config_dir(self)
-
+
for const_vnfr in nsr['constituent_vnfr_ref']:
self._log.debug("Fetching VNFR (%s)", const_vnfr['vnfr_id'])
vnfr_msg = yield from cmdts_obj.get_vnfr(const_vnfr['vnfr_id'])
@@ -624,45 +640,50 @@
def terminate_NSR(self, id):
nsr_dict = self._nsr_dict
if id not in nsr_dict:
- self._log.error("NSR(%s) does not exist!", id)
+ self._log.debug("NSR(%s) does not exist!", id)
return
else:
- # Remove this NSR if we have it on pending task list
- for task in self.pending_tasks:
- if task['nsrid'] == id:
- self.del_from_pending_tasks(task)
+ try:
+ # Remove this NSR if we have it on pending task list
+ for task in self.pending_tasks:
+ if task['nsrid'] == id:
+ self.del_from_pending_tasks(task)
- # Remove this object from global list
- nsr_obj = nsr_dict.pop(id, None)
+ # Remove this object from global list
+ nsr_obj = nsr_dict.pop(id, None)
- # Remove this NS cm-state from global status list
- self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
+ # Remove this NS cm-state from global status list
+ self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
- # Also remove any scheduled configuration event
- for nsr_obj_p in self._parent.pending_cfg:
- if nsr_obj_p == nsr_obj:
- assert id == nsr_obj_p._nsr_id
- #self._parent.pending_cfg.remove(nsr_obj_p)
- # Mark this as being deleted so we do not try to configure it if we are in cfg_delay (will wake up and continue to process otherwise)
- nsr_obj_p.being_deleted = True
- self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name)
+ # Also remove any scheduled configuration event
+ for nsr_obj_p in self._parent.pending_cfg:
+ if nsr_obj_p == nsr_obj:
+ assert id == nsr_obj_p._nsr_id
+ #self._parent.pending_cfg.remove(nsr_obj_p)
+ # Mark this as being deleted so we do not try to configure
+ # it if we are in cfg_delay (will wake up and continue to process otherwise)
+ nsr_obj_p.being_deleted = True
+ self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name)
- self._parent.remove_nsr_obj(id)
+ self._parent.remove_nsr_obj(id)
- # Call Config Agent to clean up for each VNF
- for agent_vnfr in nsr_obj.agent_nsr.vnfrs:
- yield from self._config_agent_mgr.invoke_config_agent_plugins(
- 'notify_terminate_vnfr',
- nsr_obj.agent_nsr,
- agent_vnfr)
+ # Call Config Agent to clean up for each VNF
+ for agent_vnfr in nsr_obj.agent_nsr.vnfrs:
+ yield from self._config_agent_mgr.invoke_config_agent_plugins(
+ 'notify_terminate_vnfr',
+ nsr_obj.agent_nsr,
+ agent_vnfr)
- # publish delete cm-state (cm-nsr)
- yield from nsr_obj.delete_cm_nsr()
+ # publish delete cm-state (cm-nsr)
+ yield from nsr_obj.delete_cm_nsr()
- #####################TBD###########################
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
+ #####################TBD###########################
+ # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
- self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
+ self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
+
+ except Exception as e:
+ self._log.exception("Terminate NSR exception: {}".format(e))
@asyncio.coroutine
def process_initial_config(self, nsr_obj, conf, script, vnfr_name=None):
@@ -874,7 +895,7 @@
class ConfigManagerNSR(object):
- def __init__(self, log, loop, parent, id):
+ def __init__(self, log, loop, parent, project, id):
self._log = log
self._loop = loop
self._rwcal = None
@@ -882,6 +903,7 @@
self._cp_dict = {}
self._nsr_id = id
self._parent = parent
+ self._project = project
self._log.info("Instantiated NSR entry for id=%s", id)
self.nsr_cfg_config_attributes_dict = {}
self.vnf_config_attributes_dict = {}
@@ -910,10 +932,9 @@
@property
def nsr_opdata_xpath(self):
''' Returns full xpath for this NSR cm-state opdata '''
- return(
- "D,/rw-conman:cm-state" +
- "/rw-conman:cm-nsr[rw-conman:id='{}']"
- ).format(self._nsr_id)
+ return self._project.add_project((
+ "D,/rw-conman:cm-state/rw-conman:cm-nsr[rw-conman:id='{}']"
+ ).format(self._nsr_id))
@property
def vnfrs(self):
@@ -1340,8 +1361,8 @@
@staticmethod
def nsd_msg(k=None):
- return ("C,/nsd:nsd-catalog/nsd:nsd" +
- "[nsd:id = '{}']".format(k) if k is not None else "")
+ return ("C,/project-nsd:nsd-catalog/project-nsd:nsd" +
+ "[project-nsd:id = '{}']".format(k) if k is not None else "")
@staticmethod
def vnfr_opdata(k=None):
@@ -1350,8 +1371,8 @@
@staticmethod
def vnfd(k=None):
- return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
- ("[vnfd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ ("[project-vnfd:id='{}']".format(k) if k is not None else ""))
@staticmethod
def config_agent(k=None):
@@ -1369,14 +1390,16 @@
class ConfigManagerDTS(object):
''' This class either reads from DTS or publishes to DTS '''
- def __init__(self, log, loop, parent, dts):
+ def __init__(self, log, loop, parent, dts, project):
self._log = log
self._loop = loop
self._parent = parent
self._dts = dts
+ self._project = project
@asyncio.coroutine
- def _read_dts(self, xpath, do_trace=False):
+ def _read_dts(self, path, do_trace=False):
+ xpath = self._project.add_project(path)
self._log.debug("_read_dts path = %s", xpath)
flags = rwdts.XactFlag.MERGE
res_iter = yield from self._dts.query_read(
@@ -1463,19 +1486,21 @@
return cfgagentl
@asyncio.coroutine
- def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update a cm-state (cm-nsr) record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating cm-state %s:%s dts_pub_hdl = %s", path, msg, self.dts_pub_hdl)
self.dts_pub_hdl.update_element(path, msg, flags)
self._log.debug("Updated cm-state, %s:%s", path, msg)
@asyncio.coroutine
- def delete(self, path):
+ def delete(self, xpath):
"""
Delete cm-nsr record in DTS with the path only
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting cm-nsr %s dts_pub_hdl = %s", path, self.dts_pub_hdl)
self.dts_pub_hdl.delete_element(path)
self._log.debug("Deleted cm-nsr, %s", path)
@@ -1484,12 +1509,23 @@
def register(self):
yield from self.register_to_publish()
yield from self.register_for_nsr()
-
+
+ def deregister(self):
+ self._log.debug("De-registering conman config for project {}".
+ format(self._project.name))
+ if self.dts_reg_hdl:
+ self.dts_reg_hdl.deregister()
+ self.dts_reg_hdl = None
+
+ if self.dts_pub_hdl:
+ self.dts_pub_hdl.deregister()
+ self.dts_pub_hdl = None
+
@asyncio.coroutine
def register_to_publish(self):
''' Register to DTS for publishing cm-state opdata '''
- xpath = "D,/rw-conman:cm-state/rw-conman:cm-nsr"
+ xpath = self._project.add_project("D,/rw-conman:cm-state/rw-conman:cm-nsr")
self._log.debug("Registering to publish cm-state @ %s", xpath)
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
@@ -1499,7 +1535,7 @@
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
@asyncio.coroutine
def register_for_nsr(self):
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py
index f292a68..e4f043e 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py
@@ -55,9 +55,12 @@
else:
log_vnf += "{}/".format(vnf_cfg[item])
return log_vnf
-
+
+
class ConfigManagerROifConnectionError(Exception):
pass
+
+
class ScriptError(Exception):
pass
@@ -68,12 +71,15 @@
self._log = log
self._loop = loop
self._parent = parent
- self._nsr_xpath = "/cm-state/cm-nsr"
+ self._nsr_xpath = parent._project.add_project("/cm-state/cm-nsr")
@asyncio.coroutine
def register(self):
pass
+ def deregister(self):
+ pass
+
@asyncio.coroutine
def update_vnf_state(self, vnf_cfg, state):
nsr_obj = vnf_cfg['nsr_obj']
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py
index 4e92b6c..e260b24 100755
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,10 @@
'''
This file - ConfigManagerTasklet()
|
++
+|
+ConfigManagerProject()
+|
+--|--> ConfigurationManager()
|
+--> rwconman_config.py - ConfigManagerConfig()
@@ -44,6 +48,10 @@
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import rwconman_config as Config
from . import rwconman_events as Event
@@ -60,10 +68,12 @@
return log_vnf
class ConfigurationManager(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self._log = log
self._loop = loop
self._dts = dts
+ self._project = project
+
self.cfg_sleep = True
self.cfg_dir = os.path.join(os.environ["RIFT_INSTALL"], "etc/conman")
self._config = Config.ConfigManagerConfig(self._dts, self._log, self._loop, self)
@@ -71,6 +81,7 @@
self.pending_cfg = []
self.pending_tasks = {}
self._nsr_objs = {}
+ self._task = None # The configuration_handler task
self._handlers = [
self._config,
@@ -228,71 +239,74 @@
yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED)
return ret_status
- # Basically, this loop will never end.
- while True:
- # Check the pending tasks are complete
- # Store a list of tasks that are completed and
- # remove from the pending_tasks list outside loop
- ids = []
- for nsr_id, task in self.pending_tasks.items():
- if task.done():
- ids.append(nsr_id)
- e = task.exception()
- if e:
- self._log.error("Exception in configuring nsr {}: {}".
- format(nsr_id, e))
- nsr_obj = self.get_nsr_obj(nsr_id)
- if nsr_obj:
- yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
+ try:
+ # Basically, this loop will never end.
+ while True:
+ # Check the pending tasks are complete
+ # Store a list of tasks that are completed and
+ # remove from the pending_tasks list outside loop
+ ids = []
+ for nsr_id, task in self.pending_tasks.items():
+ if task.done():
+ ids.append(nsr_id)
+ e = task.exception()
+ if e:
+ self._log.error("Exception in configuring nsr {}: {}".
+ format(nsr_id, e))
+ nsr_obj = self.get_nsr_obj(nsr_id)
+ if nsr_obj:
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
+ else:
+ rc = task.result()
+ self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
else:
- rc = task.result()
- self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
- else:
- self._log.debug("NSR {} still configuring".format(nsr_id))
+ self._log.debug("NSR {} still configuring".format(nsr_id))
- # Remove the completed tasks
- for nsr_id in ids:
- self.pending_tasks.pop(nsr_id)
+ # Remove the completed tasks
+ for nsr_id in ids:
+ self.pending_tasks.pop(nsr_id)
- # TODO (pjoseph): Fix this
- # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
- # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
- # wrongfully 10 seconds in advance?
- yield from asyncio.sleep(10, loop=self._loop)
+ # TODO (pjoseph): Fix this
+ # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
+ # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
+ # wrongfully 10 seconds in advance?
+ yield from asyncio.sleep(10, loop=self._loop)
- if self.pending_cfg:
- # get first NS, pending_cfg is nsr_obj list
- nsr_obj = self.pending_cfg[0]
- nsr_done = False
- if nsr_obj.being_deleted is False:
- # Process this NS, returns back same obj is successfull or exceeded retries
- try:
- self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
+ if self.pending_cfg:
+ # get first NS, pending_cfg is nsr_obj list
+ nsr_obj = self.pending_cfg[0]
+ nsr_done = False
+ if nsr_obj.being_deleted is False:
+ # Process this NS, returns back same obj is successfull or exceeded retries
+ try:
+ self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
- # Check if we already have a task running for this NSR
- # Case where we are still configuring and terminate is called
- if nsr_obj.nsr_id in self.pending_tasks:
- self._log.error("NSR {} in state {} has a configure task running.".
- format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
- # Terminate the task for this NSR
- self.pending_tasks[nsr_obj.nsr_id].cancel()
+ # Check if we already have a task running for this NSR
+ # Case where we are still configuring and terminate is called
+ if nsr_obj.nsr_id in self.pending_tasks:
+ self._log.error("NSR {} in state {} has a configure task running.".
+ format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
+ # Terminate the task for this NSR
+ self.pending_tasks[nsr_obj.nsr_id].cancel()
- yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
+ yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
- # Call in a separate thread
- self.pending_tasks[nsr_obj.nsr_id] = \
- self._loop.create_task(
- process_nsr_obj(nsr_obj)
- )
+ # Call in a separate thread
+ self.pending_tasks[nsr_obj.nsr_id] = \
+ self._loop.create_task(
+ process_nsr_obj(nsr_obj)
+ )
- # Remove this nsr_obj
- self.pending_cfg.remove(nsr_obj)
+ # Remove this nsr_obj
+ self.pending_cfg.remove(nsr_obj)
- except Exception as e:
- self._log.error("Failed to process NSR as %s", str(e))
- self._log.exception(e)
+ except Exception as e:
+ self._log.error("Failed to process NSR as %s", str(e))
+ self._log.exception(e)
+ except asyncio.CancelledError as e:
+ self._log.debug("Stopped configuration handler for project {}".format(self._project))
@asyncio.coroutine
def register(self):
@@ -300,7 +314,37 @@
for reg in self._handlers:
yield from reg.register()
- asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+ self._task = asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+
+ def deregister(self):
+ self._log.debug("De-register conman for project {}".format(self._project.name))
+ self._task.cancel()
+
+ for reg in self._handlers:
+ reg.deregister()
+
+
+class ConfigManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ConfigManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._con_man = None
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.info("Initializing the Configuration-Manager tasklet")
+ self._con_man = ConfigurationManager(self.log,
+ self.loop,
+ self._dts,
+ self,)
+ yield from self._con_man.register()
+
+ def deregister(self):
+ self._log.debug("De-register project {}".format(self.name))
+ self._con_man.deregister()
+
class ConfigManagerTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
@@ -308,7 +352,13 @@
self.rwlog.set_category("rw-conman-log")
self._dts = None
- self._con_man = None
+
+ self.project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(ConfigManagerTasklet, self).start()
@@ -327,11 +377,9 @@
@asyncio.coroutine
def init(self):
- self._log.info("Initializing the Configuration-Manager tasklet")
- self._con_man = ConfigurationManager(self.log,
- self.loop,
- self._dts)
- yield from self._con_man.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, ConfigManagerProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
diff --git a/rwcm/plugins/yang/rw-conman.tailf.yang b/rwcm/plugins/yang/rw-conman.tailf.yang
index aabbdd5..d4fd1a5 100644
--- a/rwcm/plugins/yang/rw-conman.tailf.yang
+++ b/rwcm/plugins/yang/rw-conman.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix conman;
}
- tailf:annotate "/conman:cm-state" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/conman:cm-state" {
tailf:callpoint rw_callpoint;
}
diff --git a/rwcm/plugins/yang/rw-conman.yang b/rwcm/plugins/yang/rw-conman.yang
index bb1555d..9a29a60 100644
--- a/rwcm/plugins/yang/rw-conman.yang
+++ b/rwcm/plugins/yang/rw-conman.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,14 @@
prefix "rwcli";
}
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
+ }
+
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
+ }
+
import nsr {
prefix "nsr";
}
@@ -75,6 +83,19 @@
prefix "rw-config-agent";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-27 {
description
"Initial revision.";
@@ -128,27 +149,29 @@
}
}
- container cm-config {
- description "Service Orchestrator specific configuration";
- rwpb:msg-new "SoConfig";
- rwcli:new-mode "cm-config";
+ augment "/rw-project:project" {
+ container cm-config {
+ description "Service Orchestrator specific configuration";
+ rwpb:msg-new "SoConfig";
+ rwcli:new-mode "cm-config";
- container ro-endpoint {
- description "Resource Orchestrator endpoint ip address";
- rwpb:msg-new "RoEndpoint";
- uses ro-endpoint;
- }
-
- //uses vnf-cfg-items;
-
- list nsr {
- key "id";
- leaf id {
- description "Indicates NSR bringup complete, now initiate configuration of the NSR";
- type yang:uuid;
+ container ro-endpoint {
+ description "Resource Orchestrator endpoint ip address";
+ rwpb:msg-new "RoEndpoint";
+ uses ro-endpoint;
}
- }
- }// cm-config
+
+ //uses vnf-cfg-items;
+
+ list nsr {
+ key "id";
+ leaf id {
+ description "Indicates NSR bringup complete, now initiate configuration of the NSR";
+ type yang:uuid;
+ }
+ }
+ }// cm-config
+ }
// =================== SHOW ==================
typedef record-state {
@@ -185,76 +208,78 @@
// This is also used by RO (Resource Orchestrator) to indicate NSR is ready
// It will only fill in IDs
- container cm-state {
- rwpb:msg-new "CmOpdata";
- config false;
- description "CM NS & VNF states";
+ augment "/rw-project:project" {
+ container cm-state {
+ rwpb:msg-new "CmOpdata";
+ config false;
+ description "CM NS & VNF states";
- leaf states {
- description "CM various states";
- type string;
- }
+ leaf states {
+ description "CM various states";
+ type string;
+ }
- list cm-nsr {
- description "List of NS Records";
- key "id";
- leaf id {
- type yang:uuid;
- }
- leaf name {
- description "NSR name.";
- type string;
- }
- leaf state {
- description "State of NSR";
- type record-state;
- }
- leaf state-details {
- description "Details of the state of NSR, in case of errors";
- type string;
- }
-
- list cm-vnfr {
- description "List of VNF Records within NS Record";
+ list cm-nsr {
+ description "List of NS Records";
key "id";
leaf id {
type yang:uuid;
}
leaf name {
- description "VNFR name.";
+ description "NSR name.";
type string;
}
leaf state {
- description "Last known state of this VNFR";
+ description "State of NSR";
type record-state;
}
- container mgmt-interface {
- leaf ip-address {
- type inet:ip-address;
- }
- leaf port {
- type inet:port-number;
- }
- }
- leaf cfg-type {
+ leaf state-details {
+ description "Details of the state of NSR, in case of errors";
type string;
}
- leaf cfg-location {
- type inet:uri;
- }
- list connection-point {
- key "name";
+
+ list cm-vnfr {
+ description "List of VNF Records within NS Record";
+ key "id";
+ leaf id {
+ type yang:uuid;
+ }
leaf name {
- description "Connection Point name";
+ description "VNFR name.";
type string;
}
- leaf ip-address {
- description "IP address assigned to this connection point";
- type inet:ip-address;
+ leaf state {
+ description "Last known state of this VNFR";
+ type record-state;
}
- }
- } // list VNFR
- } // list NSR
- } // cm-state
-
+ container mgmt-interface {
+ leaf ip-address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ leaf cfg-type {
+ type string;
+ }
+ leaf cfg-location {
+ type inet:uri;
+ }
+ list connection-point {
+ key "name";
+ leaf name {
+ description "Connection Point name";
+ type string;
+ }
+ leaf ip-address {
+ description "IP address assigned to this connection point";
+ type inet:ip-address;
+ }
+ }
+ } // list VNFR
+ } // list NSR
+ } // cm-state
+ }
+
} // rw-conman
diff --git a/rwcm/test/rwso_test.py b/rwcm/test/rwso_test.py
index e0c5011..7d2a4a8 100755
--- a/rwcm/test/rwso_test.py
+++ b/rwcm/test/rwso_test.py
@@ -240,11 +240,11 @@
self.log.debug("STARTING - test_create_resource_pools")
tinfo = self.new_tinfo('poolconfig')
dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
- pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_records_xpath = "D,/rw-project:project/rw-resource-mgr:resource-pool-records"
account_xpath = "C,/rw-launchpad:cloud-account"
- compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
- network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+ compute_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+ network_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
@asyncio.coroutine
def configure_cloud_account():
diff --git a/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt b/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt
index f11616c..fefbd3e 100644
--- a/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt
+++ b/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt
@@ -6,8 +6,10 @@
ietf-restconf-monitoring
ietf-yang-types
mano-types
-nsd
+nsd-base
nsr
+project-nsd
+project-vnfd
rw-base
rwcal
rw-cli-ext
@@ -32,10 +34,14 @@
rw-netconf
rw-restconf
rw-notify-ext
-rw-nsd
+rw-nsd-base
rw-nsm
rw-nsr
rw-pb-ext
+rw-project
+rw-project-mano
+rw-project-nsd
+rw-project-vnfd
rw-resource-mgr
rw-restportforward
rwsdnal
@@ -47,11 +53,11 @@
rwvcs-types
rw-vld
rw-vlr
-rw-vnfd
+rw-vnfd-base
rw-vnfr
rw-yang-types
vld
vlr
-vnfd
+project-vnfd
vnffgd
vnfr
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py
index d71aefc..d43a3d6 100644
--- a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py
@@ -106,6 +106,7 @@
log,
dts,
loop,
+ project,
nsr_id,
monp_id,
scaling_criteria,
@@ -143,6 +144,7 @@
self.log,
self.dts,
self.loop,
+ project,
self.nsr_id,
self.monp_id,
callback=self.add_value)
@@ -236,6 +238,7 @@
log,
dts,
loop,
+ project,
nsr_id,
nsd_id,
scaling_group_name,
@@ -258,6 +261,7 @@
self.loop = loop
self.log = log
self.dts = dts
+ self.project = project
self.nsd_id = nsd_id
self.nsr_id = nsr_id
self.scaling_group_name = scaling_group_name
@@ -270,6 +274,7 @@
self.log,
self.dts,
self.loop,
+ self.project,
self.nsr_id,
callback=self.handle_nsr_monp)
@@ -311,6 +316,7 @@
self.log,
self.dts,
self.loop,
+ self.project,
self.nsr_id,
monp.id,
cri,
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py
index affa579..98e37e6 100644
--- a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py
@@ -1,6 +1,6 @@
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -39,10 +39,149 @@
import rift.mano.cloud
import rift.mano.dts as subscriber
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
+class AutoScalerProject(ManoProject, engine.ScalingPolicy.Delegate):
-class AutoScalerTasklet(rift.tasklets.Tasklet, engine.ScalingPolicy.Delegate):
+ def __init__(self, name, tasklet, **kw):
+ super(AutoScalerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self.store = None
+ self.monparam_store = None
+ self.nsr_sub = None
+ self.nsr_monp_subscribers = {}
+ self.instance_id_store = collections.defaultdict(list)
+
+ self.store = subscriber.SubscriberStore.from_project(self)
+ self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop,
+ self, self.handle_nsr)
+
+ def deregister(self):
+ self.log.debug("De-register project {}".format(self.name))
+ self.nsr_sub.deregister()
+ self.store.deregister()
+
+
+ @asyncio.coroutine
+ def register (self):
+ self.log.debug("creating vnfr subscriber")
+ yield from self.store.register()
+ yield from self.nsr_sub.register()
+
+ def scale_in(self, scaling_group_name, nsr_id):
+ """Delegate callback
+
+ Args:
+ scaling_group_name (str): Scaling group name to be scaled in
+ nsr_id (str): NSR id
+
+ """
+ self.log.info("Sending a scaling-in request for {} in NSR: {}".format(
+ scaling_group_name,
+ nsr_id))
+
+ @asyncio.coroutine
+ def _scale_in():
+ instance_id = self.instance_id_store[(scaling_group_name, nsr_id)].pop()
+
+ # Trigger an rpc
+ rpc_ip = NsrYang.YangInput_Nsr_ExecScaleIn.from_dict({
+ 'project_name': self.name,
+ 'nsr_id_ref': nsr_id,
+ 'instance_id': instance_id,
+ 'scaling_group_name_ref': scaling_group_name})
+
+ rpc_out = yield from self.dts.query_rpc(
+ "/nsr:exec-scale-in",
+ 0,
+ rpc_ip)
+
+ self.loop.create_task(_scale_in())
+
+ def scale_out(self, scaling_group_name, nsr_id):
+ """Delegate callback for scale out requests
+
+ Args:
+ scaling_group_name (str): Scaling group name
+ nsr_id (str): NSR ID
+ """
+ self.log.info("Sending a scaling-out request for {} in NSR: {}".format(
+ scaling_group_name,
+ nsr_id))
+
+ @asyncio.coroutine
+ def _scale_out():
+ # Trigger an rpc
+ rpc_ip = NsrYang.YangInput_Nsr_ExecScaleOut.from_dict({
+ 'project_name': self.name,
+ 'nsr_id_ref': nsr_id ,
+ 'scaling_group_name_ref': scaling_group_name})
+
+ itr = yield from self.dts.query_rpc("/nsr:exec-scale-out", 0, rpc_ip)
+
+ key = (scaling_group_name, nsr_id)
+ for res in itr:
+ result = yield from res
+ rpc_out = result.result
+ self.instance_id_store[key].append(rpc_out.instance_id)
+
+ self.log.info("Created new scaling group {} with instance id {}".format(
+ scaling_group_name,
+ rpc_out.instance_id))
+
+ self.loop.create_task(_scale_out())
+
+
+ def handle_nsr(self, nsr, action):
+ """Callback for NSR opdata changes. Creates a publisher for every
+ NS that moves to config state.
+
+ Args:
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): Ns Opdata
+ action (rwdts.QueryAction): Action type of the change.
+ """
+ def nsr_create():
+ if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monp_subscribers:
+ nsr_id = nsr.ns_instance_config_ref
+ self.nsr_monp_subscribers[nsr_id] = []
+ nsd = self.store.get_nsd(nsr.nsd_ref)
+ @asyncio.coroutine
+ def task():
+ for scaling_group in nsd.scaling_group_descriptor:
+ for policy_cfg in scaling_group.scaling_policy:
+ policy = engine.ScalingPolicy(
+ self.log, self.dts, self.loop, self,
+ nsr.ns_instance_config_ref,
+ nsr.nsd_ref,
+ scaling_group.name,
+ policy_cfg,
+ self.store,
+ delegate=self)
+ self.nsr_monp_subscribers[nsr_id].append(policy)
+ yield from policy.register()
+
+ self.loop.create_task(task())
+
+
+ def nsr_delete():
+ if nsr.ns_instance_config_ref in self.nsr_monp_subscribers:
+ policies = self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+ for policy in policies:
+ policy.deregister()
+ del self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ nsr_create()
+ elif action == rwdts.QueryAction.DELETE:
+ nsr_delete()
+
+
+class AutoScalerTasklet(rift.tasklets.Tasklet):
"""The main task of this Tasklet is to listen for NSR changes and once the
NSR is configured, ScalingPolicy is created.
"""
@@ -50,12 +189,9 @@
try:
super().__init__(*args, **kwargs)
- self.store = None
- self.monparam_store = None
- self.nsr_sub = None
- self.nsr_monp_subscribers = {}
- self.instance_id_store = collections.defaultdict(list)
+ self._project_handler = None
+ self.projects = {}
except Exception as e:
self.log.exception(e)
@@ -72,9 +208,6 @@
self.on_dts_state_change
)
- self.store = subscriber.SubscriberStore.from_tasklet(self)
- self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop, self.handle_nsr)
-
self.log.debug("Created DTS Api GI Object: %s", self.dts)
def stop(self):
@@ -85,9 +218,9 @@
@asyncio.coroutine
def init(self):
- self.log.debug("creating vnfr subscriber")
- yield from self.store.register()
- yield from self.nsr_sub.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, AutoScalerProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
@@ -124,107 +257,3 @@
if next_state is not None:
self.dts.handle.set_state(next_state)
- def scale_in(self, scaling_group_name, nsr_id):
- """Delegate callback
-
- Args:
- scaling_group_name (str): Scaling group name to be scaled in
- nsr_id (str): NSR id
-
- """
- self.log.info("Sending a scaling-in request for {} in NSR: {}".format(
- scaling_group_name,
- nsr_id))
-
- @asyncio.coroutine
- def _scale_in():
- instance_id = self.instance_id_store[(scaling_group_name, nsr_id)].pop()
-
- # Trigger an rpc
- rpc_ip = NsrYang.YangInput_Nsr_ExecScaleIn.from_dict({
- 'nsr_id_ref': nsr_id,
- 'instance_id': instance_id,
- 'scaling_group_name_ref': scaling_group_name})
-
- rpc_out = yield from self.dts.query_rpc(
- "/nsr:exec-scale-in",
- 0,
- rpc_ip)
-
- self.loop.create_task(_scale_in())
-
- def scale_out(self, scaling_group_name, nsr_id):
- """Delegate callback for scale out requests
-
- Args:
- scaling_group_name (str): Scaling group name
- nsr_id (str): NSR ID
- """
- self.log.info("Sending a scaling-out request for {} in NSR: {}".format(
- scaling_group_name,
- nsr_id))
-
- @asyncio.coroutine
- def _scale_out():
- # Trigger an rpc
- rpc_ip = NsrYang.YangInput_Nsr_ExecScaleOut.from_dict({
- 'nsr_id_ref': nsr_id ,
- 'scaling_group_name_ref': scaling_group_name})
-
- itr = yield from self.dts.query_rpc("/nsr:exec-scale-out", 0, rpc_ip)
-
- key = (scaling_group_name, nsr_id)
- for res in itr:
- result = yield from res
- rpc_out = result.result
- self.instance_id_store[key].append(rpc_out.instance_id)
-
- self.log.info("Created new scaling group {} with instance id {}".format(
- scaling_group_name,
- rpc_out.instance_id))
-
- self.loop.create_task(_scale_out())
-
-
- def handle_nsr(self, nsr, action):
- """Callback for NSR opdata changes. Creates a publisher for every
- NS that moves to config state.
-
- Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
- action (rwdts.QueryAction): Action type of the change.
- """
- def nsr_create():
- if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monp_subscribers:
- nsr_id = nsr.ns_instance_config_ref
- self.nsr_monp_subscribers[nsr_id] = []
- nsd = self.store.get_nsd(nsr.nsd_ref)
- @asyncio.coroutine
- def task():
- for scaling_group in nsd.scaling_group_descriptor:
- for policy_cfg in scaling_group.scaling_policy:
- policy = engine.ScalingPolicy(
- self.log, self.dts, self.loop,
- nsr.ns_instance_config_ref,
- nsr.nsd_ref,
- scaling_group.name,
- policy_cfg,
- self.store,
- delegate=self)
- self.nsr_monp_subscribers[nsr_id].append(policy)
- yield from policy.register()
-
- self.loop.create_task(task())
-
-
- def nsr_delete():
- if nsr.ns_instance_config_ref in self.nsr_monp_subscribers:
- policies = self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
- for policy in policies:
- policy.deregister()
- del self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
-
- if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
- nsr_create()
- elif action == rwdts.QueryAction.DELETE:
- nsr_delete()
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py
index 04185b6..f96a832 100644
--- a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py
+++ b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,21 +20,18 @@
class NsrMonParamSubscriber(mano_dts.AbstractOpdataSubscriber):
"""Registers for NSR monitoring parameter changes.
-
+
Attributes:
monp_id (str): Monitoring Param ID
nsr_id (str): NSR ID
"""
- def __init__(self, log, dts, loop, nsr_id, monp_id=None, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, nsr_id, monp_id=None, callback=None):
+ super().__init__(log, dts, loop, project, callback)
self.nsr_id = nsr_id
self.monp_id = monp_id
def get_xpath(self):
- return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ return self.project.add_project(("D,/nsr:ns-instance-opdata/nsr:nsr" +
"[nsr:ns-instance-config-ref='{}']".format(self.nsr_id) +
"/nsr:monitoring-param" +
- ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else ""))
-
-
-
+ ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else "")))
diff --git a/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py b/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py
index c00ca11..ec1016f 100644
--- a/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py
+++ b/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py
@@ -33,17 +33,17 @@
from gi.repository import (
RwNsrYang,
NsrYang,
- NsdYang,
+ ProjectNsdYang as NsdYang,
RwLaunchpadYang as launchpadyang,
RwVnfrYang,
- RwVnfdYang,
- RwNsdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
VnfrYang
)
-ScalingCriteria = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
-ScalingPolicy = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
+ScalingCriteria = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
+ScalingPolicy = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
class MockDelegate(engine.ScalingCriteria.Delegate):
@@ -73,7 +73,7 @@
def __call__(self):
store = mock.MagicMock()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
'id': "1",
'monitoring_param': [
{'description': 'no of ping requests',
@@ -98,12 +98,12 @@
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': '1'})
- mock_vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({'id': '1'})
+ mock_vnfr.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict({
'ns_instance_config_ref': "1",
'name_ref': "Foo",
'nsd_ref': '1',
@@ -138,7 +138,7 @@
scale_in_val = 100
scale_out_val = 200
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict({
'id': '1',
'monitoring_param': (monp_cfg if not self.legacy else []),
'constituent_vnfd': [{'member_vnf_index': 1,
@@ -206,13 +206,13 @@
def _populate_mock_values(self, criterias, nsr_id, floor, ceil):
# Mock publish
# Verify Scale in AND operator
- NsMonParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+ NsMonParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam
publisher = rift.test.dts.DescriptorPublisher(self.log, self.dts, self.loop)
for criteria in criterias:
monp_id = criteria.ns_monitoring_param_ref
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
w_xpath = w_xpath + "[nsr:ns-instance-config-ref='{}']/nsr:monitoring-param".format(nsr_id)
xpath = w_xpath + "[nsr:id ='{}']".format(monp_id)
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py
index 10df45b..42185ae 100644
--- a/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py
@@ -48,7 +48,7 @@
self._loop = loop
self._dts = dts
- def create_job(self, image_name, image_checksum, cloud_account_names=None):
+ def create_job(self, image_name, image_checksum, project, cloud_account_names=None):
""" Create an image upload_job and return an UploadJob instance
Arguments:
@@ -62,6 +62,7 @@
"""
create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
"onboarded_image": {
+ "project_name": project.name,
"image_name": image_name,
"image_checksum": image_checksum,
}
@@ -81,9 +82,9 @@
job_id = rpc_result.job_id
- return UploadJob(self._log, self._loop, self._dts, job_id)
+ return UploadJob(self._log, self._loop, self._dts, job_id, project)
- def create_job_threadsafe(self, image_name, image_checksum, cloud_account_names=None):
+ def create_job_threadsafe(self, image_name, image_checksum, project, cloud_account_names=None):
""" A thread-safe, syncronous wrapper for create_job """
future = concurrent.futures.Future()
@@ -96,7 +97,7 @@
def add_task():
task = self._loop.create_task(
- self.create_job(image_name, image_checksum, cloud_account_names)
+ self.create_job(image_name, image_checksum, project, cloud_account_names)
)
task.add_done_callback(on_done)
@@ -106,11 +107,12 @@
class UploadJob(object):
""" A handle for a image upload job """
- def __init__(self, log, loop, dts, job_id):
+ def __init__(self, log, loop, dts, job_id, project):
self._log = log
self._loop = loop
self._dts = dts
self._job_id = job_id
+ self._project = project
@asyncio.coroutine
def wait_until_complete(self):
@@ -122,12 +124,11 @@
UploadJobCancelled: The upload job was cancelled
"""
self._log.debug("waiting for upload job %s to complete", self._job_id)
+ xpath = self._project.add_project("D,/rw-image-mgmt:upload-jobs/" +
+ "rw-image-mgmt:job[rw-image-mgmt:id='{}']".
+ format(self._job_id))
while True:
- query_iter = yield from self._dts.query_read(
- "D,/rw-image-mgmt:upload-jobs/rw-image-mgmt:job[rw-image-mgmt:id='{}']".format(
- self._job_id
- )
- )
+ query_iter = yield from self._dts.query_read(xpath)
job_status_msg = None
for fut_resp in query_iter:
job_status_msg = (yield from fut_resp).result
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py
index 027e582..f3ba2ed 100644
--- a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py
@@ -22,6 +22,13 @@
import rift.tasklets
import rift.mano.cloud
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectConfigCallbacks,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from . import glance_proxy_server
from . import glance_client
@@ -53,16 +60,18 @@
class CloudAccountDtsHandler(object):
- def __init__(self, log, dts, log_hdl):
+ def __init__(self, log, dts, log_hdl, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._cloud_cfg_subscriber = None
+ self._project = project
def register(self, on_add_apply, on_delete_apply):
- self._log.debug("creating cloud account config handler")
+ self._log.debug("Project {}: creating cloud account config handler".
+ format(self._project.name))
self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, self._log_hdl, self._project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=on_add_apply,
on_delete_apply=on_delete_apply,
@@ -70,6 +79,11 @@
)
self._cloud_cfg_subscriber.register()
+ def deregister(self):
+ self._log.debug("Project {}: Removing cloud account config handler".
+ format(self._project.name))
+ self._cloud_cfg_subscriber.deregister()
+
def openstack_image_to_image_info(openstack_image):
"""Convert the OpenstackImage to a ImageInfo protobuf message
@@ -95,19 +109,21 @@
class ImageDTSShowHandler(object):
""" A DTS publisher for the upload-jobs data container """
- def __init__(self, log, loop, dts, job_controller):
+ def __init__(self, log, loop, dts, job_controller, project):
self._log = log
self._loop = loop
self._dts = dts
self._job_controller = job_controller
+ self._project = project
self._subscriber = None
+ def get_xpath(self):
+ return self._project.add_project("D,/rw-image-mgmt:upload-jobs")
+
@asyncio.coroutine
def register(self):
""" Register as a publisher and wait for reg_ready to complete """
- def get_xpath():
- return "D,/rw-image-mgmt:upload-jobs"
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
@@ -119,7 +135,7 @@
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
- xpath=get_xpath(),
+ xpath=self.get_xpath(),
msg=jobs_pb_msg,
)
@@ -130,7 +146,7 @@
reg_event.set()
self._subscriber = yield from self._dts.register(
- xpath=get_xpath(),
+ xpath=self.get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare,
on_ready=on_ready,
@@ -141,9 +157,18 @@
yield from reg_event.wait()
+ def deregister(self):
+ self._log.debug("Project {}: De-register show image handler".
+ format(self._project.name))
+ if self._subscriber:
+ self._subscriber.delete_element(self.get_xpath())
+ self._subscriber.deregister()
+ self._subscriber = None
+
class ImageDTSRPCHandler(object):
""" A DTS publisher for the upload-job RPC's """
- def __init__(self, log, loop, dts, accounts, glance_client, upload_task_creator, job_controller):
+ def __init__(self, log, loop, dts, accounts, glance_client,
+ upload_task_creator, job_controller, project):
self._log = log
self._loop = loop
self._dts = dts
@@ -151,8 +176,10 @@
self._glance_client = glance_client
self._upload_task_creator = upload_task_creator
self._job_controller = job_controller
+ self._project = project
- self._subscriber = None
+ self._create = None
+ self._cancel = None
@asyncio.coroutine
def _register_create_upload_job(self):
@@ -164,6 +191,10 @@
create_msg = msg
account_names = create_msg.cloud_account
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
# If cloud accounts were not specified, upload image to all cloud account
if not account_names:
account_names = list(self._accounts.keys())
@@ -217,14 +248,14 @@
def on_ready(_, status):
reg_event.set()
- self._subscriber = yield from self._dts.register(
- xpath="I," + get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare,
- on_ready=on_ready,
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._create = yield from self._dts.register(
+ xpath="I," + get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare,
+ on_ready=on_ready,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
yield from reg_event.wait()
@@ -235,6 +266,9 @@
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
if not msg.has_field("job_id"):
self._log.error("cancel-upload-job missing job-id field.")
xact_info.respond_xpath(rwdts.XactRspCode.NACK)
@@ -256,14 +290,14 @@
def on_ready(_, status):
reg_event.set()
- self._subscriber = yield from self._dts.register(
- xpath="I," + get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare,
- on_ready=on_ready,
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._cancel = yield from self._dts.register(
+ xpath="I," + get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare,
+ on_ready=on_ready,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
yield from reg_event.wait()
@@ -273,16 +307,28 @@
yield from self._register_create_upload_job()
yield from self._register_cancel_upload_job()
+ def deregister(self):
+ self._log.debug("Project {}: Deregister image rpc handlers".
+ format(self._project.name))
+ if self._create:
+ self._create.deregister()
+ self._create = None
+
+ if self._cancel:
+ self._cancel.deregister()
+ self._cancel = None
+
class GlanceClientUploadTaskCreator(object):
""" This class creates upload tasks using configured cloud accounts and
configured image catalog glance client """
- def __init__(self, log, loop, accounts, glance_client):
+ def __init__(self, log, loop, accounts, glance_client, project):
self._log = log
self._loop = loop
self._accounts = accounts
self._glance_client = glance_client
+ self._project = project
@asyncio.coroutine
def create_tasks(self, account_names, image_id=None, image_name=None, image_checksum=None):
@@ -397,6 +443,78 @@
create_msg.image_checksum if "image_checksum" in create_msg else None)
)
+class ImageMgrProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ImageMgrProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ try:
+ self.glance_client = kw['client']
+ except KeyError as e:
+ self._log.exception("kw {}: {}".format(kw, e))
+
+ self.cloud_cfg_subscriber = None
+ self.job_controller = None
+ self.task_creator = None
+ self.rpc_handler = None
+ self.show_handler = None
+
+ self.cloud_accounts = {}
+
+ @asyncio.coroutine
+ def register(self):
+ try:
+ self.log.debug("creating cloud account handler")
+ self.cloud_cfg_subscriber = CloudAccountDtsHandler(self._log,
+ self._dts,
+ self._log_hdl,
+ self)
+ self.cloud_cfg_subscriber.register(
+ self.on_cloud_account_create,
+ self.on_cloud_account_delete
+ )
+
+ self.job_controller = upload.ImageUploadJobController(
+ self.log, self.loop, self
+ )
+
+ self.task_creator = GlanceClientUploadTaskCreator(
+ self.log, self.loop, self.cloud_accounts,
+ self.glance_client, self
+ )
+
+ self.rpc_handler = ImageDTSRPCHandler(
+ self.log, self.loop, self.dts, self.cloud_accounts,
+ self.glance_client, self.task_creator,
+ self.job_controller, self
+ )
+ yield from self.rpc_handler.register()
+
+ self.show_handler = ImageDTSShowHandler(
+ self.log, self.loop, self.dts, self.job_controller, self
+ )
+ yield from self.show_handler.register()
+ except Exception as e:
+ self.log.exception("Error during project {} register: e".
+ format(self.name, e))
+
+ def deregister(self):
+ self.log.debug("De-register handlers for project: {}".format(self.name))
+ self.rpc_handler.deregister()
+ self.show_handler.deregister()
+ self.cloud_cfg_subscriber.deregister()
+
+ def on_cloud_account_create(self, account):
+ self.log.debug("adding cloud account: %s", account.name)
+ self.cloud_accounts[account.name] = account
+
+ def on_cloud_account_delete(self, account_name):
+ self.log.debug("deleting cloud account: %s", account_name)
+ if account_name not in self.cloud_accounts:
+ self.log.warning("cloud account not found: %s", account_name)
+ else:
+ del self.cloud_accounts[account_name]
+
class ImageManagerTasklet(rift.tasklets.Tasklet):
"""
@@ -409,16 +527,13 @@
super().__init__(*args, **kwargs)
self.rwlog.set_category("rw-mano-log")
- self.cloud_cfg_subscriber = None
self.http_proxy = None
self.proxy_server = None
self.dts = None
- self.job_controller = None
- self.cloud_accounts = {}
self.glance_client = None
- self.task_creator = None
- self.rpc_handler = None
- self.show_handler = None
+ self.project_handler = None
+
+ self.projects = {}
def start(self):
super().start()
@@ -443,13 +558,6 @@
@asyncio.coroutine
def init(self):
try:
- self.log.debug("creating cloud account handler")
- self.cloud_cfg_subscriber = CloudAccountDtsHandler(self.log, self.dts, self.log_hdl)
- self.cloud_cfg_subscriber.register(
- self.on_cloud_account_create,
- self.on_cloud_account_delete
- )
-
self.log.debug("creating http proxy server")
self.http_proxy = glance_proxy_server.QuickProxyServer(self.log, self.loop)
@@ -459,43 +567,18 @@
)
self.proxy_server.start()
- self.job_controller = upload.ImageUploadJobController(
- self.log, self.loop
- )
-
self.glance_client = glance_client.OpenstackGlanceClient.from_token(
self.log, "127.0.0.1", "9292", "test"
)
- self.task_creator = GlanceClientUploadTaskCreator(
- self.log, self.loop, self.cloud_accounts, self.glance_client
- )
-
- self.rpc_handler = ImageDTSRPCHandler(
- self.log, self.loop, self.dts, self.cloud_accounts, self.glance_client, self.task_creator,
- self.job_controller
- )
- yield from self.rpc_handler.register()
-
- self.show_handler = ImageDTSShowHandler(
- self.log, self.loop, self.dts, self.job_controller
- )
- yield from self.show_handler.register()
+ self.log.debug("Creating project handler")
+ self.project_handler = ProjectHandler(self, ImageMgrProject,
+ client=self.glance_client)
+ self.project_handler.register()
except Exception as e:
self.log.exception("error during init")
- def on_cloud_account_create(self, account):
- self.log.debug("adding cloud account: %s", account.name)
- self.cloud_accounts[account.name] = account
-
- def on_cloud_account_delete(self, account_name):
- self.log.debug("deleting cloud account: %s", account_name)
- if account_name not in self.cloud_accounts:
- self.log.warning("cloud account not found: %s", account_name)
-
- del self.cloud_accounts[account_name]
-
@asyncio.coroutine
def run(self):
pass
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
index c1716d3..569fc54 100644
--- a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
@@ -51,9 +51,10 @@
""" This class starts and manages ImageUploadJobs """
MAX_COMPLETED_JOBS = 20
- def __init__(self, log, loop, max_completed_jobs=MAX_COMPLETED_JOBS):
+ def __init__(self, log, loop, project, max_completed_jobs=MAX_COMPLETED_JOBS):
self._log = log
self._loop = loop
+ self._project = project
self._job_id_gen = itertools.count(1)
self._max_completed_jobs = max_completed_jobs
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py b/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py
index 7ba4f76..108da86 100755
--- a/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py
+++ b/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py
@@ -45,6 +45,7 @@
from rift.tasklets.rwimagemgr import tasklet
from rift.tasklets.rwimagemgr import upload
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
from rift.test.dts import async_test
@@ -76,15 +77,16 @@
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = ManoProject(self.log, name=DEFAULT_PROJECT)
self.task_creator_mock = create_upload_task_creator_mock()
self.job_controller_mock = create_job_controller_mock()
self.rpc_handler = tasklet.ImageDTSRPCHandler(
self.log, self.loop, self.dts, {'mock', None}, object(), self.task_creator_mock,
- self.job_controller_mock
+ self.job_controller_mock, self.project,
)
self.show_handler = tasklet.ImageDTSShowHandler(
- self.log, self.loop, self.dts, self.job_controller_mock
+ self.log, self.loop, self.dts, self.job_controller_mock, self.project,
)
self.tinfo_c = self.new_tinfo(self.id() + "_client")
@@ -119,7 +121,8 @@
"onboarded_image": {
"image_name": upload_task.image_name,
"image_checksum": upload_task.image_checksum,
- }
+ },
+ "project_name": self.project.name,
})
query_iter = yield from self.dts_c.query_rpc(
@@ -138,7 +141,7 @@
)
query_iter = yield from self.dts_c.query_read(
- "D,/rw-image-mgmt:upload-jobs",
+ self.project.add_project("D,/rw-image-mgmt:upload-jobs"),
)
for fut_resp in query_iter:
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py b/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py
index 9d4464f..32a36a2 100755
--- a/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py
+++ b/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py
@@ -34,6 +34,7 @@
from rift.tasklets.rwimagemgr import upload
from rift.package import checksums
from rift.test.dts import async_test
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
import rw_status
import gi
@@ -198,7 +199,7 @@
class ImageMockMixin(object):
- ACCOUNT_MSG = RwCloudYang.CloudAccount(
+ ACCOUNT_MSG = RwCloudYang.CloudAcc(
name="mock",
account_type="mock",
)
@@ -252,6 +253,8 @@
task_pb_msg = upload_task.pb_msg
self.assertEqual(upload_task.image_name, task_pb_msg.image_name)
+ # TODO: Fix this
+ @unittest.skip("Causes coredump in OSM")
@async_test
def test_cancel_image_task(self):
@asyncio.coroutine
@@ -348,6 +351,8 @@
self.assertEqual("FAILED", job.state)
+ # TODO: Fix this
+ @unittest.skip("Causes coredump in OSM")
@async_test
def test_cancel_job(self):
@asyncio.coroutine
@@ -379,14 +384,14 @@
def __init__(self, *args, **kwargs):
self._loop = asyncio.get_event_loop()
self._log = logging.getLogger(__file__)
-
+ self._project = ManoProject(self._log, name=DEFAULT_PROJECT)
ImageMockMixin.__init__(self, self._log)
unittest.TestCase.__init__(self, *args, **kwargs)
@async_test
def test_controller_single_task_job(self):
controller = upload.ImageUploadJobController(
- self._log, self._loop
+ self._log, self._loop, self._project,
)
with self.create_upload_task(self.account) as upload_task:
@@ -406,7 +411,7 @@
@async_test
def test_controller_multi_task_job(self):
controller = upload.ImageUploadJobController(
- self._log, self._loop
+ self._log, self._loop, self._project
)
with self.create_upload_task(self.account) as upload_task1:
@@ -423,7 +428,7 @@
@async_test
def test_controller_multi_jobs(self):
controller = upload.ImageUploadJobController(
- self._log, self._loop
+ self._log, self._loop, self._project,
)
with self.create_upload_task(self.account) as upload_task1:
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py
index 7571c57..e10d7ca 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,21 +16,29 @@
#
import json
+import logging
import os
-import tempfile
import gi
gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwYang', '1.0')
from gi.repository import (
RwNsdYang,
RwVnfdYang,
NsdYang,
VnfdYang,
+ RwProjectNsdYang,
+ RwProjectVnfdYang,
+ ProjectNsdYang,
+ ProjectVnfdYang,
RwYang,
)
+from rift.mano.utils.project import NS_PROJECT
+
class UnknownExtensionError(Exception):
pass
@@ -51,10 +59,15 @@
"""(De)Serializer/deserializer fo a specific protobuf message into various formats"""
libncx_model = None
- def __init__(self, yang_ns, yang_pb_cls):
+ def __init__(self, yang_ns, yang_pb_cls,
+ yang_ns_project, yang_pb_project_cls):
""" Create a serializer for a specific protobuf message """
self._yang_ns = yang_ns
self._yang_pb_cls = yang_pb_cls
+ self._yang_ns_project = yang_ns_project
+ self._yang_pb_project_cls = yang_pb_project_cls
+
+ self._log = logging.getLogger('rw-maon-log')
@classmethod
def _deserialize_extension_method_map(cls):
@@ -101,6 +114,16 @@
return self._yang_pb_cls
@property
+ def yang_ns_project(self):
+ """ The Protobuf's GI namespace class (e.g. RwProjectVnfdYang) """
+ return self._yang_ns_project
+
+ @property
+ def yang_class_project(self):
+ """ The Protobuf's GI class (e.g. RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd) """
+ return self._yang_pb_project_cls
+
+ @property
def model(self):
cls = self.__class__
@@ -108,6 +131,7 @@
if cls.libncx_model is None:
cls.libncx_model = RwYang.model_create_libncx()
cls.libncx_model.load_schema_ypbc(self.yang_namespace.get_schema())
+ cls.libncx_model.load_schema_ypbc(self.yang_ns_project.get_schema())
return cls.libncx_model
@@ -117,20 +141,54 @@
return self.yang_class.from_xml_v2(self.model, decode(xml), strict=False)
def _from_json_file_hdl(self, file_hdl):
- json = file_hdl.read()
+ jstr = file_hdl.read()
+ self._log.debug("Convert from json file: {}".format(jstr))
- return self.yang_class.from_json(self.model, decode(json), strict=False)
+ try:
+ desc_msg = self.yang_class.from_json(self.model, decode(jstr), strict=False)
+ self._log.debug("desc_msg: {}".format(desc_msg.as_dict()))
+ return self.yang_class_project.from_dict(desc_msg.as_dict())
+ except Exception as e:
+ self._log.exception(e)
+ raise e
def _from_yaml_file_hdl(self, file_hdl):
- yaml = file_hdl.read()
+ yml = file_hdl.read()
- return self.yang_class.from_yaml(self.model, decode(yaml), strict=False)
+ try:
+ desc_msg = self.yang_class.from_yaml(self.model, decode(yml), strict=False)
+ return self.yang_class_project.from_dict(desc_msg.as_dict())
+ except Exception as e:
+ self._log.exception(e)
+ raise e
- def to_json_string(self, pb_msg):
+ def to_desc_msg(self, pb_msg, project_rooted=True):
+ """Convert to and from project rooted pb msg descriptor to catalog
+ rooted pb msg
+ project_rooted: if pb_msg is project rooted or not
+ """
+ if project_rooted:
+ if isinstance(pb_msg, self._yang_pb_project_cls):
+ return self._yang_pb_cls.from_dict(pb_msg.as_dict())
+ elif isinstance(pb_msg, self._yang_pb_cls):
+ return pb_msg
+
+ else:
+ if isinstance(pb_msg, self._yang_pb_cls):
+ return self._yang_pb_project_cls.from_dict(pb_msg.as_dict())
+ elif isinstance(pb_msg, self._yang_pb_project_cls):
+ return pb_msg
+
+ raise TypeError("Invalid protobuf message type provided: {}".format(type(pb_msg)))
+
+
+ def to_json_string(self, pb_msg, project_ns=False):
""" Serialize a protobuf message into JSON
Arguments:
pb_msg - A GI-protobuf object of type provided into constructor
+ project_ns - Need the desc in project namespace, required for
+ posting to Restconf as part of onboarding
Returns:
A JSON string representing the protobuf message
@@ -139,16 +197,24 @@
SerializationError - Message could not be serialized
TypeError - Incorrect protobuf type provided
"""
- if not isinstance(pb_msg, self._yang_pb_cls):
- raise TypeError("Invalid protobuf message type provided")
-
+ self._log.debug("Convert desc to json (ns:{}): {}".format(project_ns, pb_msg.as_dict()))
try:
- json_str = pb_msg.to_json(self.model)
+ # json_str = pb_msg.to_json(self.model)
+
+ desc_msg = self.to_desc_msg(pb_msg, not project_ns)
+ json_str = desc_msg.to_json(self.model)
+ if project_ns:
+ # Remove rw-project:project top level element
+ dic = json.loads(json_str)
+ jstr = json.dumps(dic[NS_PROJECT][0])
+ else:
+ jstr = json_str
except Exception as e:
raise SerializationError(e)
- return json_str
+ self._log.debug("Convert desc to json: {}".format(jstr))
+ return jstr
def to_yaml_string(self, pb_msg):
""" Serialize a protobuf message into YAML
@@ -163,13 +229,12 @@
SerializationError - Message could not be serialized
TypeError - Incorrect protobuf type provided
"""
- if not isinstance(pb_msg, self._yang_pb_cls):
- raise TypeError("Invalid protobuf message type provided")
-
try:
- yaml_str = pb_msg.to_yaml(self.model)
+ desc_msg = self.to_desc_msg(pb_msg)
+ yaml_str = desc_msg.to_yaml(self.model)
except Exception as e:
+ self._log.exception("Exception converting to yaml: {}".format(e))
raise SerializationError(e)
return yaml_str
@@ -187,13 +252,12 @@
SerializationError - Message could not be serialized
TypeError - Incorrect protobuf type provided
"""
- if not isinstance(pb_msg, self._yang_pb_cls):
- raise TypeError("Invalid protobuf message type provided")
-
try:
- xml_str = pb_msg.to_xml_v2(self.model)
+ desc_msg = self.to_desc_msg(pb_msg)
+ xml_str = desc_msg.to_xml_v2(self.model)
except Exception as e:
+ self._log.exception("Exception converting to xml: {}".format(e))
raise SerializationError(e)
return xml_str
@@ -262,22 +326,26 @@
class VnfdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the VNFD descriptor"""
def __init__(self):
- super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+ super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd,
+ ProjectVnfdYang, ProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd)
class NsdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the NSD descriptor"""
def __init__(self):
- super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd)
+ super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd,
+ ProjectNsdYang, ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd)
class RwVnfdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the VNFD descriptor"""
def __init__(self):
- super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+ super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd,
+ RwProjectVnfdYang, RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd)
class RwNsdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the NSD descriptor"""
def __init__(self):
- super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd)
+ super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd,
+ RwProjectNsdYang, RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
index 9ebd03c..a88cfdc 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
@@ -52,9 +52,9 @@
@property
def root_dir(self):
return self._root_dir
-
def _get_package_dir(self, package_id):
+ self._log.debug("Package dir {}, {}".format(self._root_dir, package_id))
return os.path.join(self._root_dir, package_id)
def _get_package_files(self, package_id):
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py
index 05731a6..a481a11 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py
@@ -35,19 +35,20 @@
XPATH = "D,/rw-launchpad:datacenters"
- def __init__(self, log, dts, loop):
+ def __init__(self, log, dts, loop, project):
"""Creates an instance of a DataCenterPublisher
Arguments:
tasklet - the tasklet that this publisher is registered for
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self._ro_sub = mano_dts.ROAccountConfigSubscriber(
self.log,
self.dts,
self.loop,
+ self.project,
callback=self.on_ro_account_change
)
self.ro_accounts = {}
@@ -58,6 +59,15 @@
elif action == RwDts.QueryAction.DELETE and ro_account.name in self.ro_accounts:
del self.ro_accounts[ro_account.name]
+ def deregister(self):
+ self._log.debug("De-register datacenter handler for project {}".
+ format(self.project.name))
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
+ self._ro_sub.deregister()
+
@asyncio.coroutine
def register(self):
"""Registers the publisher with DTS"""
@@ -105,7 +115,7 @@
xact_info.respond_xpath(
RwDts.XactRspCode.MORE,
- 'D,/rw-launchpad:datacenters',
+ self.project.add_project(DataCenterPublisher.XPATH),
datacenters,
)
@@ -119,7 +129,7 @@
with self.dts.group_create() as group:
self.reg = group.register(
- xpath=DataCenterPublisher.XPATH,
+ xpath=self.project.add_project(DataCenterPublisher.XPATH),
handler=handler,
flags=RwDts.Flag.PUBLISHER,
)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
index ff6a373..d2cdb65 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,13 +35,9 @@
from . import tosca
import gi
-gi.require_version('NsdYang', '1.0')
-gi.require_version('VnfdYang', '1.0')
gi.require_version('RwPkgMgmtYang', '1.0')
from gi.repository import (
- NsdYang,
- VnfdYang,
RwPkgMgmtYang)
import rift.mano.dts as mano_dts
@@ -195,21 +191,18 @@
class ExportRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application, store_map, exporter, catalog_map):
+ def __init__(self, application, catalog_map):
"""
Args:
application: UploaderApplication
- store_map: dict containing VnfdStore & NsdStore
- exporter : DescriptorPackageArchiveExporter
calalog_map: Dict containing Vnfds and Nsd onboarding.
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
- self.store_map = store_map
- self.exporter = exporter
+ self.store_map = application.package_store_map
+ self.exporter = application.exporter
self.catalog_map = catalog_map
- self.log = log
@property
def xpath(self):
@@ -240,7 +233,7 @@
# Parse the IDs
desc_id = msg.package_id
- catalog = self.catalog_map[desc_type]
+ catalog = self.catalog_map[desc_type](project=msg.project_name)
if desc_id not in catalog:
raise ValueError("Unable to find package ID: {}".format(desc_id))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
index 7c4dfa0..2527aef 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
@@ -65,9 +65,9 @@
except Exception as e:
raise ImageUploadError("Failed to upload image to catalog: %s" % str(e)) from e
- def upload_image_to_cloud_accounts(self, image_name, image_checksum, cloud_accounts=None):
+ def upload_image_to_cloud_accounts(self, image_name, image_checksum, project, cloud_accounts=None):
self._log.debug("uploading image %s to all cloud accounts", image_name)
- upload_job = self._client.create_job_threadsafe(image_name, image_checksum, cloud_accounts)
+ upload_job = self._client.create_job_threadsafe(image_name, image_checksum, project, cloud_accounts)
try:
upload_job.wait_until_complete_threadsafe()
except client.UploadJobError as e:
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py
index b12e192..ac505ec 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py
@@ -17,12 +17,13 @@
import requests
+from rift.mano.utils.project import DEFAULT_PROJECT
from rift.package import convert
from gi.repository import (
- NsdYang,
- RwNsdYang,
- VnfdYang,
- RwVnfdYang,
+ ProjectNsdYang as NsdYang,
+ RwProjectNsdYang as RwNsdYang,
+ ProjectVnfdYang as VnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
)
@@ -37,17 +38,17 @@
class DescriptorOnboarder(object):
""" This class is responsible for onboarding descriptors using Restconf"""
DESC_ENDPOINT_MAP = {
- NsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
- RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
- VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
- RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: "nsd-catalog/nsd",
+ RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: "nsd-catalog/nsd",
+ VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
}
DESC_SERIALIZER_MAP = {
- NsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.NsdSerializer(),
- RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.RwNsdSerializer(),
- VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
- RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: convert.NsdSerializer(),
+ RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: convert.RwNsdSerializer(),
+ VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
+ RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
}
HEADERS = {"content-type": "application/vnd.yang.data+json"}
@@ -72,28 +73,32 @@
return headers
- def _get_url(self, descriptor_msg):
+ def _get_url(self, descriptor_msg, project=None):
if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
raise TypeError("Invalid descriptor message type")
+ if project is None:
+ project = DEFAULT_PROJECT
+
endpoint = DescriptorOnboarder.DESC_ENDPOINT_MAP[type(descriptor_msg)]
+ ep = "project/{}/{}".format(project, endpoint)
url = "{}://{}:{}/api/config/{}".format(
"https" if self._use_ssl else "http",
self._host,
self.port,
- endpoint,
+ ep,
)
return url
- def _make_request_args(self, descriptor_msg, auth=None):
+ def _make_request_args(self, descriptor_msg, auth=None, project=None):
if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
raise TypeError("Invalid descriptor message type")
serializer = DescriptorOnboarder.DESC_SERIALIZER_MAP[type(descriptor_msg)]
- json_data = serializer.to_json_string(descriptor_msg)
- url = self._get_url(descriptor_msg)
+ json_data = serializer.to_json_string(descriptor_msg, project_ns=True)
+ url = self._get_url(descriptor_msg, project=project)
request_args = dict(
url=url,
@@ -107,7 +112,7 @@
return request_args
- def update(self, descriptor_msg, auth=None):
+ def update(self, descriptor_msg, auth=None, project=None):
""" Update the descriptor config
Arguments:
@@ -134,7 +139,7 @@
self._log.error(msg)
raise UpdateError(msg) from e
- def onboard(self, descriptor_msg, auth=None):
+ def onboard(self, descriptor_msg, auth=None, project=None):
""" Onboard the descriptor config
Arguments:
@@ -145,20 +150,23 @@
OnboardError - The descriptor config update failed
"""
- request_args = self._make_request_args(descriptor_msg, auth)
+ request_args = self._make_request_args(descriptor_msg, auth, project)
try:
response = requests.post(**request_args)
response.raise_for_status()
except requests.exceptions.ConnectionError as e:
msg = "Could not connect to restconf endpoint: %s" % str(e)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
except requests.exceptions.HTTPError as e:
msg = "POST request to %s error: %s" % (request_args["url"], response.text)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
except requests.exceptions.Timeout as e:
msg = "Timed out connecting to restconf endpoint: %s", str(e)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py
index 0eff616..d416f9c 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py
@@ -40,6 +40,12 @@
import rift.tasklets
import rift.mano.cloud
import rift.mano.config_agent
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from rift.package import store
from . import uploader
@@ -53,73 +59,53 @@
MAX_BODY_SIZE = 1 * MB # Max. size loaded into memory!
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
+class LaunchpadError(Exception):
+ pass
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
+class LpProjectNotFound(Exception):
+ pass
class CatalogDtsHandler(object):
- def __init__(self, tasklet, app):
+ def __init__(self, project, app):
self.app = app
self.reg = None
- self.tasklet = tasklet
+ self.project = project
@property
def log(self):
- return self.tasklet.log
+ return self.project.log
@property
def dts(self):
- return self.tasklet.dts
+ return self.project.dts
class NsdCatalogDtsHandler(CatalogDtsHandler):
- XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+ XPATH = "C,/project-nsd:nsd-catalog/project-nsd:nsd"
def add_nsd(self, nsd):
self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id))
- if nsd.id not in self.tasklet.nsd_catalog:
- self.tasklet.nsd_catalog[nsd.id] = nsd
+ if nsd.id not in self.project.nsd_catalog:
+ self.project.nsd_catalog[nsd.id] = nsd
else:
self.log.error("nsd already in catalog: {}".format(nsd.id))
def update_nsd(self, nsd):
self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id))
- if nsd.id in self.tasklet.nsd_catalog:
- self.tasklet.nsd_catalog[nsd.id] = nsd
+ if nsd.id in self.project.nsd_catalog:
+ self.project.nsd_catalog[nsd.id] = nsd
else:
self.log.error("unrecognized NSD: {}".format(nsd.id))
def delete_nsd(self, nsd_id):
self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id))
- if nsd_id in self.tasklet.nsd_catalog:
- del self.tasklet.nsd_catalog[nsd_id]
+ if nsd_id in self.project.nsd_catalog:
+ del self.project.nsd_catalog[nsd_id]
else:
self.log.error("unrecognized NSD: {}".format(nsd_id))
try:
- self.tasklet.nsd_package_store.delete_package(nsd_id)
+ self.project.tasklet.nsd_package_store.delete_package(nsd_id)
except store.PackageStoreError as e:
self.log.warning("could not delete package from store: %s", str(e))
@@ -151,47 +137,54 @@
for cfg in update_cfgs:
self.update_nsd(cfg)
- self.log.debug("Registering for NSD catalog")
+ self.log.debug("Registering for NSD catalog in project".
+ format(self.project.name))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self.dts.appconf_group_create(acg_handler) as acg:
+ xpath = self.project.add_project(NsdCatalogDtsHandler.XPATH)
self.reg = acg.register(
- xpath=NsdCatalogDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
)
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
class VnfdCatalogDtsHandler(CatalogDtsHandler):
- XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ XPATH = "C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
def add_vnfd(self, vnfd):
self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id))
- if vnfd.id not in self.tasklet.vnfd_catalog:
- self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+ if vnfd.id not in self.project.vnfd_catalog:
+ self.project.vnfd_catalog[vnfd.id] = vnfd
else:
self.log.error("VNFD already in catalog: {}".format(vnfd.id))
def update_vnfd(self, vnfd):
self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id))
- if vnfd.id in self.tasklet.vnfd_catalog:
- self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+ if vnfd.id in self.project.vnfd_catalog:
+ self.project.vnfd_catalog[vnfd.id] = vnfd
else:
self.log.error("unrecognized VNFD: {}".format(vnfd.id))
def delete_vnfd(self, vnfd_id):
self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id))
- if vnfd_id in self.tasklet.vnfd_catalog:
- del self.tasklet.vnfd_catalog[vnfd_id]
+ if vnfd_id in self.project.vnfd_catalog:
+ del self.project.vnfd_catalog[vnfd_id]
else:
self.log.error("unrecognized VNFD: {}".format(vnfd_id))
try:
- self.tasklet.vnfd_package_store.delete_package(vnfd_id)
+ self.project.tasklet.vnfd_package_store.delete_package(vnfd_id)
except store.PackageStoreError as e:
self.log.warning("could not delete package from store: %s", str(e))
@@ -223,28 +216,36 @@
for cfg in update_cfgs:
self.update_vnfd(cfg)
- self.log.debug("Registering for VNFD catalog")
+ self.log.debug("Registering for VNFD catalog in project {}".
+ format(self.project.name))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self.dts.appconf_group_create(acg_handler) as acg:
+ xpath = self.project.add_project(VnfdCatalogDtsHandler.XPATH)
self.reg = acg.register(
- xpath=VnfdCatalogDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
)
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
class CfgAgentAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._log.debug("creating config agent account config handler")
self.cfg_agent_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
- self._dts, self._log,
+ self._dts, self._log, self._project,
rift.mano.config_agent.ConfigAgentCallbacks(
on_add_apply=self.on_cfg_agent_account_added,
on_delete_apply=self.on_cfg_agent_account_deleted,
@@ -253,7 +254,7 @@
self._log.debug("creating config agent account opdata handler")
self.cfg_agent_operdata_handler = rift.mano.config_agent.CfgAgentDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, self._project
)
def on_cfg_agent_account_deleted(self, account):
@@ -269,46 +270,133 @@
self.cfg_agent_cfg_handler.register()
yield from self.cfg_agent_operdata_handler.register()
+ def deregister(self):
+ self.cfg_agent_operdata_handler.deregister()
+ self.cfg_agent_cfg_handler.deregister()
+
+
class CloudAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, loop, app):
+ def __init__(self, dts, log, log_hdl, loop, app, project):
self._log = log
self._log_hdl = log_hdl
self._dts = dts
self._loop = loop
self._app = app
+ self._project = project
- self._log.debug("creating cloud account config handler")
+ self._log.debug("Creating cloud account config handler for project {}".
+ format(project.name))
self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, self._log_hdl, self._project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=self.on_cloud_account_added,
on_delete_apply=self.on_cloud_account_deleted,
- )
+ ),
)
self._log.debug("creating cloud account opdata handler")
self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, self._project,
)
def on_cloud_account_deleted(self, account_name):
self._log.debug("cloud account deleted")
- self._app.accounts.clear()
- self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+ self._app.accounts[self._project.name] = \
+ list(self.cloud_cfg_handler.accounts.values())
self.cloud_operdata_handler.delete_cloud_account(account_name)
def on_cloud_account_added(self, account):
self._log.debug("cloud account added")
- self._app.accounts.clear()
- self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+ self._app.accounts[self._project.name] = \
+ list(self.cloud_cfg_handler.accounts.values())
self._log.debug("accounts: %s", self._app.accounts)
self.cloud_operdata_handler.add_cloud_account(account)
@asyncio.coroutine
def register(self):
- self.cloud_cfg_handler.register()
+ yield from self.cloud_cfg_handler.register()
yield from self.cloud_operdata_handler.register()
+ def deregister(self):
+ self.cloud_cfg_handler.deregister()
+ self.cloud_operdata_handler.deregister()
+
+
+class LaunchpadProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(LaunchpadProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ self._app = kw['app']
+
+ self.config_handler = None
+ self.nsd_catalog_handler = None
+ self.vld_catalog_handler = None
+ self.vnfd_catalog_handler = None
+ self.cloud_handler = None
+ self.datacenter_handler = None
+ self.lp_config_handler = None
+ self.account_handler = None
+
+ self.nsd_catalog = dict()
+ self.vld_catalog = dict()
+ self.vnfd_catalog = dict()
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @asyncio.coroutine
+ def register(self):
+ self.log.debug("creating NSD catalog handler for project {}".format(self.name))
+ self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self._app)
+ yield from self.nsd_catalog_handler.register()
+
+ self.log.debug("creating VNFD catalog handler for project {}".format(self.name))
+ self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self._app)
+ yield from self.vnfd_catalog_handler.register()
+
+ self.log.debug("creating datacenter handler for project {}".format(self.name))
+ self.datacenter_handler = datacenters.DataCenterPublisher(self.log, self.dts,
+ self.loop, self)
+ yield from self.datacenter_handler.register()
+
+ self.log.debug("creating cloud account handler for project {}".format(self.name))
+ self.cloud_handler = CloudAccountHandlers(self.dts, self.log, self.log_hdl,
+ self.loop, self._app, self)
+ yield from self.cloud_handler.register()
+
+ self.log.debug("creating config agent handler for project {}".format(self.name))
+ self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl,
+ self.loop, self)
+ yield from self.config_handler.register()
+
+ def deregister(self):
+ self.log.debug("De-register handlers for project: {}".format(self.name))
+ self.config_handler.deregister()
+ self.cloud_handler.deregister()
+ self.datacenter_handler.deregister()
+ self.vnfd_catalog_handler.deregister()
+ self.nsd_catalog_handler.deregister()
+
+ @asyncio.coroutine
+ def delete_prepare(self):
+ # TODO: Do we need this check
+ # if self.nsd_catalog or self.vnfd_catalog or self.vld_catalog:
+ # return False
+ return True
+
+ @property
+ def cloud_accounts(self):
+ if self.cloud_handler is None:
+ return list()
+
+ return list(self.cloud_handler.cloud_cfg_handler.accounts.values())
+
class LaunchpadTasklet(rift.tasklets.Tasklet):
UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
@@ -320,31 +408,36 @@
self.rwlog.set_category("rw-mano-log")
self.rwlog.set_subcategory("launchpad")
- self.app = None
- self.server = None
-
- self.account_handler = None
- self.config_handler = None
- self.nsd_catalog_handler = None
- self.vld_catalog_handler = None
- self.vnfd_catalog_handler = None
- self.cloud_handler = None
- self.datacenter_handler = None
- self.lp_config_handler = None
+ self.dts = None
+ self.project_handler = None
self.vnfd_package_store = store.VnfdPackageFilesystemStore(self.log)
self.nsd_package_store = store.NsdPackageFilesystemStore(self.log)
- self.nsd_catalog = dict()
- self.vld_catalog = dict()
- self.vnfd_catalog = dict()
+ self.app = None
+ self.server = None
+ self.projects = {}
+ print("LP Tasklet init")
- @property
- def cloud_accounts(self):
- if self.cloud_handler is None:
- return list()
+ def _get_project(project=None):
+ if project is None:
+ project = DEFAULT_PROJECT
- return list(self.cloud_handler.cloud_cfg_handler.accounts.values())
+ if project in self.projects:
+ return self.projects[project]
+
+ msg = "Project {} not found".format(project)
+ self._log.error(msg)
+ raise LpProjectNotFound(msg)
+
+ def nsd_catalog_get(self, project=None):
+ return self._get_project(project=project).nsd_catalog
+
+ def vnfd_catalog_get(self, project=None):
+ return self._get_project(project=project).vnfd_catalog
+
+ def get_cloud_accounts(self, project=None):
+ return self._get_project(project=project).cloud_accounts
def start(self):
super(LaunchpadTasklet, self).start()
@@ -368,56 +461,51 @@
self.log.exception("Caught Exception in LP stop")
raise
+ def get_vnfd_catalog(self, project):
+ return self.projects[project].vnfd_catalog
+
+ def get_nsd_catalog(self, project):
+ return self.projects[project].nsd_catalog
+
@asyncio.coroutine
def init(self):
- io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.app = uploader.UploaderApplication.from_tasklet(self)
- yield from self.app.register()
+ try:
+ io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+ self.app = uploader.UploaderApplication.from_tasklet(self)
+ yield from self.app.register()
- manifest = self.tasklet_info.get_pb_manifest()
- ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
- ssl_key = manifest.bootstrap_phase.rwsecurity.key
- ssl_options = {
+ manifest = self.tasklet_info.get_pb_manifest()
+ ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+ ssl_key = manifest.bootstrap_phase.rwsecurity.key
+ ssl_options = {
"certfile": ssl_cert,
"keyfile": ssl_key,
- }
+ }
- if manifest.bootstrap_phase.rwsecurity.use_ssl:
- self.server = tornado.httpserver.HTTPServer(
- self.app,
- max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
- io_loop=io_loop,
- ssl_options=ssl_options,
- )
-
- else:
- self.server = tornado.httpserver.HTTPServer(
- self.app,
- max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
- io_loop=io_loop,
- )
-
- self.log.debug("creating NSD catalog handler")
- self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app)
- yield from self.nsd_catalog_handler.register()
-
- self.log.debug("creating VNFD catalog handler")
- self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app)
- yield from self.vnfd_catalog_handler.register()
-
- self.log.debug("creating datacenter handler")
- self.datacenter_handler = datacenters.DataCenterPublisher(self.log, self.dts, self.loop)
- yield from self.datacenter_handler.register()
-
- self.log.debug("creating cloud account handler")
- self.cloud_handler = CloudAccountHandlers(
- self.dts, self.log, self.log_hdl, self.loop, self.app
+ if manifest.bootstrap_phase.rwsecurity.use_ssl:
+ self.server = tornado.httpserver.HTTPServer(
+ self.app,
+ max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+ io_loop=io_loop,
+ ssl_options=ssl_options,
)
- yield from self.cloud_handler.register()
- self.log.debug("creating config agent handler")
- self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl, self.loop)
- yield from self.config_handler.register()
+ else:
+ self.server = tornado.httpserver.HTTPServer(
+ self.app,
+ max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+ io_loop=io_loop,
+ )
+
+ self.log.debug("Registering project handler")
+ print("PJ: Registering project handler")
+ self.project_handler = ProjectHandler(self, LaunchpadProject,
+ app=self.app)
+ self.project_handler.register()
+
+ except Exception as e:
+ self.log.error("Exception : {}".format(e))
+ self.log.exception(e)
@asyncio.coroutine
def run(self):
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py
index c908bb3..1e45ea4 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py
@@ -38,12 +38,12 @@
import gi
gi.require_version('RwLaunchpadYang', '1.0')
-gi.require_version('NsdYang', '1.0')
-gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
- VnfdYang,
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
)
import rift.mano.cloud
@@ -146,12 +146,12 @@
class UploadRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application):
+ def __init__(self, application):
"""
Args:
application: UploaderApplication
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
@property
@@ -164,30 +164,41 @@
log = self.application.get_logger(transaction_id)
log.message(OnboardStart())
+ self.log.debug("Package create RPC: {}".format(msg))
auth = None
if msg.username is not None:
auth = (msg.username, msg.password)
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ self._log.warning("Did not get project name in RPC: {}".
+ format(msg.as_dict()))
+ project = rift.mano.utils.project.DEFAULT_PROJECT
+
self.application.onboard(
msg.external_url,
transaction_id,
- auth=auth
+ auth=auth,
+ project=project,
)
rpc_op = RPC_PACKAGE_CREATE_ENDPOINT.from_dict({
- "transaction_id": transaction_id})
+ "transaction_id": transaction_id,
+ "project_name": project,
+ })
return rpc_op
class UpdateRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application):
+ def __init__(self, application):
"""
Args:
application: UploaderApplication
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
@property
@@ -208,11 +219,14 @@
self.application.update(
msg.external_url,
transaction_id,
- auth=auth
+ auth=auth,
+ project=msg.project_name,
)
rpc_op = RPC_PACKAGE_UPDATE_ENDPOINT.from_dict({
- "transaction_id": transaction_id})
+ "transaction_id": transaction_id,
+ "project_name": msg.project_name,
+ })
return rpc_op
@@ -231,11 +245,12 @@
class UpdatePackage(downloader.DownloaderProtocol):
- def __init__(self, log, loop, url, auth,
+ def __init__(self, log, loop, project, url, auth,
onboarder, uploader, package_store_map):
super().__init__()
self.log = log
self.loop = loop
+ self.project = project
self.url = url
self.auth = auth
self.onboarder = onboarder
@@ -355,7 +370,7 @@
)
try:
self.uploader.upload_image(image_name, image_checksum, image_hdl)
- self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)
+ self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum, self.project)
except image.ImageUploadError as e:
self.log.exception("Failed to upload image: %s", image_name)
@@ -427,22 +442,24 @@
self.log.message(UpdateDescriptorUpdate())
try:
- self.onboarder.update(descriptor_msg)
+ self.onboarder.update(descriptor_msg, project=self.project)
except onboard.UpdateError as e:
raise MessageException(UpdateDescriptorError(package.descriptor_file)) from e
class OnboardPackage(downloader.DownloaderProtocol):
- def __init__(self, log, loop, url, auth,
+ def __init__(self, log, loop, project, url, auth,
onboarder, uploader, package_store_map):
self.log = log
self.loop = loop
+ self.project = project
self.url = url
self.auth = auth
self.onboarder = onboarder
self.uploader = uploader
self.package_store_map = package_store_map
+ self.project = project
def _onboard_package(self, packages):
# Extract package could return multiple packages if
@@ -623,7 +640,7 @@
self.log.message(OnboardDescriptorOnboard())
try:
- self.onboarder.onboard(descriptor_msg)
+ self.onboarder.onboard(descriptor_msg, project=self.project)
except onboard.OnboardError as e:
raise MessageException(OnboardDescriptorError(package.descriptor_file)) from e
@@ -637,29 +654,23 @@
ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
ssl_key = manifest.bootstrap_phase.rwsecurity.key
return cls(
- tasklet.log,
- tasklet.dts,
- tasklet.loop,
- ssl=(ssl_cert, ssl_key),
- vnfd_store=tasklet.vnfd_package_store,
- nsd_store=tasklet.nsd_package_store,
- vnfd_catalog=tasklet.vnfd_catalog,
- nsd_catalog=tasklet.nsd_catalog)
+ tasklet,
+ ssl=(ssl_cert, ssl_key),
+ vnfd_store=tasklet.vnfd_package_store,
+ nsd_store=tasklet.nsd_package_store)
def __init__(
self,
- log,
- dts,
- loop,
+ tasklet,
ssl=None,
vnfd_store=None,
- nsd_store=None,
- vnfd_catalog=None,
- nsd_catalog=None):
+ nsd_store=None):
- self.log = log
- self.loop = loop
- self.dts = dts
+ self.log = tasklet.log
+ self.loop = tasklet.loop
+ self.dts = tasklet.dts
+
+ self.accounts = {}
self.use_ssl = False
self.ssl_cert, self.ssl_key = None, None
@@ -673,7 +684,6 @@
if not nsd_store:
nsd_store = rift.package.store.NsdPackageFilesystemStore(self.log)
- self.accounts = []
self.messages = collections.defaultdict(list)
self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports')
@@ -689,24 +699,16 @@
self.exporter = export.DescriptorPackageArchiveExporter(self.log)
self.loop.create_task(export.periodic_export_cleanup(self.log, self.loop, self.export_dir))
- self.vnfd_catalog = vnfd_catalog
- self.nsd_catalog = nsd_catalog
+ self.get_vnfd_catalog = tasklet.get_vnfd_catalog
+ self.get_nsd_catalog = tasklet.get_nsd_catalog
catalog_map = {
- "vnfd": self.vnfd_catalog,
- "nsd": self.nsd_catalog
+ "vnfd": self.get_vnfd_catalog,
+ "nsd": self.get_nsd_catalog
}
- self.upload_handler = UploadRpcHandler(self.log, self.dts, self.loop, self)
- self.update_handler = UpdateRpcHandler(self.log, self.dts, self.loop, self)
- self.export_handler = export.ExportRpcHandler(
- self.log,
- self.dts,
- self.loop,
- self,
- store_map=self.package_store_map,
- exporter=self.exporter,
- catalog_map=catalog_map
- )
+ self.upload_handler = UploadRpcHandler(self)
+ self.update_handler = UpdateRpcHandler(self)
+ self.export_handler = export.ExportRpcHandler(self, catalog_map)
attrs = dict(log=self.log, loop=self.loop)
@@ -737,12 +739,13 @@
def get_logger(self, transaction_id):
return message.Logger(self.log, self.messages[transaction_id])
- def onboard(self, url, transaction_id, auth=None):
+ def onboard(self, url, transaction_id, auth=None, project=None):
log = message.Logger(self.log, self.messages[transaction_id])
onboard_package = OnboardPackage(
log,
self.loop,
+ project,
url,
auth,
self.onboarder,
@@ -752,12 +755,13 @@
self.loop.run_in_executor(None, onboard_package.download_package)
- def update(self, url, transaction_id, auth=None):
+ def update(self, url, transaction_id, auth=None, project=None):
log = message.Logger(self.log, self.messages[transaction_id])
update_package = UpdatePackage(
log,
self.loop,
+ project,
url,
auth,
self.onboarder,
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg
index ba82e7e..849f9e4 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg
@@ -100,6 +100,8 @@
self._log = log
self._args = args
+ self._project = args.project
+
self._pkgs = None
self._service_name = None
@@ -121,30 +123,36 @@
user=self._user,
passwd=self._password,
api_server_ip=self._api_server_ip)
+
self._upload_url = "curl -k https://{ip}:{port}/api/upload". \
format(ip=self._ip,
port=self._uport)
self._headers = '-H "accept: application/json"' + \
' -H "content-type: application/json"'
- self._conf_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/config". \
+
+ self._conf_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/config/project/{project}". \
format(header=self._headers,
user=self._user,
passwd=self._password,
ip=self._ip,
- port=self._rport)
- self._oper_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/operational". \
+ port=self._rport,
+ project=self._project)
+
+ self._oper_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/operational/project/{project}". \
format(header=self._headers,
user=self._user,
passwd=self._password,
ip=self._ip,
- port=self._rport)
+ port=self._rport,
+ project=self._project)
@property
def log(self):
return self._log
def validate_args(self):
+ args = self._args
if args.upload_pkg is not None:
self._pkgs = args.upload_pkg
self.log.debug("Packages to upload: {}".format(self._pkgs))
@@ -165,8 +173,8 @@
uuid.UUID(args.datacenter)
self._dc = args.datacenter
except ValueError as e:
- raise OnboardPkgInvalidDescId("Invalid UUID for datacenter: {}".
- format(args.datacenter))
+ raise OnboardPkgInvalidDescId("Invalid UUID for datacenter {}: {}".
+ format(args.datacenter, e))
elif args.vim_account:
self._account = args.vim_account
@@ -181,8 +189,8 @@
self._service_name,
self._account))
- if (self._pkgs is None) and (self._nsd_id is None):
- raise OnboardPkgInputError("Need to specify either upload-pkg or instantiate options")
+ if (self._pkgs is None) and (self._nsd_id is None) and (not args.list_nsds):
+ raise OnboardPkgInputError("Need to specify either upload-pkg or instantiate or list options")
# Validate the port numbers are correct
def valid_port(port):
@@ -334,7 +342,12 @@
format(self._nsd_id,
js['error']))
- nsd = js['nsd:nsd']
+ try:
+ nsd = js['project-nsd:nsd']
+ except KeyError as e:
+ raise OnboardPkgNsdError("NSD ID {} provided is not valid".
+ format(self._nsd_id))
+
self.log.debug("NSD to instantiate: {}".format(nsd))
# Generate a UUID for NS
@@ -381,6 +394,35 @@
self.log.info("Successfully initiated instantiation of NS as {} ({})".
format(self._service_name, ns_id))
+ def list_nsds(self):
+ if self._args.list_nsds:
+ self.log.debug("Check NSDS at {}:{}, with credentials {}:{}".
+ format(self._ip, self._rport, self._user, self._password))
+
+ rest_url = self._conf_url+"/nsd-catalog/nsd"
+ try:
+ output = self._exec_cmd(rest_url)
+ self.log.debug("Output of NSD list: {}".
+ format(output))
+ if output:
+ js = json.loads(output)
+ if "error" in js:
+ raise OnboardPkgRcConnError("SO Restconf connect error: {}".
+ format(js["error"]))
+ else:
+ print("No NSDs found on SO")
+ return
+
+ self.log.debug("NSD list: {}".format(js))
+ print('List of NSDs on SO:\nName\tID')
+ for nsd in js['project-nsd:nsd']:
+ print('{}\t{}'.format(nsd['name'], nsd['id']))
+
+ except OnboardPkgCmdError as e:
+ self.log.error("SO restconf connect failed: {}".format(e))
+ raise OnboardPkgRcConnError("SO Restconf connect error: {}".
+ format(e))
+
def process(self):
try:
self.validate_args()
@@ -396,6 +438,7 @@
self.validate_connectivity()
self.upload_packages()
self.instantiate()
+ self.list_nsds()
if __name__ == "__main__":
@@ -407,6 +450,9 @@
help="Descriptor packages to upload. " + \
"If multiple descriptors are provided, they are uploaded in the same sequence.")
+ parser.add_argument("-l", "--list-nsds", action='store_true',
+ help="List available network service descriptors")
+
parser.add_argument("-i", "--instantiate",
help="Instantiate a network service with the name")
parser.add_argument("-d", "--nsd-id",
@@ -416,6 +462,8 @@
parser.add_argument("-c", "--vim-account",
help="Cloud/VIM account to instantiate on")
+ parser.add_argument("--project", default='default',
+ help="Project to use, default 'default'")
parser.add_argument("-o", "--onboard-port", default=8443, type=int,
help="Onboarding port number - node port number, default 8443")
parser.add_argument("-p", "--upload-port", default=4567, type=int,
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py
index 7a787c7..434d036 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py
@@ -42,10 +42,11 @@
from rift.tasklets.rwlaunchpad import export
import gi
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- RwVnfdYang,
- VnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ ProjectVnfdYang as VnfdYang,
)
import utest_package
@@ -59,7 +60,7 @@
self._vnfd_serializer = rift.package.convert.VnfdSerializer()
def test_create_archive(self):
- rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+ rw_vnfd_msg = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd(
id="new_id", name="new_name", description="new_description"
)
json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
@@ -80,11 +81,11 @@
self.assertEqual(package.descriptor_msg, rw_vnfd_msg)
def test_export_package(self):
- rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+ rw_vnfd_msg = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd(
id="new_id", name="new_name", description="new_description",
meta="THIS FIELD IS NOT IN REGULAR VNFD"
)
- vnfd_msg = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ vnfd_msg = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
vnfd_msg.from_dict(rw_vnfd_msg.as_dict(), ignore_missing_keys=True)
self.assertNotEqual(rw_vnfd_msg, vnfd_msg)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_fileserver.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_fileserver.py
index e56ec04..812f332 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_fileserver.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_fileserver.py
@@ -33,12 +33,12 @@
from rift.package.handler import FileRestApiHandler
import gi
-gi.require_version('NsdYang', '1.0')
-gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
- VnfdYang,
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py
index 871132f..d0f323d 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py
@@ -22,6 +22,7 @@
import base64
import concurrent.futures
import io
+import json
import logging
import os
import sys
@@ -38,19 +39,18 @@
import gi
gi.require_version('NsdYang', '1.0')
gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
NsdYang,
VnfdYang,
+ ProjectNsdYang,
+ ProjectVnfdYang,
)
class RestconfDescriptorHandler(tornado.web.RequestHandler):
- DESC_SERIALIZER_MAP = {
- "nsd": convert.NsdSerializer(),
- "vnfd": convert.VnfdSerializer(),
- }
-
class AuthError(Exception):
pass
@@ -129,16 +129,14 @@
self._verify_content_type_header()
def _verify_request_body(self, descriptor_type):
- if descriptor_type not in RestconfDescriptorHandler.DESC_SERIALIZER_MAP:
+ if descriptor_type not in ['nsd', 'vnfd']:
raise ValueError("Unsupported descriptor type: %s" % descriptor_type)
- body = self.request.body
- bytes_hdl = io.BytesIO(body)
-
- serializer = RestconfDescriptorHandler.DESC_SERIALIZER_MAP[descriptor_type]
+ body = convert.decode(self.request.body)
+ self._logger.debug("Received msg: {}".format(body))
try:
- message = serializer.from_file_hdl(bytes_hdl, ".json")
+ message = json.loads(body)
except convert.SerializationError as e:
self.set_status(400)
self._transforms = []
@@ -150,7 +148,7 @@
self._info.last_request_message = message
- self._logger.debug("Received a valid descriptor request")
+ self._logger.debug("Received a valid descriptor request: {}".format(message))
def put(self, descriptor_type):
self._info.last_descriptor_type = descriptor_type
@@ -195,6 +193,11 @@
class OnboardTestCase(tornado.testing.AsyncHTTPTestCase):
+ DESC_SERIALIZER_MAP = {
+ "nsd": convert.NsdSerializer(),
+ "vnfd": convert.VnfdSerializer(),
+ }
+
AUTH = ("admin", "admin")
def setUp(self):
self._log = logging.getLogger(__file__)
@@ -213,28 +216,44 @@
def get_app(self):
attrs = dict(auth=OnboardTestCase.AUTH, log=self._log, info=self._handler_info)
return tornado.web.Application([
- (r"/api/config/.*/(nsd|vnfd)", RestconfDescriptorHandler, attrs),
+ (r"/api/config/project/default/.*/(nsd|vnfd)",
+ RestconfDescriptorHandler, attrs),
])
+
+ def get_msg(self, desc=None):
+ if desc is None:
+ desc = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ serializer = OnboardTestCase.DESC_SERIALIZER_MAP['nsd']
+ jstr = serializer.to_json_string(desc, project_ns=False)
+ self._desc = jstr
+ hdl = io.BytesIO(str.encode(jstr))
+ return serializer.from_file_hdl(hdl, ".json")
+
+ def get_json(self, msg):
+ serializer = OnboardTestCase.DESC_SERIALIZER_MAP['nsd']
+ json_data = serializer.to_json_string(msg, project_ns=True)
+ return json.loads(json_data)
+
@rift.test.dts.async_test
def test_onboard_nsd(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = self.get_msg()
yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
- self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+ self.assertEqual(self._handler_info.last_request_message, self.get_json(nsd_msg))
self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
self.assertEqual(self._handler_info.last_method, "POST")
@rift.test.dts.async_test
def test_update_nsd(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = self.get_msg()
yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
- self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+ self.assertEqual(self._handler_info.last_request_message, self.get_json(nsd_msg))
self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
self.assertEqual(self._handler_info.last_method, "PUT")
@rift.test.dts.async_test
def test_bad_descriptor_type(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
with self.assertRaises(TypeError):
yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
@@ -246,7 +265,7 @@
# Use a port not used by the instantiated server
new_port = self._port - 1
self._onboarder.port = new_port
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = self.get_msg()
with self.assertRaises(onboard.OnboardError):
yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
@@ -259,7 +278,7 @@
# Set the timeout to something minimal to speed up test
self._onboarder.timeout = .1
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = self.get_msg()
# Force the request to timeout by running the call synchronously so the
with self.assertRaises(onboard.OnboardError):
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py
index 1efd2df..826871c 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py
@@ -40,16 +40,6 @@
import rift.package.cloud_init
-import gi
-gi.require_version('RwpersonDbYang', '1.0')
-gi.require_version('RwYang', '1.0')
-
-from gi.repository import (
- RwpersonDbYang,
- RwYang,
- )
-
-
nsd_yaml = b"""nsd:nsd-catalog:
nsd:nsd:
- nsd:id: gw_corpA
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py
index af8e1f8..c90f27f 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py
@@ -34,21 +34,29 @@
import gi
gi.require_version('RwpersonDbYang', '1.0')
+gi.require_version('RwProjectPersonDbYang', '1.0')
gi.require_version('RwYang', '1.0')
from gi.repository import (
RwpersonDbYang,
+ RwProjectPersonDbYang,
RwYang,
)
+from rift.package.convert import SerializationError
+
+
class TestSerializer(unittest.TestCase):
def setUp(self):
self._serializer = ProtoMessageSerializer(
RwpersonDbYang,
- RwpersonDbYang.Person
+ RwpersonDbYang.Person,
+ RwProjectPersonDbYang,
+ RwProjectPersonDbYang.Person,
)
self._sample_person = RwpersonDbYang.Person(name="Fred")
+ self._project_person = RwProjectPersonDbYang.Person(name="Fred")
self._model = RwYang.model_create_libncx()
self._model.load_schema_ypbc(RwpersonDbYang.get_schema())
@@ -63,14 +71,14 @@
with io.StringIO(sample_person_yaml) as file_hdl:
person = self._serializer.from_file_hdl(file_hdl, ".yml")
- self.assertEqual(person, self._sample_person)
+ self.assertEqual(person, self._project_person)
def test_from_json_file(self):
sample_person_json = self._sample_person.to_json(self._model)
with io.StringIO(sample_person_json) as file_hdl:
person = self._serializer.from_file_hdl(file_hdl, ".json")
- self.assertEqual(person, self._sample_person)
+ self.assertEqual(person, self._project_person)
def test_unknown_file_extension(self):
with io.StringIO("asdf") as file_hdl:
@@ -90,7 +98,7 @@
self.assertEqual(person, self._sample_person)
def test_to_json_string_invalid_type(self):
- with self.assertRaises(TypeError):
+ with self.assertRaises(SerializationError):
self._serializer.to_json_string(RwpersonDbYang.FlatPerson(name="bob"))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_uploader_app_dts.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_uploader_app_dts.py
index fdc2e22..b9316b5 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_uploader_app_dts.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_uploader_app_dts.py
@@ -33,25 +33,28 @@
import tornado.web
import tornado.httputil
-import gi
import requests
from tornado.platform.asyncio import AsyncIOMainLoop
from tornado.ioloop import IOLoop
from concurrent.futures.thread import ThreadPoolExecutor
from concurrent.futures.process import ProcessPoolExecutor
+
+import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwPkgMgmtYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
RwDts as rwdts,
RwPkgMgmtYang,
- RwVnfdYang
-
+ RwProjectVnfdYang as RwVnfdYang,
)
import rift.tasklets.rwlaunchpad.uploader as uploader
import rift.tasklets.rwlaunchpad.message as message
import rift.tasklets.rwlaunchpad.export as export
+from rift.mano.utils.project import DEFAULT_PROJECT
import rift.test.dts
+
import mock
TEST_STRING = "foobar"
@@ -74,16 +77,23 @@
mock_vnfd_catalog = mock.MagicMock()
self.uid, path = self.create_mock_package()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
"id": self.uid
})
mock_vnfd_catalog = {self.uid: mock_vnfd}
- self.app = uploader.UploaderApplication(
- self.log,
- self.dts,
- self.loop,
- vnfd_catalog=mock_vnfd_catalog)
+ class MockTasklet:
+ def __init__(cls):
+ def get_vnfd_catalog(project=DEFAULT_PROJECT):
+ return mock_vnfd_catalog
+
+ cls.log = self.log
+ cls.loop = self.loop
+ cls.dts = self.dts
+ cls.get_vnfd_catalog = get_vnfd_catalog
+ cls.get_nsd_catalog = None
+
+ self.app = uploader.UploaderApplication(MockTasklet())
AsyncIOMainLoop().install()
self.server = tornado.httpserver.HTTPServer(
@@ -122,7 +132,8 @@
yield from self.app.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageCreate.from_dict({
"package_type": "VNFD",
- "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.2.1/VNFS/ping_vnfd.tar.gz"
+ "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.2.1/VNFS/ping_vnfd.tar.gz",
+ "project_name": DEFAULT_PROJECT
})
rpc_out = yield from self.dts.query_rpc(
@@ -147,7 +158,8 @@
# Update
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageUpdate.from_dict({
"package_type": "VNFD",
- "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.2.1/VNFS/ping_vnfd.tar.gz"
+ "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.2.1/VNFS/ping_vnfd.tar.gz",
+ "project_name": DEFAULT_PROJECT
})
rpc_out = yield from self.dts.query_rpc(
"I,/rw-pkg-mgmt:package-update",
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py
index b8abea7..f43da6f 100644
--- a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py
+++ b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py
@@ -231,7 +231,7 @@
self._account = account
self._plugin = plugin
self._timestamp = 0
- self._metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ self._metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
self._vdur = vdur
self._vim_id = vdur.vim_id
self._updating = None
@@ -305,7 +305,7 @@
try:
# Create uninitialized metric structure
- vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ vdu_metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
# VCPU
vdu_metrics.vcpu.total = self.vdur.vm_flavor.vcpu_count
@@ -549,17 +549,19 @@
different sub-systems that are used to monitor the NFVI.
"""
- def __init__(self, loop, log, config):
+ def __init__(self, loop, log, config, project):
"""Create a Monitor object
Arguments:
- loop - an event loop
- log - the logger used by this object
- config - an instance of InstanceConfiguration
+ loop - an event loop
+ log - the logger used by this object
+ config - an instance of InstanceConfiguration
+ project - an instance of the project
"""
self._loop = loop
self._log = log
+ self._project = project
self._cloud_accounts = dict()
self._nfvi_plugins = NfviMetricsPluginManager(log)
@@ -581,6 +583,10 @@
return self._log
@property
+ def project(self):
+ return self._project
+
+ @property
def cache(self):
"""The NFVI metrics cache"""
return self._cache
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py
index 4ab351e..b9173e6 100644
--- a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py
+++ b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py
@@ -90,30 +90,34 @@
import rift.tasklets
import rift.mano.cloud
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import core
class DtsHandler(object):
- def __init__(self, tasklet):
+ def __init__(self, project):
self.reg = None
- self.tasklet = tasklet
+ self.project = project
@property
def log(self):
- return self.tasklet.log
+ return self.project._log
@property
def log_hdl(self):
- return self.tasklet.log_hdl
+ return self.project._log_hdl
@property
def dts(self):
- return self.tasklet.dts
+ return self.project._dts
@property
def loop(self):
- return self.tasklet.loop
+ return self.project._loop
@property
def classname(self):
@@ -151,7 +155,7 @@
with self.dts.group_create() as group:
group.register(
- xpath=VnfrCatalogSubscriber.XPATH,
+ xpath=self.project.add_project(VnfrCatalogSubscriber.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
handler=handler,
)
@@ -173,20 +177,20 @@
with self.dts.appconf_group_create(acg_handler) as acg:
self.reg = acg.register(
- xpath=NsInstanceConfigSubscriber.XPATH,
+ xpath=self.project.add_project(NsInstanceConfigSubscriber.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
)
class CloudAccountDtsHandler(DtsHandler):
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._cloud_cfg_subscriber = None
def register(self):
self.log.debug("creating cloud account config handler")
self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
- self.dts, self.log, self.log_hdl,
+ self.dts, self.log, self.log_hdl, self.project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=self.tasklet.on_cloud_account_create,
on_delete_apply=self.tasklet.on_cloud_account_delete,
@@ -208,7 +212,7 @@
# before timing out, the current data will be published instead.
TIMEOUT = 2.0
- def __init__(self, tasklet, vnfr, vdur):
+ def __init__(self, project, vnfr, vdur):
"""Create an instance of VdurNvfiPublisher
Arguments:
@@ -217,12 +221,12 @@
vdur - the VDUR of the VDU whose metrics are published
"""
- super().__init__(tasklet)
+ super().__init__(project)
self._vnfr = vnfr
self._vdur = vdur
self._handle = None
- self._xpath = VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id)
+ self._xpath = project.add_project(VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id))
self._deregistered = asyncio.Event(loop=self.loop)
@@ -321,7 +325,7 @@
with self.dts.appconf_group_create(acg_handler) as acg:
self.reg = acg.register(
- xpath="C,/rw-launchpad:launchpad-config",
+ xpath=self.project.add_project("C,/rw-launchpad:launchpad-config"),
flags=rwdts.Flag.SUBSCRIBER,
)
@@ -335,8 +339,8 @@
them on to the tasklet.
"""
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._handle = None
@asyncio.coroutine
@@ -345,6 +349,10 @@
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
try:
+
+ if not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
response = VnfrYang.YangOutput_Vnfr_CreateAlarm()
response.alarm_id = yield from self.tasklet.on_create_alarm(
msg.cloud_account,
@@ -382,8 +390,8 @@
them on to the tasklet.
"""
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._handle = None
@asyncio.coroutine
@@ -392,6 +400,9 @@
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
try:
+ if not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
yield from self.tasklet.on_destroy_alarm(
msg.cloud_account,
msg.alarm_id,
@@ -473,70 +484,31 @@
])
-class MonitorTasklet(rift.tasklets.Tasklet):
- """
- The MonitorTasklet provides a interface for DTS to interact with an
- instance of the Monitor class. This allows the Monitor class to remain
- independent of DTS.
- """
+class MonitorProject(ManoProject):
- DEFAULT_POLLING_PERIOD = 1.0
+ def __init__(self, name, tasklet, **kw):
+ super(MonitorProject, self).__init__(log, name)
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
- def __init__(self, *args, **kwargs):
- try:
- super().__init__(*args, **kwargs)
- self.rwlog.set_category("rw-monitor-log")
+ self.vnfr_subscriber = VnfrCatalogSubscriber(self)
+ self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
+ self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
+ self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
- self.vnfr_subscriber = VnfrCatalogSubscriber(self)
- self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
- self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
- self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
+ self.config = core.InstanceConfiguration()
+ self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
- self.config = core.InstanceConfiguration()
- self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
+ self.monitor = core.Monitor(self.loop, self.log, self.config, self)
+ self.vdur_handlers = dict()
- self.monitor = core.Monitor(self.loop, self.log, self.config)
- self.vdur_handlers = dict()
-
- self.webhooks = None
- self.create_alarm_rpc = CreateAlarmRPC(self)
- self.destroy_alarm_rpc = DestroyAlarmRPC(self)
-
-
- except Exception as e:
- self.log.exception(e)
-
- @property
- def polling_period(self):
- return self.config.polling_period
-
- @property
- def public_ip(self):
- """The public IP of the launchpad"""
- return self.config.public_ip
-
- def start(self):
- super().start()
- self.log.info("Starting MonitoringTasklet")
-
- self.log.debug("Registering with dts")
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.log.debug("Created DTS Api GI Object: %s", self.dts)
-
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
+ self.create_alarm_rpc = CreateAlarmRPC(self)
+ self.destroy_alarm_rpc = DestroyAlarmRPC(self)
@asyncio.coroutine
- def init(self):
+ def register (self):
self.log.debug("creating cloud account handler")
self.cloud_cfg_subscriber.register()
@@ -555,23 +527,15 @@
self.log.debug("creating destroy-alarm rpc handler")
yield from self.destroy_alarm_rpc.register()
- self.log.debug("creating webhook server")
- loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.webhooks = WebhookApplication(self)
- self.server = tornado.httpserver.HTTPServer(
- self.webhooks,
- io_loop=loop,
- )
- @asyncio.coroutine
- def on_public_ip(self, ip):
- """Store the public IP of the launchpad
+ @property
+ def polling_period(self):
+ return self.config.polling_period
- Arguments:
- ip - a string containing the public IP address of the launchpad
-
- """
- self.config.public_ip = ip
+ @property
+ def public_ip(self):
+ """The public IP of the launchpad"""
+ return self.config.public_ip
def on_ns_instance_config_update(self, config):
"""Update configuration information
@@ -589,44 +553,6 @@
def on_cloud_account_delete(self, account_name):
self.monitor.remove_cloud_account(account_name)
- @asyncio.coroutine
- def run(self):
- self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
-
- def on_instance_started(self):
- self.log.debug("Got instance started callback")
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
-
def on_vnfr_create(self, vnfr):
if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
msg = "NFVI metrics unavailable for {}"
@@ -712,3 +638,105 @@
"""
yield from self.monitor.destroy_alarm(account, alarm_id)
+
+
+class MonitorTasklet(rift.tasklets.Tasklet):
+ """
+ The MonitorTasklet provides a interface for DTS to interact with an
+ instance of the Monitor class. This allows the Monitor class to remain
+ independent of DTS.
+ """
+
+ DEFAULT_POLLING_PERIOD = 1.0
+
+ def __init__(self, *args, **kwargs):
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-monitor-log")
+
+ self._project_handler = None
+ self.projects = {}
+
+ self.webhooks = None
+
+ except Exception as e:
+ self.log.exception(e)
+
+ def start(self):
+ super().start()
+ self.log.info("Starting MonitoringTasklet")
+
+ self.log.debug("Registering with dts")
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating webhook server")
+ loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+ self.webhooks = WebhookApplication(self)
+ self.server = tornado.httpserver.HTTPServer(
+ self.webhooks,
+ io_loop=loop,
+ )
+
+ @asyncio.coroutine
+ def on_public_ip(self, ip):
+ """Store the public IP of the launchpad
+
+ Arguments:
+ ip - a string containing the public IP address of the launchpad
+
+ """
+ self.config.public_ip = ip
+
+ @asyncio.coroutine
+ def run(self):
+ self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
+
+ def on_instance_started(self):
+ self.log.debug("Got instance started callback")
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py
index 78a3c8f..ce19580 100644
--- a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py
@@ -42,22 +42,22 @@
monp_id (str): Monitoring Param ID
vnfr_id (str): VNFR ID
"""
- def __init__(self, log, dts, loop, vnfr_id, monp_id, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, vnfr_id, monp_id, callback=None):
+ super().__init__(log, dts, loop, project, callback)
self.vnfr_id = vnfr_id
self.monp_id = monp_id
def get_xpath(self):
- return("D,/vnfr:vnfr-catalog" +
+ return self.project.add_project(("D,/vnfr:vnfr-catalog" +
"/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id) +
"/vnfr:monitoring-param" +
- "[vnfr:id='{}']".format(self.monp_id))
+ "[vnfr:id='{}']".format(self.monp_id)))
class NsrMonitoringParam():
"""Class that handles NS Mon-param data.
"""
- MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+ MonParamMsg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam
MISSING = None
DEFAULT_AGGREGATION_TYPE = "AVERAGE"
@@ -66,7 +66,7 @@
"""Convenience class that constructs NSMonitoringParam objects
Args:
- nsd (RwNsdYang.YangData_Nsd_NsdCatalog_Nsd): Nsd object
+ nsd (RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd): Nsd object
constituent_vnfrs (list): List of constituent vnfr objects of NSR
store (SubscriberStore): Store object instance
@@ -76,8 +76,6 @@
Also handles legacy NSD descriptor which has no mon-param defines. In
such cases the mon-params are created from VNFD's mon-param config.
"""
- MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
-
mon_params = []
for mon_param_msg in nsd.monitoring_param:
mon_params.append(NsrMonitoringParam(
@@ -278,16 +276,17 @@
def from_handler(cls, handler, monp, callback):
"""Convenience class to build NsrMonitoringParamPoller object.
"""
- return cls(handler.log, handler.dts, handler.loop, monp, callback)
+ return cls(handler.log, handler.dts, handler.loop, handler.project,
+ monp, callback)
- def __init__(self, log, dts, loop, monp, callback=None):
+ def __init__(self, log, dts, loop, project, monp, callback=None):
"""
Args:
monp (NsrMonitoringParam): Param object
callback (None, optional): Callback to be triggered after value has
been aggregated.
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.monp = monp
self.subscribers = []
@@ -341,7 +340,8 @@
for vnfr_id, monp_id in self.monp.vnfr_ids:
callback = functools.partial(self.update_value, vnfr_id=vnfr_id)
self.subscribers.append(VnfrMonitoringParamSubscriber(
- self.loop, self.dts, self.loop, vnfr_id, monp_id, callback=callback))
+ self.loop, self.dts, self.loop, self.project,
+ vnfr_id, monp_id, callback=callback))
@asyncio.coroutine
def start(self):
@@ -356,14 +356,14 @@
class NsrMonitorDtsHandler(mano_dts.DtsHandler):
""" NSR monitoring class """
- def __init__(self, log, dts, loop, nsr, constituent_vnfrs, store):
+ def __init__(self, log, dts, loop, project, nsr, constituent_vnfrs, store):
"""
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): NSR object
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): NSR object
constituent_vnfrs (list): list of VNFRs in NSR
store (SubscriberStore): Store instance
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.nsr = nsr
self.store = store
@@ -371,7 +371,7 @@
self.mon_params_pollers = []
def xpath(self, param_id=None):
- return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ return self.project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr" +
"[nsr:ns-instance-config-ref='{}']".format(self.nsr.ns_instance_config_ref) +
"/nsr:monitoring-param" +
("[nsr:id='{}']".format(param_id) if param_id else ""))
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py
index 04e0306..2bfad6d 100644
--- a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py
@@ -34,21 +34,20 @@
import rift.mano.cloud
import rift.mano.dts as subscriber
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import vnfr_core
from . import nsr_core
-class MonitoringParameterTasklet(rift.tasklets.Tasklet):
- """The main task of this Tasklet is to listen for VNFR changes and once the
- VNFR hits the running state, triggers the monitor.
- """
- def __init__(self, *args, **kwargs):
- try:
- super().__init__(*args, **kwargs)
- self.rwlog.set_category("rw-monitor-log")
- except Exception as e:
- self.log.exception(e)
+class MonParamProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(MonParamProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
self.vnfr_subscriber = None
self.store = None
@@ -59,77 +58,27 @@
# Needs to be moved to store once the DTS bug is resolved
self.vnfrs = {}
- def start(self):
- super().start()
-
- self.log.info("Starting MonitoringParameterTasklet")
- self.log.debug("Registering with dts")
-
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_tasklet(
+ self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_project(
self,
callback=self.handle_vnfr)
- self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_tasklet(
+ self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_project(
self,
callback=self.handle_nsr)
- self.store = subscriber.SubscriberStore.from_tasklet(self)
+ self.store = subscriber.SubscriberStore.from_project(self)
self.log.debug("Created DTS Api GI Object: %s", self.dts)
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
-
@asyncio.coroutine
- def init(self):
+ def register (self):
self.log.debug("creating vnfr subscriber")
yield from self.store.register()
yield from self.vnfr_subscriber.register()
yield from self.nsr_subsriber.register()
- @asyncio.coroutine
- def run(self):
- pass
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
+ def deregister(self):
+ self.log.debug("De-register vnfr project {}".format(self.name))
+ #TODO:
def handle_vnfr(self, vnfr, action):
"""Starts a monitoring parameter job for every VNFR that reaches
@@ -141,7 +90,6 @@
"""
def vnfr_create():
- # if vnfr.operational_status == "running" and vnfr.id not in self.vnfr_monitors:
if vnfr.config_status == "configured" and vnfr.id not in self.vnfr_monitors:
vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
@@ -178,7 +126,7 @@
NS that moves to config state.
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): Ns Opdata
action (rwdts.QueryAction): Action type of the change.
"""
def nsr_create():
@@ -188,6 +136,7 @@
self.log,
self.dts,
self.loop,
+ self,
nsr,
list(self.vnfrs.values()),
self.store
@@ -197,8 +146,12 @@
@asyncio.coroutine
def task():
- yield from nsr_mon.register()
- yield from nsr_mon.start()
+ try:
+ yield from nsr_mon.register()
+ yield from nsr_mon.start()
+ except Exception as e:
+ self.log.exception("NSR {} monparam task failed: {}".
+ format(nsr.name_ref, e))
self.loop.create_task(task())
@@ -214,3 +167,78 @@
nsr_create()
elif action == rwdts.QueryAction.DELETE:
nsr_delete()
+
+
+class MonitoringParameterTasklet(rift.tasklets.Tasklet):
+ """The main task of this Tasklet is to listen for VNFR changes and once the
+ VNFR hits the running state, triggers the monitor.
+ """
+ def __init__(self, *args, **kwargs):
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-monitor-log")
+ except Exception as e:
+ self.log.exception(e)
+
+ self._project_handler = None
+ self.projects = {}
+
+ def start(self):
+ super().start()
+
+ self.log.info("Starting MonitoringParameterTasklet")
+ self.log.debug("Registering with dts")
+
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, MonParamProject)
+ self.project_handler.register()
+
+ @asyncio.coroutine
+ def run(self):
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py
index 6dc3a25..335e957 100644
--- a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py
+++ b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py
@@ -616,29 +616,29 @@
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param"
@classmethod
- def from_vnf_data(cls, tasklet, vnfr_msg, vnfd_msg):
- handler = cls(tasklet.log, tasklet.dts, tasklet.loop,
+ def from_vnf_data(cls, project, vnfr_msg, vnfd_msg):
+ handler = cls(project.log, project.dts, project.loop, project,
vnfr_msg.id, vnfr_msg.mgmt_interface.ip_address,
vnfd_msg.monitoring_param, vnfd_msg.http_endpoint)
return handler
- def __init__(self, log, dts, loop, vnfr_id, mgmt_ip, params, endpoints):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, vnfr_id, mgmt_ip, params, endpoints):
+ super().__init__(log, dts, loop, project)
self._mgmt_ip = mgmt_ip
self._vnfr_id = vnfr_id
mon_params = []
for mon_param in params:
- param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
+ param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
mon_param.as_dict()
)
mon_params.append(param)
http_endpoints = []
for endpoint in endpoints:
- endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
+ endpoint = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
endpoint.as_dict()
)
http_endpoints.append(endpoint)
@@ -674,10 +674,10 @@
def xpath(self, param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
+ return self.project.add_project(("D,/vnfr:vnfr-catalog" +
"/vnfr:vnfr[vnfr:id='{}']".format(self._vnfr_id) +
"/vnfr:monitoring-param" +
- ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+ ("[vnfr:id='{}']".format(param_id) if param_id else "")))
@property
def msg(self):
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py
index fd48952..c99fc00 100755
--- a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py
+++ b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py
@@ -54,7 +54,7 @@
'ping-response-rx-count': 10
}
- mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+ mon_param_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam()
mon_param_msg.from_dict({
'id': '1',
'name': 'ping-request-tx-count',
@@ -67,7 +67,7 @@
'units': 'packets'
})
- endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+ endpoint_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint()
endpoint_msg.from_dict({
'path': ping_path,
'polling_interval_secs': 1,
@@ -231,7 +231,7 @@
'ping-response-rx-count': 10
}
- mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+ mon_param_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam()
mon_param_msg.from_dict({
'id': '1',
'name': 'ping-request-tx-count',
@@ -244,7 +244,7 @@
'units': 'packets'
})
- endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+ endpoint_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint()
endpoint_msg.from_dict({
'path': ping_path,
'https': 'true',
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py
index fb0b039..5dec362 100644
--- a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py
+++ b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py
@@ -40,8 +40,8 @@
RwLaunchpadYang as launchpadyang,
RwDts as rwdts,
RwVnfrYang,
- RwVnfdYang,
- RwNsdYang
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
)
import utest_mon_params
@@ -50,7 +50,7 @@
class MonParamMsgGenerator(object):
def __init__(self, num_messages=1):
ping_path = r"/api/v1/ping/stats"
- self._endpoint_msg = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
+ self._endpoint_msg = vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
'path': ping_path,
'https': 'true',
'polling_interval_secs': 1,
@@ -61,7 +61,7 @@
self._mon_param_msgs = []
for i in range(1, num_messages):
- self._mon_param_msgs.append(vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ self._mon_param_msgs.append(vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
'id': '%s' % i,
'name': 'param_num_%s' % i,
'json_query_method': "NAMEKEY",
@@ -127,7 +127,7 @@
def setup_mock_store(self, aggregation_type, monps, legacy=False):
store = mock.MagicMock()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
'id': "1",
'monitoring_param': [
{'description': 'no of ping requests',
@@ -151,14 +151,14 @@
})
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({
'id': '1',
'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
})
- mock_vnfr.vnfd = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+ mock_vnfr.vnfd = vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict({
'ns_instance_config_ref': "1",
'name_ref': "Foo",
'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
@@ -182,7 +182,7 @@
'vnfd_monitoring_param_ref': '2'}]
}]
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict({
'id': str(uuid.uuid1()),
'monitoring_param': (monp if not legacy else [])
})
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py
index 007e62c..ef7856d 100644
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py
@@ -128,15 +128,17 @@
class CloudAccountConfigSubscriber:
- def __init__(self, log, dts, log_hdl):
+ def __init__(self, log, dts, log_hdl, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
+ self._project = project
self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
self._dts,
self._log,
self._log_hdl,
+ self._project,
rift.mano.cloud.CloudAccountConfigCallbacks())
def get_cloud_account_sdn_name(self, account_name):
@@ -154,6 +156,9 @@
def register(self):
self._cloud_sub.register()
+ def deregister(self):
+ self._cloud_sub.deregister()
+
class ROAccountPluginSelector(object):
"""
@@ -166,10 +171,11 @@
"""
DEFAULT_PLUGIN = RwNsPlugin
- def __init__(self, dts, log, loop, records_publisher):
+ def __init__(self, dts, log, loop, project, records_publisher):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._records_publisher = records_publisher
self._nsm_plugins = NsmPlugins()
@@ -178,12 +184,14 @@
self._log,
self._dts,
self._loop,
+ self._project,
callback=self.on_ro_account_change
)
self._nsr_sub = mano_dts.NsrCatalogSubscriber(
self._log,
self._dts,
self._loop,
+ self._project,
self.handle_nsr)
# The default plugin will be RwNsPlugin
@@ -240,3 +248,8 @@
def register(self):
yield from self._ro_sub.register()
yield from self._nsr_sub.register()
+
+ def deregister(self):
+ self._log.debug("Project {} de-register".format(self._project.name))
+ self._ro_sub.deregister()
+ self._nsr_sub.deregister()
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py
index 0ad877e..2bb4d18 100644
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py
@@ -102,7 +102,7 @@
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
@@ -118,7 +118,7 @@
self._nsr._nsr_uuid,
self._vdur_id
)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
if console_url:
vdur_console.console_url = console_url
@@ -128,7 +128,7 @@
except openmano_client.InstanceStatusError as e:
self._log.error("Could not get NS instance console URL: %s",
str(e))
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
@@ -873,7 +873,7 @@
def vnfr_uptime_update(self, vnfr):
try:
- vnfr_ = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': vnfr.id})
+ vnfr_ = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({'id': vnfr.id})
while True:
vnfr_.uptime = int(time.time()) - vnfr._create_time
yield from self._publisher.publish_vnfr(None, vnfr_)
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
index 6c4b123..1b5c787 100644
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
@@ -21,7 +21,7 @@
from gi.repository import (
RwDts as rwdts,
RwTypes,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwYang
)
import rift.tasklets
@@ -33,10 +33,11 @@
""" The network service op data DTS handler """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@property
@@ -47,38 +48,45 @@
@asyncio.coroutine
def register(self):
""" Register for Nsr op data publisher registration"""
- self._log.debug("Registering Nsr op data path %s as publisher",
- NsrOpDataDtsHandler.XPATH)
+ if self._regh:
+ return
+
+ xpath = self._project.add_project(NsrOpDataDtsHandler.XPATH)
+ self._log.debug("Registering Nsr op data path {} as publisher".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg)
self.regh.create_element(path, msg)
self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xact, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh)
self.regh.update_element(path, msg, flags)
self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting NSR xact:%s, path:%s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted NSR xact:%s, path:%s", xact, path)
@@ -86,13 +94,14 @@
class VnfrPublisherDtsHandler(object):
- """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
+ """ Registers 'D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@@ -104,6 +113,8 @@
@asyncio.coroutine
def register(self):
""" Register for Vvnfr create/update/delete/read requests from dts """
+ if self._regh:
+ return
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
@@ -116,12 +127,13 @@
"%s action on VirtualNetworkFunctionRecord not supported",
action)
- self._log.debug("Registering for VNFR using xpath: %s",
- VnfrPublisherDtsHandler.XPATH,)
+ xpath = self._project.add_project(VnfrPublisherDtsHandler.XPATH)
+ self._log.debug("Registering for VNFR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
@@ -160,13 +172,14 @@
class VlrPublisherDtsHandler(object):
- """ registers 'D,/vlr:vlr-catalog/vlr:vlr """
+ """ registers 'D,/rw-project:project/vlr:vlr-catalog/vlr:vlr """
XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@@ -179,6 +192,9 @@
def register(self):
""" Register for vlr create/update/delete/read requests from dts """
+ if self._regh:
+ return
+
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts """
@@ -190,12 +206,13 @@
"%s action on VirtualLinkRecord not supported",
action)
- self._log.debug("Registering for VLR using xpath: %s",
- VlrPublisherDtsHandler.XPATH,)
+ xpath = self._project.add_project(VlrPublisherDtsHandler.XPATH)
+ self._log.debug("Registering for VLR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
@@ -238,10 +255,11 @@
HEADERS = {"content-type": "application/vnd.yang.data+json"}
- def __init__(self, use_ssl, ssl_cert, ssl_key, loop):
+ def __init__(self, use_ssl, ssl_cert, ssl_key, loop, project):
self.use_ssl = use_ssl
self.ssl_cert = ssl_cert
self.ssl_key = ssl_key
+ self._project = project
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self.loop = loop
@@ -255,7 +273,7 @@
scheme = "https" if self.use_ssl else "http"
- url = "{}://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}"
+ url = "{}://127.0.0.1:8008/api/config/project/{}/vnfd-catalog/vnfd/{}"
model = RwYang.Model.create_libncx()
model.load_module("rw-vnfd")
@@ -263,7 +281,7 @@
data = vnfd.to_json(model)
- key = "vnfd:vnfd-catalog"
+ key = "project-vnfd:vnfd-catalog"
newdict = json.loads(data)
if key in newdict:
data = json.dumps(newdict[key])
@@ -277,7 +295,7 @@
options["cert"] = (self.ssl_cert, self.ssl_key)
response = requests.put(
- url.format(scheme, vnfd.id),
+ url.format(scheme, self._project.name, vnfd.id),
**options
)
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py
index 23ab7b6..7d05457 100644
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py
@@ -46,6 +46,7 @@
self._loop = loop
self._dts = dts
self.nsm = parent
+ self.project = parent._project
self._log.debug("Initialized ROConfigManager")
def is_ready(self):
@@ -53,7 +54,7 @@
@property
def cm_state_xpath(self):
- return ("/rw-conman:cm-state/rw-conman:cm-nsr")
+ return self.project.add_project("/rw-conman:cm-state/rw-conman:cm-nsr")
@classmethod
def map_config_status(cls, status):
@@ -119,7 +120,7 @@
@asyncio.coroutine
def register(self):
""" Register for cm-state changes """
-
+
@asyncio.coroutine
def on_prepare(xact_info, query_action, ks_path, msg):
""" cm-state changed """
@@ -151,4 +152,9 @@
handler=handler)
except Exception as e:
self._log.error("Failed to register for cm-state changes as %s", str(e))
-
+
+
+ def deregister(self):
+ if self.dts_reg_hdl:
+ self.dts_reg_hdl.deregister()
+ self.dts_reg_hdl = None
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
index 69fac68..ea17e60 100755
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
@@ -35,7 +35,7 @@
import gi
gi.require_version('RwYang', '1.0')
-gi.require_version('RwNsdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
@@ -46,7 +46,7 @@
RwYang,
RwNsrYang,
NsrYang,
- NsdYang,
+ ProjectNsdYang as NsdYang,
RwVlrYang,
VnfrYang,
RwVnfrYang,
@@ -61,6 +61,12 @@
import rift.mano.ncclient
import rift.mano.config_data.config
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from . import rwnsm_conman as conman
from . import cloud
@@ -225,7 +231,7 @@
"sdn_account": self._sdn_account_name,
"operational_status": 'init',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
elif self._vnffgr_state == VnffgRecordState.TERMINATED:
vnffgr_dict = {"id": self._vnffgr_id,
"vnffgd_id_ref": self._vnffgd_msg.id,
@@ -233,7 +239,7 @@
"sdn_account": self._sdn_account_name,
"operational_status": 'terminated',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
else:
try:
vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id)
@@ -246,7 +252,7 @@
"sdn_account": self._sdn_account_name,
"operational_status": 'failed',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
return vnffgr
@@ -258,7 +264,7 @@
"vnffgd_name_ref": self._vnffgd_msg.name,
"sdn_account": self._sdn_account_name,
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
for rsp in self._vnffgd_msg.rsp:
vnffgr_rsp = vnffgr.rsp.add()
vnffgr_rsp.id = str(uuid.uuid4())
@@ -270,9 +276,11 @@
vnfd = [vnfr.vnfd for vnfr in self._nsr.vnfrs.values() if vnfr.vnfd.id == rsp_cp_ref.vnfd_id_ref]
self._log.debug("VNFD message during VNFFG instantiation is %s",vnfd)
if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'):
- self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
+ self._log.debug("Service Function Type for VNFD ID %s is %s",
+ rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
else:
- self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref)
+ self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",
+ rsp_cp_ref.vnfd_id_ref)
continue
vnfr_cp_ref = vnffgr_rsp.vnfr_connection_point_ref.add()
@@ -293,7 +301,8 @@
self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
if vnfr.operational_status == 'failed':
self._log.error("Fetching VNFR for %s failed", vnfr.id)
- raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+ raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" %
+ (self.id, vnfr.id))
yield from asyncio.sleep(2, loop=self._loop)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
@@ -320,7 +329,8 @@
rsp_id_ref = _rsp[0].id
rsp_name = _rsp[0].name
else:
- self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
+ self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",
+ vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
continue
vnffgr_classifier = vnffgr.classifier.add()
vnffgr_classifier.id = vnffgd_classifier.id
@@ -344,7 +354,8 @@
self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
if vnfr.operational_status == 'failed':
self._log.error("Fetching VNFR for %s failed", vnfr.id)
- raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+ raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" %
+ (self.id, vnfr.id))
yield from asyncio.sleep(2, loop=self._loop)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
@@ -357,8 +368,9 @@
for ext_intf in vdu.external_interface:
if ext_intf.name == vnffgr_classifier.vnfr_connection_point_ref:
vnffgr_classifier.vm_id = vdu.vim_id
- self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
- vnfr_cp_ref.connection_point_params.vm_id)
+ self._log.debug("VIM ID for CP %s in VNFR %s is %s",
+ cp.name,nsr_vnfr.id,
+ vnfr_cp_ref.connection_point_params.vm_id)
break
self._log.info("VNFFGR msg to be sent is %s", vnffgr)
@@ -459,7 +471,9 @@
XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
@staticmethod
@asyncio.coroutine
- def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id, restart_mode=False):
+ def create_record(dts, log, loop, project, nsr_name, vld_msg,
+ cloud_account_name, om_datacenter, ip_profile,
+ nsr_id, restart_mode=False):
"""Creates a new VLR object based on the given data.
If restart mode is enabled, then we look for existing records in the
@@ -472,6 +486,7 @@
dts,
log,
loop,
+ project,
nsr_name,
vld_msg,
cloud_account_name,
@@ -482,7 +497,7 @@
if restart_mode:
res_iter = yield from dts.query_read(
- "D,/vlr:vlr-catalog/vlr:vlr",
+ project.add_project("D,/vlr:vlr-catalog/vlr:vlr"),
rwdts.XactFlag.MERGE)
for fut in res_iter:
@@ -498,10 +513,12 @@
return vlr_obj
- def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id):
+ def __init__(self, dts, log, loop, project, nsr_name, vld_msg,
+ cloud_account_name, om_datacenter, ip_profile, nsr_id):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_name = nsr_name
self._vld_msg = vld_msg
self._cloud_account_name = cloud_account_name
@@ -517,7 +534,8 @@
@property
def xpath(self):
""" path for this object """
- return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id)
+ return self._project.add_project("D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".
+ format(self._vlr_id))
@property
def id(self):
@@ -615,7 +633,7 @@
vlr_dict['ip_profile_params' ] = self._ip_profile.ip_profile_params.as_dict()
vlr_dict.update(vld_copy_dict)
- vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+ vlr = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict(vlr_dict)
return vlr
def reset_id(self, vlr_id):
@@ -623,7 +641,7 @@
def create_nsr_vlr_msg(self, vnfrs):
""" The VLR message"""
- nsr_vlr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr()
+ nsr_vlr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vlr()
nsr_vlr.vlr_ref = self._vlr_id
nsr_vlr.assigned_subnet = self.assigned_subnet
nsr_vlr.cloud_account = self.cloud_account_name
@@ -721,7 +739,7 @@
@staticmethod
@asyncio.coroutine
- def create_record(dts, log, loop, vnfd, const_vnfd_msg, nsd_id, nsr_name,
+ def create_record(dts, log, loop, project, vnfd, const_vnfd_msg, nsd_id, nsr_name,
cloud_account_name, om_datacenter_name, nsr_id, group_name, group_instance_id,
placement_groups, restart_mode=False):
"""Creates a new VNFR object based on the given data.
@@ -736,6 +754,7 @@
dts,
log,
loop,
+ project,
vnfd,
const_vnfd_msg,
nsd_id,
@@ -750,7 +769,7 @@
if restart_mode:
res_iter = yield from dts.query_read(
- "D,/vnfr:vnfr-catalog/vnfr:vnfr",
+ project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr"),
rwdts.XactFlag.MERGE)
for fut in res_iter:
@@ -767,6 +786,7 @@
dts,
log,
loop,
+ project,
vnfd,
const_vnfd_msg,
nsd_id,
@@ -781,6 +801,7 @@
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._vnfd = vnfd
self._const_vnfd_msg = const_vnfd_msg
self._nsd_id = nsd_id
@@ -820,7 +841,8 @@
@property
def xpath(self):
""" VNFR xpath """
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+ return self._project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']"
+ .format(self.id))
@property
def vnfr_msg(self):
@@ -830,7 +852,9 @@
@property
def const_vnfr_msg(self):
""" VNFR message """
- return RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConstituentVnfrRef(vnfr_id=self.id,cloud_account=self.cloud_account_name,om_datacenter=self._om_datacenter_name)
+ return RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConstituentVnfrRef(
+ vnfr_id=self.id, cloud_account=self.cloud_account_name,
+ om_datacenter=self._om_datacenter_name)
@property
def vnfd(self):
@@ -896,7 +920,8 @@
@staticmethod
def vnfr_xpath(vnfr):
""" Get the VNFR path from VNFR """
- return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id)
+ return (self._project.add_project(VirtualNetworkFunctionRecord.XPATH) +
+ "[vnfr:id = '{}']").format(vnfr.id)
@property
def config_type(self):
@@ -956,9 +981,10 @@
}
vnfr_dict.update(vnfd_copy_dict)
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
- vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict(),
- ignore_missing_keys=True)
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd. \
+ from_dict(self.vnfd.as_dict(),
+ ignore_missing_keys=True)
vnfr.member_vnf_index_ref = self.member_vnf_index
vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())
@@ -981,7 +1007,7 @@
format(self.name, self.vnfr_msg))
yield from self._dts.query_update(
self.xpath,
- rwdts.XactFlag.TRACE,
+ 0, #rwdts.XactFlag.TRACE,
self.vnfr_msg
)
@@ -1068,8 +1094,10 @@
return None
# For every connection point in the VNFD fill in the identifier
+ self._log.debug("Add connection point for VNF %s: %s",
+ self.vnfr_msg.name, self._vnfd.connection_point)
for conn_p in self._vnfd.connection_point:
- cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint()
+ cpr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint()
cpr.name = conn_p.name
cpr.type_yang = conn_p.type_yang
if conn_p.has_field('port_security_enabled'):
@@ -1099,9 +1127,6 @@
self._log.info("Created VNF with xpath %s and vnfr %s",
self.xpath, self.vnfr_msg)
- self._log.info("Instantiated VNFR with xpath %s and vnfd %s, vnfr %s",
- self.xpath, self._vnfd, self.vnfr_msg)
-
@asyncio.coroutine
def update_state(self, vnfr_msg):
""" Update this VNFR"""
@@ -1222,7 +1247,7 @@
event_list = []
idx = 1
for entry in self._events:
- event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents()
+ event = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_OperationalEvents()
event.id = idx
idx += 1
event.timestamp, event.event, event.description, event.details = entry
@@ -1234,7 +1259,8 @@
""" Network service record """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False,
+ def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg,
+ sdn_account_name, key_pairs, project, restart_mode=False,
vlr_handler=None):
self._dts = dts
self._log = log
@@ -1244,6 +1270,7 @@
self._nsm_plugin = nsm_plugin
self._sdn_account_name = sdn_account_name
self._vlr_handler = vlr_handler
+ self._project = project
self._nsd = None
self._nsr_msg = None
@@ -1277,7 +1304,7 @@
self.set_state(NetworkServiceRecordState.INIT)
- self.substitute_input_parameters = InputParameterSubstitution(self._log)
+ self.substitute_input_parameters = InputParameterSubstitution(self._log, self._project)
@property
def nsm_plugin(self):
@@ -1393,7 +1420,7 @@
for group_info in self._nsr_cfg_msg.nsd_placement_group_maps:
if group_info.placement_group_ref == input_group.name:
- group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+ group = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_PlacementGroupsInfo()
group_dict = {k:v for k,v in
group_info.as_dict().items() if k != 'placement_group_ref'}
for param in copy_dict:
@@ -1416,7 +1443,7 @@
""" Fetch Cloud Account for the passed vnfd id """
if self._nsr_cfg_msg.vnf_cloud_account_map:
vim_accounts = [(vnf.cloud_account,vnf.om_datacenter) for vnf in self._nsr_cfg_msg.vnf_cloud_account_map \
- if vnfd_member_index == vnf.member_vnf_index_ref]
+ if str(vnfd_member_index) == vnf.member_vnf_index_ref]
if vim_accounts and vim_accounts[0]:
return vim_accounts[0]
return (self.cloud_account_name,self.om_datacenter_name)
@@ -1465,14 +1492,16 @@
def vlr_uptime_update(self, vlr):
try:
- vlr_ = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict({'id': vlr.id})
+ vlr_ = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict({'id': vlr.id})
while True:
vlr_.uptime = int(time.time()) - vlr._create_time
- yield from self._vlr_handler.update(None, VirtualLinkRecord.vlr_xpath(vlr), vlr_)
+ xpath = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
+ yield from self._vlr_handler.update(None, xpath, vlr_)
yield from asyncio.sleep(2, loop=self._loop)
except asyncio.CancelledError:
self._log.debug("Received cancellation request for vlr_uptime_update task")
- yield from self._vlr_handler.delete(None, VirtualLinkRecord.vlr_xpath(vlr))
+ xpath = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
+ yield from self._vlr_handler.delete(None, xpath)
@asyncio.coroutine
@@ -1850,6 +1879,7 @@
self._dts,
self._log,
self._loop,
+ self._project,
self.name,
vld,
cloud_account,
@@ -2008,7 +2038,7 @@
for group in self.nsd_msg.placement_groups:
for member_vnfd in group.member_vnfd:
if (member_vnfd.vnfd_id_ref == vnfd_msg.id) and \
- (member_vnfd.member_vnf_index_ref == const_vnfd.member_vnf_index):
+ (member_vnfd.member_vnf_index_ref == str(const_vnfd.member_vnf_index)):
group_info = self.resolve_placement_group_cloud_construct(group)
if group_info is None:
self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
@@ -2034,6 +2064,7 @@
vnfr = yield from VirtualNetworkFunctionRecord.create_record(self._dts,
self._log,
self._loop,
+ self._project,
vnfd_msg,
const_vnfd,
self.nsd_id,
@@ -2189,23 +2220,23 @@
@property
def nsr_xpath(self):
""" Returns the xpath associated with this NSR """
- return(
+ return self._project.add_project((
"D,/nsr:ns-instance-opdata" +
"/nsr:nsr[nsr:ns-instance-config-ref = '{}']"
- ).format(self.id)
+ ).format(self.id))
@staticmethod
def xpath_from_nsr(nsr):
""" Returns the xpath associated with this NSR op data"""
- return (NetworkServiceRecord.XPATH +
- "[nsr:ns-instance-config-ref = '{}']").format(nsr.id)
+ return self._project.add_project((NetworkServiceRecord.XPATH +
+ "[nsr:ns-instance-config-ref = '{}']").format(nsr.id))
@property
def nsd_xpath(self):
""" Return NSD config xpath."""
- return(
- "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']"
- ).format(self.nsd_id)
+ return self._project.add_project((
+ "C,/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id = '{}']"
+ ).format(self.nsd_id))
@asyncio.coroutine
def instantiate(self, config_xact):
@@ -2487,7 +2518,7 @@
def create_msg(self):
""" The network serice record as a message """
nsr_dict = {"ns_instance_config_ref": self.id}
- nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
#nsr.cloud_account = self.cloud_account_name
nsr.sdn_account = self._sdn_account_name
nsr.name_ref = self.name
@@ -2501,7 +2532,7 @@
nsr.uptime = int(time.time()) - self._create_time
for cfg_prim in self.nsd_msg.service_primitive:
- cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
+ cfg_prim = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
cfg_prim.as_dict())
nsr.service_primitive.append(cfg_prim)
@@ -2641,7 +2672,7 @@
This class is responsible for substituting input parameters into an NSD.
"""
- def __init__(self, log):
+ def __init__(self, log, project):
"""Create an instance of InputParameterSubstitution
Arguments:
@@ -2649,6 +2680,27 @@
"""
self.log = log
+ self.project = project
+
+ def _fix_xpath(self, xpath):
+ # Fix the parameter.xpath to include project and correct namespace
+ self.log.error("Provided xpath: {}".format(xpath))
+ #Split the xpath at the /
+ attrs = xpath.split('/')
+ new_xp = attrs[0]
+ for attr in attrs[1:]:
+ new_ns = 'project-nsd'
+ name = attr
+ if ':' in attr:
+ # Includes namespace
+ ns, name = attr.split(':', 2)
+ if ns == "rw-nsd":
+ ns = "rw-project-nsd"
+
+ new_xp = new_xp + '/' + new_ns + ':' + name
+
+ self.log.error("Updated xpath: {}".format(new_xp))
+ return new_xp
def __call__(self, nsd, nsr_config):
"""Substitutes input parameters from the NSR config into the NSD
@@ -2686,7 +2738,8 @@
)
try:
- xpath.setxattr(nsd, param.xpath, param.value)
+ xp = self._fix_xpath(param.xpath)
+ xpath.setxattr(nsd, xp, param.value)
except Exception as e:
self.log.exception(e)
@@ -2747,7 +2800,9 @@
@staticmethod
def path_for_id(nsd_id):
""" Return path for the passed nsd_id"""
- return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}'".format(nsd_id)
+ return self._nsm._project.add_project(
+ "C,/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id = '{}'".
+ format(nsd_id))
def path(self):
""" Return the message associated with this NetworkServiceDescriptor"""
@@ -2760,7 +2815,7 @@
class NsdDtsHandler(object):
""" The network service descriptor DTS handler """
- XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+ XPATH = "C,/project-nsd:nsd-catalog/project-nsd:nsd"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
@@ -2769,6 +2824,7 @@
self._nsm = nsm
self._regh = None
+ self._project = nsm._project
@property
def regh(self):
@@ -2779,16 +2835,27 @@
def register(self):
""" Register for Nsd create/update/delete/read requests from dts """
+ if self._regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
self._log.debug("Got nsd apply cfg (xact:%s) (action:%s)",
xact, action)
- # Create/Update an NSD record
- for cfg in self._regh.get_xact_elements(xact):
- # Only interested in those NSD cfgs whose ID was received in prepare callback
- if cfg.id in scratch.get('nsds', []) or is_recovery:
- self._nsm.update_nsd(cfg)
+
+ if self._regh:
+ # Create/Update an NSD record
+ for cfg in self._regh.get_xact_elements(xact):
+ # Only interested in those NSD cfgs whose ID was received in prepare callback
+ if cfg.id in scratch.get('nsds', []) or is_recovery:
+ self._nsm.update_nsd(cfg)
+
+ else:
+ self._log.error("No reg handle for {} for project {}".
+ format(self.__class__, self._project.name))
scratch.pop('nsds', None)
@@ -2806,7 +2873,7 @@
except Exception as e:
self._log.error("Exception in cleaning up NSD libs {}: {}".
format(nsd_id, e))
- self._log.excpetion(e)
+ self._log.exception(e)
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
@@ -2846,14 +2913,21 @@
# Need a list in scratch to store NSDs to create/update later
# acg._scratch['nsds'] = list()
self._regh = acg.register(
- xpath=NsdDtsHandler.XPATH,
+ xpath=self._project.add_project(NsdDtsHandler.XPATH),
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare)
+ def deregister(self):
+ self._log.debug("De-register NSD handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
- XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ XPATH = "C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
@@ -2861,6 +2935,7 @@
self._loop = loop
self._nsm = nsm
self._regh = None
+ self._project = nsm._project
@property
def regh(self):
@@ -2871,21 +2946,31 @@
def register(self):
""" Register for VNFD configuration"""
+ if self._regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
self._log.debug("Got NSM VNFD apply (xact: %s) (action: %s)(scr: %s)",
xact, action, scratch)
- # Create/Update a VNFD record
- for cfg in self._regh.get_xact_elements(xact):
- # Only interested in those VNFD cfgs whose ID was received in prepare callback
- if cfg.id in scratch.get('vnfds', []):
- self._nsm.update_vnfd(cfg)
+ if self._regh:
+ # Create/Update a VNFD record
+ for cfg in self._regh.get_xact_elements(xact):
+ # Only interested in those VNFD cfgs whose ID was received in prepare callback
+ if cfg.id in scratch.get('vnfds', []):
+ self._nsm.update_vnfd(cfg)
- for cfg in self._regh.elements:
- if cfg.id in scratch.get('deleted_vnfds', []):
- yield from self._nsm.delete_vnfd(cfg.id)
+ for cfg in self._regh.elements:
+ if cfg.id in scratch.get('deleted_vnfds', []):
+ yield from self._nsm.delete_vnfd(cfg.id)
+
+ else:
+ self._log.error("Reg handle none for {} in project {}".
+ format(self.__class__, self._project))
scratch.pop('vnfds', None)
scratch.pop('deleted_vnfds', None)
@@ -2911,20 +2996,28 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ xpath = self._project.add_project(VnfdDtsHandler.XPATH)
self._log.debug(
- "Registering for VNFD config using xpath: %s",
- VnfdDtsHandler.XPATH,
- )
+ "Registering for VNFD config using xpath {} for project {}"
+ .format(xpath, self._project))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
# Need a list in scratch to store VNFDs to create/update later
# acg._scratch['vnfds'] = list()
# acg._scratch['deleted_vnfds'] = list()
self._regh = acg.register(
- xpath=VnfdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
on_prepare=on_prepare)
+ def deregister(self):
+ self._log.debug("De-register VNFD handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+
class NsrRpcDtsHandler(object):
""" The network service instantiation RPC DTS handler """
EXEC_NSR_CONF_XPATH = "I,/nsr:start-network-service"
@@ -2946,7 +3039,9 @@
self._ns_regh = None
self._manager = None
- self._nsr_config_url = NsrRpcDtsHandler.REST_BASE_V2_URL + 'config/ns-instance-config'
+ self._nsr_config_url = NsrRpcDtsHandler.REST_BASE_V2_URL + \
+ 'config/project/{}/ns-instance-config'. \
+ format(self._nsm._project.name)
self._model = RwYang.Model.create_libncx()
self._model.load_schema_ypbc(RwNsrYang.get_schema())
@@ -2994,24 +3089,39 @@
def _apply_ns_instance_config(self,payload_dict):
#self._log.debug("At apply NS instance config with payload %s",payload_dict)
- req_hdr= {'accept':'application/vnd.yang.data+json','content-type':'application/vnd.yang.data+json'}
- response=requests.post(self._nsr_config_url, headers=req_hdr, auth=('admin', 'admin'),data=payload_dict,verify=False)
+ req_hdr= {'accept':'application/vnd.yang.data+json',
+ 'content-type':'application/vnd.yang.data+json'}
+ response=requests.post(self._nsr_config_url, headers=req_hdr,
+ auth=('admin', 'admin'),data=payload_dict,verify=False)
return response
@asyncio.coroutine
def register(self):
""" Register for NS monitoring read from dts """
+ if self._ns_regh:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_ns_config_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts start-network-service"""
assert action == rwdts.QueryAction.RPC
rpc_ip = msg
- rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
- "nsr_id":str(uuid.uuid4())
- })
- if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
- self._log.error("Mandatory parameters name or nsd_ref or cloud account not found in start-network-service {}".format(rpc_ip))
+ if not self._nsm._project.rpc_check(msg, xact_info=xact_info):
+ return
+
+ rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
+ "nsr_id":str(uuid.uuid4()),
+ "project_name": msg.prject_name,
+ })
+
+ if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and
+ ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
+ self._log.error("Mandatory parameters name or nsd_ref or " +
+ "cloud account not found in start-network-service {}".
+ format(rpc_ip))
self._log.debug("start-network-service RPC input: {}".format(rpc_ip))
@@ -3030,11 +3140,11 @@
ns_instance_config_dict = {"id":rpc_op.nsr_id, "admin_status":"ENABLED"}
ns_instance_config_copy_dict = {k:v for k, v in rpc_ip.as_dict().items()
- if k in RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr().fields}
+ if k in RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr().fields}
ns_instance_config_dict.update(ns_instance_config_copy_dict)
- ns_instance_config = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
- ns_instance_config.nsd = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ ns_instance_config = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
+ ns_instance_config.nsd = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
ns_instance_config.nsd.from_dict(nsd_copy.msg.as_dict())
payload_dict = ns_instance_config.to_json(self._model)
@@ -3077,6 +3187,13 @@
flags=rwdts.Flag.PUBLISHER,
)
+ def deregister(self):
+ self._log.debug("De-register NSR RPC for project {}".
+ format(self._nsm._project.name))
+ if self._ns_regh:
+ self._ns_regh.deregister()
+ self._ns_regh = None
+
class NsrDtsHandler(object):
""" The network service DTS handler """
@@ -3089,6 +3206,7 @@
self._log = log
self._loop = loop
self._nsm = nsm
+ self._project = self._nsm._project
self._nsr_regh = None
self._scale_regh = None
@@ -3103,13 +3221,18 @@
def register(self):
""" Register for Nsr create/update/delete/read requests from dts """
+ if self._nsr_regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
@@ -3200,32 +3323,6 @@
for vld in vl_delta["deleted"]:
yield from self._nsm.nsr_terminate_vl(nsr_id, vld)
- def get_add_delete_update_cfgs(dts_member_reg, xact, key_name, scratch):
- # Unfortunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys
- if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
def get_nsr_key_pairs(dts_member_reg, xact):
key_pairs = {}
for instance_cfg, keyspec in dts_member_reg.get_xact_elements(xact, include_keyspec=True):
@@ -3274,24 +3371,36 @@
def begin_instantiation(nsr):
# Begin instantiation
self._log.info("Beginning NS instantiation: %s", nsr.id)
- yield from self._nsm.instantiate_ns(nsr.id, xact)
+ try:
+ yield from self._nsm.instantiate_ns(nsr.id, xact)
+ except Exception as e:
+ self._log.exception("NS instantiation: {}".format(e))
+ raise e
self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
xact, action, scratch)
if action == rwdts.AppconfAction.INSTALL and xact.id is None:
key_pairs = []
- for element in self._key_pair_regh.elements:
- key_pairs.append(element)
- for element in self._nsr_regh.elements:
- nsr = handle_create_nsr(element, key_pairs, restart_mode=True)
- self._loop.create_task(begin_instantiation(nsr))
+ if self._key_pair_regh:
+ for element in self._key_pair_regh.elements:
+ key_pairs.append(element)
+ else:
+ self._log.error("Reg handle none for key pair in project {}".
+ format(self._project))
+
+ if self._nsr_regh:
+ for element in self._nsr_regh.elements:
+ nsr = handle_create_nsr(element, key_pairs, restart_mode=True)
+ self._loop.create_task(begin_instantiation(nsr))
+ else:
+ self._log.error("Reg handle none for NSR in project {}".
+ format(self._project))
(added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh,
xact,
- "id",
- scratch)
+ "id")
self._log.debug("Added: %s, Deleted: %s, Updated: %s", added_msgs,
deleted_msgs, updated_msgs)
@@ -3402,24 +3511,40 @@
acg.handle.prepare_complete_ok(xact_info.handle)
- self._log.debug("Registering for NSR config using xpath: %s",
- NsrDtsHandler.NSR_XPATH)
+ xpath = self._project.add_project(NsrDtsHandler.NSR_XPATH)
+ self._log.debug("Registering for NSR config using xpath: {}".
+ format(xpath))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
- self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
- on_prepare=on_prepare)
+ self._nsr_regh = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare
+ )
self._scale_regh = acg.register(
- xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
- )
+ xpath=self._project.add_project(NsrDtsHandler.SCALE_INSTANCE_XPATH),
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
+ )
self._key_pair_regh = acg.register(
- xpath=NsrDtsHandler.KEY_PAIR_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
- )
+ xpath=self._project.add_project(NsrDtsHandler.KEY_PAIR_XPATH),
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ )
+
+ def deregister(self):
+ self._log.debug("De-register NSR config for project {}".
+ format(self._project.name))
+ if self._nsr_regh:
+ self._nsr_regh.deregister()
+ self._nsr_regh = None
+ if self._scale_regh:
+ self._scale_regh.deregister()
+ self._scale_regh = None
+ if self._key_pair_regh:
+ self._key_pair_regh.deregister()
+ self._key_pair_regh = None
class NsrOpDataDtsHandler(object):
@@ -3431,6 +3556,8 @@
self._log = log
self._loop = loop
self._nsm = nsm
+
+ self._project = nsm._project
self._regh = None
@property
@@ -3446,39 +3573,55 @@
@asyncio.coroutine
def register(self):
""" Register for Nsr op data publisher registration"""
- self._log.debug("Registering Nsr op data path %s as publisher",
- NsrOpDataDtsHandler.XPATH)
+ if self._regh:
+ self._log.warning("NSR op data handler already registered for project {}".
+ format(self._project.name))
+ return
+
+ xpath = self._project.add_project(NsrOpDataDtsHandler.XPATH)
+ self._log.debug("Registering Nsr op data path {} as publisher".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
handlers = rift.tasklets.Group.Handler()
with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE)
+ def deregister(self):
+ self._log.debug("De-register NSR opdata for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
- def create(self, path, msg):
+ def create(self, xpath, msg):
"""
Create an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating NSR %s:%s", path, msg)
self.regh.create_element(path, msg)
self._log.debug("Created NSR, %s:%s", path, msg)
@asyncio.coroutine
- def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating NSR, %s:%s regh = %s", path, msg, self.regh)
self.regh.update_element(path, msg, flags)
self._log.debug("Updated NSR, %s:%s", path, msg)
@asyncio.coroutine
- def delete(self, path):
+ def delete(self, xpath):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting NSR path:%s", path)
self.regh.delete_element(path)
self._log.debug("Deleted NSR path:%s", path)
@@ -3509,6 +3652,11 @@
@asyncio.coroutine
def register(self):
""" Register for vnfr create/update/delete/ advises from dts """
+ if self._regh:
+ self._log.warning("VNFR DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def on_commit(xact_info):
""" The transaction has been committed """
@@ -3524,16 +3672,17 @@
xact_info, action, ks_path, msg
)
- schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
- if path_entry.key00.id not in self._nsm._vnfrs:
- self._log.error("%s request for non existent record path %s",
- action, xpath)
+ if not path_entry or (path_entry.key00.id not in self._nsm._vnfrs):
+ # Check if this is a monitoring param xpath
+ if 'vnfr:monitoring-param' not in xpath:
+ self._log.error("%s request for non existent record path %s",
+ action, xpath)
xact_info.respond_xpath(rwdts.XactRspCode.NA, xpath)
return
- self._log.debug("Deleting VNFR with id %s", path_entry.key00.id)
if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
yield from self._nsm.update_vnfr(msg)
elif action == rwdts.QueryAction.DELETE:
@@ -3548,10 +3697,17 @@
hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._nsm._project.add_project(
+ VnfrDtsHandler.XPATH),
handler=hdl,
flags=(rwdts.Flag.SUBSCRIBER),)
+ def deregister(self):
+ self._log.debug("De-register VNFR for project {}".
+ format(self._nsm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class NsdRefCountDtsHandler(object):
""" The NSD Ref Count DTS handler """
@@ -3578,6 +3734,11 @@
@asyncio.coroutine
def register(self):
""" Register for NSD ref count read from dts """
+ if self._regh:
+ self._log.warning("NSD ref DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
@@ -3585,7 +3746,7 @@
xpath = ks_path.to_xpath(RwNsrYang.get_schema())
if action == rwdts.QueryAction.READ:
- schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount.schema()
+ schema = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_NsdRefCount.schema()
path_entry = schema.keyspec_to_entry(ks_path)
nsd_list = yield from self._nsm.get_nsd_refcount(path_entry.key00.nsd_id_ref)
for xpath, msg in nsd_list:
@@ -3598,19 +3759,28 @@
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=NsdRefCountDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._nsm._project.add_project(
+ NsdRefCountDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,)
+ def deregister(self):
+ self._log.debug("De-register NSD Ref count for project {}".
+ format(self._nsm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
class NsManager(object):
""" The Network Service Manager class"""
- def __init__(self, dts, log, loop,
+ def __init__(self, dts, log, loop, project,
nsr_handler, vnfr_handler, vlr_handler, ro_plugin_selector,
vnffgmgr, vnfd_pub_handler, cloud_account_handler):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_handler = nsr_handler
self._vnfr_pub_handler = vnfr_handler
self._vlr_pub_handler = vlr_handler
@@ -3641,8 +3811,9 @@
VnfrDtsHandler(dts, log, loop, self),
NsdRefCountDtsHandler(dts, log, loop, self),
NsrDtsHandler(dts, log, loop, self),
- ScalingRpcHandler(log, dts, loop, self.scale_rpc_callback),
- NsrRpcDtsHandler(dts,log,loop,self),
+ ScalingRpcHandler(log, dts, loop, self._project,
+ self.scale_rpc_callback),
+ NsrRpcDtsHandler(dts, log, loop, self),
self._vnfd_dts_handler,
self.cfgmgr_obj,
]
@@ -3718,6 +3889,11 @@
for dts_handle in self._dts_handlers:
yield from dts_handle.register()
+ def deregister(self):
+ """ Register all static DTS handlers """
+ for dts_handle in self._dts_handlers:
+ dts_handle.deregister()
+
def get_ns_by_nsr_id(self, nsr_id):
""" get NSR by nsr id """
@@ -3757,12 +3933,16 @@
msg : RPC input
action : Scaling Action
"""
- ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
- ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+ ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
- xpath = ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').format(
- msg.nsr_id_ref)
- instance = ScalingGroupInstance.from_dict({"id": msg.instance_id})
+ xpath = self._project.add_project(
+ ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').
+ format(msg.nsr_id_ref))
+
+ instance = ScalingGroupInstance.from_dict({
+ "id": msg.instance_id,
+ "project_name": self._project.name,})
@asyncio.coroutine
def get_nsr_scaling_group():
@@ -3863,6 +4043,7 @@
nsr_msg,
sdn_account_name,
key_pairs,
+ self._project,
restart_mode=restart_mode,
vlr_handler=self._ro_plugin_selector._records_publisher._vlr_pub_hdlr
)
@@ -3921,7 +4102,7 @@
@asyncio.coroutine
def get_nsr_config(self, nsd_id):
- xpath = "C,/nsr:ns-instance-config"
+ xpath = self._project.add_project("C,/nsr:ns-instance-config")
results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
for result in results:
@@ -4100,18 +4281,18 @@
def nsd_refcount_xpath(nsd_id):
""" xpath for ref count entry """
- return (NsdRefCountDtsHandler.XPATH +
+ return (self._project.add_project(NsdRefCountDtsHandler.XPATH) +
"[rw-nsr:nsd-id-ref = '{}']").format(nsd_id)
nsd_list = []
if nsd_id is None or nsd_id == "":
for nsd in self._nsds.values():
- nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+ nsd_msg = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_NsdRefCount()
nsd_msg.nsd_id_ref = nsd.id
nsd_msg.instance_ref_count = nsd.ref_count
nsd_list.append((nsd_refcount_xpath(nsd.id), nsd_msg))
elif nsd_id in self._nsds:
- nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+ nsd_msg = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_NsdRefCount()
nsd_msg.nsd_id_ref = self._nsds[nsd_id].id
nsd_msg.instance_ref_count = self._nsds[nsd_id].ref_count
nsd_list.append((nsd_refcount_xpath(nsd_id), nsd_msg))
@@ -4147,10 +4328,12 @@
""" This class provides a publisher interface that allows plugin objects
to publish NSR/VNFR/VLR"""
- def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr):
+ def __init__(self, dts, log, loop, project, nsr_pub_hdlr,
+ vnfr_pub_hdlr, vlr_pub_hdlr,):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_pub_hdlr = nsr_pub_hdlr
self._vlr_pub_hdlr = vlr_pub_hdlr
self._vnfr_pub_hdlr = vnfr_pub_hdlr
@@ -4182,13 +4365,13 @@
@asyncio.coroutine
def publish_vlr(self, xact, vlr):
""" Publish a VLR """
- path = VirtualLinkRecord.vlr_xpath(vlr)
+ path = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
return (yield from self._vlr_pub_hdlr.update(xact, path, vlr))
@asyncio.coroutine
def unpublish_vlr(self, xact, vlr):
""" Unpublish a VLR """
- path = VirtualLinkRecord.vlr_xpath(vlr)
+ path = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
return (yield from self._vlr_pub_hdlr.delete(xact, path))
@@ -4202,24 +4385,35 @@
ACTION = Enum('ACTION', 'SCALE_IN SCALE_OUT')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
self.callback = callback
self.last_instance_id = defaultdict(int)
+ self._regh_in = None
+ self._regh_out = None
@asyncio.coroutine
def register(self):
+ if self._regh_in:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_scale_in_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
try:
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
if self.callback:
self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
- "instance_id": msg.instance_id})
+ "instance_id": msg.instance_id,
+ "project_name": self._project.name,})
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
@@ -4237,6 +4431,9 @@
assert action == rwdts.QueryAction.RPC
try:
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
scaling_group = msg.scaling_group_name_ref
if not msg.instance_id:
last_instance_id = self.last_instance_id[scale_group]
@@ -4247,7 +4444,8 @@
self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
- "instance_id": msg.instance_id})
+ "instance_id": msg.instance_id,
+ "project_name": self._project.name,})
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
@@ -4266,14 +4464,130 @@
on_prepare=on_scale_out_prepare)
with self.dts.group_create() as group:
- group.register(
- xpath=self.__class__.SCALE_IN_INPUT_XPATH,
- handler=scale_in_hdl,
- flags=rwdts.Flag.PUBLISHER)
- group.register(
- xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
- handler=scale_out_hdl,
- flags=rwdts.Flag.PUBLISHER)
+ self._regh_in = group.register(
+ xpath=self.__class__.SCALE_IN_INPUT_XPATH,
+ handler=scale_in_hdl,
+ flags=rwdts.Flag.PUBLISHER)
+ self._regh_out = group.register(
+ xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
+ handler=scale_out_hdl,
+ flags=rwdts.Flag.PUBLISHER)
+
+ def deregister(self):
+ self._log.debug("De-register scale RPCs for project {}".
+ format(self._project.name))
+ if self._regh_in:
+ self._regh_in.deregister()
+ self._regh_in = None
+ if self._regh_out:
+ self._regh_out.deregister()
+ self._regh_out = None
+
+
+class NsmProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(NsmProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._nsm = None
+
+ self._ro_plugin_selector = None
+ self._vnffgmgr = None
+
+ self._nsr_pub_handler = None
+ self._vnfr_pub_handler = None
+ self._vlr_pub_handler = None
+ self._vnfd_pub_handler = None
+ self._scale_cfg_handler = None
+
+ self._records_publisher_proxy = None
+
+ @asyncio.coroutine
+ def register(self):
+ self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(
+ self._dts, self.log, self.loop, self)
+ yield from self._nsr_pub_handler.register()
+
+ self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(
+ self._dts, self.log, self.loop, self)
+ yield from self._vnfr_pub_handler.register()
+
+ self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(
+ self._dts, self.log, self.loop, self)
+ yield from self._vlr_pub_handler.register()
+
+ manifest = self._tasklet.tasklet_info.get_pb_manifest()
+ use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
+ ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+ ssl_key = manifest.bootstrap_phase.rwsecurity.key
+
+ self._vnfd_pub_handler = publisher.VnfdPublisher(
+ use_ssl, ssl_cert, ssl_key, self.loop, self)
+
+ self._records_publisher_proxy = NsmRecordsPublisherProxy(
+ self._dts,
+ self.log,
+ self.loop,
+ self,
+ self._nsr_pub_handler,
+ self._vnfr_pub_handler,
+ self._vlr_pub_handler,
+ )
+
+ # Register the NSM to receive the nsm plugin
+ # when cloud account is configured
+ self._ro_plugin_selector = cloud.ROAccountPluginSelector(
+ self._dts,
+ self.log,
+ self.loop,
+ self,
+ self._records_publisher_proxy,
+ )
+ yield from self._ro_plugin_selector.register()
+
+ self._cloud_account_handler = cloud.CloudAccountConfigSubscriber(
+ self._log,
+ self._dts,
+ self.log_hdl,
+ self,
+ )
+
+ yield from self._cloud_account_handler.register()
+
+ self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop, self)
+ yield from self._vnffgmgr.register()
+
+ self._nsm = NsManager(
+ self._dts,
+ self.log,
+ self.loop,
+ self,
+ self._nsr_pub_handler,
+ self._vnfr_pub_handler,
+ self._vlr_pub_handler,
+ self._ro_plugin_selector,
+ self._vnffgmgr,
+ self._vnfd_pub_handler,
+ self._cloud_account_handler,
+ )
+
+ yield from self._nsm.register()
+
+ def deregister(self):
+ self._log.debug("Project {} de-register".format(self.name))
+ self._nsm.deregister()
+ self._vnffgmgr.deregister()
+ self._cloud_account_handler.deregister()
+ self._ro_plugin_selector.deregister()
+ self._nsm = None
+
+ @asyncio.coroutine
+ def delete_prepare(self):
+ # Check if any NS instance is present
+ if self._nsm and self._nsm._nsrs:
+ return False
+ return True
class NsmTasklet(rift.tasklets.Tasklet):
@@ -4286,18 +4600,12 @@
self.rwlog.set_subcategory("nsm")
self._dts = None
- self._nsm = None
+ self.project_handler = None
+ self.projects = {}
- self._ro_plugin_selector = None
- self._vnffgmgr = None
-
- self._nsr_handler = None
- self._vnfr_pub_handler = None
- self._vlr_pub_handler = None
- self._vnfd_pub_handler = None
- self._scale_cfg_handler = None
-
- self._records_publisher_proxy = None
+ @property
+ def dts(self):
+ return self._dts
def start(self):
""" The task start callback """
@@ -4328,67 +4636,11 @@
""" Task init callback """
self.log.debug("Got instance started callback")
- self.log.debug("creating config account handler")
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, NsmProject)
+ self.project_handler.register()
- self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop)
- yield from self._nsr_pub_handler.register()
- self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop)
- yield from self._vnfr_pub_handler.register()
-
- self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop)
- yield from self._vlr_pub_handler.register()
-
- manifest = self.tasklet_info.get_pb_manifest()
- use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
- ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
- ssl_key = manifest.bootstrap_phase.rwsecurity.key
-
- self._vnfd_pub_handler = publisher.VnfdPublisher(use_ssl, ssl_cert, ssl_key, self.loop)
-
- self._records_publisher_proxy = NsmRecordsPublisherProxy(
- self._dts,
- self.log,
- self.loop,
- self._nsr_pub_handler,
- self._vnfr_pub_handler,
- self._vlr_pub_handler,
- )
-
- # Register the NSM to receive the nsm plugin
- # when cloud account is configured
- self._ro_plugin_selector = cloud.ROAccountPluginSelector(
- self._dts,
- self.log,
- self.loop,
- self._records_publisher_proxy,
- )
- yield from self._ro_plugin_selector.register()
-
- self._cloud_account_handler = cloud.CloudAccountConfigSubscriber(
- self._log,
- self._dts,
- self.log_hdl)
-
- yield from self._cloud_account_handler.register()
-
- self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop)
- yield from self._vnffgmgr.register()
-
- self._nsm = NsManager(
- self._dts,
- self.log,
- self.loop,
- self._nsr_pub_handler,
- self._vnfr_pub_handler,
- self._vlr_pub_handler,
- self._ro_plugin_selector,
- self._vnffgmgr,
- self._vnfd_pub_handler,
- self._cloud_account_handler
- )
-
- yield from self._nsm.register()
@asyncio.coroutine
def run(self):
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py
index 4d6cde4..0b04efd 100755
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py
@@ -1,6 +1,6 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -61,20 +61,26 @@
class VnffgMgr(object):
""" Implements the interface to backend plugins to fetch topology """
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._account = {}
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._sdn = {}
- self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self)
+ self._sdn_handler = SDNAccountDtsHandler(self._dts, self._log, self)
self._vnffgr_list = {}
@asyncio.coroutine
def register(self):
yield from self._sdn_handler.register()
+ def deregister(self):
+ self._log.debug("Project {} de-register vnffgmgr".
+ format(self._project.name))
+ self._sdn_handler.deregister()
+
def set_sdn_account(self,account):
if (account.name in self._account):
self._log.error("SDN Account is already set")
@@ -329,8 +335,10 @@
self._dts = dts
self._log = log
self._parent = parent
+ self._project = self._parent._project
self._sdn_account = {}
+ self._reg = None
def _set_sdn_account(self, account):
self._log.info("Setting sdn account: {}".format(account))
@@ -380,9 +388,11 @@
if msg.has_field("account_type"):
errmsg = "Cannot update SDN account's account-type."
self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
- errmsg)
+ xact_info.send_error_xpath(
+ RwTypes.RwStatus.FAILURE,
+ self._project.add_project(SDNAccountDtsHandler.XPATH),
+ errmsg
+ )
raise SdnAccountError(errmsg)
# Update the sdn account record
@@ -392,9 +402,11 @@
if not msg.has_field('account_type'):
errmsg = "New SDN account must contain account-type field."
self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
- errmsg)
+ xact_info.send_error_xpath(
+ RwTypes.RwStatus.FAILURE,
+ self._project.add_project(SDNAccountDtsHandler.XPATH),
+ errmsg
+ )
raise SdnAccountError(errmsg)
# Set the sdn account record
@@ -403,20 +415,23 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Sdn Account config using xpath: %s",
- SDNAccountDtsHandler.XPATH,
- )
+ xpath = self._project.add_project(SDNAccountDtsHandler.XPATH)
+ self._log.debug("Registering for Sdn Account config using xpath: {}".
+ format(xpath))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
- acg.register(
- xpath=SDNAccountDtsHandler.XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- on_prepare=on_prepare
- )
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+ on_prepare=on_prepare
+ )
-
-
+ def deregister(self):
+ self._log.debug("De-register SDN Account handler in vnffg for project".
+ format(self._project.name))
+ self._reg.deregister()
+ self._reg = None
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py
index 8bbf894..9112f48 100644
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,10 @@
from enum import Enum
-from gi.repository import NsdYang, NsrYang
+from gi.repository import (
+ ProjectNsdYang as NsdYang,
+ NsrYang
+ )
class ScalingGroupIndexExists(Exception):
@@ -104,7 +107,7 @@
def create_record_msg(self):
""" Returns a NSR Scaling group record """
- msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord(
+ msg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ScalingGroupRecord(
scaling_group_name_ref=self.name,
)
@@ -259,7 +262,7 @@
return self._vnfrs.values()
def create_record_msg(self):
- msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
+ msg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
instance_id=self._instance_id,
create_time=self._create_time,
op_status=self._op_status,
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py
index d8c6ade..16359c9 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py
@@ -33,14 +33,15 @@
class DownloadStatusPublisher(mano_dts.DtsHandler, url_downloader.DownloaderProtocol):
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project)
self.tasks = {}
def xpath(self, download_id=None):
- return ("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
- ("[download-id='{}']".format(download_id) if download_id else ""))
+ return self._project.add_project("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
+ ("[download-id='{}']".
+ format(download_id) if download_id else ""))
@asyncio.coroutine
def _dts_publisher(self, job):
@@ -54,6 +55,13 @@
flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
assert self.reg is not None
+
+ def dergister(self):
+ self._log.debug("De-registering download status for project {}".
+ format(self.project.name))
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
@staticmethod
def _async_func(func, fut):
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rpc.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rpc.py
index a71f108..d360640 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rpc.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rpc.py
@@ -68,12 +68,12 @@
class SchemaRpcHandler(mano_dts.AbstractRpcHandler):
"""RPC handler to generate the schema for the packages.
"""
- def __init__(self, log, dts, loop, proxy):
+ def __init__(self, log, dts, loop, project, proxy):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.proxy = proxy
@property
@@ -103,22 +103,36 @@
3. Return a tracking ID for the client to monitor the entire status
"""
- def __init__(self, log, dts, loop, proxy, publisher):
+ def __init__(self, log, dts, loop, proxy, tasklet):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
- publisher: Instance of DownloadStatusPublisher
+ publisher: Instance of tasklet to find the DownloadStatusPublisher
+ for a specific project
"""
super().__init__(log, dts, loop)
self.proxy = proxy
- self.publisher = publisher
+ self.tasklet = tasklet
@property
def xpath(self):
return "/rw-pkg-mgmt:package-file-add"
+ def get_publisher(self, msg):
+ try:
+ proj = self.tasklet.projects[msg.project_name]
+ except Exception as e:
+ err = "Project or project name not found {}: {}". \
+ format(msg.as_dict(), e)
+ self.log.error (err)
+ raise Exception (err)
+
+ return proj.job_handler
+
@asyncio.coroutine
def callback(self, ks_path, msg):
+ publisher = self.get_publisher(msg)
+
if not msg.external_url:
# For now we will only support External URL download
raise Exception ("No download URL provided")
@@ -139,20 +153,20 @@
proxy=self.proxy,
log=self.log)
- download_id = yield from self.publisher.register_downloader(url_downloader)
+ download_id = yield from publisher.register_downloader(url_downloader)
rpc_op = RPC_PACKAGE_ADD_ENDPOINT.from_dict({"task_id": download_id})
return rpc_op
class PackageCopyOperationsRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, proxy, publisher):
+ def __init__(self, log, dts, loop, project, proxy, publisher):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
publisher: CopyStatusPublisher object
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.proxy = proxy
self.publisher = publisher
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py
index 5773b0e..0fcabe3 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py
@@ -30,15 +30,52 @@
from gi.repository import (
RwDts as rwdts,
- RwPkgMgmtYang)
+ RwPkgMgmtYang)
import rift.tasklets
-
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rpc
from .proxy import filesystem
from . import publisher as pkg_publisher
from . import subscriber
+class PackageManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(PackageManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ proxy = kw["proxy"]
+
+ args = [self.log, self.dts, self.loop, self]
+ self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
+ self.copy_publisher = pkg_publisher.CopyStatusPublisher(*args + [self.tasklet.tasklet_info])
+
+ # create catalog subscribers
+ self.vnfd_catalog_sub = subscriber.VnfdStatusSubscriber(*args)
+ self.nsd_catalog_sub = subscriber.NsdStatusSubscriber(*args)
+
+ args.append(proxy)
+ self.copy_rpc = rpc.PackageCopyOperationsRpcHandler(*(args + [self.copy_publisher]))
+
+ @asyncio.coroutine
+ def register (self):
+ yield from self.vnfd_catalog_sub.register()
+ yield from self.nsd_catalog_sub.register()
+ yield from self.copy_rpc.register()
+ yield from self.copy_publisher.register()
+ yield from self.job_handler.register()
+
+ def deregister (self):
+ yield from self.job_handler.deregister()
+ yield from self.copy_rpc.deregister()
+ yield from self.copy_publisher.deregister()
+ yield from self.vnfd_catalog_sub.deregister()
+ yield from self.nsd_catalog_sub.deregister()
+
+
class PackageManagerTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
try:
@@ -46,6 +83,10 @@
self.rwlog.set_category("rw-mano-log")
self.endpoint_rpc = None
self.schema_rpc = None
+
+ self._project_handler = None
+ self.projects = {}
+
except Exception as e:
self.log.exception(e)
@@ -61,29 +102,22 @@
self.loop,
self.on_dts_state_change
)
-
+
proxy = filesystem.FileSystemProxy(self.loop, self.log)
args = [self.log, self.dts, self.loop]
- # create catalog publishers
- self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
- self.copy_publisher = pkg_publisher.CopyStatusPublisher(*args +[self.tasklet_info])
-
- # create catalog subscribers
- self.vnfd_catalog_sub = subscriber.VnfdStatusSubscriber(*args)
- self.nsd_catalog_sub = subscriber.NsdStatusSubscriber(*args)
-
args.append(proxy)
self.endpoint_rpc = rpc.EndpointDiscoveryRpcHandler(*args)
self.schema_rpc = rpc.SchemaRpcHandler(*args)
self.delete_rpc = rpc.PackageDeleteOperationsRpcHandler(*args)
- self.copy_rpc = rpc.PackageCopyOperationsRpcHandler(*(args + [self.copy_publisher]))
- args.append(self.job_handler)
+ args.append(self)
self.pkg_op = rpc.PackageOperationsRpcHandler(*args)
+ self.project_handler = ProjectHandler(self, PackageManagerProject,
+ proxy=proxy,)
except Exception as e:
- self.log.error("Exception caught rwpkgmgr start: %s", str(e))
+ self.log.exception("Exception caught rwpkgmgr start: %s", str(e))
else:
self.log.debug("rwpkgmgr started successfully!")
@@ -95,18 +129,14 @@
@asyncio.coroutine
def init(self):
- try:
- yield from self.endpoint_rpc.register()
- yield from self.schema_rpc.register()
- yield from self.pkg_op.register()
- yield from self.job_handler.register()
- yield from self.delete_rpc.register()
- yield from self.copy_rpc.register()
- yield from self.copy_publisher.register()
- yield from self.vnfd_catalog_sub.register()
- yield from self.nsd_catalog_sub.register()
- except Exception as e:
- self.log.error("Exception caught rwpkgmgr init %s", str(e))
+ yield from self.endpoint_rpc.register()
+ yield from self.schema_rpc.register()
+ yield from self.pkg_op.register()
+ yield from self.delete_rpc.register()
+
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, PackageManagerProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py
index b7bed38..6bca858 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py
@@ -36,21 +36,22 @@
)
class DownloadStatusSubscriber(mano_dts.AbstractOpdataSubscriber):
+ def __init__(self, log, dts, loop, project, callback):
+ super().__init__(log, dts, loop, project, callback)
- def __init__(self, log, dts, loop, callback):
- super().__init__(log, dts, loop, callback)
-
- def get_xpath(self):
- return ("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
+ def get_xpath(self):
+ return self._project.add_project(
+ "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
+
class VnfdStatusSubscriber(DownloadStatusSubscriber):
DOWNLOAD_DIR = store.VnfdPackageFilesystemStore.DEFAULT_ROOT_DIR
MODULE_DESC = 'vnfd rw-vnfd'.split()
DESC_TYPE = 'vnfd'
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop, self.on_change)
- self.subscriber = mano_dts.VnfdCatalogSubscriber(log, dts, loop)
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project, self.on_change)
+ self.subscriber = mano_dts.VnfdCatalogSubscriber(log, dts, loop, project)
def on_change(self, msg, action):
log_msg = "1. Vnfd called w/ msg attributes: {} id {} name {} action: {}".format(repr(msg), msg.id, msg.name, repr(action))
@@ -70,9 +71,9 @@
MODULE_DESC = 'nsd rw-nsd'.split()
DESC_TYPE = 'nsd'
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop, self.on_change)
- self.subscriber = mano_dts.NsdCatalogSubscriber(log, dts, loop)
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project, self.on_change)
+ self.subscriber = mano_dts.NsdCatalogSubscriber(log, dts, loop, project)
def on_change(self, msg, action):
log_msg = "1. Nsd called w/ msg attributes: {} id {} name {} action: {}".format(repr(msg), msg.id, msg.name, repr(action))
diff --git a/rwlaunchpad/plugins/rwpkgmgr/test/utest_filesystem_proxy_dts.py b/rwlaunchpad/plugins/rwpkgmgr/test/utest_filesystem_proxy_dts.py
index 75b310a..8b7fecc 100755
--- a/rwlaunchpad/plugins/rwpkgmgr/test/utest_filesystem_proxy_dts.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/utest_filesystem_proxy_dts.py
@@ -39,9 +39,36 @@
import rift.tasklets.rwpkgmgr.publisher as pkg_publisher
import rift.tasklets.rwpkgmgr.rpc as rpc
import rift.test.dts
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
TEST_STRING = "foobar"
+
+class MockPublisher(object):
+ def __init__(self, uid):
+ self.assert_uid = uid
+
+ @asyncio.coroutine
+ def register_downloader(self, *args):
+ return self.assert_uid
+
+
+class MockProject(ManoProject):
+ def __init__(self, log, uid=None):
+ super().__init__(log, name=DEFAULT_PROJECT)
+ self.job_handler = MockPublisher(uid)
+
+
+class MockTasklet:
+ def __init__(self, log, uid=None):
+ self.log = log
+ self.projects = {}
+ project = MockProject(self.log,
+ uid=uid)
+ project.publisher = None
+ self.projects[project.name] = project
+
+
class TestCase(rift.test.dts.AbstractDTSTest):
@classmethod
def configure_schema(cls):
@@ -86,7 +113,8 @@
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_GetPackageEndpoint.from_dict({
"package_type": "VNFD",
- "package_id": "BLAHID"})
+ "package_id": "BLAHID",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/get-package-endpoint",
@@ -108,7 +136,8 @@
yield from endpoint.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_GetPackageSchema.from_dict({
- "package_type": "VNFD"})
+ "package_type": "VNFD",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/get-package-schema",
@@ -125,10 +154,6 @@
1. The file RPC returns a valid UUID thro' DTS
"""
assert_uid = str(uuid.uuid4())
- class MockPublisher:
- @asyncio.coroutine
- def register_downloader(self, *args):
- return assert_uid
uid, path = self.create_mock_package()
@@ -138,14 +163,15 @@
self.dts,
self.loop,
proxy,
- MockPublisher())
+ MockTasklet(self.log, uid=assert_uid))
yield from endpoint.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageFileAdd.from_dict({
"package_type": "VNFD",
"package_id": uid,
"external_url": "https://raw.githubusercontent.com/RIFTIO/RIFT.ware/master/rift-shell",
- "package_path": "script/rift-shell"})
+ "package_path": "script/rift-shell",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/rw-pkg-mgmt:package-file-add",
@@ -167,13 +193,16 @@
uid, path = self.create_mock_package()
proxy = filesystem.FileSystemProxy(self.loop, self.log)
- publisher = pkg_publisher.DownloadStatusPublisher(self.log, self.dts, self.loop)
+ tasklet = MockTasklet(self.log, uid=uid)
+ project = tasklet.projects[DEFAULT_PROJECT]
+ publisher = pkg_publisher.DownloadStatusPublisher(self.log, self.dts, self.loop, project)
+ project.job_handler = publisher
endpoint = rpc.PackageOperationsRpcHandler(
self.log,
self.dts,
self.loop,
proxy,
- publisher)
+ tasklet)
yield from publisher.register()
yield from endpoint.register()
@@ -182,7 +211,8 @@
"package_type": "VNFD",
"package_id": uid,
"external_url": "https://raw.githubusercontent.com/RIFTIO/RIFT.ware/master/rift-shell",
- "package_path": "icons/rift-shell"})
+ "package_path": "icons/rift-shell",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/rw-pkg-mgmt:package-file-add",
@@ -191,6 +221,7 @@
yield from asyncio.sleep(5, loop=self.loop)
filepath = os.path.join(path, ip.package_path)
+ self.log.debug("Filepath: {}".format(filepath))
assert os.path.isfile(filepath)
mode = oct(os.stat(filepath)[stat.ST_MODE])
assert str(mode) == "0o100664"
@@ -218,7 +249,8 @@
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageFileDelete.from_dict({
"package_type": "VNFD",
"package_id": uid,
- "package_path": "logo.png"})
+ "package_path": "logo.png",
+ "project_name": DEFAULT_PROJECT})
assert os.path.isfile(os.path.join(path, ip.package_path))
diff --git a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
index a02e5c6..8e79889 100755
--- a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
@@ -36,6 +36,7 @@
import rift.tasklets.rwpkgmgr.downloader as downloader
import rift.tasklets.rwpkgmgr.publisher as pkg_publisher
import rift.test.dts
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
class TestCase(rift.test.dts.AbstractDTSTest):
@@ -51,8 +52,10 @@
self.log.debug("STARTING - %s", test_id)
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = ManoProject(self.log, name=DEFAULT_PROJECT)
- self.job_handler = pkg_publisher.DownloadStatusPublisher(self.log, self.dts, self.loop)
+ self.job_handler = pkg_publisher.DownloadStatusPublisher(self.log, self.dts,
+ self.loop, self.project)
def tearDown(self):
super().tearDown()
@@ -102,24 +105,24 @@
yield from self.job_handler._dts_publisher(mock_msg)
yield from asyncio.sleep(5, loop=self.loop)
- itr = yield from self.dts.query_read("/download-jobs/job[download-id='{}']".format(
- mock_msg.download_id))
+ xpath = self.project.add_project("/download-jobs/job[download-id='{}']".
+ format(mock_msg.download_id))
+ itr = yield from self.dts.query_read(xpath)
result = None
for fut in itr:
result = yield from fut
result = result.result
- print ("Mock ", mock_msg)
+ self.log.debug("Mock msg: {}".format(mock_msg))
assert result == mock_msg
# Modify the msg
mock_msg.url = "http://bar/foo"
yield from self.job_handler._dts_publisher(mock_msg)
yield from asyncio.sleep(5, loop=self.loop)
-
- itr = yield from self.dts.query_read("/download-jobs/job[download-id='{}']".format(
- mock_msg.download_id))
+
+ itr = yield from self.dts.query_read(xpath)
result = None
for fut in itr:
@@ -143,13 +146,25 @@
download_id = yield from self.job_handler.register_downloader(url_downloader)
assert download_id is not None
-
- # Waiting for 5 secs to be sure that the file is downloaded
- yield from asyncio.sleep(5, loop=self.loop)
- xpath = "/download-jobs/job[download-id='{}']".format(
- download_id)
- result = yield from self.read_xpath(xpath)
- self.log.debug("Test result before complete check - %s", result)
+
+ # Waiting to be sure that the file is downloaded
+ # From BLR, it sometimes take longer for the file to
+ # be downloaded
+ max_time = 60
+ total_time = 0
+ while True:
+ yield from asyncio.sleep(5, loop=self.loop)
+ xpath = self.project.add_project("/download-jobs/job[download-id='{}']".
+ format(download_id))
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before complete check - %s", result)
+ if result.status != "COMPLETED":
+ total_time = total_time + 5
+ if total_time <= max_time:
+ continue
+ else:
+ break
+
assert result.status == "COMPLETED"
assert len(self.job_handler.tasks) == 0
@@ -169,10 +184,10 @@
download_id = yield from self.job_handler.register_downloader(url_downloader)
assert download_id is not None
- xpath = "/download-jobs/job[download-id='{}']".format(
- download_id)
+ xpath = self.project.add_project("/download-jobs/job[download-id='{}']".
+ format(download_id))
- yield from asyncio.sleep(1, loop=self.loop)
+ yield from asyncio.sleep(10, loop=self.loop)
result = yield from self.read_xpath(xpath)
self.log.debug("Test result before in_progress check - %s", result)
@@ -184,7 +199,7 @@
self.log.debug("Test result before cancel check - %s", result)
assert result.status == "CANCELLED"
assert len(self.job_handler.tasks) == 0
-
+
def main():
runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
diff --git a/rwlaunchpad/plugins/rwpkgmgr/test/utest_subscriber_dts.py b/rwlaunchpad/plugins/rwpkgmgr/test/utest_subscriber_dts.py
index 4281e11..ede85a5 100755
--- a/rwlaunchpad/plugins/rwpkgmgr/test/utest_subscriber_dts.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/utest_subscriber_dts.py
@@ -21,7 +21,7 @@
import uuid
import gi
-gi.require_version('RwDtsYang', '1.0')
+gi.require_version('RwDts', '1.0')
gi.require_version('RwPkgMgmtYang', '1.0')
from gi.repository import (
RwPkgMgmtYang,
@@ -29,6 +29,7 @@
)
import rift.tasklets.rwpkgmgr.subscriber as pkg_subscriber
import rift.test.dts
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
class DescriptorPublisher(object):
@@ -88,6 +89,7 @@
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)
+ self.project = ManoProject(self.log, name=DEFAULT_PROJECT)
def tearDown(self):
super().tearDown()
@@ -100,7 +102,7 @@
"package_id": "123",
"download_id": str(uuid.uuid4())})
- w_xpath = "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job"
+ w_xpath = self.project.add_project("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
xpath = "{}[download-id='{}']".format(w_xpath, mock_msg.download_id)
mock_called = False
@@ -113,6 +115,7 @@
self.log,
self.dts,
self.loop,
+ self.project,
callback=mock_cb)
yield from sub.register()
@@ -135,4 +138,4 @@
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py
index 5035b18..9f6dcfd 100644
--- a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -51,12 +51,27 @@
self._parent = parent
self._cloud_sub = None
+ self._res_sub = None
+ self._project = parent._project
@asyncio.coroutine
def register(self):
yield from self.register_resource_pool_operational_data()
- self.register_cloud_account_config()
+ yield from self.register_cloud_account_config()
+ def deregister(self):
+ self._log.debug("De-register for project {}".format(self._project.name))
+ if self._cloud_sub:
+ self._cloud_sub.deregister()
+ self._cloud_sub = None
+
+ if self._res_sub:
+ self._res_sub.delete_element(
+ self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA))
+ self._res_sub.deregister()
+ self._res_sub = None
+
+ @asyncio.coroutine
def register_cloud_account_config(self):
def on_add_cloud_account_apply(account):
self._log.debug("Received on_add_cloud_account: %s", account)
@@ -78,9 +93,10 @@
)
self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._rwlog_hdl, cloud_callbacks
- )
- self._cloud_sub.register()
+ self._dts, self._log, self._rwlog_hdl,
+ self._project, cloud_callbacks
+ )
+ yield from self._cloud_sub.register()
@asyncio.coroutine
def register_resource_pool_operational_data(self):
@@ -102,14 +118,14 @@
cloud_account_msg.records.append(pool_info)
xact_info.respond_xpath(rwdts.XactRspCode.ACK,
- ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+ self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA),
msg=msg,)
- self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s",
- ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+ xpath = self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+ self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: {}".
+ format(xpath))
handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- response = yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA,
- handler=handler,
- flags=rwdts.Flag.PUBLISHER)
-
+ self._res_sub = yield from self._dts.register(xpath=xpath,
+ handler=handler,
+ flags=rwdts.Flag.PUBLISHER)
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py
index c80925c..360390b 100755
--- a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py
@@ -48,6 +48,7 @@
self._dts = dts
self._loop = loop
self._parent = parent
+ self._project = parent._project
self._vdu_reg = None
self._link_reg = None
@@ -60,22 +61,34 @@
yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
timeout=timeout, loop=self._loop)
- def create_record_dts(self, regh, xact, path, msg):
+ def _add_config_flag(self, xpath, config=False):
+ if xpath[0] == '/':
+ if config:
+ return 'C,' + xpath
+ else:
+ return 'D,' + xpath
+
+ return xpath
+
+ def create_record_dts(self, regh, xact, xpath, msg):
"""
Create a record in DTS with path and message
"""
+ path = self._add_config_flag(self._project.add_project(xpath))
self._log.debug("Creating Resource Record xact = %s, %s:%s",
xact, path, msg)
regh.create_element(path, msg)
- def delete_record_dts(self, regh, xact, path):
+ def delete_record_dts(self, regh, xact, xpath):
"""
Delete a VNFR record in DTS with path and message
"""
+ path = self._add_config_flag(self._project.add_project(xpath))
self._log.debug("Deleting Resource Record xact = %s, %s",
xact, path)
regh.delete_element(path)
+
@asyncio.coroutine
def register(self):
@asyncio.coroutine
@@ -161,12 +174,17 @@
yield from self._parent.release_virtual_network(pathentry.key00.event_id)
self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
elif action == rwdts.QueryAction.READ:
- response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+ # TODO: Check why we are getting null event id request
+ if pathentry.key00.event_id:
+ response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+ else:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
else:
raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
- self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
- response_xpath, response_info)
+ self._log.info("Responding with VirtualLinkInfo at xpath %s: %s.",
+ response_xpath, response_info)
xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
@@ -177,7 +195,7 @@
return rwdts.MemberRspCode.ACTION_OK
def monitor_vdu_state(response_xpath, pathentry):
- self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
+ self._log.debug("Initiating VDU state monitoring for xpath: %s ", response_xpath)
time_to_wait = 300
sleep_time = 2
loop_cnt = int(time_to_wait/sleep_time)
@@ -197,7 +215,8 @@
response_info)
else:
if response_info.resource_state == 'active' or response_info.resource_state == 'failed':
- self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s",
+ self._log.info("VDU state monitoring: VDU reached terminal state. " +
+ "Publishing VDU info: %s at path: %s",
response_info, response_xpath)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
@@ -205,7 +224,9 @@
return
else:
### End of loop. This is only possible if VDU did not reach active state
- err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, time_to_wait)
+ err_msg = ("VDU state monitoring: VDU at xpath :{} did not reached active " +
+ "state in {} seconds. Aborting monitoring".
+ format(response_xpath, time_to_wait))
self._log.info(err_msg)
response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
response_info.resource_state = 'failed'
@@ -217,6 +238,7 @@
def allocate_vdu_task(ks_path, event_id, cloud_account, request_msg):
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+ response_xpath = self._add_config_flag(response_xpath)
schema = RwResourceMgrYang.VDUEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
try:
@@ -233,21 +255,22 @@
response_info)
else:
if response_info.resource_state == 'failed' or response_info.resource_state == 'active' :
- self._log.info("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
- response_info, response_xpath)
+ self._log.debug("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
+ response_info, response_xpath)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
response_info)
else:
+ self._log.debug("VDU create monitor at {}".format(response_xpath))
asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
loop = self._loop)
-
@asyncio.coroutine
def on_vdu_request_prepare(xact_info, action, ks_path, request_msg):
self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
xact_info, action, request_msg)
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+ response_xpath = self._add_config_flag(response_xpath)
schema = RwResourceMgrYang.VDUEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
@@ -269,7 +292,12 @@
yield from self._parent.release_virtual_compute(pathentry.key00.event_id)
self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
elif action == rwdts.QueryAction.READ:
- response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ # TODO: Check why we are getting null event id request
+ if pathentry.key00.event_id:
+ response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ else:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
else:
raise ValueError("Only create/delete actions available. Received action: %s" %(action))
@@ -293,24 +321,37 @@
link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,)
with self._dts.group_create(handler=link_handlers) as link_group:
- self._log.debug("Registering for Link Resource Request using xpath: %s",
- ResourceMgrEvent.VLINK_REQUEST_XPATH)
+ xpath = self._project.add_project(ResourceMgrEvent.VLINK_REQUEST_XPATH)
+ self._log.debug("Registering for Link Resource Request using xpath: {}".
+ format(xpath))
- self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH,
- handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
- on_commit=on_link_request_commit,
- on_prepare=on_link_request_prepare),
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+ self._link_reg = link_group.register(xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+ on_commit=on_link_request_commit,
+ on_prepare=on_link_request_prepare),
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, )
with self._dts.group_create(handler=vdu_handlers) as vdu_group:
- self._log.debug("Registering for VDU Resource Request using xpath: %s",
- ResourceMgrEvent.VDU_REQUEST_XPATH)
+ xpath = self._project.add_project(ResourceMgrEvent.VDU_REQUEST_XPATH)
+ self._log.debug("Registering for VDU Resource Request using xpath: {}".
+ format(xpath))
- self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH,
- handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
- on_commit=on_vdu_request_commit,
- on_prepare=on_vdu_request_prepare),
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+ self._vdu_reg = vdu_group.register(xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+ on_commit=on_vdu_request_commit,
+ on_prepare=on_vdu_request_prepare),
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
+ def deregister(self):
+ self._log.debug("De-register for project {}".format(self._project.name))
+
+ if self._vdu_reg:
+ self._vdu_reg.deregister()
+ self._vdu_reg = None
+
+ if self._link_reg:
+ self._link_reg.deregister()
+ self._link_reg = None
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
index 44e7938..929e483 100755
--- a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
@@ -34,6 +34,10 @@
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rwresmgr_core as Core
from . import rwresmgr_config as Config
@@ -41,11 +45,13 @@
class ResourceManager(object):
- def __init__(self, log, log_hdl, loop, dts):
+ def __init__(self, log, log_hdl, loop, dts, project):
self._log = log
self._log_hdl = log_hdl
self._loop = loop
self._dts = dts
+ self._project = project
+
self.config_handler = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self)
self.event_handler = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self)
self.core = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self)
@@ -55,6 +61,10 @@
yield from self.config_handler.register()
yield from self.event_handler.register()
+ def deregister(self):
+ self.event_handler.deregister()
+ self.config_handler.deregister()
+
def add_cloud_account_config(self, account):
self._log.debug("Received Cloud-Account add config event for account: %s", account.name)
self.core.add_cloud_account(account)
@@ -160,16 +170,45 @@
return info
+class ResMgrProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ResMgrProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._resource_manager = None
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.debug("Initializing the Resource Manager tasklet for project {}".
+ format(self.name))
+ self._resource_manager = ResourceManager(self._log,
+ self._log_hdl,
+ self._loop,
+ self._dts,
+ self,)
+ yield from self._resource_manager.register()
+
+ def deregister(self):
+ self._log.debug("De-registering project {}".format(self.name))
+ self._resource_manager.deregister()
+
+
class ResMgrTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
super(ResMgrTasklet, self).__init__(*args, **kwargs)
self.rwlog.set_category("rw-resource-mgr-log")
self._dts = None
- self._resource_manager = None
+ self._project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(ResMgrTasklet, self).start()
- self.log.info("Starting ResMgrTasklet")
+ self.log.debug("Starting ResMgrTasklet")
self.log.debug("Registering with dts")
@@ -192,12 +231,9 @@
@asyncio.coroutine
def init(self):
- self._log.info("Initializing the Resource Manager tasklet")
- self._resource_manager = ResourceManager(self.log,
- self.log_hdl,
- self.loop,
- self._dts)
- yield from self._resource_manager.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, ResMgrProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
diff --git a/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py b/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py
index 87d11a2..c0b4788 100755
--- a/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py
+++ b/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -432,7 +432,7 @@
@asyncio.coroutine
def configure_cloud_account(self, dts, acct_type):
- account_xpath = "C,/rw-cloud:cloud/account"
+ account_xpath = "C,/rw-project:project/rw-cloud:cloud/account"
msg = self.get_cloud_account_msg(acct_type)
self.log.info("Configuring cloud-account: %s",msg)
yield from dts.query_create(account_xpath,
@@ -441,7 +441,7 @@
@asyncio.coroutine
def configure_compute_resource_pools(self, dts, resource_type, cloud_type):
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type)
self.log.info("Configuring compute-resource-pool: %s",msg)
yield from dts.query_create(pool_xpath,
@@ -451,7 +451,7 @@
@asyncio.coroutine
def configure_network_resource_pools(self, dts, resource_type, cloud_type):
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type)
self.log.info("Configuring network-resource-pool: %s",msg)
yield from dts.query_create(pool_xpath,
@@ -460,7 +460,7 @@
@asyncio.coroutine
def verify_resource_pools_config(self, dts):
- pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+ pool_records_xpath = "D,/rw-project:project/rw-resource-mgr:resource-pool-records"
self.log.debug("Verifying test_create_resource_pools results")
res_iter = yield from dts.query_read(pool_records_xpath,)
for result in res_iter:
@@ -491,7 +491,7 @@
@asyncio.coroutine
def reserve_network_resources(self, name, dts, cloud_type):
- network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+ network_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath)
self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg))
yield from dts.query_create(xpath, 0, msg)
@@ -500,7 +500,7 @@
@asyncio.coroutine
def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []):
- compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+ compute_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks)
self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg))
yield from dts.query_create(xpath, 0, msg)
diff --git a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/model/staging_area.py b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/model/staging_area.py
index 473999f..8cb0836 100644
--- a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/model/staging_area.py
+++ b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/model/staging_area.py
@@ -44,6 +44,10 @@
return self._model
@property
+ def project_name(self):
+ return self._model.project_name
+
+ @property
def has_expired(self):
current_time = time.time()
expiry_time = self.model.created_time + self.model.validity_time
diff --git a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/publisher/staging_status.py b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/publisher/staging_status.py
index 82e2da5..36c36f6 100644
--- a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/publisher/staging_status.py
+++ b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/publisher/staging_status.py
@@ -28,13 +28,13 @@
class StagingStorePublisher(mano_dts.DtsHandler, StagingStoreProtocol):
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, project):
+ super().__init__(project.log, project.dts, project.loop, project)
self.delegate = None
def xpath(self, area_id=None):
- return ("D,/rw-staging-mgmt:staging-areas/rw-staging-mgmt:staging-area" +
- ("[area-id='{}']".format(area_id) if area_id else ""))
+ return self.project.add_project("D,/rw-staging-mgmt:staging-areas/rw-staging-mgmt:staging-area" +
+ ("[area-id='{}']".format(area_id) if area_id else ""))
@asyncio.coroutine
def register(self):
@@ -59,6 +59,12 @@
assert self.reg is not None
+ def deregister(self):
+ self._log.debug("Project {}: de-register staging store handler".
+ format(self._project.name))
+ if self.reg:
+ self.reg.deregister()
+
def on_staging_area_create(self, store):
self.reg.update_element(self.xpath(store.area_id), store)
diff --git a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/rwstagingmgr.py b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/rwstagingmgr.py
index 04a7cae..4cb6553 100644
--- a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/rwstagingmgr.py
+++ b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/rwstagingmgr.py
@@ -36,6 +36,10 @@
RwDts as rwdts,
RwStagingMgmtYang)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rpc
from . import store
@@ -43,14 +47,36 @@
from .publisher import StagingStorePublisher
+class StagingManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(StagingManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self.publisher = StagingStorePublisher(self)
+ # For recovery
+ self.publisher.delegate = tasklet.store
+
+ @asyncio.coroutine
+ def register (self):
+ yield from self.publisher.register()
+
+ def deregister(self):
+ self.publisher.deregister()
+
+
class StagingManagerTasklet(rift.tasklets.Tasklet):
"""Tasklet to handle all staging related operations
"""
def __init__(self, *args, **kwargs):
try:
super().__init__(*args, **kwargs)
+ self._project_handler = None
+ self.projects = {}
+
except Exception as e:
- self.log.exception(e)
+ self.log.exception("Staging Manager tasklet init: {}".
+ format(e))
def start(self):
super().start()
@@ -72,14 +98,7 @@
@asyncio.coroutine
def init(self):
- self.store = store.StagingFileStore(log=self.log)
- self.publisher = StagingStorePublisher(self.log, self.dts, self.loop)
- # Fore recovery
- self.publisher.delegate = self.store
- # For create and delete events
- self.store.delegate = self.publisher
- yield from self.publisher.register()
-
+ self.store = store.StagingFileStore(self)
io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
self.app = StagingApplication(self.store)
@@ -107,9 +126,12 @@
self.dts,
self.loop,
self.store)
-
yield from self.create_stg_rpc.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, StagingManagerProject)
+ self.project_handler.register()
+
@asyncio.coroutine
def run(self):
self.server.listen(self.app.PORT)
diff --git a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/store/file_store.py b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/store/file_store.py
index aec4180..5280fe1 100644
--- a/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/store/file_store.py
+++ b/rwlaunchpad/plugins/rwstagingmgr/rift/tasklets/rwstagingmgr/store/file_store.py
@@ -33,6 +33,7 @@
gi.require_version("RwStagingMgmtYang", "1.0")
from gi.repository import RwStagingMgmtYang
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import DEFAULT_PROJECT
from .. import model
from ..protocol import StagingStorePublisherProtocol
@@ -53,7 +54,7 @@
META_YAML = "meta.yaml"
DEFAULT_EXPIRY = 60 * 60
- def __init__(self, log=None, root_dir=None):
+ def __init__(self, tasklet, root_dir=None):
default_path = os.path.join(
os.getenv('RIFT_ARTIFACTS'),
"launchpad/staging")
@@ -63,11 +64,11 @@
if not os.path.isdir(self.root_dir):
os.makedirs(self.root_dir)
- self.log = log or logging.getLogger()
+ self.log = tasklet.log
self.tmp_dir = tempfile.mkdtemp(dir=self.root_dir)
self._cache = {}
- self.delegate = None
+ self.tasklet = tasklet
def on_recovery(self, staging_areas):
for area in staging_areas:
@@ -82,6 +83,20 @@
return self._cache[area_id]
+ def get_delegate(self, project_name):
+ if not project_name:
+ project_name = DEFAULT_PROJECT
+
+ try:
+ proj = self.tasklet.projects[project_name]
+ except Exception as e:
+ err = "Project or project name not found {}: {}". \
+ format(msg.as_dict(), e)
+ self.log.error (err)
+ raise Exception (err)
+
+ return proj.publisher
+
def create_staging_area(self, staging_area_config):
"""Create the staging area
Args:
@@ -93,6 +108,8 @@
Raises:
StagingAreaExists: if the staging area already exists
"""
+ delegate = self.get_delegate(staging_area_config.project_name)
+
area_id = str(uuid.uuid4())
container_path = os.path.join(self.root_dir, str(area_id))
@@ -118,10 +135,10 @@
self._cache[area_id] = staging_area
try:
- if self.delegate:
- self.delegate.on_staging_area_create(staging_area.model)
+ if delegate:
+ delegate.on_staging_area_create(staging_area.model)
except Exception as e:
- self.log.exception(str(e))
+ self.log.exception(e)
return staging_area
@@ -134,13 +151,15 @@
if type(staging_area) is str:
staging_area = self.get_staging_area(staging_area)
+ delegate = self.get_delegate(staging_area.project_name)
+
if os.path.isdir(staging_area.model.path):
shutil.rmtree(staging_area.model.path)
staging_area.model.status = "EXPIRED"
try:
- if self.delegate:
- self.delegate.on_staging_area_delete(staging_area.model)
+ if delegate:
+ delegate.on_staging_area_delete(staging_area.model)
except Exception as e:
- self.log.exception(str(e))
+ self.log.exception(e)
diff --git a/rwlaunchpad/plugins/rwstagingmgr/test/utest_publisher_dts.py b/rwlaunchpad/plugins/rwstagingmgr/test/utest_publisher_dts.py
index 585a0d9..f7b0e4e 100755
--- a/rwlaunchpad/plugins/rwstagingmgr/test/utest_publisher_dts.py
+++ b/rwlaunchpad/plugins/rwstagingmgr/test/utest_publisher_dts.py
@@ -34,6 +34,13 @@
)
import rift.tasklets.rwstagingmgr.publisher as publisher
import rift.test.dts
+from rift.mano.utils.project import ManoProject
+
+class TestProject(ManoProject):
+ def __init__(self, log, dts, loop):
+ super().__init__(log)
+ self._dts = dts
+ self._loop = loop
class TestCase(rift.test.dts.AbstractDTSTest):
@@ -49,8 +56,9 @@
self.log.debug("STARTING - %s", test_id)
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = TestProject(self.log, self.dts, self.loop)
- self.job_handler = publisher.StagingStorePublisher(self.log, self.dts, self.loop)
+ self.job_handler = publisher.StagingStorePublisher(self.project)
def tearDown(self):
super().tearDown()
@@ -82,6 +90,7 @@
yield from asyncio.sleep(2, loop=self.loop)
published_xpaths = yield from self.get_published_xpaths()
assert self.job_handler.xpath() in published_xpaths
+ self.job_handler.deregister()
@rift.test.dts.async_test
def test_publish(self):
@@ -95,8 +104,9 @@
self.job_handler.on_staging_area_create(mock_msg)
yield from asyncio.sleep(5, loop=self.loop)
- itr = yield from self.dts.query_read("/staging-areas/staging-area[area-id='{}']".format(
- mock_msg.area_id))
+ xpath = self.project.add_project("/staging-areas/staging-area[area-id='{}']".
+ format(mock_msg.area_id))
+ itr = yield from self.dts.query_read(xpath)
result = None
@@ -106,6 +116,7 @@
print (result)
assert result == mock_msg
+ self.job_handler.deregister()
def main():
runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
diff --git a/rwlaunchpad/plugins/rwstagingmgr/test/utest_staging_store.py b/rwlaunchpad/plugins/rwstagingmgr/test/utest_staging_store.py
index eb71aa3..33bb9d8 100755
--- a/rwlaunchpad/plugins/rwstagingmgr/test/utest_staging_store.py
+++ b/rwlaunchpad/plugins/rwstagingmgr/test/utest_staging_store.py
@@ -27,6 +27,7 @@
import xmlrunner
from rift.tasklets.rwstagingmgr.store import StagingFileStore
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
import gi
gi.require_version('RwStagingMgmtYang', '1.0')
@@ -34,6 +35,18 @@
RwStagingMgmtYang,
)
+class MockTasklet(object):
+ def __init__(self):
+ self.log = logging.getLogger()
+ self.projects = {}
+ project = ManoProject(self.log, name=DEFAULT_PROJECT)
+ project.publisher = None
+ self.projects[project.name] = project
+
+ def set_delegate(self, store):
+ self.projects[DEFAULT_PROJECT].publisher = store
+
+
class TestSerializer(unittest.TestCase):
def test_staging_area_create(self):
@@ -44,7 +57,8 @@
"""
tmp_dir = tempfile.mkdtemp()
- store = StagingFileStore(root_dir=tmp_dir)
+ tasklet = MockTasklet()
+ store = StagingFileStore(tasklet, root_dir=tmp_dir)
mock_model = RwStagingMgmtYang.StagingArea.from_dict({})
stg = store.create_staging_area(mock_model)
@@ -63,7 +77,8 @@
"""
tmp_dir = tempfile.mkdtemp()
- store = StagingFileStore(root_dir=tmp_dir)
+ tasklet = MockTasklet()
+ store = StagingFileStore(tasklet, root_dir=tmp_dir)
mock_model = RwStagingMgmtYang.StagingArea.from_dict({})
# get the wrapped mock model
diff --git a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
index 2cbe240..253094f 100755
--- a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
+++ b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
@@ -51,6 +51,10 @@
import rift.package.cloud_init
import rift.package.script
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
import rift.mano.utils.short_name as mano_short_name
@@ -163,7 +167,8 @@
class VcsComponent(object):
""" VCS Component within the VNF descriptor """
- def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name):
+ def __init__(self, dts, log, loop, cluster_name,
+ vcs_handler, component, mangled_name):
self._dts = dts
self._log = log
self._loop = loop
@@ -185,7 +190,7 @@
@property
def path(self):
""" The path for this object """
- return("D,/rw-manifest:manifest" +
+ return ("D,/rw-manifest:manifest" +
"/rw-manifest:operational-inventory" +
"/rw-manifest:component" +
"[rw-manifest:component-name = '{}']").format(self.name)
@@ -270,6 +275,7 @@
dts,
log,
loop,
+ project,
vdud,
vnfr,
nsr_config,
@@ -282,6 +288,7 @@
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._vdud = vdud
self._vnfr = vnfr
self._nsr_config = nsr_config
@@ -301,7 +308,8 @@
self._rm_regh = None
self._vm_resp = None
self._vdud_cloud_init = None
- self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
+ self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(
+ dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
@asyncio.coroutine
def vdu_opdata_register(self):
@@ -510,14 +518,15 @@
placement_groups.append(group.as_dict())
vdur_dict['placement_groups_info'] = placement_groups
- return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
+ return RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
@property
def resmgr_path(self):
""" path for resource-mgr"""
- return ("D,/rw-resource-mgr:resource-mgmt" +
- "/vdu-event" +
- "/vdu-event-data[event-id='{}']".format(self._request_id))
+ xpath = self._project.add_project("D,/rw-resource-mgr:resource-mgmt" +
+ "/vdu-event" +
+ "/vdu-event-data[event-id='{}']".format(self._request_id))
+ return xpath
@property
def vm_flavor_msg(self):
@@ -572,15 +581,19 @@
if availability_zones:
if len(availability_zones) > 1:
- self._log.error("Can not launch VDU: %s in multiple availability zones. Requested Zones: %s", self.name, availability_zones)
- raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability zones. Requsted Zones".format(self.name, availability_zones))
+ self._log.error("Can not launch VDU: %s in multiple availability zones. " +
+ "Requested Zones: %s", self.name, availability_zones)
+ raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability" +
+ " zones. Requsted Zones".format(self.name, availability_zones))
else:
vm_create_msg_dict['availability_zone'] = availability_zones[0]
if server_groups:
if len(server_groups) > 1:
- self._log.error("Can not launch VDU: %s in multiple Server Group. Requested Groups: %s", self.name, server_groups)
- raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple Server Groups. Requsted Groups".format(self.name, server_groups))
+ self._log.error("Can not launch VDU: %s in multiple Server Group. " +
+ "Requested Groups: %s", self.name, server_groups)
+ raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple " +
+ "Server Groups. Requsted Groups".format(self.name, server_groups))
else:
vm_create_msg_dict['server_group'] = server_groups[0]
@@ -734,7 +747,7 @@
self._rm_regh = None
if self._vdur_console_handler is not None:
- self._log.error("Deregistering vnfr vdur registration handle")
+ self._log.debug("Deregistering vnfr vdur registration handle")
self._vdur_console_handler._regh.deregister()
self._vdur_console_handler._regh = None
@@ -1027,10 +1040,12 @@
class InternalVirtualLinkRecord(object):
""" Internal Virtual Link record """
- def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name, ip_profile=None):
+ def __init__(self, dts, log, loop, project,
+ ivld_msg, vnfr_name, cloud_account_name, ip_profile=None):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._ivld_msg = ivld_msg
self._vnfr_name = vnfr_name
self._cloud_account_name = cloud_account_name
@@ -1060,7 +1075,8 @@
def vlr_path(self):
""" VLR path for this VLR instance"""
- return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id)
+ return self._project.add_project("D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".
+ format(self.vlr_id))
def create_vlr(self):
""" Create the VLR record which will be instantiated """
@@ -1085,7 +1101,7 @@
vlr_dict.update(vld_copy_dict)
- vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+ vlr = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict(vlr_dict)
return vlr
@asyncio.coroutine
@@ -1182,6 +1198,7 @@
self._dts = dts
self._log = log
self._loop = loop
+ self._project = vnfm._project
self._cluster_name = cluster_name
self._vnfr_msg = vnfr_msg
self._vnfr_id = vnfr_msg.id
@@ -1234,7 +1251,8 @@
@staticmethod
def vnfd_xpath(vnfd_id):
""" VNFD xpath associated with this VNFR """
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
+ return ("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = '{}']".
+ format(vnfd_id))
@property
def vnfd_ref_count(self):
@@ -1315,7 +1333,7 @@
def get_nsr_config(self):
### Need access to NS instance configuration for runtime resolution.
### This shall be replaced when deployment flavors are implemented
- xpath = "C,/nsr:ns-instance-config"
+ xpath = self._project.add_project("C,/nsr:ns-instance-config")
results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
for result in results:
@@ -1365,7 +1383,7 @@
vnfd_fields = ["short_name", "vendor", "description", "version"]
vnfd_copy_dict = {k: v for k, v in self.vnfd.as_dict().items() if k in vnfd_fields}
- mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
+ mgmt_intf = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MgmtInterface()
ip_address, port = self.mgmt_intf_info()
if ip_address is not None:
@@ -1385,8 +1403,8 @@
vnfr_dict.update(vnfd_copy_dict)
- vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
- vnfr_msg.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
+ vnfr_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr_msg.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
vnfr_msg.create_time = self._create_time
vnfr_msg.uptime = int(time.time()) - self._create_time
@@ -1407,13 +1425,13 @@
vnfr_msg.dashboard_url = self.dashboard_url
for cpr in self._cprs:
- new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
+ new_cp = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
vnfr_msg.connection_point.append(new_cp)
if self._vnf_mon is not None:
for monp in self._vnf_mon.msg:
vnfr_msg.monitoring_param.append(
- VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
+ VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
if self._vnfr.vnf_configuration is not None:
vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict())
@@ -1422,7 +1440,7 @@
vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address
for group in self._vnfr_msg.placement_groups_info:
- group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+ group_info = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_PlacementGroupsInfo()
group_info.from_dict(group.as_dict())
vnfr_msg.placement_groups_info.append(group_info)
@@ -1452,7 +1470,7 @@
@property
def xpath(self):
""" path for this VNFR """
- return("D,/vnfr:vnfr-catalog"
+ return self._project.add_project("D,/vnfr:vnfr-catalog"
"/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id))
@asyncio.coroutine
@@ -1527,7 +1545,7 @@
for group_info in nsr_config.vnfd_placement_group_maps:
if group_info.placement_group_ref == input_group.name and \
group_info.vnfd_id_ref == self.vnfd_id:
- group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+ group = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
group_dict = {k:v for k,v in
group_info.as_dict().items()
if (k != 'placement_group_ref' and k !='vnfd_id_ref')}
@@ -1542,7 +1560,7 @@
placement_groups = []
### Step-1: Get VNF level placement groups
for group in self._vnfr_msg.placement_groups_info:
- #group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+ #group_info = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
#group_info.from_dict(group.as_dict())
placement_groups.append(group)
@@ -1553,10 +1571,11 @@
group_info = self.resolve_placement_group_cloud_construct(group,
nsr_config)
if group_info is None:
- self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
- ### raise VNFMPlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+ self._log.info("Could not resolve cloud-construct for " +
+ "placement group: %s", group.name)
else:
- self._log.info("Successfully resolved cloud construct for placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
+ self._log.info("Successfully resolved cloud construct for " +
+ "placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
str(group_info),
vdu.name,
self.vnf_name,
@@ -1614,6 +1633,7 @@
dts=self._dts,
log=self._log,
loop=self._loop,
+ project = self._project,
vdud=vdu,
vnfr=vnfr,
nsr_config=nsr_config,
@@ -1761,8 +1781,7 @@
def vlr_xpath(self, vlr_id):
""" vlr xpath """
- return(
- "D,/vlr:vlr-catalog/"
+ return self._project.add_project("D,/vlr:vlr-catalog/"
"vlr:vlr[vlr:id = '{}']".format(vlr_id))
def ext_vlr_by_id(self, vlr_id):
@@ -1852,6 +1871,7 @@
@asyncio.coroutine
def instantiate(self, xact, restart_mode=False):
""" instantiate this VNF """
+ self._log.info("Instantiate VNF {}: {}".format(self._vnfr_id, self._state))
self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)
self._rw_vnfd = yield from self._vnfm.fetch_vnfd(self._vnfd_id)
@@ -1867,7 +1887,7 @@
cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields}
cpr_dict = {}
cpr_dict.update(cp_copy_dict)
- return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
+ return VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s",
self._vnfr_id, self._vnfr.connection_point)
@@ -1879,7 +1899,7 @@
vlr_path = self.vlr_xpath(cp.vlr_ref)
self._log.debug("Fetching VLR with path = %s", vlr_path)
- res_iter = yield from self._dts.query_read(self.vlr_xpath(cp.vlr_ref),
+ res_iter = yield from self._dts.query_read(vlr_path,
rwdts.XactFlag.MERGE)
for i in res_iter:
r = yield from i
@@ -1902,11 +1922,11 @@
yield from self.publish_inventory(xact)
# Publish inventory
- self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id)
+ self._log.debug("Create VLs {}: {}".format(self._vnfr_id, self._state))
yield from self.create_vls()
# publish the VNFR
- self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+ self._log.debug("Publish VNFR {}: {}".format(self._vnfr_id, self._state))
yield from self.publish(xact)
@@ -1933,12 +1953,13 @@
yield from self.publish(xact)
# publish the VNFR
- self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+ self._log.debug("VNFR {}: Publish VNFR with state {}".
+ format(self._vnfr_id, self._state))
yield from self.publish(xact)
# instantiate VDUs
# ToDo: Check if this should be prevented during restart
- self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id)
+ self._log.debug("Instantiate VDUs {}: {}".format(self._vnfr_id, self._state))
_ = self._loop.create_task(self.instantiate_vdus(xact, self))
# publish the VNFR
@@ -2004,7 +2025,7 @@
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
- XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ XPATH = "C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
def __init__(self, dts, log, loop, vnfm):
self._dts = dts
@@ -2018,6 +2039,14 @@
""" DTS registration handle """
return self._regh
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFD DTS handler for project {}".
+ format(self._vnfm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
@@ -2032,8 +2061,9 @@
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
""" on prepare callback """
- self._log.debug("Got on prepare for VNFD (path: %s) (action: %s)",
- ks_path.to_xpath(RwVnfmYang.get_schema()), msg)
+ self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)",
+ ks_path.to_xpath(RwVnfmYang.get_schema()),
+ xact_info.query_action, msg)
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
@@ -2050,14 +2080,14 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug(
- "Registering for VNFD config using xpath: %s",
- VnfdDtsHandler.XPATH,
- )
+ xpath = self._vnfm._project.add_project(VnfdDtsHandler.XPATH)
+ self._log.debug("Registering for VNFD config using xpath: {}".
+ format(xpath))
+
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
self._regh = acg.register(
- xpath=VnfdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
on_prepare=on_prepare)
@@ -2080,6 +2110,14 @@
""" DTS registration handle """
return self._regh
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VCS DTS handler for project {}".
+ format(self._vnfm._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Registers VCS component dts publisher registration"""
@@ -2105,11 +2143,16 @@
VcsComponentDtsHandler.XPATH, xact, path, msg)
class VnfrConsoleOperdataDtsHandler(object):
- """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS"""
+ """
+ Registers 'D,/rw-project:project/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]'
+ and handles CRUD from DTS
+ """
+
@property
def vnfr_vdu_console_xpath(self):
""" path for resource-mgr"""
- return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+ return self._project.add_project("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']" +
+ "/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
def __init__(self, dts, log, loop, vnfm, vnfr_id, vdur_id, vdu_id):
self._dts = dts
@@ -2122,6 +2165,16 @@
self._vdur_id = vdur_id
self._vdu_id = vdu_id
+ self._project = vnfm._project
+
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFR console DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFR VDU Operational Data read from dts """
@@ -2136,7 +2189,7 @@
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
path_entry = schema.keyspec_to_entry(ks_path)
self._log.debug("VDU Opdata path is {}".format(path_entry.key00.id))
try:
@@ -2153,7 +2206,7 @@
return
with self._dts.transaction() as new_xact:
resp = yield from vdur.read_resource(new_xact)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
if resp.console_url:
vdur_console.console_url = resp.console_url
@@ -2162,7 +2215,7 @@
self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
except Exception:
self._log.exception("Caught exception while reading VDU %s", self._vdu_id)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
@@ -2187,7 +2240,7 @@
class VnfrDtsHandler(object):
- """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
+ """ registers 'D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
def __init__(self, dts, log, loop, vnfm):
@@ -2197,6 +2250,7 @@
self._vnfm = vnfm
self._regh = None
+ self._project = vnfm._project
@property
def regh(self):
@@ -2208,6 +2262,14 @@
""" Return VNF manager instance """
return self._vnfm
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFR DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for vnfr create/update/delete/read requests from dts """
@@ -2269,7 +2331,7 @@
vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED)
yield from vnfr.publish(None)
elif action == rwdts.QueryAction.DELETE:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
@@ -2288,7 +2350,7 @@
self._log.error("Caught exception while deleting vnfr %s", path_entry.key00.id)
elif action == rwdts.QueryAction.UPDATE:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfr = None
try:
@@ -2316,14 +2378,15 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for VNFR using xpath: %s",
- VnfrDtsHandler.XPATH,)
+ xpath = self._project.add_project(VnfrDtsHandler.XPATH)
+ self._log.debug("Registering for VNFR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
on_prepare=on_prepare,)
handlers = rift.tasklets.Group.Handler(on_event=on_event,)
with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
@@ -2331,10 +2394,11 @@
rwdts.Flag.DATASTORE),)
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating VNFR xact = %s, %s:%s",
xact, path, msg)
@@ -2343,10 +2407,11 @@
xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg):
+ def update(self, xact, xpath, msg):
"""
Update a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating VNFR xact = %s, %s:%s",
xact, path, msg)
self.regh.update_element(path, msg)
@@ -2354,10 +2419,11 @@
xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Delete a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
@@ -2385,6 +2451,14 @@
""" Return the NS manager instance """
return self._vnfm
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFD Ref DTS handler for project {}".
+ format(self._vnfm._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFD ref count read from dts """
@@ -2399,7 +2473,7 @@
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref)
for xpath, msg in vnfd_list:
@@ -2414,7 +2488,8 @@
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._vnfm._project.add_project(
+ VnfdRefCountDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
@@ -2546,16 +2621,18 @@
class VnfManager(object):
""" The virtual network function manager class """
- def __init__(self, dts, log, loop, cluster_name):
+ def __init__(self, dts, log, loop, project, cluster_name):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._cluster_name = cluster_name
self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
self._vnfr_ref_handler = VnfdRefCountDtsHandler(dts, log, loop, self)
- self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(log, dts, loop, callback=self.handle_nsr)
+ self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(
+ log, dts, loop, project, callback=self.handle_nsr)
self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
self._vnfr_handler,
@@ -2582,6 +2659,11 @@
for hdl in self._dts_handlers:
yield from hdl.register()
+ def deregister(self):
+ self._log.debug("De-register VNFM project {}".format(self._project.name))
+ for hdl in self._dts_handlers:
+ hdl.deregister()
+
@asyncio.coroutine
def run(self):
""" Run this VNFM instance """
@@ -2667,11 +2749,13 @@
@asyncio.coroutine
def fetch_vnfd(self, vnfd_id):
""" Fetch VNFDs based with the vnfd id"""
- vnfd_path = VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id)
+ vnfd_path = self._project.add_project(
+ VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id))
self._log.debug("Fetch vnfd with path %s", vnfd_path)
vnfd = None
- res_iter = yield from self._dts.query_read(vnfd_path, rwdts.XactFlag.MERGE)
+ res_iter = yield from self._dts.query_read(vnfd_path,
+ rwdts.XactFlag.MERGE)
for ent in res_iter:
res = yield from ent
@@ -2730,8 +2814,8 @@
def vnfd_refcount_xpath(self, vnfd_id):
""" xpath for ref count entry """
- return (VnfdRefCountDtsHandler.XPATH +
- "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
+ return self._project.add_project(VnfdRefCountDtsHandler.XPATH +
+ "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
@asyncio.coroutine
def get_vnfd_refcount(self, vnfd_id):
@@ -2739,12 +2823,12 @@
vnfd_list = []
if vnfd_id is None or vnfd_id == "":
for vnfd in self._vnfds_to_vnfr.keys():
- vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount()
vnfd_msg.vnfd_id_ref = vnfd
vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd), vnfd_msg))
elif vnfd_id in self._vnfds_to_vnfr:
- vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount()
vnfd_msg.vnfd_id_ref = vnfd_id
vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd_id]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
@@ -2752,6 +2836,31 @@
return vnfd_list
+class VnfmProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(VnfmProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._vnfm = None
+
+ @asyncio.coroutine
+ def register (self):
+ try:
+ vm_parent_name = self._tasklet.tasklet_info.get_parent_vm_parent_instance_name()
+ assert vm_parent_name is not None
+ self._vnfm = VnfManager(self._dts, self.log, self.loop, self, vm_parent_name)
+ yield from self._vnfm.run()
+ except Exception:
+ print("Caught Exception in VNFM init:", sys.exc_info()[0])
+ raise
+
+ def deregister(self):
+ self._log.debug("De-register project {} for VnfmProject".
+ format(self.name))
+ self._vnfm.deregister()
+
+
class VnfmTasklet(rift.tasklets.Tasklet):
""" VNF Manager tasklet class """
def __init__(self, *args, **kwargs):
@@ -2760,7 +2869,12 @@
self.rwlog.set_subcategory("vnfm")
self._dts = None
- self._vnfm = None
+ self._project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
try:
@@ -2794,14 +2908,9 @@
@asyncio.coroutine
def init(self):
""" Task init callback """
- try:
- vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name()
- assert vm_parent_name is not None
- self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name)
- yield from self._vnfm.run()
- except Exception:
- print("Caught Exception in VNFM init:", sys.exc_info()[0])
- raise
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, VnfmProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
diff --git a/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
index 6ec2421..ba3c5a2 100755
--- a/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
+++ b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
@@ -1,6 +1,6 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +28,10 @@
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
import rift.mano.sdn
from rift.vlmgr import (
@@ -54,16 +58,17 @@
class SDNAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, acctstore, loop):
+ def __init__(self, dts, log, log_hdl, acctstore, loop, project):
self._log = log
self._log_hdl = log_hdl
self._dts = dts
self._loop = loop
self._acctstore = acctstore
+ self._project = project
self._log.debug("Creating SDN account config handler")
self.sdn_cfg_handler = rift.mano.sdn.SDNAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, project, self._log_hdl,
rift.mano.sdn.SDNAccountConfigCallbacks(
on_add_apply=self.on_sdn_account_added,
on_delete_apply=self.on_sdn_account_deleted,
@@ -74,7 +79,7 @@
self._log.debug("Creating SDN account opdata handler")
self.sdn_operdata_handler = rift.mano.sdn.SDNAccountDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, project,
)
def on_sdn_account_deleted(self, account_name):
@@ -90,21 +95,28 @@
self.sdn_cfg_handler.register()
yield from self.sdn_operdata_handler.register()
+ def deregister(self):
+ self.sdn_cfg_handler.deregister()
+ self.sdn_operdata_handler.deregister()
+
class VnsManager(object):
""" The Virtual Network Service Manager """
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._acctstore = {}
self._vlr_handler = VlrDtsHandler(dts, log, loop, self)
self._vld_handler = VldDtsHandler(dts, log, loop, self)
- self._sdn_handlers = SDNAccountHandlers(dts, log, log_hdl, self._acctstore, loop)
+ self._sdn_handlers = SDNAccountHandlers(dts, log, log_hdl, self._acctstore, loop, project)
self._nwtopdata_store = NwtopDataStore(log)
- self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctstore, self._nwtopdata_store)
- self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctstore, self._nwtopdata_store)
+ self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, project,
+ self._acctstore, self._nwtopdata_store)
+ self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, project,
+ self._acctstore, self._nwtopdata_store)
self._vlrs = {}
@asyncio.coroutine
@@ -146,6 +158,13 @@
yield from self.register_nwtopstatic_handler()
yield from self.register_nwtopdiscovery_handler()
+ def deregister(self):
+ self._nwtopdiscovery_handler.deregister()
+ self._nwtopstatic_handler.deregister()
+ self._vld_handler.deregister()
+ self._vlr_handler.deregister()
+ self._sdn_handlers.deregister()
+
def create_vlr(self, msg):
""" Create VLR """
if msg.id in self._vlrs:
@@ -199,19 +218,52 @@
return False
@asyncio.coroutine
- def publish_vlr(self, xact, path, msg):
+ def publish_vlr(self, xact, xpath, msg):
""" Publish a VLR """
+ path = self._project.add_project(xpath)
self._log.debug("Publish vlr called with path %s, msg %s",
path, msg)
yield from self._vlr_handler.update(xact, path, msg)
@asyncio.coroutine
- def unpublish_vlr(self, xact, path):
+ def unpublish_vlr(self, xact, xpath):
""" Publish a VLR """
+ path = self._project.add_project(xpath)
self._log.debug("Unpublish vlr called with path %s", path)
yield from self._vlr_handler.delete(xact, path)
+class VnsProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(VnsProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._vlr_handler = None
+ self._vnsm = None
+ # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
+ self._vlrs = {}
+
+ @asyncio.coroutine
+ def register (self):
+ self._vnsm = VnsManager(dts=self._dts,
+ log=self.log,
+ log_hdl=self._log_hdl,
+ loop=self._loop,
+ project=self)
+ yield from self._vnsm.run()
+
+ # NSM needs to detect VLD deletion that has active VLR
+ # self._vld_handler = VldDescriptorConfigDtsHandler(
+ # self._dts, self.log, self.loop, self._vlrs,
+ # )
+ # yield from self._vld_handler.register()
+
+ def deregister(self):
+ self._log.debug("De-register project {}".format(self.name))
+ self._vnsm.deregister()
+
+
class VnsTasklet(rift.tasklets.Tasklet):
""" The VNS tasklet class """
def __init__(self, *args, **kwargs):
@@ -220,11 +272,12 @@
self.rwlog.set_subcategory("vns")
self._dts = None
- self._vlr_handler = None
+ self._project_handler = None
+ self.projects = {}
- self._vnsm = None
- # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
- self._vlrs = {}
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(VnsTasklet, self).start()
@@ -252,17 +305,9 @@
@asyncio.coroutine
def init(self):
""" task init callback"""
- self._vnsm = VnsManager(dts=self._dts,
- log=self.log,
- log_hdl=self.log_hdl,
- loop=self.loop)
- yield from self._vnsm.run()
-
- # NSM needs to detect VLD deletion that has active VLR
- # self._vld_handler = VldDescriptorConfigDtsHandler(
- # self._dts, self.log, self.loop, self._vlrs,
- # )
- # yield from self._vld_handler.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, VnsProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py
index af4b75b..7f6b4dd 100755
--- a/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py
+++ b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py
@@ -40,10 +40,11 @@
""" Handles DTS interactions for the Discovered Topology registration """
DISC_XPATH = "D,/nd:network"
- def __init__(self, dts, log, loop, acctstore, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctstore, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._acctstore = acctstore
self._nwdatastore = nwdatastore
@@ -54,6 +55,13 @@
""" The registration handle associated with this Handler"""
return self._regh
+ def deregister(self):
+ self._log.debug("De-register Topology discovery handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for the Discovered Topology path """
@@ -119,10 +127,11 @@
""" Handles DTS interactions for the Static Topology registration """
STATIC_XPATH = "C,/nd:network"
- def __init__(self, dts, log, loop, acctstore, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctstore, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._acctstore = acctstore
self._regh = None
@@ -133,8 +142,14 @@
def regh(self):
""" The registration handle associated with this Handler"""
return self._regh
-
-
+
+ def deregister(self):
+ self._log.debug("De-register Topology static handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for the Static Topology path """
@@ -173,8 +188,6 @@
on_apply=apply_nw_config)
with self._dts.appconf_group_create(handler=handler) as acg:
- acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH,
- flags = rwdts.Flag.SUBSCRIBER,
- on_prepare=prepare_nw_cfg)
-
-
+ self._regh = acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH,
+ flags = rwdts.Flag.SUBSCRIBER,
+ on_prepare=prepare_nw_cfg)
diff --git a/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py b/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py
index bdea4ef..8fc267e 100755
--- a/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py
+++ b/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py
@@ -70,6 +70,7 @@
self._vnsm = vnsm
self._vlr_msg = vlr_msg
+ self._project = vnsm._project
self._network_id = None
self._network_pool = None
self._assigned_subnet = None
@@ -85,7 +86,8 @@
@property
def vld_xpath(self):
""" VLD xpath associated with this VLR record """
- return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id)
+ return self._project.add_project("C,/vld:vld-catalog/vld:vld[id='{}']".
+ format(self.vld_id))
@property
def vld_id(self):
@@ -100,7 +102,7 @@
@property
def xpath(self):
""" path for this VLR """
- return("D,/vlr:vlr-catalog"
+ return self._project.add_project("D,/vlr:vlr-catalog"
"/vlr:vlr[vlr:id='{}']".format(self.vlr_id))
@property
@@ -116,7 +118,7 @@
@property
def resmgr_path(self):
""" path for resource-mgr"""
- return ("D,/rw-resource-mgr:resource-mgmt" +
+ return self._project.add_project("D,/rw-resource-mgr:resource-mgmt" +
"/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id))
@property
@@ -135,7 +137,7 @@
@property
def msg(self):
""" VLR message for this VLR """
- msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr()
+ msg = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr()
msg.copy_from(self._vlr_msg)
if self._network_id is not None:
@@ -313,6 +315,7 @@
self._vnsm = vnsm
self._regh = None
+ self._project = vnsm._project
@property
def regh(self):
@@ -369,7 +372,7 @@
return
elif action == rwdts.QueryAction.DELETE:
# Delete an VLR record
- schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema()
+ schema = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
self._log.debug("Terminating VLR id %s", path_entry.key00.id)
yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact)
@@ -379,8 +382,9 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
return
- self._log.debug("Registering for VLR using xpath: %s",
- VlrDtsHandler.XPATH)
+ xpath = self._project.add_project(VlrDtsHandler.XPATH)
+ self._log.debug("Registering for VLR using xpath: {}".
+ format(xpath))
reg_handle = rift.tasklets.DTS.RegistrationHandler(
on_commit=on_commit,
@@ -389,16 +393,24 @@
handlers = rift.tasklets.Group.Handler(on_event=on_event,)
with self._dts.group_create(handler=handlers) as group:
self._regh = group.register(
- xpath=VlrDtsHandler.XPATH,
+ xpath=xpath,
handler=reg_handle,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ| rwdts.Flag.DATASTORE,
)
+ def deregister(self):
+ self._log.debug("De-register VLR handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating VLR xact = %s, %s:%s",
xact, path, msg)
self.regh.create_element(path, msg)
@@ -406,10 +418,11 @@
xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg):
+ def update(self, xact, xpath, msg):
"""
Update a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating VLR xact = %s, %s:%s",
xact, path, msg)
self.regh.update_element(path, msg)
@@ -417,10 +430,11 @@
xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Delete a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting VLR xact = %s, %s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted VLR xact = %s, %s", xact, path)
@@ -453,8 +467,13 @@
"Got on prepare for VLD update (ks_path: %s) (action: %s)",
ks_path.to_xpath(VldYang.get_schema()), msg)
- schema = VldYang.YangData_Vld_VldCatalog_Vld.schema()
+ schema = VldYang.YangData_RwProject_Project_VldCatalog_Vld.schema()
path_entry = schema.keyspec_to_entry(ks_path)
+ # TODO: Check why on project delete this gets called
+ if not path_entry:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return
+
vld_id = path_entry.key00.id
disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE]
@@ -477,7 +496,14 @@
handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
yield from self._dts.register(
- VldDtsHandler.XPATH,
+ self._vnsm._project.add_project(VldDtsHandler.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
handler=handler
)
+
+ def deregister(self):
+ self._log.debug("De-register VLD handler for project {}".
+ format(self._vnsm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
diff --git a/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
index 8f87f66..d3aa299 100644
--- a/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
+++ b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,7 +36,7 @@
rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdnal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
ietf_network_yang-1.0 ietf_network_topology_yang-1.0
ietf_l2_topology_yang-1.0 rw_topology_yang-1.0
- rw_log-1.0
+ rw_log-1.0 rw_project_yang-1.0
VAPI_DIRS
${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
@@ -50,7 +50,7 @@
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwcal_yang rwsdnal_yang mano_yang rwlog_gi rwschema_yang
+ DEPENDS rwcal_yang rwsdnal_yang mano_yang rwlog_gi rwschema_yang rwproject_yang
)
rift_install_vala_artifacts(
diff --git a/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt
index 5e7e98a..c7c015d 100644
--- a/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt
+++ b/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,10 +28,7 @@
YANG_FILES ${source_yang_files}
COMPONENT ${PKG_LONG_NAME}
LIBRARIES
- rwschema_yang_gen
- rwyang
- rwlog
- rwlog-mgmt_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
)
diff --git a/rwlaunchpad/plugins/rwvns/yang/rwsdnal.yang b/rwlaunchpad/plugins/rwvns/yang/rwsdnal.yang
index b24952b..5cd1563 100644
--- a/rwlaunchpad/plugins/rwvns/yang/rwsdnal.yang
+++ b/rwlaunchpad/plugins/rwvns/yang/rwsdnal.yang
@@ -51,6 +51,14 @@
prefix "yang";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
revision 2014-12-30 {
description
@@ -82,7 +90,7 @@
}
}
- uses connection-status;
+ // uses connection-status;
typedef sdn-account-type {
description "SDN account type";
@@ -204,70 +212,106 @@
}
}
- container sdn-accounts {
- list sdn-account-list {
- rwpb:msg-new SDNAccount;
- key "name";
+ augment "/rw-project:project" {
+ container sdn-accounts {
+ list sdn-account-list {
+ rwpb:msg-new SDNAccount;
+ key "name";
- leaf name {
- type string;
+ leaf name {
+ type string;
+ }
+
+ uses sdn-provider-auth;
+ uses connection-status;
}
-
- uses sdn-provider-auth;
- uses connection-status;
}
}
- container vnffgs {
- list vnffg-chain {
- key "name";
- rwpb:msg-new VNFFGChain;
+ augment "/rw-project:project" {
+ container vnffgs {
+ list vnffg-chain {
+ key "name";
+ rwpb:msg-new VNFFGChain;
- leaf name {
- type string;
- }
-
- list vnf-chain-path {
- key "order";
- leaf order {
- type uint32;
- description " Order of the VNF in VNFFG chain";
- }
- leaf service-function-type {
+ leaf name {
type string;
}
- leaf nsh-aware {
- type boolean;
- }
- leaf transport-type {
- type string;
- }
- list vnfr-ids {
- key "vnfr-id";
- leaf vnfr-id {
- type yang:uuid;
+
+ list vnf-chain-path {
+ key "order";
+ leaf order {
+ type uint32;
+ description " Order of the VNF in VNFFG chain";
}
- leaf vnfr-name {
+ leaf service-function-type {
+ type string;
+ }
+ leaf nsh-aware {
+ type boolean;
+ }
+ leaf transport-type {
+ type string;
+ }
+ list vnfr-ids {
+ key "vnfr-id";
+ leaf vnfr-id {
+ type yang:uuid;
+ }
+ leaf vnfr-name {
+ type string;
+ }
+ leaf mgmt-address {
+ type inet:ip-address;
+ }
+ leaf mgmt-port {
+ type inet:port-number;
+ }
+ list vdu-list {
+ key "vm-id port-id";
+ leaf port-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf vm-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf name {
+ type string;
+ }
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ leaf sff-name {
+ description "SFF name useful for non OVS based SFF";
+ type string;
+ }
+ }
+ }
+ list sff {
+ rwpb:msg-new VNFFGSff;
+ key "name";
+ leaf name {
+ type string;
+ }
+ leaf function-type {
type string;
}
leaf mgmt-address {
type inet:ip-address;
}
leaf mgmt-port {
- type inet:port-number;
+ type inet:port-number;
}
- list vdu-list {
- key "vm-id port-id";
- leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
+ list dp-endpoints {
+ key "name";
leaf name {
type string;
}
@@ -278,182 +322,153 @@
type inet:port-number;
}
}
- leaf sff-name {
- description "SFF name useful for non OVS based SFF";
- type string;
- }
+ list vnfr-list {
+ key "vnfr-name";
+ leaf vnfr-name {
+ type string;
+ }
+ }
+ }
+ leaf classifier-name {
+ type string;
}
}
- list sff {
- rwpb:msg-new VNFFGSff;
- key "name";
+ }
+ }
+
+ augment "/rw-project:project" {
+ container vnffg-rendered-paths {
+ rwpb:msg-new VNFFGRenderedPaths;
+ list vnffg-rendered-path {
+ key "name";
+ rwpb:msg-new VNFFGRenderedPath;
+ config false;
leaf name {
type string;
}
- leaf function-type {
- type string;
+ leaf path-id {
+ description
+ "Unique Identifier for the service path";
+ type uint32;
}
- leaf mgmt-address {
- type inet:ip-address;
- }
- leaf mgmt-port {
- type inet:port-number;
- }
- list dp-endpoints {
- key "name";
- leaf name {
- type string;
- }
- leaf address {
- type inet:ip-address;
+ list rendered-path-hop {
+ key "hop-number";
+ leaf hop-number {
+ type uint8;
}
- leaf port {
- type inet:port-number;
+ leaf service-index {
+ description
+ "Location within the service path";
+ type uint8;
}
- }
- list vnfr-list {
- key "vnfr-name";
leaf vnfr-name {
type string;
}
+ container service-function-forwarder {
+ leaf name {
+ description
+ "Service Function Forwarder name";
+ type string;
+ }
+ leaf ip-address {
+ description
+ "Service Function Forwarder Data Plane IP address";
+ type inet:ip-address;
+ }
+ leaf port {
+ description
+ "Service Function Forwarder Data Plane port";
+ type inet:port-number;
+ }
+ }
}
}
- leaf classifier-name {
- type string;
- }
}
}
- container vnffg-rendered-paths {
- rwpb:msg-new VNFFGRenderedPaths;
- list vnffg-rendered-path {
- key "name";
- rwpb:msg-new VNFFGRenderedPath;
- config false;
- leaf name {
- type string;
- }
- leaf path-id {
- description
- "Unique Identifier for the service path";
- type uint32;
- }
- list rendered-path-hop {
- key "hop-number";
- leaf hop-number {
- type uint8;
- }
- leaf service-index {
- description
- "Location within the service path";
- type uint8;
- }
- leaf vnfr-name {
+ augment "/rw-project:project" {
+ container vnffg-classifiers {
+ list vnffg-classifier {
+ key "name";
+ rwpb:msg-new VNFFGClassifier;
+
+ leaf name {
type string;
}
- container service-function-forwarder {
- leaf name {
- description
- "Service Function Forwarder name";
+ leaf rsp-name {
+ type string;
+ }
+ leaf rsp-id {
+ type yang:uuid;
+ }
+ leaf port-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf vm-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf sff-name {
+ type string;
+ }
+ container vnffg-metadata {
+ leaf ctx1 {
type string;
}
- leaf ip-address {
- description
- "Service Function Forwarder Data Plane IP address";
- type inet:ip-address;
- }
- leaf port {
- description
- "Service Function Forwarder Data Plane port";
- type inet:port-number;
- }
+ leaf ctx2 {
+ type string;
+ }
+ leaf ctx3 {
+ type string;
+ }
+ leaf ctx4 {
+ type string;
+ }
}
- }
- }
- }
-
-
- container vnffg-classifiers {
- list vnffg-classifier {
- key "name";
- rwpb:msg-new VNFFGClassifier;
-
- leaf name {
- type string;
- }
- leaf rsp-name {
- type string;
- }
- leaf rsp-id {
- type yang:uuid;
- }
- leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf sff-name {
- type string;
- }
- container vnffg-metadata {
- leaf ctx1 {
- type string;
- }
- leaf ctx2 {
- type string;
- }
- leaf ctx3 {
- type string;
- }
- leaf ctx4 {
- type string;
- }
- }
- list match-attributes {
- description
+ list match-attributes {
+ description
"List of match attributes.";
- key "name";
- leaf name {
- description
+ key "name";
+ leaf name {
+ description
"Name for the Access list";
- type string;
- }
+ type string;
+ }
- leaf ip-proto {
- description
+ leaf ip-proto {
+ description
"IP Protocol.";
- type uint8;
- }
+ type uint8;
+ }
- leaf source-ip-address {
- description
+ leaf source-ip-address {
+ description
"Source IP address.";
- type inet:ip-prefix;
- }
+ type inet:ip-prefix;
+ }
- leaf destination-ip-address {
- description
+ leaf destination-ip-address {
+ description
"Destination IP address.";
- type inet:ip-prefix;
- }
+ type inet:ip-prefix;
+ }
- leaf source-port {
- description
+ leaf source-port {
+ description
"Source port number.";
- type inet:port-number;
- }
+ type inet:port-number;
+ }
- leaf destination-port {
- description
+ leaf destination-port {
+ description
"Destination port number.";
- type inet:port-number;
- }
- } //match-attributes
+ type inet:port-number;
+ }
+ } //match-attributes
+ }
}
}
diff --git a/rwlaunchpad/plugins/yang/CMakeLists.txt b/rwlaunchpad/plugins/yang/CMakeLists.txt
index 43e87e1..2381e86 100644
--- a/rwlaunchpad/plugins/yang/CMakeLists.txt
+++ b/rwlaunchpad/plugins/yang/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -71,11 +71,29 @@
rw_conman_yang_gen
rwconfig_agent_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
DEPENDS
mano_yang
rwcloud_yang
rw_conman_yang
rwconfig_agent_yang
mano-types_yang
+ rwprojectmano_yang
+ # Added to make sure that the target is built,
+ # Not required b mano yang
+ rw_project_person_yang
+
)
+rift_add_yang_target(
+ TARGET rw_project_person_yang
+ YANG_FILES
+ rw-project-person-db.yang
+ COMPONENT ${PKG_LONG_NAME}
+ LIBRARIES
+ rwprojectmano_yang_gen
+ DEPENDS
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ rw-launchpad.role.xml
+)
diff --git a/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang b/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang
index 0184a9a..66bcdbf 100644
--- a/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang
+++ b/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix tailf;
}
- tailf:annotate "/rw-image-mgmt:upload-jobs" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-image-mgmt:upload-jobs" {
tailf:callpoint rw_callpoint;
}
diff --git a/rwlaunchpad/plugins/yang/rw-image-mgmt.yang b/rwlaunchpad/plugins/yang/rw-image-mgmt.yang
index 833931f..457db6d 100644
--- a/rwlaunchpad/plugins/yang/rw-image-mgmt.yang
+++ b/rwlaunchpad/plugins/yang/rw-image-mgmt.yang
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,13 +42,30 @@
}
import rw-cloud {
- prefix "rwcloud";
+ prefix "rw-cloud";
}
import rwcal {
prefix "rwcal";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ import mano-types {
+ prefix "mano-types";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-06-01 {
description
"Initial revision.";
@@ -145,7 +162,7 @@
leaf cloud-account {
description "The cloud account to upload the image to";
type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
@@ -153,39 +170,41 @@
uses upload-task-status;
}
- container upload-jobs {
- rwpb:msg-new UploadJobs;
- description "Image upload jobs";
- config false;
+ augment "/rw-project:project" {
+ container upload-jobs {
+ rwpb:msg-new UploadJobs;
+ description "Image upload jobs";
+ config false;
- list job {
- rwpb:msg-new UploadJob;
- key "id";
+ list job {
+ rwpb:msg-new UploadJob;
+ key "id";
- leaf id {
- description "Unique image upload job-id";
- type uint32;
- }
+ leaf id {
+ description "Unique image upload job-id";
+ type uint32;
+ }
- leaf status {
- description "Current job status";
- type job-status;
- }
+ leaf status {
+ description "Current job status";
+ type job-status;
+ }
- leaf start-time {
- description "The job start time (unix epoch)";
- type uint32;
- }
+ leaf start-time {
+ description "The job start time (unix epoch)";
+ type uint32;
+ }
- leaf stop-time {
- description "The job stop time (unix epoch)";
- type uint32;
- }
+ leaf stop-time {
+ description "The job stop time (unix epoch)";
+ type uint32;
+ }
- list upload-tasks {
- rwpb:msg-new UploadTask;
- description "The upload tasks that are part of this job";
- uses upload-task;
+ list upload-tasks {
+ rwpb:msg-new UploadTask;
+ description "The upload tasks that are part of this job";
+ uses upload-task;
+ }
}
}
}
@@ -194,6 +213,8 @@
input {
rwpb:msg-new CreateUploadJob;
+ uses mano-types:rpc-project-name;
+
choice image-selection {
case onboarded-image {
description "Use an image previously onboarded in the image catalog";
@@ -230,7 +251,8 @@
leaf-list cloud-account {
description "List of cloud accounts to upload the image to";
type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/project-name]/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
}
@@ -247,9 +269,12 @@
rpc cancel-upload-job {
input {
rwpb:msg-new CancelUploadJob;
+
leaf job-id {
type uint32;
}
+
+ uses mano-types:rpc-project-name;
}
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.role.xml b/rwlaunchpad/plugins/yang/rw-launchpad.role.xml
new file mode 100644
index 0000000..0efb351
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-launchpad.role.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-launchpad-role</role>
+ <key-set>
+ <name>project-name</name>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-launchpad-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-launchpad:datacenters</path>
+ <path>/rw-project:project/rw-launchpad:resource-orchestrator</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-launchpad-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-launchpad:datacenters</path>
+ <path>/rw-project:project/rw-launchpad:resource-orchestrator</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-launchpad-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-launchpad:datacenters</path>
+ </authorize>
+ </role-definition>
+</config>
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang b/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang
index 1fab791..27f40af 100644
--- a/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang
+++ b/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix tailf;
}
- tailf:annotate "/rw-launchpad:datacenters" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-launchpad:datacenters" {
tailf:callpoint rw_callpoint;
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.yang b/rwlaunchpad/plugins/yang/rw-launchpad.yang
index 0adaee9..7787fa2 100644
--- a/rwlaunchpad/plugins/yang/rw-launchpad.yang
+++ b/rwlaunchpad/plugins/yang/rw-launchpad.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -57,16 +57,16 @@
prefix "rwcal";
}
- import rw-vnfd {
- prefix "rw-vnfd";
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
}
import vld {
prefix "vld";
}
- import rw-nsd {
- prefix "rw-nsd";
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
}
import rw-cloud {
@@ -102,42 +102,57 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-14 {
description
"Initial revision.";
}
- container datacenters {
- description "OpenMano data centers";
+ augment "/rw-project:project" {
+ container datacenters {
+ description "OpenMano data centers";
- rwpb:msg-new DataCenters;
- config false;
+ rwpb:msg-new DataCenters;
+ config false;
- list ro-accounts {
- description
+ list ro-accounts {
+ description
"A list of OpenMano cloud accounts that have data centers associated
with them";
- rwpb:msg-new ROAccount;
- key "name";
-
- leaf name {
- description "The name of the cloud account";
- type leafref {
- path "/rw-launchpad:resource-orchestrator/rw-launchpad:name";
- }
- }
-
- list datacenters {
- rwpb:msg-new DataCenter;
- leaf uuid {
- description "The UUID of the data center";
- type yang:uuid;
- }
+ rwpb:msg-new ROAccount;
+ key "name";
leaf name {
- description "The name of the data center";
- type string;
+ description "The name of the cloud account";
+ type leafref {
+ path "../../../rw-launchpad:resource-orchestrator/rw-launchpad:name";
+ }
+ }
+
+ list datacenters {
+ rwpb:msg-new DataCenter;
+ leaf uuid {
+ description "The UUID of the data center";
+ type yang:uuid;
+ }
+
+ leaf name {
+ description "The name of the data center";
+ type string;
+ }
}
}
}
@@ -151,66 +166,70 @@
}
}
- container resource-orchestrator {
- rwpb:msg-new ResourceOrchestrator;
+ augment "/rw-project:project" {
+ container resource-orchestrator {
+ rwpb:msg-new ResourceOrchestrator;
- leaf name {
- type string;
- }
-
- leaf account-type {
- type resource-orchestrator-account-type;
- }
-
- choice resource-orchestrator {
- description
- "The resource orchestrator to use by the Launchpad";
- default rift-ro;
-
- case rift-ro {
- description
- "Use the RIFT.io resource orchestrator";
-
- container rift-ro {
- leaf rift-ro {
- type empty;
- }
- }
+ leaf name {
+ type string;
}
- case openmano {
+ leaf account-type {
+ type resource-orchestrator-account-type;
+ }
+
+ choice resource-orchestrator {
description
- "Use OpenMano as RO";
+ "The resource orchestrator to use by the Launchpad";
+ default rift-ro;
- container openmano {
- leaf host {
- type string;
- default "localhost";
- }
+ case rift-ro {
+ description
+ "Use the RIFT.io resource orchestrator";
- leaf port {
- type uint16;
- default 9090;
- }
-
- leaf tenant-id {
- type string {
- length "36";
+ container rift-ro {
+ leaf rift-ro {
+ type empty;
}
- mandatory true;
+ }
+ }
+
+ case openmano {
+ description
+ "Use OpenMano as RO";
+
+ container openmano {
+ leaf host {
+ type string;
+ default "localhost";
+ }
+
+ leaf port {
+ type uint16;
+ default 9090;
+ }
+
+ leaf tenant-id {
+ type string {
+ length "36";
+ }
+ mandatory true;
+ }
}
}
}
}
}
- container launchpad-config {
- leaf public-ip {
- description
+ augment "/rw-project:project" {
+ container launchpad-config {
+ leaf public-ip {
+ description
"An IP address that can, at least, be reached by the host that the
launchpad is running on. This is not a mandatory but is required for
alarms to function correctly.";
- type string;
+ type string;
+ }
}
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-monitor.yang b/rwlaunchpad/plugins/yang/rw-monitor.yang
index 559880d..e9a1112 100644
--- a/rwlaunchpad/plugins/yang/rw-monitor.yang
+++ b/rwlaunchpad/plugins/yang/rw-monitor.yang
@@ -63,6 +63,10 @@
prefix "yang";
}
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
revision 2015-10-30 {
description
"Initial revision.";
diff --git a/rwlaunchpad/plugins/yang/rw-nsm.yang b/rwlaunchpad/plugins/yang/rw-nsm.yang
index 4e6d9aa..975bd81 100644
--- a/rwlaunchpad/plugins/yang/rw-nsm.yang
+++ b/rwlaunchpad/plugins/yang/rw-nsm.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,30 +43,38 @@
prefix "inet";
}
- import rw-nsd {
- prefix "rw-nsd";
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
}
- import nsd {
- prefix "nsd";
+
+ import project-nsd {
+ prefix "project-nsd";
}
+
import rw-nsr {
prefix "rw-nsr";
}
+
import vld {
prefix "vld";
}
+
import rw-vlr {
prefix "rw-vlr";
}
+
import rw-vns {
prefix "rw-vns";
}
- import rw-vnfd {
- prefix "rw-vnfd";
+
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
}
- import vnfd {
- prefix "vnfd";
+
+ import project-vnfd {
+ prefix "project-vnfd";
}
+
import rw-vnfr {
prefix "rw-vnfr";
}
@@ -91,6 +99,19 @@
prefix "rw-config-agent";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-07 {
description
"Initial revision.";
@@ -119,15 +140,17 @@
}
}
- container ro-config {
- description "Resource Orchestrator endpoint ip address";
- rwpb:msg-new "roConfig";
- rwcli:new-mode "ro-config";
+ augment "/rw-project:project" {
+ container ro-config {
+ description "Resource Orchestrator endpoint ip address";
+ rwpb:msg-new "roConfig";
+ rwcli:new-mode "ro-config";
- container cm-endpoint {
- description "Service Orchestrator endpoint ip address";
- rwpb:msg-new "SoEndpoint";
- uses cm-endpoint;
+ container cm-endpoint {
+ description "Service Orchestrator endpoint ip address";
+ rwpb:msg-new "SoEndpoint";
+ uses cm-endpoint;
+ }
}
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang
index 13136c9..9e15181 100644
--- a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang
+++ b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,11 @@
prefix tailf;
}
- tailf:annotate "/rw-pkg-mgmt:download-jobs" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-pkg-mgmt:download-jobs" {
tailf:callpoint rw_callpoint;
}
diff --git a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang
index 5fbd621..ee3b7d1 100644
--- a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang
+++ b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,10 +56,24 @@
import rw-vnfd {
prefix "rwvnfd";
}
+
import rw-nsd {
prefix "rwnsd";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-06-01 {
description
"Initial revision.";
@@ -190,45 +204,47 @@
}
}
- container download-jobs {
- rwpb:msg-new DownloadJobs;
- description "Download jobs";
- config false;
+ augment "/rw-project:project" {
+ container download-jobs {
+ rwpb:msg-new DownloadJobs;
+ description "Download jobs";
+ config false;
- list job {
- rwpb:msg-new DownloadJob;
- key "download-id";
+ list job {
+ rwpb:msg-new DownloadJob;
+ key "download-id";
- leaf download-id {
- description "Unique UUID";
- type string;
+ leaf download-id {
+ description "Unique UUID";
+ type string;
+ }
+
+ leaf url {
+ description "URL of the download";
+ type string;
+ }
+
+ uses package-file-identifer;
+ uses download-task-status;
}
-
- leaf url {
- description "URL of the download";
- type string;
- }
-
- uses package-file-identifer;
- uses download-task-status;
}
- }
- container copy-jobs {
- rwpb:msg-new CopyJobs;
- description "Copy jobs";
- config false;
+ container copy-jobs {
+ rwpb:msg-new CopyJobs;
+ description "Copy jobs";
+ config false;
- list job {
- rwpb:msg-new CopyJob;
- key "transaction-id";
+ list job {
+ rwpb:msg-new CopyJob;
+ key "transaction-id";
- leaf transaction-id {
- description "Unique UUID";
- type string;
+ leaf transaction-id {
+ description "Unique UUID";
+ type string;
+ }
+
+ uses copy-task-status;
}
-
- uses copy-task-status;
}
}
@@ -237,6 +253,7 @@
input {
uses package-identifer;
+ uses manotypes:rpc-project-name;
}
output {
@@ -257,6 +274,8 @@
description "Name of destination package";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
output {
@@ -277,6 +296,8 @@
description "Type of the package";
type manotypes:package-type;
}
+
+ uses manotypes:rpc-project-name;
}
output {
@@ -293,6 +314,7 @@
input {
uses package-identifer;
uses external-url-data;
+ uses manotypes:rpc-project-name;
}
output {
@@ -300,6 +322,7 @@
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
@@ -309,6 +332,7 @@
input {
uses package-identifer;
uses external-url-data;
+ uses manotypes:rpc-project-name;
}
output {
@@ -316,6 +340,7 @@
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
@@ -324,6 +349,7 @@
input {
uses package-identifer;
+ uses manotypes:rpc-project-name;
leaf export-schema {
description "Schema to export";
@@ -342,7 +368,6 @@
type export-format;
default YAML;
}
-
}
output {
@@ -355,6 +380,8 @@
description "Valid ID to track the status of the task";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
@@ -364,6 +391,7 @@
input {
uses package-file-identifer;
uses external-url-data;
+ uses manotypes:rpc-project-name;
}
output {
@@ -371,6 +399,7 @@
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
@@ -379,6 +408,7 @@
input {
uses package-file-identifer;
+ uses manotypes:rpc-project-name;
}
output {
@@ -392,6 +422,7 @@
type string;
}
+ uses manotypes:rpc-project-name;
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-project-person-db.yang b/rwlaunchpad/plugins/yang/rw-project-person-db.yang
new file mode 100644
index 0000000..c79d334
--- /dev/null
+++ b/rwlaunchpad/plugins/yang/rw-project-person-db.yang
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+
+module rw-project-person-db
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-person-db";
+ prefix "rw-project-person-db";
+
+ import rw-pb-ext {
+ prefix "rwpb";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2016-04-03 {
+ description
+ "Initial revision.
+ Test YANG for unit testing.";
+ }
+
+ augment "/rw-project:project" {
+ container person {
+ rwpb:msg-new Person;
+
+ leaf name {
+ description
+ "This is the person's name.";
+ type string;
+ }
+ }
+
+ container flat-person {
+ rwpb:msg-new FlatPerson;
+ rwpb:msg-flat "true";
+
+ leaf name {
+ type string; rwpb:field-string-max "64";
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang b/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang
index 6b6e8b1..a70088e 100644
--- a/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang
+++ b/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,11 +32,15 @@
prefix tailf;
}
- tailf:annotate "/rw-resource-mgr:resource-pool-records" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-resource-mgr:resource-pool-records" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/rw-resource-mgr:resource-mgmt" {
+ tailf:annotate "/rw-project:project/rw-resource-mgr:resource-mgmt" {
tailf:callpoint rw_callpoint;
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-resource-mgr.yang b/rwlaunchpad/plugins/yang/rw-resource-mgr.yang
index 9bf914a..08d606d 100644
--- a/rwlaunchpad/plugins/yang/rw-resource-mgr.yang
+++ b/rwlaunchpad/plugins/yang/rw-resource-mgr.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -52,6 +52,19 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-16 {
description
"Initial revision.";
@@ -90,31 +103,33 @@
}
- container resource-mgr-config {
- description "Data model for configuration of resource-mgr";
- rwpb:msg-new ResourceManagerConfig;
- config true;
+ augment "/rw-project:project" {
+ container resource-mgr-config {
+ description "Data model for configuration of resource-mgr";
+ rwpb:msg-new ResourceManagerConfig;
+ config true;
- container management-domain {
- leaf name {
- description "The management domain name this launchpad is associated with.";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- //mandatory true;
- }
- }
-
- container resource-pools {
- description "Resource Pool configuration";
- rwpb:msg-new ResourcePools;
- list cloud-account {
- key "name";
+ container management-domain {
leaf name {
- description
- "Resource pool for the configured cloud account";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ description "The management domain name this launchpad is associated with.";
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ //mandatory true;
+ }
+ }
+
+ container resource-pools {
+ description "Resource Pool configuration";
+ rwpb:msg-new ResourcePools;
+ list cloud-account {
+ key "name";
+ leaf name {
+ description
+ "Resource pool for the configured cloud account";
+ type leafref {
+ path "../../../../rwcloud:cloud/rwcloud:account/rwcloud:name";
+ }
}
}
}
@@ -136,171 +151,177 @@
}
}
- container resource-mgmt {
- description "Resource management ";
- config false;
+ augment "/rw-project:project" {
+ container resource-mgmt {
+ description "Resource management ";
+ config false;
- container vdu-event {
- description "Events for VDU Management";
- rwpb:msg-new VDUEvent;
+ container vdu-event {
+ description "Events for VDU Management";
+ rwpb:msg-new VDUEvent;
- list vdu-event-data {
- rwpb:msg-new VDUEventData;
- key "event-id";
+ list vdu-event-data {
+ rwpb:msg-new VDUEventData;
+ key "event-id";
- leaf event-id {
- description "Identifier associated with the VDU transaction";
- type yang:uuid;
- }
-
- leaf cloud-account {
- description "The cloud account to use for this resource request";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ leaf event-id {
+ description "Identifier associated with the VDU transaction";
+ type yang:uuid;
}
- }
- container request-info {
- description "Information about required resource";
-
- uses rwcal:vdu-create-params;
- }
-
- container resource-info {
- description "Information about allocated resource";
- leaf pool-name {
- type string;
+ leaf cloud-account {
+ description "The cloud account to use for this resource request";
+ type leafref {
+ path "../../../../rwcloud:cloud/rwcloud:account/rwcloud:name";
+ }
}
- uses resource-state;
- uses rwcal:vdu-info-params;
+
+ container request-info {
+ description "Information about required resource";
+
+ uses rwcal:vdu-create-params;
+ }
+
+ container resource-info {
+ description "Information about allocated resource";
+ leaf pool-name {
+ type string;
+ }
+ uses resource-state;
+ uses rwcal:vdu-info-params;
+ }
}
}
- }
- container vlink-event {
- description "Events for Virtual Link management";
- rwpb:msg-new VirtualLinkEvent;
+ container vlink-event {
+ description "Events for Virtual Link management";
+ rwpb:msg-new VirtualLinkEvent;
- list vlink-event-data {
- rwpb:msg-new VirtualLinkEventData;
+ list vlink-event-data {
+ rwpb:msg-new VirtualLinkEventData;
- key "event-id";
+ key "event-id";
- leaf event-id {
- description "Identifier associated with the Virtual Link transaction";
- type yang:uuid;
- }
-
- leaf cloud-account {
- description "The cloud account to use for this resource request";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ leaf event-id {
+ description "Identifier associated with the Virtual Link transaction";
+ type yang:uuid;
}
- }
- container request-info {
- description "Information about required resource";
-
- uses rwcal:virtual-link-create-params;
- }
-
- container resource-info {
- leaf pool-name {
- type string;
+ leaf cloud-account {
+ description "The cloud account to use for this resource request";
+ type leafref {
+ path "../../../../rwcloud:cloud/rwcloud:account/rwcloud:name";
+ }
}
- uses resource-state;
- uses rwcal:virtual-link-info-params;
+
+ container request-info {
+ description "Information about required resource";
+
+ uses rwcal:virtual-link-create-params;
+ }
+
+ container resource-info {
+ leaf pool-name {
+ type string;
+ }
+ uses resource-state;
+ uses rwcal:virtual-link-info-params;
+ }
}
}
}
}
- container resource-pool-records {
- description "Resource Pool Records";
- rwpb:msg-new ResourcePoolRecords;
- config false;
+ augment "/rw-project:project" {
+ container resource-pool-records {
+ description "Resource Pool Records";
+ rwpb:msg-new ResourcePoolRecords;
+ config false;
- list cloud-account {
- key "name";
- leaf name {
- description
- "The configured cloud account's pool records.";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
- }
- }
-
- list records {
- rwpb:msg-new ResourceRecordInfo;
+ list cloud-account {
key "name";
- uses resource-pool-info;
-
- leaf pool-status {
- type enumeration {
- enum unknown;
- enum locked;
- enum unlocked;
- }
- }
-
- leaf total-resources {
- type uint32;
- }
-
- leaf free-resources {
- type uint32;
- }
-
- leaf allocated-resources {
- type uint32;
- }
- }
- }
- }
-
-
- container resource-mgr-data{
- description "Resource Manager operational data";
- config false;
-
- container pool-record {
- description "Resource Pool record";
-
- list cloud {
- key "name";
- max-elements 16;
- rwpb:msg-new "ResmgrCloudPoolRecords";
leaf name {
description
"The configured cloud account's pool records.";
type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ path "../../../rwcloud:cloud/rwcloud:account/rwcloud:name";
}
}
list records {
+ rwpb:msg-new ResourceRecordInfo;
key "name";
uses resource-pool-info;
- list free-vdu-list {
- key vdu-id;
- uses rwcal:vdu-info-params;
+ leaf pool-status {
+ type enumeration {
+ enum unknown;
+ enum locked;
+ enum unlocked;
+ }
}
- list in-use-vdu-list {
- key vdu-id;
- uses rwcal:vdu-info-params;
+ leaf total-resources {
+ type uint32;
}
- list free-vlink-list {
- key virtual-link-id;
- uses rwcal:virtual-link-info-params;
+ leaf free-resources {
+ type uint32;
}
- list in-use-vlink-list {
+ leaf allocated-resources {
+ type uint32;
+ }
+ }
+ }
+ }
+ }
+
+
+ augment "/rw-project:project" {
+ container resource-mgr-data {
+ description "Resource Manager operational data";
+ config false;
+
+ container pool-record {
+ description "Resource Pool record";
+
+ list cloud {
+ key "name";
+ max-elements 16;
+ rwpb:msg-new "ResmgrCloudPoolRecords";
+ leaf name {
+ description
+ "The configured cloud account's pool records.";
+ type leafref {
+ path "../../../../rwcloud:cloud/rwcloud:account/rwcloud:name";
+ }
+ }
+
+ list records {
+ key "name";
+ uses resource-pool-info;
+
+ list free-vdu-list {
+ key vdu-id;
+ uses rwcal:vdu-info-params;
+ }
+
+ list in-use-vdu-list {
+ key vdu-id;
+ uses rwcal:vdu-info-params;
+ }
+
+ list free-vlink-list {
key virtual-link-id;
- uses rwcal:virtual-link-info-params;
+ uses rwcal:virtual-link-info-params;
+ }
+
+ list in-use-vlink-list {
+ key virtual-link-id;
+ uses rwcal:virtual-link-info-params;
+ }
}
}
}
diff --git a/rwlaunchpad/plugins/yang/rw-staging-mgmt.tailf.yang b/rwlaunchpad/plugins/yang/rw-staging-mgmt.tailf.yang
index 9b35ff4..382515f 100644
--- a/rwlaunchpad/plugins/yang/rw-staging-mgmt.tailf.yang
+++ b/rwlaunchpad/plugins/yang/rw-staging-mgmt.tailf.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,11 +31,15 @@
prefix tailf;
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
tailf:annotate "/rw-staging-mgmt:create-staging-area" {
tailf:actionpoint rw_actionpoint;
}
- tailf:annotate "/rw-staging-mgmt:staging-areas" {
+ tailf:annotate "/rw-project:project/rw-staging-mgmt:staging-areas" {
tailf:callpoint rw_callpoint;
}
diff --git a/rwlaunchpad/plugins/yang/rw-staging-mgmt.yang b/rwlaunchpad/plugins/yang/rw-staging-mgmt.yang
index d5722cd..7e16425 100644
--- a/rwlaunchpad/plugins/yang/rw-staging-mgmt.yang
+++ b/rwlaunchpad/plugins/yang/rw-staging-mgmt.yang
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -53,6 +53,19 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-06-01 {
description
"Initial revision.";
@@ -81,6 +94,13 @@
type uint64;
default 3600;
}
+
+ leaf project-name {
+ description "Project to which this belongs";
+ type leafref {
+ path "/rw-project:project/rw-project:name";
+ }
+ }
}
grouping staging-area-meta {
@@ -112,26 +132,27 @@
}
- container staging-areas {
- rwpb:msg-new StagingAreas;
- description "Staging Areas";
- config false;
+ augment "/rw-project:project" {
+ container staging-areas {
+ rwpb:msg-new StagingAreas;
+ description "Staging Areas";
+ config false;
- list staging-area {
- rwpb:msg-new StagingArea;
- key "area-id";
+ list staging-area {
+ rwpb:msg-new StagingArea;
+ key "area-id";
- leaf area-id {
- description "Staging Area ID";
- type string;
+ leaf area-id {
+ description "Staging Area ID";
+ type string;
+ }
+
+ uses staging-area-config;
+ uses staging-area-meta;
}
-
- uses staging-area-config;
- uses staging-area-meta;
}
}
-
rpc create-staging-area {
description "Creates a staging area for the upload.";
diff --git a/rwlaunchpad/plugins/yang/rw-vnfm.yang b/rwlaunchpad/plugins/yang/rw-vnfm.yang
index 25e1abb..dc83a4c 100644
--- a/rwlaunchpad/plugins/yang/rw-vnfm.yang
+++ b/rwlaunchpad/plugins/yang/rw-vnfm.yang
@@ -47,8 +47,8 @@
prefix "rw-vns";
}
- import rw-vnfd {
- prefix "rw-vnfd";
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
}
import rw-vnfr {
@@ -71,6 +71,10 @@
prefix "rw-launchpad";
}
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
revision 2015-10-07 {
description
"Initial revision.";
diff --git a/rwlaunchpad/plugins/yang/rw-vns.yang b/rwlaunchpad/plugins/yang/rw-vns.yang
index 8dc63bb..6ea19eb 100644
--- a/rwlaunchpad/plugins/yang/rw-vns.yang
+++ b/rwlaunchpad/plugins/yang/rw-vns.yang
@@ -89,6 +89,10 @@
prefix "rw-sdn";
}
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
revision 2015-10-05 {
description
"Initial revision.";
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
index a3c565b..8510fa6 100644
--- a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
@@ -23,13 +23,13 @@
import tempfile
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwNsrYang,
RwVnfrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
index 557518b..bf46ac8 100755
--- a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
@@ -33,12 +33,12 @@
import uuid
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwNsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
@@ -71,7 +71,7 @@
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
@@ -210,11 +210,11 @@
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id='%s']/project-nsd:description" % nsd.id
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
index ca6e9b5..ae9f404 100755
--- a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
+++ b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
@@ -33,12 +33,12 @@
import uuid
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwNsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
@@ -78,7 +78,7 @@
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
@@ -206,11 +206,11 @@
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id='%s']/project-nsd:description" % nsd.id
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
diff --git a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
index 846ef2e..8e203d9 100644
--- a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
+++ b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
@@ -17,7 +17,13 @@
import pytest
-from gi.repository import NsrYang, RwNsrYang, RwVnfrYang, NsdYang, RwNsdYang
+from gi.repository import (
+ NsrYang,
+ RwNsrYang,
+ RwVnfrYang,
+ ProjectNsdYang as NsdYang,
+ RwProjectNsdYang as RwNsdYang
+ )
import rift.auto.session
@pytest.fixture(scope='module')
@@ -25,8 +31,8 @@
return mgmt_session.proxy
-ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
-ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
INSTANCE_ID = 1
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
index 45a7832..49cfcb5 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
@@ -41,18 +41,19 @@
import gi
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
RwNsrYang,
RwVnfrYang,
NsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
@@ -98,7 +99,7 @@
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
@@ -409,11 +410,11 @@
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id='%s']/project-nsd:vendor" % nsd.id
descr_value = "automation"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
@@ -597,11 +598,11 @@
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id='%s']/project-nsd:vendor" % nsd.id
descr_value = "automation"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
index 9f1cd0a..81eeeb0 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
@@ -26,14 +26,14 @@
import re
gi.require_version('RwNsrYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
RwBaseYang,
RwConmanYang,
RwNsrYang,
- RwNsdYang,
+ RwProjectNsdYang as RwNsdYang,
RwVcsYang,
RwVlrYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
VlrYang,
VnfrYang,
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
index 0878db7..d38509f 100644
--- a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
+++ b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
@@ -35,10 +35,10 @@
from gi.repository import (
NsrYang,
- NsdYang,
+ ProjectNsdYang as NsdYang,
VnfrYang,
RwNsrYang,
- RwNsdYang,
+ RwProjectNsdYang as RwNsdYang,
RwVnfrYang,
)
diff --git a/rwlaunchpad/ra/pytest/ns/test_onboard.py b/rwlaunchpad/ra/pytest/ns/test_onboard.py
index 5951ce8..07a31dd 100644
--- a/rwlaunchpad/ra/pytest/ns/test_onboard.py
+++ b/rwlaunchpad/ra/pytest/ns/test_onboard.py
@@ -37,19 +37,19 @@
import gi
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
from gi.repository import (
RwcalYang,
- NsdYang,
+ ProjectNsdYang as NsdYang,
RwNsrYang,
RwVnfrYang,
NsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
@@ -99,7 +99,7 @@
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
@@ -333,11 +333,11 @@
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id='%s']/project-nsd:description" % nsd.id
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
diff --git a/rwlaunchpad/ra/pytest/test_failover.py b/rwlaunchpad/ra/pytest/test_failover.py
index 60ba82a..e952b69 100755
--- a/rwlaunchpad/ra/pytest/test_failover.py
+++ b/rwlaunchpad/ra/pytest/test_failover.py
@@ -27,7 +27,7 @@
import subprocess
import gi
-from gi.repository import RwVnfdYang
+from gi.repository import RwProjectVnfdYang as RwVnfdYang
from gi.repository import RwVnfrYang
import rift.auto.proxy
diff --git a/rwlaunchpad/test/launchpad.py b/rwlaunchpad/test/launchpad.py
index 98680ba..b73c500 100755
--- a/rwlaunchpad/test/launchpad.py
+++ b/rwlaunchpad/test/launchpad.py
@@ -40,6 +40,7 @@
from rift.vcs.ext import ClassProperty
+
logger = logging.getLogger(__name__)
@@ -369,6 +370,34 @@
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
plugin_name = ClassProperty('rwconmantasklet')
+
+class ProjectMgrManoTasklet(rift.vcs.core.Tasklet):
+ """
+ This class represents a Resource Manager tasklet.
+ """
+
+ def __init__(self, name='Project-Manager-Mano', uid=None,
+ config_ready=True,
+ recovery_action=core.RecoveryType.FAILCRITICAL.value,
+ data_storetype=core.DataStore.NOSTORE.value,
+ ):
+ """
+ Creates a ProjectMgrManoTasklet object.
+
+ Arguments:
+ name - the name of the tasklet
+ uid - a unique identifier
+ """
+ super(ProjectMgrManoTasklet, self).__init__(name=name, uid=uid,
+ config_ready=config_ready,
+ recovery_action=recovery_action,
+ data_storetype=data_storetype,
+ )
+
+ plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwprojectmano')
+ plugin_name = ClassProperty('rwprojectmano')
+
+
class PackageManagerTasklet(rift.vcs.core.Tasklet):
"""
This class represents a Resource Manager tasklet.
@@ -421,10 +450,12 @@
GlanceServer(),
rift.vcs.DtsRouterTasklet(),
rift.vcs.MsgBrokerTasklet(),
- rift.vcs.RestPortForwardTasklet(),
rift.vcs.RestconfTasklet(),
rift.vcs.RiftCli(),
rift.vcs.uAgentTasklet(),
+ rift.vcs.IdentityManagerTasklet(),
+ rift.vcs.ProjectManagerTasklet(),
+ ProjectMgrManoTasklet(),
rift.vcs.Launchpad(),
]
@@ -453,6 +484,7 @@
AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
PackageManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
StagingManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+ ProjectMgrManoTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
]
if not mgmt_ip_list or len(mgmt_ip_list) == 0:
@@ -480,6 +512,16 @@
stby_lp_vm.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)
colony.append(stby_lp_vm)
+ if ha_mode == "LSS":
+ stby_lp_vm_2 = rift.vcs.VirtualMachine(
+ name='launchpad-vm-3',
+ ip=mgmt_ip_list[2],
+ procs=standby_procs,
+ start=False,
+ )
+ stby_lp_vm_2.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)
+ colony.append(stby_lp_vm_2)
+
sysinfo = rift.vcs.SystemInfo(
mode='ethsim',
zookeeper=rift.vcs.manifest.RaZookeeper(master_ip=mgmt_ip_list[0]),
@@ -548,8 +590,8 @@
for f in os.listdir(cleanup_dir_name):
if f.endswith(".aof") or f.endswith(".rdb"):
os.remove(os.path.join(cleanup_dir_name, f))
-
- # Remove the persistant DTS recovery files
+
+ # Remove the persistant DTS recovery files
for f in os.listdir(cleanup_dir_name):
if f.endswith(".db"):
os.remove(os.path.join(cleanup_dir_name, f))
@@ -567,10 +609,12 @@
#load demo info and create Demo object
demo = Demo(args.no_ui, ha_mode, mgmt_ip_list, args.test_name)
- # Create the prepared system from the demo
- system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
- northbound_listing="cli_launchpad_schema_listing.txt",
- netconf_trace_override=True)
+ system = rift.vcs.demo.prepared_system_from_demo_and_args(
+ demo, args,
+ northbound_listing=["platform_schema_listing.txt",
+ "platform_mgmt_schema_listing.txt",
+ "cli_launchpad_schema_listing.txt"],
+ netconf_trace_override=True)
# Search for externally accessible IP address with netifaces
gateways = netifaces.gateways()
@@ -590,6 +634,7 @@
if __name__ == "__main__":
resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) )
+ os.system('/usr/rift/bin/UpdateHostsFile')
try:
main()
except rift.vcs.demo.ReservationError:
diff --git a/rwlaunchpad/test/mano_error_ut.py b/rwlaunchpad/test/mano_error_ut.py
index e593cee..0790290 100755
--- a/rwlaunchpad/test/mano_error_ut.py
+++ b/rwlaunchpad/test/mano_error_ut.py
@@ -174,8 +174,8 @@
class ResourceMgrMock(object):
- VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
- VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+ VDU_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+ VLINK_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
def __init__(self, dts, log, loop):
self._log = log
diff --git a/rwlaunchpad/test/mano_ut.py b/rwlaunchpad/test/mano_ut.py
index 69a0d40..d85088b 100755
--- a/rwlaunchpad/test/mano_ut.py
+++ b/rwlaunchpad/test/mano_ut.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -57,8 +57,14 @@
import rift.tasklets
import rift.test.dts
import rw_peas
+from rift.mano.utils.project import (
+ ManoProject,
+ DEFAULT_PROJECT,
+)
+PROJECT = 'default'
+
openstack_info = {
'username': 'pluto',
'password': 'mypasswd',
@@ -75,8 +81,8 @@
class XPaths(object):
@staticmethod
def nsd(k=None):
- return ("C,/nsd:nsd-catalog/nsd:nsd" +
- ("[nsd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-nsd:nsd-catalog/project-nsd:nsd" +
+ ("[project-nsd:id='{}']".format(k) if k is not None else ""))
@staticmethod
def vld(k=None):
@@ -85,8 +91,8 @@
@staticmethod
def vnfd(k=None):
- return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
- ("[vnfd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ ("[project-vnfd:id='{}']".format(k) if k is not None else ""))
@staticmethod
def vnfr(k=None):
@@ -125,11 +131,8 @@
@staticmethod
def cm_state(k=None):
- if k is None:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr")
- else:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
- ("[rw-conman:id='{}']".format(k) if k is not None else ""))
+ return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
+ ("[rw-conman:id='{}']".format(k) if k is not None else ""))
@staticmethod
def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
@@ -149,19 +152,37 @@
("/nsr:instance") +
("[nsr:index='{}']".format(index) if index is not None else ""))
+ @staticmethod
+ def cloud_account(k=None):
+ return ("C,/rw-cloud:cloud/rw-cloud:account" +
+ ("[rw-cloud:name='{}']".format(k) if k is not None else ""))
+
+ @staticmethod
+ def project(k=None):
+ return ("C,/rw-project:project" +
+ ("[rw-project:name='{}']".format(k) if k is not None else ""))
+
class ManoQuerier(object):
- def __init__(self, log, dts):
+ def __init__(self, log, dts, project):
self.log = log
self.dts = dts
+ self.project = project
+
+ def add_project(self, xpath):
+ return self.project.add_project(xpath)
@asyncio.coroutine
- def _read_query(self, xpath, do_trace=False):
- self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
+ def _read_query(self, xpath, do_trace=False, project=True):
+ if project:
+ xp = self.add_project(xpath)
+ else:
+ xp = xpath
+ self.log.debug("Running XPATH read query: %s (trace: %s)", xp, do_trace)
flags = rwdts.XactFlag.MERGE
flags += rwdts.XactFlag.TRACE if do_trace else 0
res_iter = yield from self.dts.query_read(
- xpath, flags=flags
+ xp, flags=flags
)
results = []
@@ -173,6 +194,27 @@
return results
@asyncio.coroutine
+ def _delete_query(self, xpath, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH delete query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_delete(
+ xp,
+ flags
+ )
+
+ @asyncio.coroutine
+ def _update_query(self, xpath, msg, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH update query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_update(
+ xp,
+ flags,
+ msg
+ )
+
+ @asyncio.coroutine
def get_cm_state(self, nsr_id=None):
return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
@@ -183,7 +225,6 @@
@asyncio.coroutine
def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
- #return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
@asyncio.coroutine
def get_nsr_configs(self, nsr_id=None):
@@ -211,66 +252,34 @@
@asyncio.coroutine
def delete_nsr(self, nsr_id):
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- XPaths.nsr_config(nsr_id),
- 0
- #rwdts.XactFlag.TRACE,
- #rwdts.Flag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsr_config(nsr_id)))
@asyncio.coroutine
def delete_nsd(self, nsd_id):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsd(nsd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.vnfd(vnfd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsd(self, nsd_id, nsd_msg):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- nsd_msg,
- )
+ return (yield from self._update_query(XPaths.nsd(nsd_id), nsd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_vnfd(self, vnfd_id, vnfd_msg):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- vnfd_msg,
- )
+ return (yield from self._update_query(XPaths.vnfd(vnfd_id), vnfd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsr_config(self, nsr_id, nsr_msg):
- nsr_xpath = XPaths.nsr_config(nsr_id)
- self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsr_xpath,
- rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
- nsr_msg,
- )
+ return (yield from self._update_query(
+ XPaths.nsr_config(nsr_id),
+ nsr_msg,
+ rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE))
class ManoTestCase(rift.test.dts.AbstractDTSTest):
@@ -370,39 +379,47 @@
nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
+
class DescriptorPublisher(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
self._registrations = []
@asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
+ if 'rw-project' in path:
+ w_xp = w_path
+ xp = path
+ else:
+ w_xp = self.project.add_project(w_path)
+ xp = self.project.add_project(path)
@asyncio.coroutine
def on_ready(regh, status):
self.log.debug("Create element: %s, obj-type:%s obj:%s",
- path, type(desc), desc)
+ xp, type(desc), desc)
with self.dts.transaction() as xact:
- regh.create_element(path, desc, xact.xact)
- self.log.debug("Created element: %s, obj:%s", path, desc)
+ regh.create_element(xp, desc, xact.xact)
+ self.log.debug("Created element: %s, obj:%s", xp, desc)
ready_event.set()
handler = rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready
)
- self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+ self.log.debug("Registering path: %s, obj:%s", w_xp, desc)
reg = yield from self.dts.register(
- w_path,
+ w_xp,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
self._registrations.append(reg)
- self.log.debug("Registered path : %s", w_path)
+ self.log.debug("Registered path : %s", w_xp)
yield from ready_event.wait()
return reg
@@ -413,23 +430,114 @@
reg.deregister()
-class PingPongNsrConfigPublisher(object):
- XPATH = "C,/nsr:ns-instance-config"
+class ProjectPublisher(object):
+ XPATH = "C,/rw-project:project"
- def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
+ def __init__(self, log, loop, dts, project):
self.dts = dts
self.log = log
self.loop = loop
+ self.project = project
self.ref = None
- self.querier = ManoQuerier(log, dts)
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
- self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
- nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", ProjectPublisher.XPATH)
+ self.reg = yield from self.dts.register(
+ ProjectPublisher.XPATH,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_project(self, config, xpath, xpath_wild):
+ # Publish project
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(config), config)
+ yield from self.publisher.publish(xpath_wild, xpath, config)
+
+
+class CloudAccountPublisher(object):
+ XPATH = "C,/rw-cloud:cloud"
+
+ def __init__(self, log, loop, dts, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
+
+ self.xpath = self.project.add_project(CloudAccountPublisher.XPATH)
+
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", self.xpath)
+ self.reg = yield from self.dts.register(
+ self.xpath,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_account(self, account, xpath, xpath_wild):
+ # Publish cloud account
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(account), account)
+ yield from self.publisher.publish(xpath_wild, xpath, account)
+
+
+class PingPongNsrConfigPublisher(object):
+ XPATH = "C,/nsr:ns-instance-config"
+
+ def __init__(self, log, loop, dts, ping_pong, cloud_account_name, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.xpath = self.project.add_project(PingPongNsrConfigPublisher.XPATH)
+ self.nsr_config = rwnsryang.YangData_RwProject_Project_NsInstanceConfig()
+
+ nsr = rwnsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "ns1.{}".format(nsr.id)
- nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ nsr.nsd = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
nsr.cloud_account = cloud_account_name
@@ -439,8 +547,9 @@
#'cloud_account':'mock_account1'
})
- inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
+ inputs = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ inputs.xpath = self.project.add_project(
+ "/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]/project-nsd:name".format(ping_pong.nsd_id))
inputs.value = "inigo montoya"
fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
@@ -488,9 +597,9 @@
def on_ready(regh, status):
self._ready_event.set()
- self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
+ self.log.debug("Registering path: %s", self.xpath)
self.reg = yield from self.dts.register(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
flags=rwdts.Flag.PUBLISHER,
handler=rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready,
@@ -503,7 +612,7 @@
yield from self._ready_event.wait()
with self.dts.transaction() as xact:
self.reg.create_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
@@ -520,7 +629,7 @@
})
with self.dts.transaction() as xact:
self.reg.update_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
@@ -539,7 +648,7 @@
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
+
def create_vnfd_placement_group_map(self,
nsr,
@@ -555,21 +664,16 @@
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
-
+
+
@asyncio.coroutine
def delete_scale_group_instance(self, group_name, index):
self.log.debug("Deleting scale group %s instance %s", group_name, index)
#del self.nsr_config.nsr[0].scaling_group[0].instance[0]
- xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
+ xpath = self.project.add_project(
+ XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id,
+ group_name, index))
yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
- #with self.dts.transaction() as xact:
- # self.reg.update_element(
- # PingPongNsrConfigPublisher.XPATH,
- # self.nsr_config,
- # flags=rwdts.XactFlag.REPLACE,
- # xact=xact.xact,
- # )
def deregister(self):
if self.reg is not None:
@@ -617,10 +721,12 @@
def update_vnf_cloud_map(self,vnf_cloud_map):
self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
for vnf_index,cloud_acct in vnf_cloud_map.items():
- vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map if vnf_index == vnf_map.member_vnf_index_ref]
+ vnf_maps = [vnf_map for vnf_map in \
+ self.nsr_config.nsr[0].vnf_cloud_account_map \
+ if vnf_index == vnf_map.member_vnf_index_ref]
if vnf_maps:
vnf_maps[0].cloud_account = cloud_acct
- else:
+ else:
self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
'member_vnf_index_ref':vnf_index,
'cloud_account':cloud_acct
@@ -628,13 +734,16 @@
class PingPongDescriptorPublisher(object):
- def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+ def __init__(self, log, loop, dts, project,
+ num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
- self.querier = ManoQuerier(self.log, self.dts)
- self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.publisher = DescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
ping_pong_nsd.generate_ping_pong_descriptors(
pingcount=1,
@@ -717,8 +826,6 @@
)
-
-
class ManoTestCase(rift.test.dts.AbstractDTSTest):
"""
DTS GI interface unittests
@@ -755,9 +862,9 @@
@staticmethod
def get_cal_account(account_type, account_name):
"""
- Creates an object for class RwcalYang.Clo
+ Creates an object for class RwcalYang.Cloud
"""
- account = rwcloudyang.CloudAccount()
+ account = rwcloudyang.CloudAcc()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
@@ -773,13 +880,33 @@
return account
@asyncio.coroutine
- def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+ def configure_project(self, project=None):
+ if project is None:
+ project = self.project
+
+ proj_xpath = "C,{}/project-config".format(project.prefix)
+ self.log.info("Creating project: {} with {}".
+ format(proj_xpath, project.config.as_dict()))
+ xpath_wild = "C,/rw-project:project/project-config"
+ yield from self.project_publisher.publish_project(project.config,
+ proj_xpath,
+ xpath_wild)
+
+ @asyncio.coroutine
+ def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1", project=None):
account = self.get_cal_account(cloud_type, cloud_name)
- account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
self.log.info("Configuring cloud-account: %s", account)
- yield from dts.query_create(account_xpath,
- rwdts.XactFlag.ADVISE,
- account)
+ if project is None:
+ project = self.project
+ xpath = project.add_project(XPaths.cloud_account(account.name))
+ xpath_wild = project.add_project(XPaths.cloud_account())
+
+ # account_xpath = project.add_project(
+ # "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name))
+ # yield from dts.query_create(account_xpath,
+ # rwdts.XactFlag.ADVISE,
+ # account)
+ yield from self.cloud_publisher.publish_account(account, xpath, xpath_wild)
@asyncio.coroutine
def wait_tasklets(self):
@@ -789,22 +916,74 @@
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
- self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
- self.querier = ManoQuerier(self.log, self.dts)
+ self.project = ManoProject(self.log,
+ name=DEFAULT_PROJECT)
+ self.project1 = ManoProject(self.log,
+ name='test-1')
+ self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.project_publisher = ProjectPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
+ self.cloud_publisher = CloudAccountPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
self.nsr_publisher = PingPongNsrConfigPublisher(
self.log,
loop,
self.dts,
self.ping_pong,
"mock_account",
+ self.project,
)
def test_create_nsr_record(self):
@asyncio.coroutine
+ def verify_projects(termination=False):
+ self.log.debug("Verifying projects = %s", XPaths.project())
+
+ accts = yield from self.querier._read_query(XPaths.project(),
+ project=False)
+ projs = []
+ for acc in accts:
+ self.log.debug("Project: {}".format(acc.as_dict()))
+ if acc.name not in projs:
+ projs.append(acc.name)
+ self.log.debug("Merged: {}".format(projs))
+ self.assertEqual(2, len(projs))
+
+ @asyncio.coroutine
+ def verify_cloud_accounts(termination=False):
+ self.log.debug("Verifying cloud accounts = %s", XPaths.cloud_account())
+
+ accts = yield from self.querier._read_query(XPaths.cloud_account())
+ self.assertEqual(2, len(accts))
+
+ accts = yield from self.querier._read_query(
+ self.project1.add_project(XPaths.cloud_account()), project=False)
+ self.assertEqual(1, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account",
+ project=False)
+ self.assertEqual(3, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='mock_account']",
+ project=False)
+ self.assertEqual(2, len(accts))
+
+ @asyncio.coroutine
def verify_cm_state(termination=False, nsrid=None):
self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
- #print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
loop_count = 10
loop_sleep = 10
@@ -878,7 +1057,7 @@
nsr_config = nsr_configs[0]
self.assertEqual(
- "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+ "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]/project-nsd:name".format(self.ping_pong.nsd_id),
nsr_config.input_parameter[0].xpath,
)
@@ -1031,6 +1210,9 @@
yield from verify_cm_state(termination, nsrid)
yield from verify_nsr_config_status(termination, nsrid)
+ yield from verify_cloud_account(termination)
+ yield from verify_project_record(termination)
+
@asyncio.coroutine
def verify_scale_instance(index):
self.log.debug("Verifying scale record path = %s, Termination=%d",
@@ -1074,12 +1256,20 @@
def run_test():
yield from self.wait_tasklets()
+ yield from self.configure_project()
+ yield from self.configure_project(project=self.project1)
cloud_type = "mock"
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
+ yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account",
+ project=self.project1)
+
+ yield from verify_cloud_accounts()
+ yield from verify_projects()
yield from self.ping_pong.publish_desciptors()
+ return
# Attempt deleting VNFD not in use
yield from self.ping_pong.update_ping_vnfd()
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
index 0a8d6ba..75e30d3 100644
--- a/rwlaunchpad/test/pytest/lp_kt_utm_test.py
+++ b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
@@ -41,10 +41,10 @@
gi.require_version('RwNsrYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
@@ -54,7 +54,7 @@
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
@@ -206,7 +206,7 @@
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-only"
nsr.short_name = "UTM-only"
@@ -255,14 +255,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
index 705565b..9b2fa8e 100644
--- a/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
+++ b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -39,13 +39,13 @@
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwNsrYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
@@ -55,7 +55,7 @@
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
@@ -223,7 +223,7 @@
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-WIMS"
nsr.short_name = "UTM-WIMS"
@@ -261,7 +261,7 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
pools = RwResourceMgrYang.ResourcePools.from_dict({
@@ -272,14 +272,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
diff --git a/rwlaunchpad/test/pytest/lp_test.py b/rwlaunchpad/test/pytest/lp_test.py
index b987b35..8957dfe 100644
--- a/rwlaunchpad/test/pytest/lp_test.py
+++ b/rwlaunchpad/test/pytest/lp_test.py
@@ -39,13 +39,13 @@
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwlogMgmtYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-gi.require_version('RwNsmYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
@@ -55,7 +55,7 @@
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
@@ -172,7 +172,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
nsr.short_name = "nsr_short_name"
@@ -181,8 +181,8 @@
nsr.admin_status = "ENABLED"
nsr.cloud_account = "openstack"
- param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+ param = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ param.xpath = '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:vendor'
param.value = "rift-o-matic"
nsr.input_parameter.append(param)
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
index 16a8990..bd51e1d 100644
--- a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
+++ b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
@@ -44,7 +44,19 @@
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+)
logging.basicConfig(level=logging.DEBUG)
@@ -172,7 +184,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
@@ -240,7 +252,7 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
pools = RwResourceMgrYang.ResourcePools.from_dict({
@@ -251,14 +263,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
index ed00a25..059217d 100644
--- a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
+++ b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,16 +35,26 @@
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-
-
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+ )
logging.basicConfig(level=logging.DEBUG)
@@ -172,7 +182,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
@@ -240,7 +250,7 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
pools = RwResourceMgrYang.ResourcePools.from_dict({
@@ -251,14 +261,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
diff --git a/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
index 4d6e345..db88015 100644
--- a/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
+++ b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
@@ -43,7 +43,19 @@
gi.require_version('RwNsmYang', '1.0')
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+ )
logging.basicConfig(level=logging.DEBUG)
@@ -171,7 +183,7 @@
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-Vrouter-TS-EPA-SRIOV"
nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV"
@@ -239,7 +251,7 @@
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
pools = RwResourceMgrYang.ResourcePools.from_dict({
@@ -250,14 +262,14 @@
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
diff --git a/rwlaunchpad/test/utest_nsr_handler.py b/rwlaunchpad/test/utest_nsr_handler.py
index ffab929..d921aca 100755
--- a/rwlaunchpad/test/utest_nsr_handler.py
+++ b/rwlaunchpad/test/utest_nsr_handler.py
@@ -47,8 +47,8 @@
class NsrDtsHandler(object):
""" The network service DTS handler """
- NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
- SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+ NSR_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr"
+ SCALE_INSTANCE_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
@@ -66,12 +66,12 @@
def get_scale_group_instances(self, nsr_id, group_name):
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
@@ -95,12 +95,12 @@
""" Register for Nsr create/update/delete/read requests from dts """
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
@@ -327,11 +327,11 @@
class XPaths(object):
@staticmethod
def nsr_config(nsr_id=None):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else ""))
def scaling_group_instance(nsr_id, group_name, instance_id):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
"[nsr:id='{}']".format(nsr_id) +
"/nsr:scaling-group" +
"[nsr:scaling-group-name-ref='{}']".format(group_name) +
@@ -377,7 +377,7 @@
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr1_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
@@ -388,7 +388,7 @@
block = xact.block_create()
block.add_query_update(
XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
@@ -409,7 +409,7 @@
block = xact.block_create()
block.add_query_create(
XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
@@ -427,7 +427,7 @@
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr2_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
diff --git a/rwlaunchpad/test/utest_ro_account.py b/rwlaunchpad/test/utest_ro_account.py
index aa485ef..2ac784a 100755
--- a/rwlaunchpad/test/utest_ro_account.py
+++ b/rwlaunchpad/test/utest_ro_account.py
@@ -24,18 +24,19 @@
import rift.test.dts
import rift.tasklets.rwnsmtasklet.cloud as cloud
import rift.tasklets.rwnsmtasklet.openmano_nsm as openmano_nsm
+from rift.mano.utils.project import ManoProject
import rw_peas
import gi
-gi.require_version('RwDtsYang', '1.0')
+gi.require_version('RwDts', '1.0')
from gi.repository import (
RwLaunchpadYang as launchpadyang,
RwDts as rwdts,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
RwNsrYang,
- RwNsdYang,
- VnfrYang
+ RwProjectNsdYang as RwNsdYang,
+ VnfrYang,
)
@@ -94,6 +95,7 @@
self.log.debug("STARTING - %s", test_id)
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = ManoProject(self.log)
self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
@@ -105,7 +107,7 @@
@rift.test.dts.async_test
def test_orch_account_create(self):
- orch = cloud.ROAccountPluginSelector(self.dts, self.log, self.loop, None)
+ orch = cloud.ROAccountPluginSelector(self.dts, self.log, self.loop, self.project, None)
yield from orch.register()
@@ -115,7 +117,7 @@
{'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
# Test rift-ro plugin CREATE
- w_xpath = "C,/rw-launchpad:resource-orchestrator"
+ w_xpath = self.project.add_project("C,/rw-launchpad:resource-orchestrator")
xpath = w_xpath
yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
yield from asyncio.sleep(5, loop=self.loop)
@@ -139,7 +141,7 @@
# Test update
mock_orch_acc.openmano.port = 9789
mock_orch_acc.openmano.host = "10.64.11.78"
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
+ yield from self.dts.query_update(w_xpath,
rwdts.XactFlag.ADVISE, mock_orch_acc)
assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
@@ -150,11 +152,11 @@
mock_orch_acc.openmano.port = 9788
with self.assertRaises(Exception):
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
+ yield from self.dts.query_update(w_xpath,
rwdts.XactFlag.ADVISE, mock_orch_acc)
# Test delete
- yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",
+ yield from self.dts.query_delete(w_xpath,
flags=rwdts.XactFlag.ADVISE)
assert orch.ro_plugin == None
@@ -170,4 +172,4 @@
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/rwlaunchpad/test/utest_rwmonitor.py b/rwlaunchpad/test/utest_rwmonitor.py
index 46c33b3..780a63e 100755
--- a/rwlaunchpad/test/utest_rwmonitor.py
+++ b/rwlaunchpad/test/utest_rwmonitor.py
@@ -60,6 +60,7 @@
UnknownAccountError,
)
import rw_peas
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
class wait_for_pending_tasks(object):
@@ -108,17 +109,17 @@
def make_nsr(ns_instance_config_ref=str(uuid.uuid4())):
- nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
nsr.ns_instance_config_ref = ns_instance_config_ref
return nsr
def make_vnfr(id=str(uuid.uuid4())):
- vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.id = id
return vnfr
def make_vdur(id=str(uuid.uuid4()), vim_id=str(uuid.uuid4())):
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.id = id
vdur.vim_id = vim_id
return vdur
@@ -149,7 +150,7 @@
mock = self.plugin_manager.plugin(self.account.name)
mock.set_impl(TestNfviMetricsCache.Plugin())
- self.vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ self.vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
self.vdur.id = "test-vdur-id"
self.vdur.vim_id = "test-vim-id"
self.vdur.vm_flavor.vcpu_count = 4
@@ -207,7 +208,7 @@
return True
def nfvi_metrics(self, account, vim_id):
- metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
metrics.vcpu.utilization = 0.5
return None, metrics
@@ -340,7 +341,7 @@
pass
def test_alarm_create_and_destroy(self):
- alarm = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_Alarms()
+ alarm = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_Alarms()
alarm.name = "test-alarm"
alarm.description = "test-description"
alarm.vdur_id = "test-vdur-id"
@@ -553,8 +554,9 @@
self.loop = asyncio.get_event_loop()
self.logger = logging.getLogger('test-logger')
+ self.project = ManoProject(self.logger, name=DEFAULT_PROJECT)
self.config = InstanceConfiguration()
- self.monitor = Monitor(self.loop, self.logger, self.config)
+ self.monitor = Monitor(self.loop, self.logger, self.config, self.project)
self.account = RwcalYang.CloudAccount(
name='test-cloud-account',
@@ -606,7 +608,7 @@
self.monitor.add_cloud_account(self.account)
# Create a VNFR associated with the cloud account
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.cloud_account = self.account.name
vnfr.id = 'test-vnfr-id'
@@ -644,7 +646,7 @@
to retrieve the NFVI metrics associated with the VDU.
"""
# Define the VDUR to be registered
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vm_flavor.vcpu_count = 4
vdur.vm_flavor.memory_mb = 100
vdur.vm_flavor.storage_gb = 2
@@ -680,11 +682,11 @@
the VDURs contained in the VNFR are unregistered.
"""
# Define the VDUR to be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-1'
vdur.id = 'test-vdur-id-1'
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.cloud_account = self.account.name
vnfr.id = 'test-vnfr-id'
@@ -699,7 +701,7 @@
# Add another VDUR to the VNFR and update the monitor. Both VDURs
# should now be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-2'
vdur.id = 'test-vdur-id-2'
@@ -730,7 +732,7 @@
Monitor.
"""
# Create the VNFR
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.cloud_account = self.account.name
vnfr.id = 'test-vnfr-id'
diff --git a/rwlaunchpad/test/utest_rwnsm.py b/rwlaunchpad/test/utest_rwnsm.py
index e125739..74b83c4 100755
--- a/rwlaunchpad/test/utest_rwnsm.py
+++ b/rwlaunchpad/test/utest_rwnsm.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-17 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,15 +25,24 @@
import uuid
import xmlrunner
+import gi
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('NsrYang', '1.0')
from gi.repository import (
- NsdYang,
- NsrYang,
- )
+ ProjectNsdYang,
+ NsrYang,
+)
+
logger = logging.getLogger('test-rwnsmtasklet')
import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet
import rift.tasklets.rwnsmtasklet.xpath as rwxpath
+from rift.mano.utils.project import ManoProject
+
+
+def prefix_project(xpath):
+ return "/rw-project:project" + xpath
class TestGiXpath(unittest.TestCase):
def setUp(self):
@@ -46,26 +55,27 @@
"""
# Create the initial NSD catalog
- nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_catalog = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog()
# Create an NSD, set its 'id', and add it to the catalog
nsd_id = str(uuid.uuid4())
nsd_catalog.nsd.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(
id=nsd_id,
)
)
# Retrieve the NSD using and xpath expression
- xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
+ xpath = prefix_project('/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]'.
+ format(nsd_id))
nsd = rwxpath.getxattr(nsd_catalog, xpath)
self.assertEqual(nsd_id, nsd.id)
# Modified the name of the NSD using an xpath expression
- rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name")
+ rwxpath.setxattr(nsd_catalog, xpath + "/project-nsd:name", "test-name")
- name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name")
+ name = rwxpath.getxattr(nsd_catalog, xpath + "/project-nsd:name")
self.assertEqual("test-name", name)
def test_nsd_scalar_fields(self):
@@ -74,24 +84,27 @@
"""
# Define a simple NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+
+ xpath = prefix_project('/project-nsd:nsd-catalog/project-nsd:nsd')
# Check that the unset fields are in fact set to None
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, xpath + "/project-nsd:name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, xpath + "/project-nsd:short-name"))
# Set the values of the 'name' and 'short-name' fields
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
+ rwxpath.setxattr(nsd, xpath + "/project-nsd:name", "test-name")
+ rwxpath.setxattr(nsd, xpath + "/project-nsd:short-name", "test-short-name")
# Check that the 'name' and 'short-name' fields are correctly set
- self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(nsd.name, rwxpath.getxattr(nsd, xpath + "/project-nsd:name"))
+ self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, xpath + "/project-nsd:short-name"))
class TestInputParameterSubstitution(unittest.TestCase):
def setUp(self):
- self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger)
+ project = ManoProject(logger)
+ self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger, project)
def test_null_arguments(self):
"""
@@ -99,8 +112,8 @@
config, no exception should be raised.
"""
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
self.substitute_input_parameters(None, None)
self.substitute_input_parameters(nsd, None)
@@ -115,26 +128,26 @@
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
nsd.name = "robert"
nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
)
)
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="alice",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="alice",
),
@@ -153,30 +166,30 @@
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsd.name = "robert"
- nsd.short_name = "bob"
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ # nsd.name = "robert"
+ # nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.extend([
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
),
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
label="NSD Short Name",
),
])
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="robert",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="bob",
),
diff --git a/rwmon/plugins/vala/CMakeLists.txt b/rwmon/plugins/vala/CMakeLists.txt
index aa900de..5fd03fc 100644
--- a/rwmon/plugins/vala/CMakeLists.txt
+++ b/rwmon/plugins/vala/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
VALA_PACKAGES
rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
rw_log_yang-1.0 rw_base_yang-1.0 rwmon_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
- rw_log-1.0 rwcal_yang-1.0
+ rw_log-1.0 rwcal_yang-1.0 rw_project_yang-1.0
VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwmon/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
@@ -51,7 +51,7 @@
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwmon_yang rwcal_yang rwlog_gi rwschema_yang
+ DEPENDS rwmon_yang rwcal_yang rwlog_gi rwschema_yang rwproject_yang
)
rift_install_vala_artifacts(
diff --git a/rwmon/plugins/yang/rwmon.yang b/rwmon/plugins/yang/rwmon.yang
index 20c364d..4e1612d 100644
--- a/rwmon/plugins/yang/rwmon.yang
+++ b/rwmon/plugins/yang/rwmon.yang
@@ -1,7 +1,7 @@
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,6 +43,15 @@
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-28 {
description
"Initial revision.";
@@ -50,26 +59,28 @@
"RIFT monitoring";
}
- container nfvi-metrics {
- rwpb:msg-new NfviMetrics;
+ augment "/rw-project:project" {
+ container nfvi-metrics {
+ rwpb:msg-new NfviMetrics;
- leaf timestamp {
- description
+ leaf timestamp {
+ description
"This is the time when the metric was captured. The timestamp is
represented as the number of seconds since the beginning of the Unix
epoch.";
- type decimal64 {
- fraction-digits 3;
+ type decimal64 {
+ fraction-digits 3;
+ }
}
+
+ uses manotypes:nfvi-metrics;
}
- uses manotypes:nfvi-metrics;
- }
+ container alarm {
+ rwpb:msg-new Alarm;
- container alarm {
- rwpb:msg-new Alarm;
-
- uses manotypes:alarm;
+ uses manotypes:alarm;
+ }
}
}
diff --git a/rwprojectmano/CMakeLists.txt b/rwprojectmano/CMakeLists.txt
new file mode 100644
index 0000000..78e225f
--- /dev/null
+++ b/rwprojectmano/CMakeLists.txt
@@ -0,0 +1,28 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwprojectmano)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+ plugins
+ )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwprojectmano/plugins/CMakeLists.txt b/rwprojectmano/plugins/CMakeLists.txt
new file mode 100644
index 0000000..21c8e94
--- /dev/null
+++ b/rwprojectmano/plugins/CMakeLists.txt
@@ -0,0 +1,24 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+ yang
+ rwprojectmano
+ )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwprojectmano/plugins/rwprojectmano/CMakeLists.txt b/rwprojectmano/plugins/rwprojectmano/CMakeLists.txt
new file mode 100644
index 0000000..16b4b76
--- /dev/null
+++ b/rwprojectmano/plugins/rwprojectmano/CMakeLists.txt
@@ -0,0 +1,40 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwprojectmano)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(
+ ${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+ FILES
+ rift/tasklets/${TASKLET_NAME}/__init__.py
+ rift/tasklets/${TASKLET_NAME}/tasklet.py
+ rift/tasklets/${TASKLET_NAME}/projectmano.py
+ rift/tasklets/${TASKLET_NAME}/rolesmano.py
+ COMPONENT ${PKG_LONG_NAME}
+ PYTHON3_ONLY)
diff --git a/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/__init__.py b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/__init__.py
new file mode 100644
index 0000000..24d7753
--- /dev/null
+++ b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/__init__.py
@@ -0,0 +1 @@
+from .tasklet import ProjectMgrManoTasklet
diff --git a/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/projectmano.py b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/projectmano.py
new file mode 100644
index 0000000..a59284a
--- /dev/null
+++ b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/projectmano.py
@@ -0,0 +1,358 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Project Manager tasklet is responsible for managing the Projects
+configurations required for Role Based Access Control feature.
+"""
+
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwProjectManoYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+ RwProjectManoYang,
+)
+
+import rift.tasklets
+from rift.mano.utils.project import (
+ NS_PROJECT,
+ get_add_delete_update_cfgs,
+ ProjectConfigCallbacks,
+)
+
+
+MANO_PROJECT_ROLES = [
+ { 'mano-role':"rw-project-mano:catalog-oper",
+ 'description':("The catalog-oper Role has read permission to nsd-catalog "
+ "and vnfd-catalog under specific Projects, "
+ "as identified by /rw-project:project/rw-project:name. The "
+ "catatlog-oper Role may also have execute permission to specific "
+ "non-mutating RPCs. This Role is intended for read-only access to "
+ "catalogs under a specific project.") },
+
+ { 'mano-role':"rw-project-mano:catalog-admin",
+ 'description':("The catalog-admin Role has full CRUDX permissions to vnfd and nsd "
+ "catalogs under specific Projects, as identified by "
+ "/rw-project:project/rw-project:name.") },
+
+ { 'mano-role':"rw-project-mano:lcm-oper",
+ 'description':("The lcm-oper Role has read permission to the VL, VNF and NS "
+ "records within a Project. The lcm-oper Role may also have "
+ "execute permission to specific non-mutating RPCs.") },
+
+ { 'mano-role':"rw-project-mano:lcm-admin",
+ 'description':("The lcm-admin Role has full CRUDX permissions to the VL, VNF "
+ "and NS records within a Project. The lcm-admin Role does "
+ "not provide general CRUDX permissions to the Project as a whole, "
+ "nor to the RIFT.ware platform in general.") },
+
+ { 'mano-role':"rw-project-mano:account-oper",
+ 'description':("The account-oper Role has read permission to the VIM, SDN, VCA "
+ "and RO accounts within a Project. The account-oper Role may also have "
+ "execute permission to specific non-mutating RPCs.") },
+
+ { 'mano-role':"rw-project-mano:account-admin",
+ 'description':("The account-admin Role has full CRUDX permissions to the VIM, SDN, VCA "
+ "and RO accounts within a Project. The account-admin Role does "
+ "not provide general CRUDX permissions to the Project as a whole, "
+ "nor to the RIFT.ware platform in general.") },
+]
+
+
+class ProjectDtsHandler(object):
+ XPATH = "C,/{}".format(NS_PROJECT)
+
+ def __init__(self, dts, log, callbacks):
+ self._dts = dts
+ self._log = log
+ self._callbacks = callbacks
+
+ self.reg = None
+ self.projects = []
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def add_project(self, cfg):
+ name = cfg.name
+ self.log.info("Adding project: {}".format(name))
+
+ if name not in self.projects:
+ self._callbacks.on_add_apply(name, cfg)
+ self.projects.append(name)
+ else:
+ self.log.error("Project already present: {}".
+ format(name))
+
+ def delete_project(self, name):
+ self._log.info("Deleting project: {}".format(name))
+ if name in self.projects:
+ self._callbacks.on_delete_apply(name)
+ self.projects.remove(name)
+ else:
+ self.log.error("Unrecognized project: {}".
+ format(name))
+
+ def update_project(self, cfg):
+ """ Update an existing project
+
+ Currently, we do not take any action on MANO for this,
+ so no callbacks are defined
+
+ Arguments:
+ msg - The project config message
+ """
+ name = cfg.name
+ self._log.info("Updating project: {}".format(name))
+ if name in self.projects:
+ pass
+ else:
+ self.log.error("Unrecognized project: {}".
+ format(name))
+
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got project apply config (xact: %s) (action: %s)", xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("Project being re-added after restart.")
+ self.add_project(cfg.name)
+ else:
+ # When RIFT first comes up, an INSTALL is called with the current config
+ # Since confd doesn't actally persist data this never has any data so
+ # skip this for now.
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="name",
+ )
+
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_project(cfg.name)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.add_project(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_project(cfg)
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+ name = msg.name
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ name, xact_info.query_action, msg)
+
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ if name in self.projects:
+ self._log.debug("Project {} already exists. Ignore request".
+ format(name))
+
+ else:
+ self._log.debug("Project {}: Invoking on_prepare add request".
+ format(name))
+ yield from self._callbacks.on_add_prepare(name, msg)
+
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire project got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if name in self.projects:
+ rc = yield from self._callbacks.on_delete_prepare(name)
+ if not rc:
+ self._log.error("Project {} should not be deleted".
+ format(name))
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+ else:
+ self._log.warning("Delete on unknown project: {}".
+ format(name))
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for project config using xpath: %s",
+ ProjectDtsHandler.XPATH)
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=ProjectDtsHandler.XPATH,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
+
+
+class ProjectHandler(object):
+ def __init__(self, tasklet, project_class):
+ self._tasklet = tasklet
+ self._log = tasklet.log
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+ self._class = project_class
+
+ self._log.debug("Creating project config handler")
+ self.project_cfg_handler = ProjectDtsHandler(
+ self._dts, self._log,
+ ProjectConfigCallbacks(
+ on_add_apply=self.on_project_added,
+ on_add_prepare=self.on_add_prepare,
+ on_delete_apply=self.on_project_deleted,
+ on_delete_prepare=self.on_delete_prepare,
+ )
+ )
+
+ def _get_tasklet_name(self):
+ return self._tasklet.tasklet_info.instance_name
+
+ def _get_project(self, name):
+ try:
+ proj = self._tasklet.projects[name]
+ except Exception as e:
+ self._log.exception("Project {} ({})not found for tasklet {}: {}".
+ format(name, list(self._tasklet.projects.keys()),
+ self._get_tasklet_name(), e))
+ raise e
+
+ return proj
+
+ def on_project_deleted(self, name):
+ self._log.debug("Project {} deleted".format(name))
+ try:
+ self._get_project(name).deregister()
+ except Exception as e:
+ self._log.exception("Project {} deregister for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ proj = self._tasklet.projects.pop(name)
+ del proj
+ except Exception as e:
+ self._log.exception("Project {} delete for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ def on_project_added(self, name, cfg):
+ self._log.debug("Project {} added to tasklet {}".
+ format(name, self._get_tasklet_name()))
+ self._get_project(name)._apply = True
+
+ @asyncio.coroutine
+ def on_add_prepare(self, name, msg):
+ self._log.debug("Project {} to be added to {}".
+ format(name, self._get_tasklet_name()))
+
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet)
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ yield from self._get_project(name).register()
+ except Exception as e:
+ self._log.exception("Project {} register for tasklet {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ self._log.debug("Project {} added to {}".
+ format(name, self._get_tasklet_name()))
+
+ @asyncio.coroutine
+ def on_delete_prepare(self, name):
+ self._log.debug("Project {} being deleted for tasklet {}".
+ format(name, self._get_tasklet_name()))
+ rc = yield from self._get_project(name).delete_prepare()
+ return rc
+
+ def register(self):
+ self.project_cfg_handler.register()
+
+
+class ProjectStateRolePublisher(rift.tasklets.DtsConfigPublisher):
+
+ def __init__(self, tasklet):
+ super().__init__(tasklet)
+ self.proj_state = RwProjectManoYang.YangData_RwProject_Project_ProjectState()
+ self.projects = set()
+ self.roles = MANO_PROJECT_ROLES
+
+ def get_xpath(self):
+ return "D,/rw-project:project/rw-project:project-state/rw-project-mano:mano-role"
+
+ def role_xpath(self, project, role):
+ return "/rw-project:project[rw-project:name='{}']".format(project) + \
+ "/rw-project:project-state/rw-project-mano:mano-role" + \
+ "[rw-project-mano:role='{}']".format(role['mano-role'])
+
+ def pb_role(self, role):
+ pbRole = self.proj_state.create_mano_role()
+ pbRole.role = role['mano-role']
+ pbRole.description = role['description']
+ return pbRole
+
+ def publish_roles(self, project):
+ if not project in self.projects:
+ self.projects.add(project)
+ for role in self.roles:
+ xpath = self.role_xpath(project, role)
+ pb_role = self.pb_role(role)
+ self.log.debug("publishing xpath:{}".format(xpath))
+ self._regh.update_element(xpath, pb_role)
+
+ def unpublish_roles(self, project):
+ if project in self.projects:
+ self.projects.remove(project)
+ for role in self.roles:
+ xpath = self.role_xpath(project, role)
+ self.log.debug("unpublishing xpath:{}".format(xpath))
+ self._regh.delete_element(xpath)
diff --git a/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/rolesmano.py b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/rolesmano.py
new file mode 100644
index 0000000..0083c06
--- /dev/null
+++ b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/rolesmano.py
@@ -0,0 +1,401 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Project Manager tasklet is responsible for managing the Projects
+configurations required for Role Based Access Control feature.
+"""
+
+
+import asyncio
+from enum import Enum
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+ RwRbacInternalYang,
+)
+
+import rift.tasklets
+from rift.tasklets.rwproject.project import (
+ StateMachine,
+ User,
+ UserState,
+ RoleKeys,
+ RoleKeysUsers,
+)
+from rift.mano.utils.project import (
+ NS_PROJECT,
+ get_add_delete_update_cfgs,
+)
+
+
+from .projectmano import MANO_PROJECT_ROLES
+
+
+class ProjectConfigSubscriber(object):
+ """Config subscriber for rw-user config"""
+
+ def __init__(self, project):
+ self.project_name = project.name
+ self._log = project.log
+ self._dts = project.dts
+
+ self.users = {}
+ self.pub = RoleConfigPublisher(project)
+
+ def get_xpath(self):
+ return "C,/{}[name='{}']/project-config/user".format(NS_PROJECT, self.project_name)
+
+
+ def role_inst(self, role, keys=None):
+ if not keys:
+ keys = self.project_name
+
+ r = RoleKeys()
+ r.role = role.role
+ r.keys = keys
+ return r
+
+ def delete_user(self, cfg):
+ user = User().pb(cfg)
+ self._log.error("Delete user {} for project {}".
+ format(user.key, self.project_name))
+ if user.key in self.users:
+ roles = self.users[user.key]
+ for role_key in list(roles):
+ self.delete_role(user, role_key)
+ self.users.pop(user.key)
+
+ def update_user(self, cfg):
+ user = User().pb(cfg)
+ self._log.debug("Update user {} for project {}".
+ format(user.key, self.project_name))
+ cfg_roles = {}
+ for cfg_role in cfg.mano_role:
+ r = self.role_inst(cfg_role)
+ cfg_roles[r.key] = r
+
+ if not user.key in self.users:
+ self.users[user.key] = set()
+ else:
+ #Check if any roles are deleted for the user
+ for role_key in list(self.users[user.key]):
+ if role_key not in cfg_roles:
+ self.delete_role(user, role_key)
+
+ for role_key in cfg_roles.keys():
+ if role_key not in self.users[user.key]:
+ self.update_role(user, cfg_roles[role_key])
+
+ def delete_role(self, user, role_key):
+ self._log.error("Delete role {} for user {}".
+ format(role_key, user.key))
+ user_key = user.key
+
+ try:
+ roles = self.users[user_key]
+ except KeyError:
+ roles = set()
+ self.users[user.key] = roles
+
+ if role_key in roles:
+ roles.remove(role_key)
+ self.pub.delete_role(role_key, user_key)
+
+ def update_role(self, user, role):
+ self._log.debug("Update role {} for user {}".
+ format(role.role, user.key))
+ user_key = user.key
+
+ try:
+ roles = self.users[user.key]
+ except KeyError:
+ roles = set()
+ self.users[user_key] = roles
+
+ role_key = role.key
+
+ if not role_key in roles:
+ roles.add(role_key)
+ self.pub.add_update_role(role_key, user_key)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got user apply config (xact: %s) (action: %s)",
+ xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("Project being re-added after restart.")
+ self.add_user(cfg)
+ else:
+ # When RIFT first comes up, an INSTALL is called with the current config
+ # Since confd doesn't actally persist data this never has any data so
+ # skip this for now.
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ # TODO: There is user-name and user-domain as keys. Need to fix
+ # this
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="user_name",
+ )
+
+ self._log.debug("Added: {}, Deleted: {}, Modified: {}".
+ format(add_cfgs, delete_cfgs, update_cfgs))
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_user(cfg)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.update_user(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_user(cfg)
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ self.project_name, xact_info.query_action, msg)
+
+ user = User().pb(msg)
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ if user.key in self.users:
+ self._log.debug("User {} update request".
+ format(user.key))
+
+ else:
+ self._log.debug("User {}: on_prepare add request".
+ format(user.key))
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the user got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if user.key in self.users:
+ self._log.debug("User {} being deleted".format(user.key))
+ else:
+ self._log.warning("Delete on unknown user: {}".
+ format(user.key))
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self.get_xpath()
+ self._log.debug("Registering for project config using xpath: %s",
+ xpath,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
+
+ yield from self.pub.register()
+ self.pub.create_project_roles()
+
+ def deregister(self):
+ self._log.debug("De-registering DTS handler for project {}".
+ format(self.project_name))
+
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ self.pub.deregister()
+
+
+class RoleConfigPublisher(rift.tasklets.DtsConfigPublisher):
+
+ def __init__(self, project):
+ super().__init__(project._tasklet)
+ self.project_name = project.name
+ self.rbac_int = RwRbacInternalYang.YangData_RwRbacInternal_RwRbacInternal()
+ self.roles = {}
+ self.proj_roles = [role['mano-role'] for role in MANO_PROJECT_ROLES]
+ self.proj_roles_published = False
+
+ def get_xpath(self):
+ return "D,/rw-rbac-internal:rw-rbac-internal/rw-rbac-internal:role"
+
+ def role_xpath(self, role_key):
+ return "D,/rw-rbac-internal:rw-rbac-internal/rw-rbac-internal:role" + \
+ "[rw-rbac-internal:role='{}']".format(role_key[0]) + \
+ "[rw-rbac-internal:keys='{}']".format(role_key[1])
+
+ def role_user_xpath(self, role_key, user_key):
+ return self.role_xpath(role_key) + \
+ "/rw-rbac-internal:user" + \
+ "[rw-rbac-internal:user-name='{}']".format(user_key[1]) + \
+ "[rw-rbac-internal:user-domain='{}']".format(user_key[0])
+
+ def create_project_roles(self):
+ for name in self.proj_roles:
+ role = RoleKeys()
+ role.role = name
+ role.keys = self.project_name
+ self.create_project_role(role)
+
+ def create_project_role(self, role):
+ self.log.error("Create project role for {}: {}".
+ format(self.project_name, role.role))
+ xpath = self.role_xpath(role.key)
+ pb_role = self.pb_role(role)
+ self._regh.update_element(xpath, pb_role)
+
+ def delete_project_roles(self):
+ for name in self.proj_roles:
+ role = RoleKeys()
+ role.role = name
+ role.keys = self.project_name
+ self.delete_project_role(role)
+
+ def delete_project_role(self, role):
+ self.log.error("Delete project role for {}: {}".
+ format(self.project_name, role.role))
+ xpath = self.role_xpath(role.key)
+ self._regh.delete_element(xpath)
+
+ def create_role(self, role_key, user_key):
+ return RoleKeysUsers(role_key, user_key)
+
+ def pb_role(self, role):
+
+ pbRole = self.rbac_int.create_role()
+ pbRole.role = role.role
+ pbRole.keys = role.keys
+ pbRole.state_machine.state = role.state.name
+
+ return pbRole
+
+ def pb_role_user(self, role, user):
+
+ pbRole = self.rbac_int.create_role()
+ pbRole.role = role.role
+ pbRole.keys = role.keys
+
+ pbUser = pbRole.create_user()
+ pbUser.user_name = user.user_name
+ pbUser.user_domain = user.user_domain
+ pbUser.state_machine.state = user.state.name
+
+ pbRole.user.append(pbUser)
+
+ return pbRole
+
+ def add_update_role(self, role_key, user_key):
+ update = True
+ try:
+ role = self.roles[role_key]
+ except KeyError:
+ role = RoleKeysUsers(role_key)
+ self.roles[role_key] = role
+ update = False
+
+ try:
+ user = role.user(user_key)
+ except KeyError:
+ user = UserState(user_key)
+ role.add_user(user)
+ update = False
+
+ if update:
+ user.state = StateMachine.new
+ else:
+ user.state = StateMachine.new
+
+ xpath = self.role_xpath(role_key)
+ self.log.debug("update role: {} user: {} ".format(role_key, user_key))
+
+
+ pb_role_user = self.pb_role_user(role, user)
+
+ self._regh.update_element(xpath, pb_role_user)
+
+ def delete_role(self, role_key, user_key):
+ try:
+ role = self.roles[role_key]
+ user = role.user(user_key)
+ except KeyError:
+ return
+
+ user.state = StateMachine.delete
+ xpath = self.role_xpath(role_key)
+ self.log.error("deleting role: {} user: {} ".format(role_key, user_key))
+
+ pb_role = self.pb_role_user(role, user)
+ self._regh.update_element(xpath, pb_role)
+
+ def do_prepare(self, xact_info, action, ks_path, msg):
+ """Handle on_prepare.
+ """
+ self.log.debug("do_prepare: action: {}, path: {} ks_path, msg: {}".format(action, ks_path, msg))
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ # TODO: See if we need this as this would be called in the platform also
+ # role_key = tuple([msg.role, msg.keys])
+
+ # state = msg.state_machine.state
+ # if state == 'init_done':
+ # msg.state_machine.state = 'active'
+ # xpath = self.role_xpath(role_key)
+ # self._regh.update_element(xpath, msg)
+
+ # for user in msg.users:
+ # user_key = tuple([user.user_domain, user.user_name])
+ # state = user.state_machine.state
+ # if state == 'init_done':
+ # user.state_machine.state = 'active'
+ # xpath = self.role_xpath(role_key)
+ # self._regh.update_element(xpath, msg)
+
+ def deregister(self):
+ if self._regh:
+ self.delete_project_roles()
+ self._regh.deregister()
+ self._regh = None
diff --git a/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/tasklet.py b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/tasklet.py
new file mode 100644
index 0000000..fa392d6
--- /dev/null
+++ b/rwprojectmano/plugins/rwprojectmano/rift/tasklets/rwprojectmano/tasklet.py
@@ -0,0 +1,165 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Mano Project Manager tasklet is responsible for managing the Projects
+configurations required for Role Based Access Control feature.
+"""
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwProjectManoYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ RwLog as rwlog,
+ RwProjectYang,
+ RwProjectManoYang,
+)
+
+import rift.tasklets
+
+from rift.mano.utils.project import (
+ ManoProject,
+ )
+
+from .projectmano import (
+ ProjectHandler,
+ ProjectStateRolePublisher,
+)
+
+from .rolesmano import (
+ RoleConfigPublisher,
+ ProjectConfigSubscriber,
+)
+
+
+class ProjectMgrManoProject(ManoProject):
+
+ def __init__(self, name, tasklet):
+ super(ProjectMgrManoProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self.project_sub = ProjectConfigSubscriber(self)
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.info("Initializing the ProjectMgrMano for %s", self.name)
+ yield from self.project_sub.register()
+ self.tasklet.project_state_role_pub.publish_roles(self.name)
+
+ def deregister(self):
+ self._log.debug("De-register project %s", self.name)
+ self.tasklet.project_state_role_pub.unpublish_roles(self.name)
+ self.project_sub.deregister()
+
+
+class ProjectMgrManoTasklet(rift.tasklets.Tasklet):
+ """Tasklet that manages the Project config
+ """
+ def __init__(self, *args, **kwargs):
+ """Constructs a ProjectManager tasklet"""
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+
+ self.projects = {}
+
+ except Exception as e:
+ self.log.exception(e)
+
+
+ def start(self):
+ """Callback that gets invoked when a Tasklet is started"""
+ super().start()
+ self.log.info("Starting Mano Project Manager Tasklet")
+
+ self.log.debug("Registering with dts")
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwProjectManoYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api Object: %s", self.dts)
+
+ def stop(self):
+ """Callback that gets invoked when Tasklet is stopped"""
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ """DTS Init state handler"""
+ try:
+ self.log.info("Registering for Project Config")
+ self.project_handler = ProjectHandler(self, ProjectMgrManoProject)
+ self.project_handler.register()
+
+ self.project_state_role_pub = ProjectStateRolePublisher(self)
+ yield from self.project_state_role_pub.register()
+
+ except Exception as e:
+ self.log.exception("Registering for project failed: {}".format(e))
+
+ @asyncio.coroutine
+ def run(self):
+ """DTS run state handler"""
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
+ def config_ready(self):
+ """Subscription is complete and ready to start publishing."""
+ self.log.debug("Configuration Ready")
+
+
+# vim: ts=4 sw=4 et
diff --git a/rwprojectmano/plugins/rwprojectmano/rwprojectmano.py b/rwprojectmano/plugins/rwprojectmano/rwprojectmano.py
new file mode 100755
index 0000000..ac7ac47
--- /dev/null
+++ b/rwprojectmano/plugins/rwprojectmano/rwprojectmano.py
@@ -0,0 +1,22 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import rift.tasklets.rwprojectmano
+
+class Tasklet(rift.tasklets.rwprojectmano.ProjectMgrManoTasklet):
+ pass
+
+# vim: sw=4
diff --git a/rwprojectmano/plugins/yang/CMakeLists.txt b/rwprojectmano/plugins/yang/CMakeLists.txt
new file mode 100644
index 0000000..d99f941
--- /dev/null
+++ b/rwprojectmano/plugins/yang/CMakeLists.txt
@@ -0,0 +1,26 @@
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+rift_add_yang_target(
+ TARGET rwprojectmano_yang
+ YANG_FILES
+ rw-project-mano.yang
+ GIR_PATHS ${CMAKE_CURRENT_BINARY_DIR}
+ COMPONENT ${PKG_LONG_NAME}
+ LIBRARIES
+ rw_project_yang_gen
+ )
diff --git a/rwprojectmano/plugins/yang/Makefile b/rwprojectmano/plugins/yang/Makefile
new file mode 100644
index 0000000..2b691a8
--- /dev/null
+++ b/rwprojectmano/plugins/yang/Makefile
@@ -0,0 +1,36 @@
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+#
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwprojectmano/plugins/yang/rw-project-mano.tailf.yang b/rwprojectmano/plugins/yang/rw-project-mano.tailf.yang
new file mode 100644
index 0000000..61d7fe0
--- /dev/null
+++ b/rwprojectmano/plugins/yang/rw-project-mano.tailf.yang
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+module rw-project-mano-tailf
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-mano-tailf";
+ prefix "rw-project-mano-tailf";
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import tailf-common {
+ prefix tailf;
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-04-04 {
+ description
+ "Initial revision.";
+ }
+
+ tailf:annotate "/rw-project:project/rw-project:project-state/rw-project-mano:mano-role" {
+ tailf:callpoint rw_callpoint;
+ }
+}
diff --git a/rwprojectmano/plugins/yang/rw-project-mano.yang b/rwprojectmano/plugins/yang/rw-project-mano.yang
new file mode 100644
index 0000000..215236c
--- /dev/null
+++ b/rwprojectmano/plugins/yang/rw-project-mano.yang
@@ -0,0 +1,158 @@
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+module rw-project-mano
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-mano";
+ prefix "rw-project-mano";
+
+ import rw-rbac-base {
+ prefix "rw-rbac-base";
+ }
+
+ import rw-pb-ext {
+ prefix "rw-pb-ext";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-rbac-internal {
+ prefix "rw-rbac-internal";
+ }
+
+ revision 2017-03-08 {
+ description
+ "Initial revision. This YANG file defines the
+ MANO extentions for project based tenancy";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ identity catalog-oper {
+ base rw-project:project-role;
+ description
+ "The catalog-oper Role has read permission to the VNFD and NSD
+ catalogs within a Project. The catalog-oper Role may also have
+ execute permission to specific non-mutating RPCs.";
+ }
+
+ identity catalog-admin {
+ base rw-project:project-role;
+ description
+ "The catalog-admin Role has full CRUDX permissions to the VNFD
+ and NSD catalogs within a Project. The catalog-admin Role does
+ not provide general CRUDX permissions to the Project as a whole,
+ nor to the RIFT.ware platform in general.";
+ }
+
+ identity lcm-oper {
+ base rw-project:project-role;
+ description
+ "The lcm-oper Role has read permission to the VL, VNF and NS
+ records within a Project. The lcm-oper Role may also have
+ execute permission to specific non-mutating RPCs.";
+ }
+
+ identity lcm-admin {
+ base rw-project:project-role;
+ description
+ "The lcm-admin Role has full CRUDX permissions to the VL, VNF
+ and NS records within a Project. The lcm-admin Role does
+ not provide general CRUDX permissions to the Project as a whole,
+ nor to the RIFT.ware platform in general.";
+ }
+
+ identity account-oper {
+ base rw-project:project-role;
+ description
+ "The account-oper Role has read permission to the VIM, SDN, VCA
+ and RO accounts within a Project. The account-oper Role may also have
+ execute permission to specific non-mutating RPCs.";
+ }
+
+ identity account-admin {
+ base rw-project:project-role;
+ description
+ "The account-admin Role has full CRUDX permissions to the VIM, SDN, VCA
+ and RO accounts within a Project. The account-admin Role does
+ not provide general CRUDX permissions to the Project as a whole,
+ nor to the RIFT.ware platform in general.";
+ }
+
+ augment /rw-project:project/rw-project:project-config/rw-project:user {
+ description
+ "Configuration for MANO application-specific Roles.";
+
+ list mano-role {
+ description
+ "The list of MANO application-specific Roles the User has been
+ assigned, within the enclosing Project.";
+
+ key "role";
+ uses rw-rbac-base:simple-role;
+ }
+ }
+
+ augment /rw-project:project/rw-project:project-state/rw-project:user {
+ description
+ "The state for MANO application-specific Roles.";
+
+ list mano-role {
+ description
+ "The state of the MANO application-specific Role the User has
+ been assigned.";
+
+ key "role";
+ uses rw-rbac-base:simple-role;
+
+ leaf state {
+ description
+ "The assignment of a User to a Role may be an asynchronous
+ operation. This value indicates whether the Role
+ assignment is active. If the value is 'active', then the
+ assignment is complete and active. Any other value
+ indicates that Role assignment is in a transitional or
+ failed state, as described in the value.";
+ type string;
+ }
+ }
+ }
+
+ augment /rw-project:project/rw-project:project-state {
+ description
+ "State for MANO application-specific Roles.";
+
+ list mano-role {
+ description
+ "The set of Roles that may be configured into
+ /rw-project:project/rw-project:project-config/rw-project:user/
+ rw-project-mano:mano-role/rw-project-mano:role.";
+
+ key "role";
+ uses rw-rbac-base:simple-role;
+
+ leaf description {
+ description
+ "A description of the Role.";
+ type string;
+ }
+ }
+ }
+}
diff --git a/rwso/plugins/cli/cli_so_schema_listing.txt b/rwso/plugins/cli/cli_so_schema_listing.txt
index 3031b19..189acb6 100644
--- a/rwso/plugins/cli/cli_so_schema_listing.txt
+++ b/rwso/plugins/cli/cli_so_schema_listing.txt
@@ -1,4 +1,5 @@
rw-base
+rw-project
rw-mgmtagt
rw-manifest
rw-vcs