rw.core.mano-mano_yang_ylib-1.0 \
rw.core.mano-common-1.0 \
rw.core.mano-rwsdn_yang_ylib-1.0 \
+ rw.core.mano-rwsdnal_yang_ylib-1.0 \
+ rw.core.mano-rwsdn-1.0 \
rw.core.mano-mano-types_yang_ylib-1.0 \
rw.core.mano-rwcal-cloudsim-1.0 \
rw.core.mano-rwcal-1.0 \
# Yang targets
##
+rift_add_yang_target(
+ TARGET rwsdn_yang
+ YANG_FILES rw-sdn.yang
+ COMPONENT ${PKG_LONG_NAME}
+ LIBRARIES
+ rwsdnal_yang_gen
+ DEPENDS
+ rwsdnal_yang
+ ASSOCIATED_FILES
+ rw-sdn.role.xml
+)
+
rift_add_yang_target(
TARGET rwcloud_yang
- YANG_FILES rw-cloud.yang rw-sdn.yang
+ YANG_FILES rw-cloud.yang
COMPONENT ${PKG_LONG_NAME}
LIBRARIES
rwsdn_yang_gen
mano-types_yang_gen
DEPENDS
rwcal_yang
- rwsdn_yang
- rwprojectmano_yang
- mano-types_yang
+ rwsdnal_yang
ASSOCIATED_FILES
rw-cloud.role.xml
- rw-sdn.role.xml
)
rift_add_yang_target(
prefix "rwpb";
}
- import mano-types {
- prefix "manotypes";
- }
-
import rw-project {
prefix "rw-project";
}
- import rwsdn {
- prefix "rwsdn";
+ import rwsdnal {
+ prefix "rwsdnal";
+ }
+
+ import mano-types {
+ prefix "manotypes";
}
revision 2017-02-08 {
"Initial revision.";
}
- augment "/rw-project:project" {
- container sdn {
+augment "/rw-project:project" {
+ container sdn {
+ rwpb:msg-new SDNAccountConfig;
list account {
- rwpb:msg-new SDNAccountConfig;
+ rwpb:msg-new SDNAccount;
key "name";
leaf name {
- type string;
+ type string;
}
- uses rwsdn:sdn-provider-auth;
- uses rwsdn:connection-status;
+ uses rwsdnal:sdn-provider-auth;
+ uses rwsdnal:connection-status;
}
}
}
PYTHON3_ONLY
)
+rift_python_install_tree(
+ FILES
+ rift/mano/sdn/__init__.py
+ rift/mano/sdn/accounts.py
+ rift/mano/sdn/config.py
+ rift/mano/sdn/operdata.py
+ COMPONENT ${PKG_LONG_NAME}
+ PYTHON3_ONLY
+ )
+
rift_python_install_tree(
FILES
rift/mano/config_agent/operdata.py
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .accounts import (
+ SDNAccount,
+ SDNAccountCalError,
+ )
+
+from .config import (
+ SDNAccountConfigSubscriber,
+ SDNAccountConfigCallbacks
+ )
+
+from .operdata import (
+ SDNAccountDtsOperdataHandler,
+)
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+import asyncio
+from gi import require_version
+require_version('RwTypes', '1.0')
+require_version('RwsdnalYang', '1.0')
+require_version('RwSdnYang', '1.0')
+
+from gi.repository import (
+ RwTypes,
+ RwsdnalYang,
+ RwSdnYang,
+ )
+import rw_peas
+
+if sys.version_info < (3, 4, 4):
+ asyncio.ensure_future = asyncio.async
+
+
+class PluginLoadingError(Exception):
+ pass
+
+
+class SDNAccountCalError(Exception):
+ pass
+
+
+class SDNAccount(object):
+ def __init__(self, log, rwlog_hdl, account_msg):
+ self._log = log
+ self._account_msg = account_msg.deep_copy()
+
+ self._sdn_plugin = None
+ self._engine = None
+
+ self._sdn = self.plugin.get_interface("Topology")
+ self._sdn.init(rwlog_hdl)
+
+ self._status = RwsdnalYang.SDNAccount_ConnectionStatus(
+ status="unknown",
+ details="Connection status lookup not started"
+ )
+
+ self._validate_task = None
+
+ @property
+ def plugin(self):
+ if self._sdn_plugin is None:
+ try:
+ self._sdn_plugin = rw_peas.PeasPlugin(
+ getattr(self._account_msg, self.account_type).plugin_name,
+ 'RwSdn-1.0',
+ )
+
+ except AttributeError as e:
+ raise PluginLoadingError(str(e))
+
+ self._engine, _, _ = self._sdn_plugin()
+
+ return self._sdn_plugin
+
+ def _wrap_status_fn(self, fn, *args, **kwargs):
+ ret = fn(*args, **kwargs)
+ rw_status = ret[0]
+ if rw_status != RwTypes.RwStatus.SUCCESS:
+ msg = "%s returned %s" % (fn.__name__, str(rw_status))
+ self._log.error(msg)
+ raise SDNAccountCalError(msg)
+
+ # If there was only one other return value besides rw_status, then just
+ # return that element. Otherwise return the rest of the return values
+ # as a list.
+ return ret[1] if len(ret) == 2 else ret[1:]
+
+ @property
+ def sdn(self):
+ return self._sdn
+
+ @property
+ def name(self):
+ return self._account_msg.name
+
+ @property
+ def account_msg(self):
+ return self._account_msg
+
+ @property
+ def sdnal_account_msg(self):
+ return RwsdnalYang.SDNAccount.from_dict(
+ self.account_msg.as_dict(),
+ ignore_missing_keys=True,
+ )
+
+ def sdn_account_msg(self, account_dict):
+ self._account_msg = RwSdnYang.SDNAccount.from_dict(account_dict)
+
+ @property
+ def account_type(self):
+ return self._account_msg.account_type
+
+ @property
+ def connection_status(self):
+ return self._status
+
+ def update_from_cfg(self, cfg):
+ self._log.debug("Updating parent SDN Account to %s", cfg)
+
+ raise NotImplementedError("Update SDN account not yet supported")
+
+
+ @asyncio.coroutine
+ def validate_sdn_account_credentials(self, loop):
+ self._log.debug("Validating SDN Account credentials %s", self._account_msg)
+ self._status = RwSdnYang.SDNAccount_ConnectionStatus(
+ status="validating",
+ details="SDN account connection validation in progress"
+ )
+ rwstatus, status = yield from loop.run_in_executor(
+ None,
+ self._sdn.validate_sdn_creds,
+ self.sdnal_account_msg,
+ )
+ if rwstatus == RwTypes.RwStatus.SUCCESS:
+ self._status = RwSdnYang.SDNAccount_ConnectionStatus.from_dict(status.as_dict())
+ else:
+ self._status = RwSdnYang.SDNAccount_ConnectionStatus(
+ status="failure",
+ details="Error when calling SDNAL validate SDN creds"
+ )
+
+ self._log.info("Got SDN account validation response: %s", self._status)
+
+ def start_validate_credentials(self, loop):
+ if self._validate_task is not None:
+ self._validate_task.cancel()
+ self._validate_task = None
+
+ self._validate_task = asyncio.ensure_future(
+ self.validate_sdn_account_credentials(loop),
+ loop=loop
+ )
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ )
+
+from rift.mano.utils.project import get_add_delete_update_cfgs
+
+from . import accounts
+
+
+class SDNAccountNotFound(Exception):
+ pass
+
+
+class SDNAccountError(Exception):
+ pass
+
+
+class SDNAccountConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_add_prepare=None,
+ on_delete_apply=None, on_delete_prepare=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_add_prepare = on_add_prepare
+ self.on_delete_apply = on_delete_apply
+ self.on_delete_prepare = on_delete_prepare
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+ for f in ('on_add_prepare', 'on_delete_prepare'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, prepare_noop)
+ continue
+
+ if not asyncio.iscoroutinefunction(ref):
+ raise ValueError("%s must be a coroutine" % f)
+
+
+class SDNAccountConfigSubscriber(object):
+ XPATH = "C,/rw-sdn:sdn/rw-sdn:account"
+
+ def __init__(self, dts, log, rwlog_hdl, sdn_callbacks, acctstore):
+ self._dts = dts
+ self._log = log
+ self._rwlog_hdl = rwlog_hdl
+ self._reg = None
+
+ self.accounts = acctstore
+
+ self._sdn_callbacks = sdn_callbacks
+
+ def add_account(self, account_msg):
+ self._log.info("adding sdn account: {}".format(account_msg))
+
+ account = accounts.SDNAccount(self._log, self._rwlog_hdl, account_msg)
+ self.accounts[account.name] = account
+
+ self._sdn_callbacks.on_add_apply(account)
+
+ def delete_account(self, account_name):
+ self._log.info("deleting sdn account: {}".format(account_name))
+ del self.accounts[account_name]
+
+ self._sdn_callbacks.on_delete_apply(account_name)
+
+ def update_account(self, account_msg):
+ """ Update an existing sdn account
+
+ In order to simplify update, turn an update into a delete followed by
+ an add. The drawback to this approach is that we will not support
+ updates of an "in-use" sdn account, but this seems like a
+ reasonable trade-off.
+
+
+ Arguments:
+ account_msg - The sdn account config message
+ """
+ self._log.info("updating sdn account: {}".format(account_msg))
+
+ self.delete_account(account_msg.name)
+ self.add_account(account_msg)
+
+ def deregister(self):
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, _):
+ self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("SDN account being re-added after restart.")
+ if not cfg.has_field('account_type'):
+ raise SDNAccountError("New SDN account must contain account_type field.")
+ self.add_account(cfg)
+ else:
+ # When RIFT first comes up, an INSTALL is called with the current config
+ # Since confd doesn't actally persist data this never has any data so
+ # skip this for now.
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="name",
+ )
+
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_account(cfg.name)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.add_account(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_account(cfg)
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for SDN Account """
+
+ action = xact_info.query_action
+ self._log.debug("SDN account on_prepare config received (action: %s): %s",
+ xact_info.query_action, msg)
+
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ if msg.name in self.accounts:
+ self._log.debug("SDN account already exists. Invoking update request")
+
+ # Since updates are handled by a delete followed by an add, invoke the
+ # delete prepare callbacks to give clients an opportunity to reject.
+ yield from self._sdn_callbacks.on_delete_prepare(msg.name)
+
+ else:
+ self._log.debug("SDN account does not already exist. Invoking on_prepare add request")
+ if not msg.has_field('account_type'):
+ raise SDNAccountError("New sdn account must contain account_type field.")
+
+ account = accounts.SDNAccount(self._log, self._rwlog_hdl, msg)
+ yield from self._sdn_callbacks.on_add_prepare(account)
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire SDN account got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ yield from self._sdn_callbacks.on_delete_prepare(msg.name)
+
+ else:
+ self._log.error("Deleting individual fields for SDN account not supported")
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for SDN Account config using xpath: %s",
+ SDNAccountConfigSubscriber.XPATH,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=SDNAccountConfigSubscriber.XPATH,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+import rift.tasklets
+
+from gi.repository import(
+ RwSdnYang,
+ RwDts as rwdts,
+ )
+
+
+class SDNAccountNotFound(Exception):
+ pass
+
+
+class SDNAccountDtsOperdataHandler(object):
+ def __init__(self, dts, log, loop):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+
+ self.sdn_accounts = {}
+ self._oper = None
+ self._rpc = None
+
+ def add_sdn_account(self, account):
+ self.sdn_accounts[account.name] = account
+ account.start_validate_credentials(self._loop)
+
+ def delete_sdn_account(self, account_name):
+ del self.sdn_accounts[account_name]
+
+ def get_saved_sdn_accounts(self, sdn_account_name):
+ ''' Get SDN Account corresponding to passed name, or all saved accounts if name is None'''
+ saved_sdn_accounts = []
+
+ if sdn_account_name is None or sdn_account_name == "":
+ sdn_accounts = list(self.sdn_accounts.values())
+ saved_sdn_accounts.extend(sdn_accounts)
+ elif sdn_account_name in self.sdn_accounts:
+ account = self.sdn_accounts[sdn_account_name]
+ saved_sdn_accounts.append(account)
+ else:
+ errstr = "SDN account {} does not exist".format(sdn_account_name)
+ raise KeyError(errstr)
+
+ return saved_sdn_accounts
+
+ def _register_show_status(self):
+ def get_xpath(sdn_name=None):
+ return "D,/rw-sdn:sdn/account{}/connection-status".format(
+ "[name='%s']" % sdn_name if sdn_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ self._log.debug("Got show SDN connection status request: %s", ks_path.create_string())
+ path_entry = RwSdnYang.SDNAccount.schema().keyspec_to_entry(ks_path)
+ sdn_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_sdn_accounts(sdn_account_name)
+ for account in saved_accounts:
+ connection_status = account.connection_status
+ self._log.debug("Responding to SDN connection status request: %s", connection_status)
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=get_xpath(account.name),
+ msg=account.connection_status,
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._oper = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ def _register_validate_rpc(self):
+ def get_xpath():
+ return "/rw-sdn:update-sdn-status"
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ if self._project and not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
+ if not msg.has_field("sdn_account"):
+ raise SDNAccountNotFound("SDN account name not provided")
+
+ sdn_account_name = msg.sdn_account
+ try:
+ account = self.sdn_accounts[sdn_account_name]
+ except KeyError:
+ raise SDNAccountNotFound("SDN account name %s not found" % sdn_account_name)
+
+ account.start_validate_credentials(self._loop)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._rpc = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def register(self):
+ yield from self._register_show_status()
+ yield from self._register_validate_rpc()
+
+ def deregister(self):
+ if self._oper:
+ self._oper.deregister()
+ self._oper = None
+
+ if self._rpc:
+ self._rpc.deregister()
+ self._rpc = None
description: >-
Type of Service Function.
NOTE- This needs to map with Service Function Type in ODL to
- support VNFFG. Service Function Type is manadatory param in ODL
+ support VNFFG. Service Function Type is mandatory param in ODL
SFC.
required: false
tosca.capabilities.Compute.Container.Architecture:
description: >-
Type of Service Function.
NOTE- This needs to map with Service Function Type in ODL to
- support VNFFG. Service Function Type is manadatory param in ODL
+ support VNFFG. Service Function Type is mandatory param in ODL
SFC.
required: false
tosca.capabilities.Compute.Container.Architecture:
if proc.returncode != 0:
self._log.error(
- "Openmano command failed (rc=%s) with stdout: %s",
- proc.returncode, stdout
+ "Openmano command %s failed (rc=%s) with stdout: %s",
+ cmd_args[1], proc.returncode, stdout
)
raise OpenmanoCommandFailed(stdout)
if vdu.vm_flavor.has_field("vcpu_count"):
vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1)
- else:
- if vdu.vm_flavor.has_field("vcpu_count") and vdu.vm_flavor.vcpu_count:
- vnfc["vcpus"] = vdu.vm_flavor.vcpu_count
+ if vdu.vm_flavor.has_field("vcpu_count") and vdu.vm_flavor.vcpu_count:
+ vnfc["vcpus"] = vdu.vm_flavor.vcpu_count
- if vdu.vm_flavor.has_field("memory_mb") and vdu.vm_flavor.memory_mb:
- vnfc["ram"] = vdu.vm_flavor.memory_mb
+ if vdu.vm_flavor.has_field("memory_mb") and vdu.vm_flavor.memory_mb:
+ vnfc["ram"] = vdu.vm_flavor.memory_mb
if vdu.has_field("hypervisor_epa"):
rwprojectmano_yang_gen
DEPENDS
rwcloud_yang
+ rwsdn_yang
rwconfig_agent_yang
rwprojectmano_yang
ASSOCIATED_FILES
}
leaf parameter-pool {
- description "NSD Parameter pool name to use for this paramter";
+ description "NSD parameter pool name to use for this parameter";
type string;
}
leaf read-only {
description
- "The value should be greyed out by the UI.
+ "The value should be dimmed by the UI.
Only applies to parameters with default values.";
type boolean;
}
leaf image {
description
"Image name for the software image.
- If the image name is found within the VNF packaage it will
- be uploaded to all cloud accounts during onboarding process.
- Otherwise, the image must be added to the cloud account with
+ If the image name is found within the VNF package it will
+ be uploaded to all VIM accounts during onboarding process.
+ Otherwise, the image must be added to the VIM account with
the same name as entered here.
";
type string;
}
leaf value {
+ description "Value of the configuration primitive.";
type string;
}
}
container vnf-configuration {
rwpb:msg-new VnfConfiguration;
description
- "Information regarding the VNF configuration
- is captured here. Note that if the NS contains
- multiple instances of the same VNF, each instance
- of the VNF may have different configuration";
+ "Information about the VNF configuration. Note:
+ If the NS contains multiple instances of the
+ same VNF, each instance could have a different
+ configuration.";
choice config-method {
description
leaf protocol {
description
- "Protocol to use for netconf (e.g. ssh)";
+ "Protocol to use for NETCONF such as ssh";
type enumeration {
enum None;
enum ssh;
leaf port {
description
- "Port for the netconf server.";
+ "Port for the NETCONF server.";
type inet:port-number;
}
}
description
"Use custom script for configuring the VNF.
This script is executed in the context of
- Orchestrator.";
+ Orchestrator (The same system and environment
+ as the Launchpad).";
container script {
leaf script-type {
description
leaf username {
description
- "username for configuration.";
+ "User name for configuration.";
type string;
}
container config-attributes {
description
- "Miscelaneous input parameters to be considered
+ "Miscellaneous input parameters to be considered
while processing the NSD to apply configuration";
leaf config-priority {
description
- "Configuration priority - order of confgiration
- to be applied to each VNF in this NS,
- low number gets precedence over high number";
+ "Configuration priority - order of configuration
+         to be applied to each VNF in this NS. A low
+ number takes precedence over a high number";
type uint64;
}
typedef api-type {
description
- "Type of API to fetch monitoring params";
+ "Type of API to fetch monitoring parameters";
type enumeration {
enum HTTP;
"The method to extract a value from a JSON response
NAMEKEY - Use the name as the key for a non-nested value.
- JSONPATH - Use jsonpath-rw implemenation to extract a value.
- OBJECTPATH - Use objectpath implemenation to extract a value.";
+ JSONPATH - Use jsonpath-rw implementation to extract a value.
+ OBJECTPATH - Use objectpath implementation to extract a value.";
type enumeration {
enum NAMEKEY;
enum JSONPATH;
VPORT: Virtual Port
// VNIC_ADDR: Virtual NIC Address
// PNIC_ADDR: Physical NIC Address
- // PPORT: Phsical Port.";
+ // PPORT: Physical Port.";
type enumeration {
enum VPORT;
container vm-flavor {
leaf vcpu-count {
description
- "Number of vcpus for the VM.";
+ "Number of VCPUs for the VM.";
type uint16;
}
leaf om-cpu-model-string {
- description "Openmano CPU model string";
+ description "OpenMANO CPU model string";
type string;
}
list om-cpu-feature {
key "feature";
- description "List of openmano CPU features";
+ description "List of OpenMANO CPU features";
leaf feature {
description "CPU feature";
type string;
"Memory page allocation size. If a VM requires
hugepages, it should choose LARGE or SIZE_2MB
or SIZE_1GB. If the VM prefers hugepages it
- should chose PREFER_LARGE.
+ should choose PREFER_LARGE.
LARGE : Require hugepages (either 2MB or 1GB)
SMALL : Doesn't require hugepages
SIZE_2MB : Requires 2MB hugepages
SIZE_1GB : Requires 1GB hugepages
- PREFER_LARGE : Application perfers hugepages";
+ PREFER_LARGE : Application prefers hugepages";
type enumeration {
enum LARGE;
enum SMALL;
case numa-aware {
container numa-node-policy {
description
- "This policy defines numa topology of the
+ "This policy defines NUMA topology of the
guest. Specifically identifies if the guest
- should be run on a host with one numa
- node or multiple numa nodes. As an example
- a guest may want 8 vcpus and 4 GB of
- memory. But may want the vcpus and memory
- distributed across multiple numa nodes.
- The NUMA node 1 may run with 6 vcpus and
- 3GB, and NUMA node 2 may run with 2 vcpus
- and 1GB.";
+ should be run on a host with one NUMA
+ node or multiple NUMA nodes. As an example
+ a guest might need 8 VCPUs and 4 GB of
+ memory. However, it might need the VCPUs
+ and memory distributed across multiple
+ NUMA nodes. In this scenario, NUMA node
+ 1 could run with 6 VCPUs and 3GB, and
+ NUMA node 2 could run with 2 VCPUs and
+ 1GB.";
leaf node-cnt {
description
- "The number of numa nodes to expose to the VM.";
+ "The number of NUMA nodes to expose to the VM.";
type uint16;
}
strictly from the memory attached
to the NUMA node.
PREFERRED : The memory should be allocated
- perferentially from the memory
+ preferentially from the memory
attached to the NUMA node";
type enumeration {
enum STRICT;
list vcpu {
key "id";
description
- "List of vcpus to allocate on
- this numa node.";
+ "List of VCPUs to allocate on
+ this NUMA node.";
leaf id {
type uint64;
- description "List of vcpus ids to allocate on
- this numa node";
+ description "List of VCPUs ids to allocate on
+ this NUMA node";
}
}
choice om-numa-type {
description
- "Openmano Numa type selection";
+ "OpenMANO Numa type selection";
case cores {
leaf num-cores {
list paired-thread-ids {
description
- "List of thread pairs to use in case of paired-thread numa";
+ "List of thread pairs to use in case of paired-thread NUMA";
max-elements 16;
key thread-a;
description "Container for the provider network.";
leaf physical-network {
description
- "Name of the phsyical network on which the provider
+ "Name of the physical network on which the provider
network is built.";
type string;
}
leaf overlay-type {
description
- "Type of the overlay network.";
+ "Type of the overlay network.
+ LOCAL - Provider network implemented in a single compute node
+ FLAT - Provider network shared by all tenants
+ VLAN - Provider network implemented using 802.1Q tagging
+ VXLAN - Provider networks implemented using RFC 7348
+ GRE - Provider networks implemented using GRE tunnels";
type enumeration {
enum LOCAL;
enum FLAT;
}
leaf segmentation_id {
description
- "Segmentation ID";
+ "ID of segregated virtual networks";
type uint32;
}
}
leaf method {
description
- "This is the method to be performed at the uri.
- GET by default for action";
+ "Method that the URI should perform.
+         Default action is GET.";
type manotypes:http-method;
default "GET";
list monitoring-param {
description
- "List of monitoring parameters at the NS level";
+ "List of monitoring parameters at the network service level";
key id;
leaf id {
type string;
}
leaf group-tag {
- description "A simple tag to group monitoring parameters";
+ description "A tag to group monitoring parameters";
type string;
}
leaf widget-type {
+ description "Defines the UI Display variant of measured counters.";
type manotypes:widget-type;
}
leaf units {
+ description "Measured Counter Units (e.g., Packets, Kbps, Mbps, etc.)";
type string;
}
}
}
leaf max-value {
description
- "Maxium value for the parameter";
+ "Maximum value for the parameter";
type uint64;
}
}
}
leaf group-tag {
- description "A simple tag to group control parameters";
+ description "A tag to group control parameters";
type string;
}
leaf max-value {
description
- "Maxium value for the parameter";
+ "Maximum value for the parameter";
type uint64;
}
leaf url {
description
- "This is the URL where to perform the operation";
+ "This is the URL where the operation should be performed.";
type inet:uri;
}
leaf method {
description
- "This is the method to be performed at the uri.
- POST by default for action";
+ "Method that the URI should perform.
+         Default action is POST.";
type manotypes:http-method;
default "POST";
}
leaf group-tag {
- description "A simple tag to group monitoring parameter";
+ description "A tag to group monitoring parameter";
type string;
}
}
grouping input-parameter {
- description "";
+ description "List of input parameters that can be specified when instantiating a network service.";
list input-parameter {
description
leaf xpath {
description
- "A an xpath that specfies which element in a descriptor is to be
+        "An xpath that specifies which element in a descriptor is to be
modified.";
type string;
}
}
leaf default-value {
- description " A default value for this input parameter";
+      description "A default value for this input parameter, for example /nsd:nsd-catalog/nsd:nsd/nsd:vendor";
type string;
}
}
}
typedef alarm-severity-type {
- description "An indication of the importance or ugency of the alarm";
+ description "An indication of the importance or urgency of the alarm";
type enumeration {
enum LOW;
enum MODERATE;
typedef alarm-statistic-type {
description
- "The type of statistic to used to measure a metric to determine
- threshold crossing for an alarm.";
+ "Statistic type to use to determine threshold crossing
+ for an alarm.";
type enumeration {
enum AVERAGE;
enum MINIMUM;
typedef alarm-operation-type {
description
- "The relational operator used to define whether an alarm should be
- triggered when, say, the metric statistic goes above or below a
- specified value.";
+ "The relational operator used to define whether an alarm
+ should be triggered in certain scenarios, such as if the
+ metric statistic goes above or below a specified value.";
type enumeration {
enum GE; // greater than or equal
enum LE; // less than or equal
grouping alarm {
leaf alarm-id {
description
- "This field is reserved for the identifier assigned by the cloud
- provider";
+ "This field is reserved for the identifier assigned by the VIM provider";
type string;
}
}
leaf description {
- description "A string containing a description of this alarm";
+ description "A description of this alarm";
type string;
}
}
leaf severity {
- description "A measure of the important or urgency of the alarm";
+ description "A measure of the importance or urgency of the alarm";
type alarm-severity-type;
}
leaf operation {
description
- "The relational operator that defines whether the alarm should be
- triggered when the metric statistic is, say, above or below the
- specified threshold value.";
+ "The relational operator used to define whether an alarm should be
+ triggered in certain scenarios, such as if the metric statistic
+ goes above or below a specified value.";
type alarm-operation-type;
}
leaf evaluations {
description
- "This is the number of samples of the metric statistic used to
- evaluate threshold crossing. Each sample or evaluation is equal to
- the metric statistic obtained for a given period. This can be used
- to mitigate spikes in the metric that may skew the statistic of
- interest.";
+ "Defines the length of time (seconds) in which metric data are
+ collected in order to evaluate the chosen statistic.";
type uint32;
}
}
typedef cloud-account-type {
- description "cloud account type";
+ description "VIM account type";
type enumeration {
enum aws;
enum cloudsim;
key "metadata-key";
leaf metadata-key {
+ description
+ "Name of the additional information attached to the host-aggregate";
type string;
}
leaf metadata-value {
+ description
+ "Value of the corresponding metadata-key";
type string;
}
}
}
}
+ grouping cloud-config {
+ description "List of cloud config parameters";
+
+ list ssh-authorized-key {
+ key "key-pair-ref";
+
+ description "List of authorized ssh keys as part of cloud-config";
+
+ leaf key-pair-ref {
+ description "A reference to the key pair entry in the global key pair table";
+ type leafref {
+ path "/nsr:key-pair/nsr:name";
+ }
+ }
+ }
+ list user {
+ key "name";
+
+ description "List of users to be added through cloud-config";
+ leaf name {
+        description "Name of the user";
+ type string;
+ }
+ leaf user-info {
+ description "The user name's real name";
+ type string;
+ }
+ list ssh-authorized-key {
+ key "key-pair-ref";
+
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf key-pair-ref {
+ description "A reference to the key pair entry in the global key pair table";
+ type leafref {
+ path "/nsr:key-pair/nsr:name";
+ }
+ }
+ }
+ }
+ }
+
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+
augment "/rw-project:project" {
list key-pair {
key "name";
NOTE: An issue with confd is preventing the
use of xpath. Seems to be an issue with leafref
to leafref, whose target is in a different module.
- Once that is resovled this will switched to use
+         Once that is resolved this will be switched to use
leafref";
type yang:uuid;
}
uses manotypes:placement-group-input;
}
- list ssh-authorized-key {
- key "key-pair-ref";
-
- description "List of authorized ssh keys as part of cloud-config";
-
- leaf key-pair-ref {
- description "A reference to the key pair entry in the global key pair table";
- type leafref {
- path "../../../../key-pair/name";
- }
- }
- }
- list user {
- key "name";
-
- description "List of users to be added through cloud-config";
- leaf name {
- description "Name of the user ";
- type string;
- }
-
- leaf user-info {
- description "The user name's real name";
- type string;
- }
-
- list ssh-authorized-key {
- key "key-pair-ref";
-
- description "Used to configure the list of public keys to be injected as part
- of ns instantiation";
-
- leaf key-pair-ref {
- description "A reference to the key pair entry in the global key pair table";
- type leafref {
- path "../../../../../key-pair/name";
- }
- }
- }
- }
+ uses cloud-config;
}
grouping vnffgr {
description
"Type of Service Function.
NOTE: This needs to map with Service Function Type in ODL to
- support VNFFG. Service Function Type is manadatory param in ODL
+ support VNFFG. Service Function Type is mandatory param in ODL
SFC. This is temporarily set to string for ease of use";
type string;
}
list vnf-primitive-group {
description
- "List of service primitives grouped by VNF.";
+ "Reference to member-vnf within constituent-vnfds";
key "member-vnf-index-ref";
leaf member-vnf-index-ref {
leaf user-defined-script {
description
- "A user defined script.";
+ "A user defined script.";
type string;
}
}
NOTE: An issue with confd is preventing the
use of xpath. Seems to be an issue with leafref
to leafref, whose target is in a different module.
- Once that is resovled this will switched to use
+ Once that is resolved this will switched to use
leafref";
type string;
}
}
}
-
}
}
leaf short-name {
- description "PNFD short name.";
+ description "Short name to appear as label in the UI";
type string;
}
}
leaf short-name {
- description "Short name for VLR for UI";
+ description "Short name to appear as label in the UI";
type string;
}
# Exceptions
import keystoneclient.exceptions as KeystoneExceptions
+import neutronclient.common.exceptions as NeutronException
class ValidationError(Exception):
self.cinder_drv = ci_drv.CinderDriver(self.sess_drv,
region_name = region,
logger = self.log)
- except Exception as e:
+ except Exception:
self.cinder_drv = None
self.ceilo_drv = ce_drv.CeilometerDriver(self.sess_drv,
return self._cache['cinder']
def build_resource_cache(self):
- self.build_network_resource_cache()
+ try:
+ self.build_network_resource_cache()
+ except KeyError:
+ raise
self.build_nova_resource_cache()
self.build_cinder_resource_cache()
self.build_glance_resource_cache()
self.log.info("Discovering management network %s", self._mgmt_network)
network_list = self._cache_populate(self.neutron_drv.network_get,
None,
- **{'network_name':self._mgmt_network})
+ **{'network_name': self._mgmt_network})
if network_list:
self.neutron_cache['mgmt_net'] = network_list['id']
else:
- raise KeyError("Error")
+ msg = "Could not find management network %s" % self._mgmt_network
+ self.log.error(msg)
+ raise KeyError(msg)
def _build_glance_image_list(self):
def _build_cinder_volume_list(self):
self.log.info("Discovering volumes")
- vollist = self.cinder_volume_list()
self.cinder_cache['volumes'] = self._cache_populate(self.cinder_volume_list,
list())
return self.cinder_cache['volumes']
def build_network_resource_cache(self):
self.log.info("Building network resource cache")
- self._get_neutron_mgmt_network()
+ try:
+ self._get_neutron_mgmt_network()
+ except KeyError:
+ raise
self._build_neutron_security_group_list()
self._build_neutron_subnet_prefix_list()
self.sess_drv.invalidate_auth_token()
self.sess_drv.auth_token
self.build_resource_cache()
+ except KeystoneExceptions.Unauthorized as e:
+ self.log.error("Invalid credentials ")
+ raise ValidationError("Invalid Credentials: "+ str(e))
except KeystoneExceptions.AuthorizationFailure as e:
self.log.error("Unable to authenticate or validate the existing credentials. Exception: %s", str(e))
raise ValidationError("Invalid Credentials: "+ str(e))
+ except NeutronException.NotFound as e:
+ self.log.error("Given management network could not be found for Openstack account ")
+ raise ValidationError("Neutron network not found "+ str(e))
except Exception as e:
self.log.error("Could not connect to Openstack. Exception: %s", str(e))
raise ValidationError("Connection Error: "+ str(e))
-
+
def glance_image_create(self, **kwargs):
- if not 'disk_format' in kwargs:
+ if 'disk_format' not in kwargs:
kwargs['disk_format'] = 'qcow2'
- if not 'container_format' in kwargs:
+ if 'container_format' not in kwargs:
kwargs['container_format'] = 'bare'
- if not 'min_disk' in kwargs:
+ if 'min_disk' not in kwargs:
kwargs['min_disk'] = 0
- if not 'min_ram' in kwargs:
+ if 'min_ram' not in kwargs:
kwargs['min_ram'] = 0
return self.glance_drv.image_create(**kwargs)
def nova_server_create(self, **kwargs):
if 'security_groups' not in kwargs:
- kwargs['security_groups'] = [ s['name'] for s in self._nova_security_groups ]
+ kwargs['security_groups'] = [s['name'] for s in self._nova_security_groups]
return self.nova_drv.server_create(**kwargs)
def nova_server_add_port(self, server_id, port_id):
def nova_volume_list(self, server_id):
return self.nova_drv.volume_list(server_id)
+ def neutron_extensions_list(self):
+ return self.neutron_drv.extensions_list()
+
def neutron_network_list(self):
return self.neutron_drv.network_list()
def cinder_volume_list(self):
return self.cinder_drv.volume_list()
- def cinder_volume_get(self,vol_id):
+ def cinder_volume_get(self, vol_id):
return self.cinder_drv.volume_get(vol_id)
def cinder_volume_set_metadata(self, volumeid, metadata):
# limitations under the License.
#
-import contextlib
import logging
import os
import subprocess
import rw_status
import rift.cal.rwcal_status as rwcal_status
import rwlogger
-import neutronclient.common.exceptions as NeutronException
-import keystoneclient.exceptions as KeystoneExceptions
from gi.repository import (
PREPARE_VM_CMD = "prepare_vm.py --auth_url {auth_url} --username {username} --password {password} --tenant_name {tenant_name} --region {region} --user_domain {user_domain} --project_domain {project_domain} --mgmt_network {mgmt_network} --server_id {server_id} --port_metadata "
-rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
- KeyError: RwTypes.RwStatus.NOTFOUND,
- NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+rwstatus_exception_map = {IndexError: RwTypes.RwStatus.NOTFOUND,
+ KeyError: RwTypes.RwStatus.NOTFOUND,
+ NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED, }
rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
self.log = logger
try:
self._driver = openstack_drv.OpenstackDriver(logger = self.log, **kwargs)
- except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,
- NeutronException.NotFound) as e:
- raise
except Exception as e:
self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
raise
Validation Code and Details String
"""
status = RwcalYang.CloudConnectionStatus()
- drv = self._use_driver(account)
try:
+ drv = self._use_driver(account)
drv.validate_account_creds()
- except KeystoneExceptions.Unauthorized as e:
- self.log.error("Invalid credentials given for VIM account %s", account.name)
- status.status = "failure"
- status.details = "Invalid Credentials: %s" % str(e)
-
- except KeystoneExceptions.AuthorizationFailure as e:
- self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s",
- account.name, account.openstack.auth_url)
- status.status = "failure"
- status.details = "Invalid auth url: %s" % str(e)
-
- except NeutronException.NotFound as e:
- self.log.error("Given management network %s could not be found for VIM account %s",
- account.openstack.mgmt_network,
- account.name)
- status.status = "failure"
- status.details = "mgmt network does not exist: %s" % str(e)
-
- except openstack_drv.ValidationError as e:
- self.log.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
- status.status = "failure"
- status.details = "Invalid Credentials: %s" % str(e)
-
except Exception as e:
- msg = "RwcalOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
+ msg = "RwcalOpenstackPlugin: Exception: %s" %(str(e))
self.log.error(msg)
status.status = "failure"
status.details = msg
kwargs['image_id'] = vminfo.image_id
### If floating_ip is required and we don't have one, better fail before any further allocation
- pool_name = None
floating_ip = False
if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
- if account.openstack.has_field('floating_ip_pool'):
- pool_name = account.openstack.floating_ip_pool
floating_ip = True
if vminfo.has_field('cloud_init') and vminfo.cloud_init.has_field('userdata'):
kwargs['availability_zone'] = None
if vminfo.has_field('server_group'):
- kwargs['scheduler_hints'] = {'group': vminfo.server_group }
+ kwargs['scheduler_hints'] = {'group': vminfo.server_group}
else:
kwargs['scheduler_hints'] = None
if key in vm.user_tags.fields:
setattr(vm.user_tags, key, value)
if 'OS-EXT-SRV-ATTR:host' in vm_info:
- if vm_info['OS-EXT-SRV-ATTR:host'] != None:
+ if vm_info['OS-EXT-SRV-ATTR:host'] is not None:
vm.host_name = vm_info['OS-EXT-SRV-ATTR:host']
if 'OS-EXT-AZ:availability_zone' in vm_info:
- if vm_info['OS-EXT-AZ:availability_zone'] != None:
+ if vm_info['OS-EXT-AZ:availability_zone'] is not None:
vm.availability_zone = vm_info['OS-EXT-AZ:availability_zone']
return vm
network = RwcalYang.NetworkInfoItem()
network.network_name = network_info['name']
network.network_id = network_info['id']
- if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
+ if ('provider:network_type' in network_info) and (network_info['provider:network_type'] is not None):
network.provider_network.overlay_type = network_info['provider:network_type'].upper()
if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
network.provider_network.segmentation_id = network_info['provider:segmentation_id']
cmd += (" --vol_metadata {}").format(tmp_file.name)
exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
- exec_cmd = exec_path+'/'+cmd
+ exec_cmd = exec_path + '/' + cmd
self.log.info("Running command: %s" %(exec_cmd))
subprocess.call(exec_cmd, shell=True)
uses connection-status;
- typedef sdn-account-type {
- description "SDN account type";
- type enumeration {
- enum odl;
- enum mock;
- enum sdnsim;
- }
- }
-
- grouping sdn-provider-auth {
- leaf account-type {
- type sdn-account-type;
- }
-
- choice provider-specific-info {
- container odl {
- leaf username {
- type string {
- length "1..255";
- }
- }
-
- leaf password {
- type string {
- length "1..32";
- }
- }
-
- leaf url {
- type string {
- length "1..255";
- }
- }
- }
- container mock {
- leaf username {
- type string;
- }
- leaf plugin-name {
- type string;
- default "rwsdn_mock";
- }
- }
-
- container sdnsim {
- leaf username {
- type string;
- }
- leaf plugin-name {
- type string;
- default "rwsdn_sim";
- }
- }
- }
- }
-
grouping provider-auth {
leaf account-type {
type manotypes:cloud-account-type;
vdu_data = []
for vdu in vnfr['vdur']:
d = {}
- for k in ['name','management_ip', 'vm_management_ip', 'id']:
+ for k in ['name','management_ip', 'vm_management_ip', 'id', 'vdu_id_ref']:
if k in vdu:
d[k] = vdu[k]
vdu_data.append(d)
rw-project-vnfd
rw-resource-mgr
rw-restportforward
-rwsdn
+rwsdnal
rw-sdn
rwshell-mgmt
rw-sorch
return self.descriptor_msg.id
+ @property
+ def descriptor_name(self):
+ """ The descriptor name of this descriptor in the system """
+ if not self.descriptor_msg.has_field("name"):
+ msg = "Descriptor name not present"
+ self._log.error(msg)
+ raise PackageError(msg)
+
+ return self.descriptor_msg.name
+
@classmethod
def get_descriptor_patterns(cls):
""" Returns a tuple of descriptor regex and Package Types """
try:
# Copy the contents of the file to the correct path
+ # For folder creation (or nested folders), dest_file appears w/ trailing "/" like: dir1/ or dir1/dir2/
+ # For regular file upload, dest_file appears as dir1/abc.txt
dest_dir_path = os.path.dirname(dest_file)
if not os.path.isdir(dest_dir_path):
os.makedirs(dest_dir_path)
+ if not os.path.basename(dest_file):
+ self._log.debug("Created dir path, no filename to insert in {}, skipping..".format(dest_dir_path))
+ return
with open(dest_file, 'wb') as dst_hdl:
with open(new_file, 'rb') as src_hdl:
self._created = True
- @asyncio.coroutine
def delete(self):
if not self._created:
return
self._log.warning("Openmano vnf id not set. Cannot delete.")
return
- yield from self._loop.run_in_executor(
- None,
- self._cli_api.vnf_delete,
- self._vnf_id,
- )
+ self._cli_api.vnf_delete(self._vnf_id)
class OpenmanoNSRecordState(Enum):
class OpenmanoNsr(object):
TIMEOUT_SECS = 300
+ INSTANCE_TERMINATE_TIMEOUT = 60
def __init__(self, dts, log, loop, publisher, cli_api, http_api, nsd_msg, nsr_config_msg,key_pairs):
self._dts = dts
yield from vnfr.create()
self._vnfrs.append(vnfr)
- @asyncio.coroutine
def delete(self):
if not self._created:
self._log.debug("NSD wasn't created. Skipping delete.")
return
self._log.debug("Deleting openmano nsr")
-
- yield from self._loop.run_in_executor(
- None,
- self._cli_api.ns_delete,
- self._nsd_uuid,
- )
+ self._cli_api.ns_delete(self._nsd_uuid)
self._log.debug("Deleting openmano vnfrs")
+ deleted_vnf_id_list = []
for vnfr in self._vnfrs:
- yield from vnfr.delete()
+ if vnfr.vnfr.vnfd.id not in deleted_vnf_id_list:
+ vnfr.delete()
+ deleted_vnf_id_list.append(vnfr.vnfr.vnfd.id)
@asyncio.coroutine
yield from self._publisher.publish_vnfr(None, vnfr_msg)
active_vnfs.append(vnfr)
-
except Exception as e:
vnfr_msg.operational_status = "failed"
self._state = OpenmanoNSRecordState.FAILED
self.instance_monitor_task(), loop=self._loop
)
- @asyncio.coroutine
def terminate(self):
-
- for _,handler in self._vdur_console_handler.items():
- handler._regh.deregister()
-
if self._nsr_uuid is None:
- self._log.warning("Cannot terminate an un-instantiated nsr")
- return
+ start_time = time.time()
+ while ((time.time() - start_time) < OpenmanoNsr.INSTANCE_TERMINATE_TIMEOUT) and (self._nsr_uuid is None):
+ time.sleep(5)
+ self._log.warning("Waiting for nsr to get instatiated")
+ if self._nsr_uuid is None:
+ self._log.warning("Cannot terminate an un-instantiated nsr")
+ return
if self._monitor_task is not None:
self._monitor_task.cancel()
self._monitor_task = None
self._log.debug("Terminating openmano nsr")
- yield from self._loop.run_in_executor(
- None,
- self._cli_api.ns_terminate,
- self._nsr_uuid,
- )
+ self._cli_api.ns_terminate(self._nsr_uuid)
@asyncio.coroutine
def create_vlr(self,vlr):
nsr_id = nsr.id
openmano_nsr = self._openmano_nsrs[nsr_id]
- yield from openmano_nsr.terminate()
- yield from openmano_nsr.delete()
+ for _,handler in openmano_nsr._vdur_console_handler.items():
+ handler._regh.deregister()
+
+ yield from self._loop.run_in_executor(
+ None,
+ self.terminate,
+ openmano_nsr,
+ )
with self._dts.transaction() as xact:
for vnfr in openmano_nsr.vnfrs:
del self._openmano_nsrs[nsr_id]
+ def terminate(self, openmano_nsr):
+ openmano_nsr.terminate()
+ openmano_nsr.delete()
+
@asyncio.coroutine
def terminate_vnf(self, vnfr):
"""
VnfrYang,
RwVnfrYang,
RwNsmYang,
- RwsdnYang,
+ RwsdnalYang,
RwDts as rwdts,
RwTypes,
ProtobufC,
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
- sff = RwsdnYang.VNFFGSff()
+ sff = RwsdnalYang.VNFFGSff()
sff_list[nsr_vnfr.vnfd.id] = sff
sff.name = nsr_vnfr.name
sff.function_type = nsr_vnfr.vnfd.service_function_chain
(member_vnfd.member_vnf_index_ref == str(const_vnfd.member_vnf_index)):
group_info = self.resolve_placement_group_cloud_construct(group)
if group_info is None:
- self._log.error("Could not resolve cloud-construct for placement group: %s", group.name)
+ self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
### raise PlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
else:
self._log.info("Successfully resolved cloud construct for placement group: %s for VNF: %s (Member Index: %s)",
from gi.repository import (
RwDts as rwdts,
- RwsdnYang,
+ RwsdnalYang,
RwTypes,
ProtobufC,
)
if (account.name in self._account):
self._log.error("SDN Account is already set")
else:
- sdn_account = RwsdnYang.SDNAccount()
+ sdn_account = RwsdnalYang.SDNAccount()
sdn_account.from_dict(account.as_dict())
sdn_account.name = account.name
self._account[account.name] = sdn_account
def get_sdn_account(self, name):
"""
- Creates an object for class RwsdnYang.SdnAccount()
+ Creates an object for class RwsdnalYang.SdnAccount()
"""
if (name in self._account):
return self._account[name]
sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
for rsp in vnffgr.rsp:
- vnffg = RwsdnYang.VNFFGChain()
+ vnffg = RwsdnalYang.VNFFGChain()
vnffg.name = rsp.name
vnffg.classifier_name = rsp.classifier_name
else:
self._log.error("No RSP wiht name %s found; Skipping classifier %s creation",classifier.rsp_id_ref,classifier.name)
continue
- vnffgcl = RwsdnYang.VNFFGClassifier()
+ vnffgcl = RwsdnalYang.VNFFGClassifier()
vnffgcl.name = classifier.name
vnffgcl.rsp_name = cl_rsp_name
vnffgcl.port_id = vnffgr_cl[0].port_id
rift/tasklets/${TASKLET_NAME}/rpc.py
rift/tasklets/${TASKLET_NAME}/downloader/__init__.py
rift/tasklets/${TASKLET_NAME}/downloader/url.py
+ rift/tasklets/${TASKLET_NAME}/downloader/copy.py
rift/tasklets/${TASKLET_NAME}/proxy/__init__.py
rift/tasklets/${TASKLET_NAME}/proxy/base.py
rift/tasklets/${TASKLET_NAME}/proxy/filesystem.py
rift/tasklets/${TASKLET_NAME}/publisher/__init__.py
rift/tasklets/${TASKLET_NAME}/publisher/download_status.py
+ rift/tasklets/${TASKLET_NAME}/publisher/copy_status.py
rift/tasklets/${TASKLET_NAME}/subscriber/__init__.py
rift/tasklets/${TASKLET_NAME}/subscriber/download_status.py
COMPONENT ${PKG_LONG_NAME}
PYTHON3_ONLY)
-rift_add_subdirs(test)
\ No newline at end of file
+rift_add_subdirs(test)
# limitations under the License.
#
from .url import PackageFileDownloader
+from .copy import PackageFileCopier
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Nandan Sinha
+#
+
+import os
+import uuid
+import shutil
+import enum
+
+import gi
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+from gi.repository import (
+ RwYang,
+ NsdYang,
+ VnfdYang,
+ RwVnfdYang,
+ RwNsdYang,
+ RwPkgMgmtYang
+)
+
+class PackageCopyError(Exception):
+ pass
+
+class CopyStatus(enum.Enum):
+ UNINITIATED = 0
+ STARTED = 1
+ IN_PROGRESS = 2
+ COMPLETED = 3
+ FAILED = 4
+ CANCELLED = 5
+
+TaskStatus = RwPkgMgmtYang.TaskStatus
+
+class CopyMeta:
+    """Tracks the state of a single package-copy transaction and converts
+    it to the CopyJob yang message published over DTS."""
+
+    # Internal CopyStatus -> TaskStatus value-nick string (upper-cased)
+    # expected by the RwPkgMgmtYang model. Both UNINITIATED and STARTED
+    # report externally as QUEUED.
+    STATUS_MAP = {
+        CopyStatus.STARTED: TaskStatus.QUEUED.value_nick.upper(),
+        CopyStatus.UNINITIATED: TaskStatus.QUEUED.value_nick.upper(),
+        CopyStatus.IN_PROGRESS: TaskStatus.IN_PROGRESS.value_nick.upper(),
+        CopyStatus.COMPLETED: TaskStatus.COMPLETED.value_nick.upper(),
+        CopyStatus.FAILED: TaskStatus.FAILED.value_nick.upper(),
+        CopyStatus.CANCELLED: TaskStatus.CANCELLED.value_nick.upper()
+    }
+
+    def __init__(self, transaction_id):
+        # transaction_id: unique id of the copy operation this meta tracks
+        self.transaction_id = transaction_id
+        self.state = CopyStatus.UNINITIATED
+
+    def set_state(self, state):
+        """Record a new CopyStatus for this transaction."""
+        self.state = state
+
+    def as_dict(self):
+        """Return the raw attribute dict (transaction_id, state)."""
+        return self.__dict__
+
+    def to_yang(self):
+        """Build the RwPkgMgmtYang CopyJob message for the current state."""
+        job = RwPkgMgmtYang.CopyJob.from_dict({
+            "transaction_id": self.transaction_id,
+            "status": CopyMeta.STATUS_MAP[self.state]
+        })
+        return job
+
+class PackageFileCopier:
+    """Copies an onboarded descriptor package (vnfd/nsd) into a new
+    package directory with a fresh id and the requested name, rewrites the
+    descriptor yaml accordingly, and notifies a delegate of the outcome.
+
+    The delegate (normally a CopyStatusPublisher) is attached externally
+    after construction; all delegate callbacks are optional.
+    """
+
+    # package-type -> (descriptor gi message class, yang modules needed to
+    # serialize that descriptor to yaml)
+    DESCRIPTOR_MAP = {
+        "vnfd": (RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd, 'vnfd rw-vnfd'),
+        "nsd" : (RwNsdYang.YangData_Nsd_NsdCatalog_Nsd, 'nsd rw-nsd')
+    }
+
+    @classmethod
+    def from_rpc_input(cls, rpc_input, proxy, log=None):
+        """Alternate constructor: build a copier from a package-copy RPC input."""
+        return cls(
+            rpc_input.package_id,
+            rpc_input.package_type,
+            rpc_input.package_name,
+            proxy = proxy,
+            log=log)
+
+    def __init__(self,
+            pkg_id,
+            pkg_type,
+            pkg_name,
+            proxy,
+            log):
+        self.src_package_id = pkg_id
+        self.package_type = pkg_type.lower()
+        self.dest_package_name = pkg_name
+        self.dest_package_id = str(uuid.uuid4())
+        self.transaction_id = str(uuid.uuid4())
+        self.proxy = proxy
+        self.log = log
+        self.meta = CopyMeta(self.transaction_id)
+        self.src_package = None
+        self.dest_desc_msg = None
+        # Assigned externally (CopyStatusPublisher.register_copier). Must be
+        # initialized here so call_delegate() degrades to a no-op instead of
+        # raising AttributeError if copy() runs before a delegate is attached.
+        self.delegate = None
+
+    # Start of delegate calls
+    def call_delegate(self, event):
+        """Invoke the named delegate callback, if a delegate is attached."""
+        if not self.delegate:
+            return
+
+        # Send out the descriptor message to be posted on success
+        # Otherwise send out the CopyJob yang conversion from meta object.
+        if event == "on_download_succeeded":
+            getattr(self.delegate, event)(self.dest_desc_msg)
+        else:
+            getattr(self.delegate, event)(self.meta.to_yang())
+
+    def _copy_tree(self):
+        """
+        Locate directory tree of the source descriptor folder.
+        Copy directory tree to destination descriptor folder.
+
+        """
+        store = self.proxy._get_store(self.package_type)
+        src_path = store._get_package_dir(self.src_package_id)
+        self.src_package = store.get_package(self.src_package_id)
+        src_desc_name = self.src_package.descriptor_name
+        src_copy_path = os.path.join(src_path, src_desc_name)
+
+        self.dest_copy_path = os.path.join(store.DEFAULT_ROOT_DIR,
+                self.dest_package_id,
+                self.dest_package_name)
+        self.log.debug("Copying contents from {src} to {dest}".
+                format(src=src_copy_path, dest=self.dest_copy_path))
+
+        shutil.copytree(src_copy_path, self.dest_copy_path)
+
+    def _create_descriptor_file(self):
+        """ Update descriptor file for the newly copied descriptor catalog.
+        Use the existing descriptor file to create a descriptor proto gi object,
+        change some identifiers, and create a new descriptor yaml file from it.
+
+        """
+        src_desc_file = self.src_package.descriptor_file
+        src_desc_contents = self.src_package.descriptor_msg.as_dict()
+        # The copy gets its own id and the caller-supplied name.
+        src_desc_contents.update(
+                id =self.dest_package_id,
+                name = self.dest_package_name,
+                short_name = self.dest_package_name
+                )
+
+        desc_cls, modules = PackageFileCopier.DESCRIPTOR_MAP[self.package_type]
+        self.dest_desc_msg = desc_cls.from_dict(src_desc_contents)
+        dest_desc_path = os.path.join(self.dest_copy_path,
+                "{pkg_name}_{pkg_type}.yaml".format(pkg_name=self.dest_package_name, pkg_type=self.package_type))
+        model = RwYang.Model.create_libncx()
+        for module in modules.split():
+            model.load_module(module)
+
+        with open(dest_desc_path, "w") as fh:
+            fh.write(self.dest_desc_msg.to_yaml(model))
+
+        # The descriptor yaml copied over from the source package is stale
+        # (old id/name); remove it so only the rewritten one remains.
+        copied_desc_file = os.path.join(self.dest_copy_path, os.path.basename(src_desc_file))
+        if os.path.exists(copied_desc_file):
+            self.log.debug("Deleting copied yaml from old source %s" % (copied_desc_file))
+            os.remove(copied_desc_file)
+
+    def copy(self):
+        """Run the copy end-to-end; reports success/failure via delegate,
+        and always fires the finished callback."""
+        try:
+            if self.package_type not in PackageFileCopier.DESCRIPTOR_MAP:
+                raise PackageCopyError("Package type {} not currently supported for copy operations".format(self.package_type))
+
+            self._copy_tree()
+            self._create_descriptor_file()
+            self.copy_succeeded()
+
+        except Exception as e:
+            self.log.exception(str(e))
+            self.copy_failed()
+
+        self.copy_finished()
+
+    def copy_failed(self):
+        self.meta.set_state(CopyStatus.FAILED)
+        self.call_delegate("on_download_failed")
+
+    def copy_progress(self):
+        self.meta.set_state(CopyStatus.IN_PROGRESS)
+        self.call_delegate("on_download_progress")
+
+    def copy_succeeded(self):
+        self.meta.set_state(CopyStatus.COMPLETED)
+        self.call_delegate("on_download_succeeded")
+
+    def copy_finished(self):
+        self.call_delegate("on_download_finished")
+
# limitations under the License.
#
from .download_status import DownloadStatusPublisher
+from .copy_status import CopyStatusPublisher
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Nandan Sinha
+#
+
+import sys
+import asyncio
+import uuid
+import abc
+import functools
+from concurrent.futures import Future
+
+from gi.repository import (RwDts as rwdts)
+import rift.mano.dts as mano_dts
+import rift.downloader as url_downloader
+import rift.tasklets.rwlaunchpad.onboard as onboard
+
+if sys.version_info < (3, 4, 4):
+ asyncio.ensure_future = asyncio.async
+
+
+class CopyStatusPublisher(mano_dts.DtsHandler, url_downloader.DownloaderProtocol):
+    """Publishes copy-job status to DTS and onboards the copied descriptor
+    catalog once a copy succeeds. Acts as the delegate for PackageFileCopier
+    instances, whose callbacks arrive on executor threads."""
+
+    def __init__(self, log, dts, loop, tasklet_info):
+        super().__init__(log, dts, loop)
+        self.tasks = {}   # transaction_id -> (copier, executor future)
+        self.tasklet_info = tasklet_info
+
+    def xpath(self, transaction_id=None):
+        """Return the copy-jobs DTS xpath, optionally keyed by transaction id."""
+        # NOTE: removed an unreachable stray "pass" that followed this return.
+        return ("D,/rw-pkg-mgmt:copy-jobs/rw-pkg-mgmt:job" +
+            ("[transaction-id='{}']".format(transaction_id) if transaction_id else ""))
+
+    @asyncio.coroutine
+    def register(self):
+        """Register as the DTS publisher for copy-job state."""
+        self.reg = yield from self.dts.register(xpath=self.xpath(),
+                  flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+
+        assert self.reg is not None
+
+    @asyncio.coroutine
+    def register_copier(self, copier):
+        """Attach self as the copier's delegate and run the copy on an
+        executor thread. Returns (transaction_id, dest_package_id)."""
+        copier.delegate = self
+        future = self.loop.run_in_executor(None, copier.copy)
+        self.tasks[copier.transaction_id] = (copier, future)
+
+        return (copier.transaction_id, copier.dest_package_id)
+
+    @asyncio.coroutine
+    def _dts_publisher(self, job_msg):
+        # Publish the copy-job state under its transaction id.
+        self.reg.update_element(
+                self.xpath(transaction_id=job_msg.transaction_id), job_msg)
+
+    @staticmethod
+    def _async_add(func, fut):
+        # Runs on the event loop thread: start the coroutine task and
+        # forward its result/exception to the concurrent future.
+        try:
+            ret = func()
+            fut.set_result(ret)
+        except Exception as e:
+            fut.set_exception(e)
+
+    def _schedule_dts_work(self, job_msg):
+        # Called from executor threads: hop onto the event loop to perform
+        # the DTS update, then block on the outcome.
+        f = functools.partial(
+                asyncio.ensure_future,
+                self._dts_publisher(job_msg),
+                loop = self.loop)
+        fut = Future()
+        self.loop.call_soon_threadsafe(CopyStatusPublisher._async_add, f, fut)
+        # fut.result() re-raises any exception set on the future, so the
+        # error must be logged in an except block (a post-hoc
+        # fut.exception() check would be unreachable).
+        try:
+            return fut.result()
+        except Exception as e:
+            self.log.error("Caught future exception during download: %s type %s", str(e), type(e))
+            raise
+
+    def on_download_progress(self, job_msg):
+        """callback that triggers update.
+        """
+        return self._schedule_dts_work(job_msg)
+
+    def on_download_finished(self, job_msg):
+        """callback that triggers update.
+        """
+        # clean up the local cache
+        key = job_msg.transaction_id
+        if key in self.tasks:
+            del self.tasks[key]
+
+        return self._schedule_dts_work(job_msg)
+
+    def on_download_succeeded(self, job_msg):
+        """Post the catalog descriptor object to the http endpoint.
+        Argument: job_msg (proto-gi descriptor_msg of the copied descriptor)
+
+        """
+        manifest = self.tasklet_info.get_pb_manifest()
+        use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
+        ssl_cert, ssl_key = None, None
+        if use_ssl:
+            ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+            ssl_key = manifest.bootstrap_phase.rwsecurity.key
+
+        onboarder = onboard.DescriptorOnboarder(self.log,
+                "127.0.0.1", 8008, use_ssl, ssl_cert, ssl_key)
+        try:
+            onboarder.onboard(job_msg)
+        except onboard.OnboardError as e:
+            self.log.error("Onboard exception triggered while posting copied catalog descriptor %s", e)
+            raise
+
+
RPC_SCHEMA_ENDPOINT = RwPkgMgmtYang.YangOutput_RwPkgMgmt_GetPackageSchema
RPC_PACKAGE_ADD_ENDPOINT = RwPkgMgmtYang.YangOutput_RwPkgMgmt_PackageFileAdd
RPC_PACKAGE_DELETE_ENDPOINT = RwPkgMgmtYang.YangOutput_RwPkgMgmt_PackageFileDelete
+RPC_PACKAGE_COPY_ENDPOINT = RwPkgMgmtYang.YangOutput_RwPkgMgmt_PackageCopy
class EndpointDiscoveryRpcHandler(mano_dts.AbstractRpcHandler):
class SchemaRpcHandler(mano_dts.AbstractRpcHandler):
"""RPC handler to generate the schema for the packages.
"""
- def __init__(self, log, dts, loop, proxy):
+ def __init__(self, log, dts, loop, project, proxy):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.proxy = proxy
@property
return rpc_op
+class PackageCopyOperationsRpcHandler(mano_dts.AbstractRpcHandler):
+    """RPC handler for package-copy: kicks off an asynchronous package copy
+    and replies immediately with the transaction and new package ids."""
+
+    def __init__(self, log, dts, loop, project, proxy, publisher):
+        """
+        Args:
+            proxy: Any impl of .proxy.AbstractPackageManagerProxy
+            publisher: CopyStatusPublisher object
+        """
+        super().__init__(log, dts, loop, project)
+        self.proxy = proxy
+        self.publisher = publisher
+
+    @property
+    def xpath(self):
+        return "/rw-pkg-mgmt:package-copy"
+
+    @asyncio.coroutine
+    def callback(self, ks_path, msg):
+        """Handle one package-copy RPC invocation.
+
+        The copier ids are generated by PackageFileCopier itself; status is
+        published asynchronously by the registered publisher.
+        """
+        # (removed an unused function-local "import uuid")
+        copier = pkg_downloader.PackageFileCopier.from_rpc_input(msg, proxy=self.proxy, log=self.log)
+
+        transaction_id, dest_package_id = yield from self.publisher.register_copier(copier)
+        rpc_op = RPC_PACKAGE_COPY_ENDPOINT.from_dict({
+            "transaction_id":transaction_id,
+            "package_id":dest_package_id,
+            "package_type":msg.package_type})
+
+        return rpc_op
class PackageDeleteOperationsRpcHandler(mano_dts.AbstractRpcHandler):
def __init__(self, log, dts, loop, proxy):
def __init__(self, name, tasklet, **kw):
super(PackageManagerProject, self).__init__(tasklet.log, name)
self.update(tasklet)
+ proxy = kw["proxy"]
args = [self.log, self.dts, self.loop, self]
self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
+ self.copy_publisher = pkg_publisher.CopyStatusPublisher(*args + [self.tasklet.tasklet_info])
+
# create catalog subscribers
self.vnfd_catalog_sub = subscriber.VnfdStatusSubscriber(*args)
self.nsd_catalog_sub = subscriber.NsdStatusSubscriber(*args)
-
+
+ args.append(proxy)
+ self.copy_rpc = rpc.PackageCopyOperationsRpcHandler(*(args + [self.copy_publisher]))
@asyncio.coroutine
def register (self):
yield from self.vnfd_catalog_sub.register()
yield from self.nsd_catalog_sub.register()
+ yield from self.copy_rpc.register()
+ yield from self.copy_publisher.register()
yield from self.job_handler.register()
def deregister (self):
yield from self.job_handler.deregister()
+ yield from self.copy_rpc.deregister()
+ yield from self.copy_publisher.deregister()
yield from self.vnfd_catalog_sub.deregister()
yield from self.nsd_catalog_sub.deregister()
self.log.exception(e)
def start(self):
- super().start()
self.log.debug("Registering with dts")
- self.dts = rift.tasklets.DTS(
+ try:
+ super().start()
+ self.dts = rift.tasklets.DTS(
self.tasklet_info,
RwPkgMgmtYang.get_schema(),
self.loop,
self.on_dts_state_change
)
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log)
+ args = [self.log, self.dts, self.loop]
- args = [self.log, self.dts, self.loop]
- args.append(proxy)
- self.endpoint_rpc = rpc.EndpointDiscoveryRpcHandler(*args)
- self.schema_rpc = rpc.SchemaRpcHandler(*args)
- self.delete_rpc = rpc.PackageDeleteOperationsRpcHandler(*args)
+ args.append(proxy)
+ self.endpoint_rpc = rpc.EndpointDiscoveryRpcHandler(*args)
+ self.schema_rpc = rpc.SchemaRpcHandler(*args)
+ self.delete_rpc = rpc.PackageDeleteOperationsRpcHandler(*args)
+
+ args.append(self)
+ self.pkg_op = rpc.PackageOperationsRpcHandler(*args)
- args.append(self)
- self.pkg_op = rpc.PackageOperationsRpcHandler(*args)
+ self.project_handler = ProjectHandler(self, PackageManagerProject,
+ proxy=proxy,)
+ except Exception as e:
+ self.log.exception("Exception caught rwpkgmgr start: %s", str(e))
+ else:
+ self.log.debug("rwpkgmgr started successfully!")
def stop(self):
try:
#
import asyncio
-import logging
-import os
import sys
import gi
gi.require_version('RwDts', '1.0')
from gi.repository import (
RwVnsYang,
- RwSdnYang,
RwDts as rwdts,
RwTypes,
- ProtobufC,
)
import rift.tasklets
ManoProject,
ProjectHandler,
)
+import rift.mano.sdn
from rift.vlmgr import (
VlrDtsHandler,
NwtopStaticDtsHandler,
NwtopDiscoveryDtsHandler,
NwtopDataStore,
- SdnAccountMgr,
)
-class SdnInterfaceError(Exception):
- """ SDN interface creation Error """
- pass
-
-
-class SdnPluginError(Exception):
- """ SDN plugin creation Error """
- pass
-
-
class VlRecordError(Exception):
""" Vlr Record creation Error """
pass
""" Vlr Record not found"""
pass
-class SdnAccountError(Exception):
- """ Error while creating/deleting/updating SDN Account"""
- pass
-
-class SdnAccountNotFound(Exception):
- pass
-class SDNAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop, parent):
- self._dts = dts
+class SDNAccountHandlers(object):
+ def __init__(self, dts, log, log_hdl, acctstore, loop, project):
self._log = log
+ self._log_hdl = log_hdl
+ self._dts = dts
self._loop = loop
- self._parent = parent
- self._project = self._parent._project
- self._regh = None
- self._rpch = None
-
- def _register_show_status(self):
- def get_xpath(sdn_name=None):
- return self._project.add_project("D,/rw-sdn:sdn/rw-sdn:account{}" \
- "/rw-sdn:connection-status".format(
- "[name='%s']" % sdn_name if sdn_name is not None else ''
- ))
-
- @asyncio.coroutine
- def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwSdnYang.SDNAccountConfig.schema().keyspec_to_entry(ks_path)
- sdn_account_name = path_entry.key00.name
- self._log.debug("Got show sdn connection status request: %s", ks_path.create_string())
-
- try:
- saved_accounts = self._parent._acctmgr.get_saved_sdn_accounts(sdn_account_name)
- for account in saved_accounts:
- sdn_acct = RwSdnYang.SDNAccountConfig()
- sdn_acct.from_dict(account.as_dict())
-
- self._log.debug("Responding to sdn connection status request: %s", sdn_acct.connection_status)
- xact_info.respond_xpath(
- rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
- msg=sdn_acct.connection_status,
- )
- except KeyError as e:
- self._log.warning(str(e))
- xact_info.respond_xpath(rwdts.XactRspCode.NA)
- return
-
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
-
- self._regh = yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare),
- flags=rwdts.Flag.PUBLISHER,
- )
-
- def _register_validate_rpc(self):
- def get_xpath():
- return "/rw-sdn:update-sdn-status"
-
- @asyncio.coroutine
- def on_prepare(xact_info, action, ks_path, msg):
-
- if not self._project.rpc_check(msg, xact_info=xact_info):
- return
-
- if not msg.has_field("sdn_account"):
- raise SdnAccountNotFound("SDN account name not provided")
-
- sdn_account_name = msg.sdn_account
- account = self._parent._acctmgr.get_sdn_account(sdn_account_name)
- if account is None:
- self._log.warning("SDN account %s does not exist", sdn_account_name)
- xact_info.respond_xpath(rwdts.XactRspCode.NA)
- return
-
- self._parent._acctmgr.start_validate_credentials(self._loop, sdn_account_name)
-
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ self._acctstore = acctstore
+ self._project = project
+
+ self._log.debug("Creating SDN account config handler")
+ self.sdn_cfg_handler = rift.mano.sdn.SDNAccountConfigSubscriber(
+ self._dts, self._log, project, self._log_hdl,
+ rift.mano.sdn.SDNAccountConfigCallbacks(
+ on_add_apply=self.on_sdn_account_added,
+ on_delete_apply=self.on_sdn_account_deleted,
+ ),
+ self._acctstore
- self._rpch = yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
)
-
- @asyncio.coroutine
- def register(self):
- yield from self._register_show_status()
- yield from self._register_validate_rpc()
-
- def deregister(self):
- self._log.debug("De-register SDN opdata handler for project {}".
- format(self._project.name))
- if self._regh:
- self._regh.deregister()
- self._regh = None
-
- if self._rpch:
- self._rpch.deregister()
- self._rpch = None
-
-
-class SDNAccountDtsHandler(object):
- XPATH = "C,/rw-sdn:sdn/rw-sdn:account"
-
- def __init__(self, dts, log, parent):
- self._dts = dts
- self._log = log
- self._parent = parent
- self._project = parent._project
-
- self._sdn_account = {}
- self._regh = None
-
- @property
- def _xpath(self):
- return self._project.add_project(SDNAccountDtsHandler.XPATH)
-
- def _set_sdn_account(self, account):
- self._log.info("Setting sdn account: {}".format(account))
- if account.name in self._sdn_account:
- self._log.error("SDN Account with name %s already exists. Ignoring config", account.name);
- self._sdn_account[account.name] = account
- self._parent._acctmgr.set_sdn_account(account)
-
- def _del_sdn_account(self, account_name):
- self._log.info("Deleting sdn account: {}".format(account_name))
- del self._sdn_account[account_name]
-
- self._parent._acctmgr.del_sdn_account(account_name)
-
- def _update_sdn_account(self, account):
- self._log.info("Updating sdn account: {}".format(account))
- # No need to update locally saved sdn_account's updated fields, as they
- # are not used anywhere. Call the parent's update callback.
- self._parent._acctmgr.update_sdn_account(account)
-
+
+ self._log.debug("Creating SDN account opdata handler")
+ self.sdn_operdata_handler = rift.mano.sdn.SDNAccountDtsOperdataHandler(
+ self._dts, self._log, self._loop, project,
+ )
+
+ def on_sdn_account_deleted(self, account_name):
+ self._log.debug("SDN account deleted")
+ self.sdn_operdata_handler.delete_sdn_account(account_name)
+
+ def on_sdn_account_added(self, account):
+ self._log.debug("SDN account added")
+ self.sdn_operdata_handler.add_sdn_account(account)
+
@asyncio.coroutine
def register(self):
- def apply_config(dts, acg, xact, action, _):
- self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
- if action == rwdts.AppconfAction.INSTALL and xact.id is None:
- self._log.debug("No xact handle. Skipping apply config")
- return RwTypes.RwStatus.SUCCESS
-
- return RwTypes.RwStatus.SUCCESS
-
- @asyncio.coroutine
- def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
- """ Prepare callback from DTS for SDN Account config """
-
- self._log.info("SDN Cloud account config received: %s", msg)
-
- fref = ProtobufC.FieldReference.alloc()
- fref.goto_whole_message(msg.to_pbcm())
-
- if fref.is_field_deleted():
- # Delete the sdn account record
- self._del_sdn_account(msg.name)
- else:
- # If the account already exists, then this is an update.
- if msg.name in self._sdn_account:
- self._log.debug("SDN account already exists. Invoking on_prepare update request")
- if msg.has_field("account_type"):
- errmsg = "Cannot update SDN account's account-type."
- self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- self._xpath,
- errmsg)
- raise SdnAccountError(errmsg)
-
- # Update the sdn account record
- self._update_sdn_account(msg)
- else:
- self._log.debug("SDN account does not already exist. Invoking on_prepare add request")
- if not msg.has_field('account_type'):
- errmsg = "New SDN account must contain account-type field."
- self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- self._xpath,
- errmsg)
- raise SdnAccountError(errmsg)
-
- # Set the sdn account record
- self._set_sdn_account(msg)
-
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
-
-
- self._log.debug("Registering for Sdn Account config using xpath: {}".
- format(self._xpath))
-
- acg_handler = rift.tasklets.AppConfGroup.Handler(
- on_apply=apply_config,
- )
-
- with self._dts.appconf_group_create(acg_handler) as acg:
- self._regh = acg.register(
- xpath=self._xpath,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- on_prepare=on_prepare
- )
+ self.sdn_cfg_handler.register()
+ yield from self.sdn_operdata_handler.register()
def deregister(self):
- self._log.debug("De-register VLR handler for project {}".
- format(self._project.name))
- if self._regh:
- self._regh.deregister()
- self._regh = None
+ self.sdn_cfg_handler.deregister()
+ self.sdn_operdata_handler.deregister()
class VnsManager(object):
self._log_hdl = log_hdl
self._loop = loop
self._project = project
-
+ self._acctstore = {}
self._vlr_handler = VlrDtsHandler(dts, log, loop, self)
self._vld_handler = VldDtsHandler(dts, log, loop, self)
- self._sdn_handler = SDNAccountDtsHandler(dts,log,self)
- self._sdn_opdata_handler = SDNAccountDtsOperdataHandler(dts,log, loop, self)
- self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop, self._project)
+ self._sdn_handlers = SDNAccountHandlers(dts, log, log_hdl, self._acctstore, loop, project)
self._nwtopdata_store = NwtopDataStore(log)
- self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._project,
- self._acctmgr, self._nwtopdata_store)
- self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._project,
- self._acctmgr, self._nwtopdata_store)
+ self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, project,
+ self._acctstore, self._nwtopdata_store)
+ self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, project,
+ self._acctstore, self._nwtopdata_store)
self._vlrs = {}
@asyncio.coroutine
yield from self._vld_handler.register()
@asyncio.coroutine
- def register_sdn_handler(self):
- """ Register vlr DTS handler """
- self._log.debug("Registering SDN Account config handler")
- yield from self._sdn_handler.register()
- yield from self._sdn_opdata_handler.register()
+ def register_sdn_handlers(self):
+ """ Register SDN DTS handlers """
+ self._log.debug("Registering SDN Account handlers")
+ yield from self._sdn_handlers.register()
@asyncio.coroutine
def register_nwtopstatic_handler(self):
@asyncio.coroutine
def register(self):
""" Register all static DTS handlers"""
- yield from self.register_sdn_handler()
+ yield from self.register_sdn_handlers()
yield from self.register_vlr_handler()
yield from self.register_vld_handler()
yield from self.register_nwtopstatic_handler()
- # Not used for now
yield from self.register_nwtopdiscovery_handler()
def deregister(self):
self._nwtopstatic_handler.deregister()
self._vld_handler.deregister()
self._vlr_handler.deregister()
- self._sdn_opdata_handler.deregister()
- self._sdn_handler.deregister()
+ self._sdn_handlers.deregister()
def create_vlr(self, msg):
""" Create VLR """
from .rwtopmgr import (
NwtopDiscoveryDtsHandler,
NwtopStaticDtsHandler,
- SdnAccountMgr,
)
from .rwtopdatastore import (
IetfNetworkTopologyYang,
IetfL2TopologyYang,
RwTopologyYang,
- RwsdnYang,
+ RwsdnalYang,
RwTypes
)
from gi.repository.RwTypes import RwStatus
-import rw_peas
import rift.tasklets
-class SdnGetPluginError(Exception):
- """ Error while fetching SDN plugin """
- pass
-
-
-class SdnGetInterfaceError(Exception):
- """ Error while fetching SDN interface"""
- pass
-
-
-class SdnAccountMgr(object):
- """ Implements the interface to backend plugins to fetch topology """
- def __init__(self, log, log_hdl, loop, project):
- self._account = {}
- self._log = log
- self._log_hdl = log_hdl
- self._loop = loop
- self._project = project
- self._sdn = {}
-
- self._regh = None
-
- self._status = RwsdnYang.SdnConnectionStatus(
- status='unknown',
- details="Connection status lookup not started"
- )
-
- self._validate_task = None
-
- def set_sdn_account(self,account):
- if (account.name in self._account):
- self._log.error("SDN Account is already set")
- else:
- sdn_account = RwsdnYang.SDNAccount()
- sdn_account.from_dict(account.as_dict())
- sdn_account.name = account.name
- self._account[account.name] = sdn_account
- self._log.debug("Account set is %s , %s",type(self._account), self._account)
- self.start_validate_credentials(self._loop, account.name)
-
- def del_sdn_account(self, name):
- self._log.debug("Account deleted is %s , %s", type(self._account), name)
- del self._account[name]
-
- def update_sdn_account(self,account):
- self._log.debug("Account updated is %s , %s", type(self._account), account)
- if account.name in self._account:
- sdn_account = self._account[account.name]
-
- sdn_account.from_dict(
- account.as_dict(),
- ignore_missing_keys=True,
- )
- self._account[account.name] = sdn_account
- self.start_validate_credentials(self._loop, account.name)
-
- def get_sdn_account(self, name):
- """
- Creates an object for class RwsdnYang.SdnAccount()
- """
- if (name in self._account):
- return self._account[name]
- else:
- self._log.error("ERROR : SDN account is not configured")
-
- def get_saved_sdn_accounts(self, name):
- ''' Get SDN Account corresponding to passed name, or all saved accounts if name is None'''
- saved_sdn_accounts = []
-
- if name is None or name == "":
- sdn_accounts = list(self._account.values())
- saved_sdn_accounts.extend(sdn_accounts)
- elif name in self._account:
- account = self._account[name]
- saved_sdn_accounts.append(account)
- else:
- errstr = "SDN account {} does not exist".format(name)
- raise KeyError(errstr)
-
- return saved_sdn_accounts
-
- def get_sdn_plugin(self,name):
- """
- Loads rw.sdn plugin via libpeas
- """
- if (name in self._sdn):
- return self._sdn[name]
- account = self.get_sdn_account(name)
- plugin_name = getattr(account, account.account_type).plugin_name
- self._log.info("SDN plugin being created")
- plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0')
- engine, info, extension = plugin()
-
- self._sdn[name] = plugin.get_interface("Topology")
- try:
- rc = self._sdn[name].init(self._log_hdl)
- assert rc == RwStatus.SUCCESS
- except:
- self._log.error("ERROR:SDN plugin instantiation failed ")
- else:
- self._log.info("SDN plugin successfully instantiated")
- return self._sdn[name]
-
- @asyncio.coroutine
- def validate_sdn_account_credentials(self, loop, name):
- self._log.debug("Validating SDN Account credentials %s", name)
- self._status = RwsdnYang.SDNAccount_ConnectionStatus(
- status="validating",
- details="SDN account connection validation in progress"
- )
-
- _sdnacct = self.get_sdn_account(name)
- if (_sdnacct is None):
- raise SdnGetPluginError
- _sdnplugin = self.get_sdn_plugin(name)
- if (_sdnplugin is None):
- raise SdnGetInterfaceError
-
- rwstatus, status = yield from loop.run_in_executor(
- None,
- _sdnplugin.validate_sdn_creds,
- _sdnacct,
- )
-
- if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwsdnYang.SdnConnectionStatus.from_dict(status.as_dict())
- else:
- self._status = RwsdnYang.SdnConnectionStatus(
- status="failure",
- details="Error when calling CAL validate sdn creds"
- )
-
- self._log.info("Got sdn account validation response: %s", self._status)
- _sdnacct.connection_status = self._status
-
- def start_validate_credentials(self, loop, name):
- if self._validate_task is not None:
- self._validate_task.cancel()
- self._validate_task = None
-
- self._validate_task = asyncio.ensure_future(
- self.validate_sdn_account_credentials(loop, name),
- loop=loop
- )
-
class NwtopDiscoveryDtsHandler(object):
""" Handles DTS interactions for the Discovered Topology registration """
DISC_XPATH = "D,/nd:network"
- def __init__(self, dts, log, loop, project, acctmgr, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctstore, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
self._project = project
- self._acctmgr = acctmgr
+ self._acctstore = acctstore
self._nwdatastore = nwdatastore
self._regh = None
if action == rwdts.QueryAction.READ:
- for name in self._acctmgr._account:
- _sdnacct = self._acctmgr.get_sdn_account(name)
- if (_sdnacct is None):
- raise SdnGetPluginError
-
- _sdnplugin = self._acctmgr.get_sdn_plugin(name)
- if (_sdnplugin is None):
- raise SdnGetInterfaceError
+ for name, sdnacct in self._acctstore.items():
+ if sdnacct.account_type != "odl":
+ continue
+ sdnintf = sdnacct.sdn
- rc, nwtop = _sdnplugin.get_network_list(_sdnacct)
+ rc, nwtop = sdnintf.get_network_list(sdnacct.sdnal_account_msg)
#assert rc == RwStatus.SUCCESS
if rc != RwStatus.SUCCESS:
self._log.error("Fetching get network list for SDN Account %s failed", name)
""" Handles DTS interactions for the Static Topology registration """
STATIC_XPATH = "C,/nd:network"
- def __init__(self, dts, log, loop, project, acctmgr, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctstore, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
self._project = project
- self._acctmgr = acctmgr
+ self._acctstore = acctstore
self._regh = None
self.pending = {}
import rw_peas
import rwlogger
-from gi.repository import RwsdnYang
+from gi.repository import RwsdnalYang
import gi
gi.require_version('RwTypes', '1.0')
-gi.require_version('RwSdn', '1.0')
+gi.require_version('RwSdnal', '1.0')
from gi.repository import RwcalYang
from gi.repository import IetfNetworkYang
from gi.repository.RwTypes import RwStatus
def get_sdn_account():
"""
- Creates an object for class RwsdnYang.SdnAccount()
+ Creates an object for class RwsdnalYang.SDNAccount()
"""
- account = RwsdnYang.SDNAccount()
+ account = RwsdnalYang.SDNAccount()
account.account_type = "mock"
account.mock.username = "rift"
account.mock.plugin_name = "rwsdn_mock"
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+import gi
+gi.require_version('RwTypes', '1.0')
+from gi.repository import RwsdnalYang
+from gi.repository.RwTypes import RwStatus
+
+
+logger = logging.getLogger('sdnodl')
+
+odl_info = {
+ 'username' : 'admin',
+ 'password' : 'admin',
+ 'url' : 'http://10.66.4.27:8181',
+}
+
+
+def get_sdn_account():
+ """
+ Creates an object for class RwsdnalYang.SDNAccount()
+ """
+ account = RwsdnalYang.SDNAccount()
+ account.name = "grunt27"
+ account.account_type = "odl"
+ account.odl.plugin_name = "rwsdn_odl"
+ account.odl.username = odl_info['username']
+ account.odl.password = odl_info['password']
+ account.odl.url = odl_info['url']
+
+ return account
+
+def get_sdn_plugin():
+ """
+ Loads rw.sdn plugin via libpeas
+ """
+ plugin = rw_peas.PeasPlugin('rwsdn_odl', 'RwSdn-1.0')
+ engine, info, extension = plugin()
+
+ # Get the RwLogger context
+ rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+ sdn = plugin.get_interface("Topology")
+ try:
+ rc = sdn.init(rwloggerctx)
+ assert rc == RwStatus.SUCCESS
+ except:
+ logger.error("ERROR:SDN ODL plugin instantiation failed. Aborting tests")
+ else:
+ logger.info("SDN ODL plugin successfully instantiated")
+ return sdn
+
+
+
+class SdnOdlTest(unittest.TestCase):
+ def setUp(self):
+ """
+ Initialize test plugins
+ """
+ self._acct = get_sdn_account()
+ logger.info("SDN-Odl-Test: setUp")
+ self.sdn = get_sdn_plugin()
+ logger.info("SDN-Odl-Test: setUpEND")
+
+ def tearDown(self):
+ logger.info("SDN-Odl-Test: Done with tests")
+
+ def test_validate_sdn_creds(self):
+ """
+ First test case
+ """
+ logger.debug("SDN-Odl-Test: Starting validate creds ")
+ rc, status = self.sdn.validate_sdn_creds(self._acct)
+ logger.debug("SDN-Odl-Test: SDN return code %s resp %s", rc, status)
+ self.assertEqual(rc, RwStatus.SUCCESS)
+ logger.info("SDN-Odl-Test: Passed validate creds")
+
+ def test_get_network_list(self):
+ """
+ Get-network-list test case
+ """
+ logger.debug("SDN-Odl-Test: Getting network list ")
+ rc, status = self.sdn.get_network_list(self._acct)
+ logger.debug("SDN-Odl-Test: SDN return code %s resp %s", rc, status)
+ self.assertEqual(rc, RwStatus.SUCCESS)
+ logger.info("SDN-Odl-Test: Passed get network list")
+
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG)
+ unittest.main()
+
+
+
+
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+import gi
+gi.require_version('RwTypes', '1.0')
+from gi.repository import RwsdnalYang
+from gi.repository.RwTypes import RwStatus
+
+
+logger = logging.getLogger('sdnopenstack')
+
+openstack_info = {
+ 'username' : 'pluto',
+ 'password' : 'mypasswd',
+ 'auth_url' : 'http://10.66.4.17:5000/v2.0/',
+ 'project_name' : 'demo',
+ 'user_domain_name' : 'default',
+ 'project_domain_name': 'default'
+}
+
+
+def get_sdn_account():
+ """
+ Creates an object for class RwsdnalYang.SDNAccount()
+ """
+ account = RwsdnalYang.SDNAccount()
+ account.name = "grunt17"
+ account.account_type = "openstack"
+ account.openstack.plugin_name = "rwsdn_openstack"
+ account.openstack.key = openstack_info['username']
+ account.openstack.secret = openstack_info['password']
+ account.openstack.auth_url = openstack_info['auth_url']
+ account.openstack.tenant = openstack_info['project_name']
+ account.openstack.user_domain = openstack_info['user_domain_name']
+ account.openstack.project_domain = openstack_info['project_domain_name']
+
+ return account
+
+def get_sdn_plugin():
+ """
+ Loads rw.sdn plugin via libpeas
+ """
+ plugin = rw_peas.PeasPlugin('rwsdn_openstack', 'RwSdn-1.0')
+ engine, info, extension = plugin()
+
+ # Get the RwLogger context
+ rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+ sdn = plugin.get_interface("Topology")
+ try:
+ rc = sdn.init(rwloggerctx)
+ assert rc == RwStatus.SUCCESS
+ except:
+ logger.error("ERROR:SDN openstack plugin instantiation failed. Aborting tests")
+ else:
+ logger.info("SDN openstack plugin successfully instantiated")
+ return sdn
+
+
+
+class SdnOpenstackTest(unittest.TestCase):
+ def setUp(self):
+ """
+ Initialize test plugins
+ """
+ self._acct = get_sdn_account()
+ logger.info("SDN-Openstack-Test: setUp")
+ self.sdn = get_sdn_plugin()
+ logger.info("SDN-Openstack-Test: setUpEND")
+
+ def tearDown(self):
+ logger.info("SDN-Openstack-Test: Done with tests")
+
+ def test_validate_sdn_creds(self):
+ """
+ First test case
+ """
+ logger.debug("SDN-Openstack-Test: Starting validate creds ")
+ rc, status = self.sdn.validate_sdn_creds(self._acct)
+ logger.debug("SDN-Openstack-Test: SDN return code %s resp %s", rc, status)
+ self.assertEqual(rc, RwStatus.SUCCESS)
+ logger.info("SDN-Openstack-Test: Passed validate creds")
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG)
+ unittest.main()
+
+
+
+
# limitations under the License.
#
-import datetime
import logging
import unittest
import gi
gi.require_version('RwTypes', '1.0')
-gi.require_version('RwSdn', '1.0')
-from gi.repository import RwsdnYang
-from gi.repository import IetfNetworkYang
+from gi.repository import RwsdnalYang
from gi.repository.RwTypes import RwStatus
-from gi.repository import RwSdn
logger = logging.getLogger('sdnsim')
def get_sdn_account():
"""
- Creates an object for class RwsdnYang.SdnAccount()
+ Creates an object for class RwsdnalYang.SDNAccount()
"""
- account = RwsdnYang.SDNAccount()
+ account = RwsdnalYang.SDNAccount()
account.account_type = "sdnsim"
account.sdnsim.username = "rift"
account.sdnsim.plugin_name = "rwsdn_sim"
import gi.repository.IetfL2TopologyYang as l2Tl
import gi.repository.RwTopologyYang as RwTl
import gi.repository.RwLaunchpadYang as launchpadyang
-from gi.repository import RwsdnYang
+from gi.repository import RwsdnalYang
from gi.repository.RwTypes import RwStatus
from create_stackedl2topology import MyL2Topology
VALA_FILES ${VALA_FILES}
VALA_PACKAGES
rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
- rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdn_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+ rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdnal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
ietf_network_yang-1.0 ietf_network_topology_yang-1.0
ietf_l2_topology_yang-1.0 rw_topology_yang-1.0
rw_log-1.0 rw_project_yang-1.0
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwcal_yang rwsdn_yang mano_yang rwlog_gi rwschema_yang rwproject_yang
+ DEPENDS rwcal_yang rwsdnal_yang mano_yang rwlog_gi rwschema_yang rwproject_yang
)
rift_install_vala_artifacts(
* Credential Validation related APIs
*/
public abstract RwTypes.RwStatus validate_sdn_creds(
- Rwsdn.SDNAccount account,
- out Rwsdn.SdnConnectionStatus status);
+ Rwsdnal.SDNAccount account,
+ out Rwsdnal.SdnConnectionStatus status);
/*
* Configuring related APIs
* Network related APIs
*/
public abstract RwTypes.RwStatus get_network_list(
- Rwsdn.SDNAccount account,
+ Rwsdnal.SDNAccount account,
out RwTopology.YangData_IetfNetwork network_topology);
/*
* VNFFG Chain related APIs
*/
public abstract RwTypes.RwStatus create_vnffg_chain(
- Rwsdn.SDNAccount account,
- Rwsdn.VNFFGChain vnffg_chain,
+ Rwsdnal.SDNAccount account,
+ Rwsdnal.VNFFGChain vnffg_chain,
out string vnffg_id);
/*
* VNFFG Chain Terminate related APIs
*/
public abstract RwTypes.RwStatus terminate_vnffg_chain(
- Rwsdn.SDNAccount account,
+ Rwsdnal.SDNAccount account,
string vnffg_id);
* Network related APIs
*/
public abstract RwTypes.RwStatus get_vnffg_rendered_paths(
- Rwsdn.SDNAccount account,
- out Rwsdn.VNFFGRenderedPaths rendered_paths);
+ Rwsdnal.SDNAccount account,
+ out Rwsdnal.VNFFGRenderedPaths rendered_paths);
/*
* Classifier related APIs
*/
public abstract RwTypes.RwStatus create_vnffg_classifier(
- Rwsdn.SDNAccount account,
- Rwsdn.VNFFGClassifier vnffg_classifier,
+ Rwsdnal.SDNAccount account,
+ Rwsdnal.VNFFGClassifier vnffg_classifier,
out string vnffg_classifier_id);
/*
* Classifier related APIs
*/
public abstract RwTypes.RwStatus terminate_vnffg_classifier(
- Rwsdn.SDNAccount account,
+ Rwsdnal.SDNAccount account,
string vnffg_classifier_id);
RwSdn, # Vala package
RwTypes,
RwTopologyYang as RwTl,
- RwsdnYang
+ RwsdnalYang
)
import rw_status
)
)
- account = RwsdnYang.SDNAccount()
+ account = RwsdnalYang.SDNAccount()
account.name = 'mock'
account.account_type = 'mock'
account.mock.username = 'rift'
import gi
gi.require_version('RwTypes', '1.0')
-gi.require_version('RwsdnYang', '1.0')
+gi.require_version('RwsdnalYang', '1.0')
gi.require_version('RwSdn', '1.0')
gi.require_version('RwTopologyYang','1.0')
GObject,
RwSdn, # Vala package
RwTypes,
- RwsdnYang,
+ RwsdnalYang,
RwTopologyYang as RwTl,
)
"""
Validate the SDN account credentials by accessing the rest API using the provided credentials
"""
- status = RwsdnYang.SdnConnectionStatus()
+ status = RwsdnalYang.SdnConnectionStatus()
url = '{}/{}'.format(account.odl.url,"restconf")
try:
r=requests.get(url,auth=(account.odl.username,account.odl.password))
status.status = "failure"
status.details = "Connection Failed (Invlaid URL): %s" % str(e)
else:
- print("SDN Successfully connected")
status.status = "success"
status.details = "Connection was successful"
self.delete_all_sf(account)
def _fill_rsp_list(self,sfc_rsp_list,sff_list):
- vnffg_rsps = RwsdnYang.VNFFGRenderedPaths()
+ vnffg_rsps = RwsdnalYang.VNFFGRenderedPaths()
for sfc_rsp in sfc_rsp_list['rendered-service-paths']['rendered-service-path']:
rsp = vnffg_rsps.vnffg_rendered_path.add()
rsp.name = sfc_rsp['name']
# limitations under the License.
#
-import contextlib
import logging
import gi
gi.require_version('RwSdn', '1.0')
-gi.require_version('RwCal', '1.0')
-gi.require_version('RwcalYang', '1.0')
+import rift.rwcal.openstack as openstack_drv
from rift.rwcal.openstack import session as sess_drv
from rift.rwcal.openstack import keystone as ks_drv
from rift.rwcal.openstack import neutron as nt_drv
from gi.repository import (
GObject,
- RwCal,
- RwSdn, # Vala package
- RwsdnYang,
- RwTypes,
- RwcalYang)
+ RwSdn, # Vala package
+ RwsdnalYang,
+ RwTypes)
-rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+rwstatus_exception_map = {IndexError: RwTypes.RwStatus.NOTFOUND,
KeyError: RwTypes.RwStatus.NOTFOUND,
- NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+ NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED, }
rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
project_domain_name = kwargs['project_domain'] if 'project_domain' in kwargs else None,
user_domain_name = kwargs['user_domain'] if 'user_domain' in kwargs else None,)
+ self.auth_url = kwargs['auth_url']
cert_validate = kwargs['cert_validate'] if 'cert_validate' in kwargs else False
region = kwargs['region_name'] if 'region_name' in kwargs else False
logger = self.log)
def validate_account_creds(self):
+ status = RwsdnalYang.SdnConnectionStatus()
try:
self.sess_drv.invalidate_auth_token()
self.sess_drv.auth_token
+ except KeystoneExceptions.Unauthorized as e:
+ self.log.error("Invalid credentials given for SDN account ")
+ status.status = "failure"
+ status.details = "Invalid Credentials: %s" % str(e)
+
except KeystoneExceptions.AuthorizationFailure as e:
- self.log.error("Unable to authenticate or validate the existing credentials. Exception: %s", str(e))
- raise ValidationError("Invalid Credentials: "+ str(e))
+ self.log.error("Bad authentication URL given for SDN account. Given auth url: %s",
+ self.auth_url)
+ status.status = "failure"
+ status.details = "Invalid auth url: %s" % str(e)
+
+ except NeutronException.NotFound as e:
+ status.status = "failure"
+ status.details = "Neutron exception %s" % str(e)
+
+ except openstack_drv.ValidationError as e:
+ self.log.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
+ status.status = "failure"
+ status.details = "Invalid Credentials: %s" % str(e)
+
except Exception as e:
- self.log.error("Could not connect to Openstack. Exception: %s", str(e))
- raise ValidationError("Connection Error: "+ str(e))
+ msg = "RwsdnOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
+ self.log.error(msg)
+ status.status = "failure"
+ status.details = msg
+
+ else:
+ status.status = "success"
+ status.details = "Connection was successful"
+
+ return status
- def delete_port_chain(self,port_chain_id):
+ def delete_port_chain(self, port_chain_id):
"Delete port chain"
try:
result = self.portchain_drv.get_port_chain(port_chain_id)
port_pairs.extend(port_pair_group["port_pair_group"]["port_pairs"])
self.portchain_drv.delete_port_pair_group(port_pair_group_id)
- self.log.debug("Port pairs during delete is %s",port_pairs)
+ self.log.debug("Port pairs during delete is %s", port_pairs)
for port_pair_id in port_pairs:
self.portchain_drv.delete_port_pair(port_pair_id)
pass
except Exception as e:
- self.log.error("Error while delete port chain with id %s, exception %s", port_chain_id,str(e))
+ self.log.error("Error while delete port chain with id %s, exception %s", port_chain_id, str(e))
- def update_port_chain(self,port_chain_id,flow_classifier_list):
+ def update_port_chain(self, port_chain_id, flow_classifier_list):
result = self.portchain_drv.get_port_chain(port_chain_id)
result.raise_for_status()
port_chain = result.json()['port_chain']
if port_chain and port_chain['flow_classifiers']:
new_flow_classifier_list.extend(port_chain['flow_classifiers'])
new_flow_classifier_list.extend(flow_classifier_list)
- port_chain_id = self.portchain_drv.update_port_chain(port_chain['id'],flow_classifiers=new_flow_classifier_list)
+ port_chain_id = self.portchain_drv.update_port_chain(port_chain['id'], flow_classifiers=new_flow_classifier_list)
return port_chain_id
- def create_flow_classifer(self,classifier_name,classifier_dict):
+ def create_flow_classifer(self, classifier_name, classifier_dict):
"Create flow classifier"
- flow_classifier_id = self.portchain_drv.create_flow_classifier(classifier_name,classifier_dict)
+ flow_classifier_id = self.portchain_drv.create_flow_classifier(classifier_name, classifier_dict)
return flow_classifier_id
- def delete_flow_classifier(self,classifier_id):
+ def delete_flow_classifier(self, classifier_id):
"Create flow classifier"
try:
self.portchain_drv.delete_flow_classifier(classifier_id)
except Exception as e:
- self.log.error("Error while deleting flow classifier with id %s, exception %s", classifier_id,str(e))
+ self.log.error("Error while deleting flow classifier with id %s, exception %s", classifier_id, str(e))
def get_port_chain_list(self):
result = self.portchain_drv.get_port_chain_list()
class SdnOpenstackPlugin(GObject.Object, RwSdn.Topology):
instance_num = 1
+
def __init__(self):
GObject.Object.__init__(self)
self.log = logging.getLogger('rwsdn.openstack.%s' % SdnOpenstackPlugin.instance_num)
Returns:
Validation Code and Details String
"""
- status = RwsdnYang.SdnConnectionStatus()
- drv = self._use_driver(account)
+ status = RwsdnalYang.SdnConnectionStatus()
try:
+ drv = self._use_driver(account)
drv.validate_account_creds()
except openstack_drv.ValidationError as e:
return status
@rwstatus(ret_on_failure=[""])
- def do_create_vnffg_chain(self, account,vnffg):
+ def do_create_vnffg_chain(self, account, vnffg):
"""
Creates Service Function chain in ODL
@param account - a SDN account
"""
- self.log.debug('Received Create VNFFG chain for account {}, chain {}'.format(account,vnffg))
+ self.log.debug('Received Create VNFFG chain for account {}, chain {}'.format(account, vnffg))
drv = self._use_driver(account)
port_list = list()
vnf_chain_list = sorted(vnffg.vnf_chain_path, key = lambda x: x.order)
for path in vnf_chain_list:
if prev_vm_id and path.vnfr_ids[0].vdu_list[0].vm_id == prev_vm_id:
prev_entry = port_list.pop()
- port_list.append((prev_entry[0],path.vnfr_ids[0].vdu_list[0].port_id))
+ port_list.append((prev_entry[0], path.vnfr_ids[0].vdu_list[0].port_id))
prev_vm_id = None
else:
prev_vm_id = path.vnfr_ids[0].vdu_list[0].vm_id
- port_list.append((path.vnfr_ids[0].vdu_list[0].port_id,path.vnfr_ids[0].vdu_list[0].port_id))
- vnffg_id = drv.create_port_chain(vnffg.name,port_list)
+ port_list.append((path.vnfr_ids[0].vdu_list[0].port_id, path.vnfr_ids[0].vdu_list[0].port_id))
+ vnffg_id = drv.create_port_chain(vnffg.name, port_list)
return vnffg_id
@rwstatus
- def do_terminate_vnffg_chain(self, account,vnffg_id):
+ def do_terminate_vnffg_chain(self, account, vnffg_id):
"""
Terminate Service Function chain in ODL
@param account - a SDN account
"""
- self.log.debug('Received Create VNFFG classifier for account {}, classifier {}'.format(account,vnffg_classifier))
- protocol_map = {1:'ICMP',6:'TCP',17:'UDP'}
+ self.log.debug('Received Create VNFFG classifier for account {}, classifier {}'.format(account, vnffg_classifier))
+ protocol_map = {1: 'ICMP', 6: 'TCP', 17: 'UDP'}
flow_classifier_list = list()
drv = self._use_driver(account)
for rule in vnffg_classifier.match_attributes:
flow_dict = {}
for field, value in rule.as_dict().items():
if field == 'ip_proto':
- flow_dict['protocol'] = protocol_map.get(value,None)
+ flow_dict['protocol'] = protocol_map.get(value, None)
elif field == 'source_ip_address':
flow_dict['source_ip_prefix'] = value
elif field == 'destination_ip_address':
flow_dict['logical_source_port'] = vnffg_classifier.port_id
flow_classifier_id = drv.create_flow_classifer(classifier_name, flow_dict)
flow_classifier_list.append(flow_classifier_id)
- drv.update_port_chain(vnffg_classifier.rsp_id,flow_classifier_list)
+ drv.update_port_chain(vnffg_classifier.rsp_id, flow_classifier_list)
return flow_classifier_list
@rwstatus(ret_on_failure=[None])
@param account - a SDN account
"""
self.log.debug('Received get VNFFG rendered path for account %s ', account)
- vnffg_rsps = RwsdnYang.VNFFGRenderedPaths()
+ vnffg_rsps = RwsdnalYang.VNFFGRenderedPaths()
drv = self._use_driver(account)
port_chain_list = drv.get_port_chain_list()
for port_chain in port_chain_list:
GObject,
RwSdn, # Vala package
RwTypes,
- RwsdnYang,
+ RwsdnalYang,
#IetfL2TopologyYang as l2Tl,
RwTopologyYang as RwTl,
)
Returns:
Validation Code and Details String
"""
- status = RwsdnYang.SdnConnectionStatus()
+ status = RwsdnalYang.SdnConnectionStatus()
print("SDN Successfully connected")
status.status = "success"
status.details = "Connection was successful"
include(rift_yang)
-set(source_yang_files rwsdn.yang)
+set(source_yang_files rwsdnal.yang)
rift_add_yang_target(
- TARGET rwsdn_yang
+ TARGET rwsdnal_yang
YANG_FILES ${source_yang_files}
COMPONENT ${PKG_LONG_NAME}
LIBRARIES
+++ /dev/null
-
-/*
- *
- * Copyright 2016-2017 RIFT.IO Inc
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- *
- */
-
-module rwsdn
-{
- namespace "http://riftio.com/ns/riftware-1.0/rwsdn";
- prefix "rwsdn";
-
- import rw-base {
- prefix rwbase;
- }
-
- import rw-pb-ext {
- prefix "rwpb";
- }
-
- import rw-yang-types {
- prefix "rwt";
- }
-
- import rw-log {
- prefix "rwlog";
- }
-
- import mano-types {
- prefix "manotypes";
- }
-
- import ietf-inet-types {
- prefix "inet";
- }
-
- import ietf-yang-types {
- prefix "yang";
- }
-
- import rw-project {
- prefix "rw-project";
- }
-
- revision 2017-02-08 {
- description
- "Update model to support projects.";
- }
-
- revision 2014-12-30 {
- description
- "Initial revision.";
- reference
- "RIFT RWSDN cloud data";
- }
-
- typedef sdn-connection-status-enum {
- description "Connection status for the sdn account";
- type enumeration {
- enum unknown;
- enum validating;
- enum success;
- enum failure;
- }
- }
-
- grouping connection-status {
- container connection-status {
- config false;
- rwpb:msg-new SdnConnectionStatus;
- leaf status {
- type sdn-connection-status-enum;
- }
- leaf details {
- type string;
- }
- }
- }
-
- // uses connection-status;
-
- typedef sdn-account-type {
- description "SDN account type";
- type enumeration {
- enum odl;
- enum mock;
- enum sdnsim;
- enum openstack;
- }
- }
-
- grouping sdn-provider-auth {
- leaf account-type {
- type sdn-account-type;
- }
-
- choice provider-specific-info {
- container odl {
- leaf username {
- type string {
- length "1..255";
- }
- }
-
- leaf password {
- type string {
- length "1..32";
- }
- }
-
- leaf url {
- type string {
- length "1..255";
- }
- }
- leaf plugin-name {
- type string;
- default "rwsdn_odl";
- }
- }
- container mock {
- leaf username {
- type string;
- }
- leaf plugin-name {
- type string;
- default "rwsdn_mock";
- }
- }
-
- container sdnsim {
- leaf username {
- type string;
- }
- leaf topology-source {
- type string;
- }
- leaf plugin-name {
- type string;
- default "rwsdn_sim";
- }
- }
-
- container openstack {
- leaf key {
- type string;
- mandatory true;
- }
-
- leaf secret {
- type string;
- mandatory true;
- }
-
- leaf auth_url {
- type string;
- mandatory true;
- }
-
- leaf tenant {
- type string;
- mandatory true;
- }
-
- leaf admin {
- type boolean;
- default false;
- }
-
- leaf user-domain {
- type string;
- default "Default";
- description "Domain of the OpenStack user";
- }
-
- leaf project-domain {
- type string;
- default "Default";
- description "Domain of the OpenStack project";
- }
-
- leaf region {
- type string;
- default "RegionOne";
- }
-
- leaf plugin-name {
- type string;
- default "rwsdn_openstack";
- }
-
- leaf cert-validate {
- type boolean;
- default false;
- description "Certificate validatation policy in case of SSL/TLS connection";
- }
- }
-
- }
- }
-
- augment "/rw-project:project" {
- container sdn-accounts {
- list sdn-account-list {
- rwpb:msg-new SDNAccount;
- key "name";
-
- leaf name {
- type string;
- }
-
- uses sdn-provider-auth;
- uses connection-status;
- }
- }
- }
-
- augment "/rw-project:project" {
- container vnffgs {
- list vnffg-chain {
- key "name";
- rwpb:msg-new VNFFGChain;
-
- leaf name {
- type string;
- }
-
- list vnf-chain-path {
- key "order";
- leaf order {
- type uint32;
- description " Order of the VNF in VNFFG chain";
- }
- leaf service-function-type {
- type string;
- }
- leaf nsh-aware {
- type boolean;
- }
- leaf transport-type {
- type string;
- }
- list vnfr-ids {
- key "vnfr-id";
- leaf vnfr-id {
- type yang:uuid;
- }
- leaf vnfr-name {
- type string;
- }
- leaf mgmt-address {
- type inet:ip-address;
- }
- leaf mgmt-port {
- type inet:port-number;
- }
- list vdu-list {
- key "vm-id port-id";
- leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf name {
- type string;
- }
- leaf address {
- type inet:ip-address;
- }
- leaf port {
- type inet:port-number;
- }
- }
- leaf sff-name {
- description "SFF name useful for non OVS based SFF";
- type string;
- }
- }
- }
- list sff {
- rwpb:msg-new VNFFGSff;
- key "name";
- leaf name {
- type string;
- }
- leaf function-type {
- type string;
- }
- leaf mgmt-address {
- type inet:ip-address;
- }
- leaf mgmt-port {
- type inet:port-number;
- }
- list dp-endpoints {
- key "name";
- leaf name {
- type string;
- }
- leaf address {
- type inet:ip-address;
- }
- leaf port {
- type inet:port-number;
- }
- }
- list vnfr-list {
- key "vnfr-name";
- leaf vnfr-name {
- type string;
- }
- }
- }
- leaf classifier-name {
- type string;
- }
- }
- }
- }
-
- augment "/rw-project:project" {
- container vnffg-rendered-paths {
- rwpb:msg-new VNFFGRenderedPaths;
- list vnffg-rendered-path {
- key "name";
- rwpb:msg-new VNFFGRenderedPath;
- config false;
- leaf name {
- type string;
- }
- leaf path-id {
- description
- "Unique Identifier for the service path";
- type uint32;
- }
- list rendered-path-hop {
- key "hop-number";
- leaf hop-number {
- type uint8;
- }
- leaf service-index {
- description
- "Location within the service path";
- type uint8;
- }
- leaf vnfr-name {
- type string;
- }
- container service-function-forwarder {
- leaf name {
- description
- "Service Function Forwarder name";
- type string;
- }
- leaf ip-address {
- description
- "Service Function Forwarder Data Plane IP address";
- type inet:ip-address;
- }
- leaf port {
- description
- "Service Function Forwarder Data Plane port";
- type inet:port-number;
- }
- }
- }
- }
- }
- }
-
- augment "/rw-project:project" {
- container vnffg-classifiers {
- list vnffg-classifier {
- key "name";
- rwpb:msg-new VNFFGClassifier;
-
- leaf name {
- type string;
- }
- leaf rsp-name {
- type string;
- }
- leaf rsp-id {
- type yang:uuid;
- }
- leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf sff-name {
- type string;
- }
- container vnffg-metadata {
- leaf ctx1 {
- type string;
- }
- leaf ctx2 {
- type string;
- }
- leaf ctx3 {
- type string;
- }
- leaf ctx4 {
- type string;
- }
- }
- list match-attributes {
- description
- "List of match attributes.";
- key "name";
- leaf name {
- description
- "Name for the Access list";
- type string;
- }
-
- leaf ip-proto {
- description
- "IP Protocol.";
- type uint8;
- }
-
- leaf source-ip-address {
- description
- "Source IP address.";
- type inet:ip-prefix;
- }
-
- leaf destination-ip-address {
- description
- "Destination IP address.";
- type inet:ip-prefix;
- }
-
- leaf source-port {
- description
- "Source port number.";
- type inet:port-number;
- }
-
- leaf destination-port {
- description
- "Destination port number.";
- type inet:port-number;
- }
- } //match-attributes
- }
- }
- }
-
-}
-
-/* vim: set ts=2:sw=2: */
--- /dev/null
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rwsdnal
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rwsdnal";
+ prefix "rwsdnal";
+
+ import rw-base {
+ prefix rwbase;
+ }
+
+ import rw-pb-ext {
+ prefix "rwpb";
+ }
+
+ import rw-yang-types {
+ prefix "rwt";
+ }
+
+ import rw-log {
+ prefix "rwlog";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
+ revision 2014-12-30 {
+ description
+ "Initial revision.";
+ reference
+ "RIFT RWSDN cloud data";
+ }
+
+ typedef sdn-connection-status-enum {
+ description "Connection status for the sdn account";
+ type enumeration {
+ enum unknown;
+ enum validating;
+ enum success;
+ enum failure;
+ }
+ }
+
+ grouping connection-status {
+ container connection-status {
+ config false;
+ rwpb:msg-new SdnConnectionStatus;
+ leaf status {
+ type sdn-connection-status-enum;
+ }
+ leaf details {
+ type string;
+ }
+ }
+ }
+
+ // uses connection-status;
+
+ typedef sdn-account-type {
+ description "SDN account type";
+ type enumeration {
+ enum odl;
+ enum mock;
+ enum sdnsim;
+ enum openstack;
+ }
+ }
+
+ grouping sdn-provider-auth {
+ leaf account-type {
+ type sdn-account-type;
+ }
+
+ choice provider-specific-info {
+ container odl {
+ leaf username {
+ type string {
+ length "1..255";
+ }
+ }
+
+ leaf password {
+ type string {
+ length "1..32";
+ }
+ }
+
+ leaf url {
+ type string {
+ length "1..255";
+ }
+ }
+ leaf plugin-name {
+ type string;
+ default "rwsdn_odl";
+ }
+ }
+ container mock {
+ leaf username {
+ type string;
+ }
+ leaf plugin-name {
+ type string;
+ default "rwsdn_mock";
+ }
+ }
+
+ container sdnsim {
+ leaf username {
+ type string;
+ }
+ leaf topology-source {
+ type string;
+ }
+ leaf plugin-name {
+ type string;
+ default "rwsdn_sim";
+ }
+ }
+
+ container openstack {
+ leaf key {
+ type string;
+ mandatory true;
+ }
+
+ leaf secret {
+ type string;
+ mandatory true;
+ }
+
+ leaf auth_url {
+ type string;
+ mandatory true;
+ }
+
+ leaf tenant {
+ type string;
+ mandatory true;
+ }
+
+ leaf admin {
+ type boolean;
+ default false;
+ }
+
+ leaf user-domain {
+ type string;
+ default "Default";
+ description "Domain of the OpenStack user";
+ }
+
+ leaf project-domain {
+ type string;
+ default "Default";
+ description "Domain of the OpenStack project";
+ }
+
+ leaf region {
+ type string;
+ default "RegionOne";
+ }
+
+ leaf plugin-name {
+ type string;
+ default "rwsdn_openstack";
+ }
+
+ leaf cert-validate {
+ type boolean;
+ default false;
+ description "Certificate validation policy in case of SSL/TLS connection";
+ }
+ }
+
+ }
+ }
+
+ augment "/rw-project:project" {
+ container sdn-accounts {
+ list sdn-account-list {
+ rwpb:msg-new SDNAccount;
+ key "name";
+
+ leaf name {
+ type string;
+ }
+
+ uses sdn-provider-auth;
+ uses connection-status;
+ }
+ }
+ }
+
+ augment "/rw-project:project" {
+ container vnffgs {
+ list vnffg-chain {
+ key "name";
+ rwpb:msg-new VNFFGChain;
+
+ leaf name {
+ type string;
+ }
+
+ list vnf-chain-path {
+ key "order";
+ leaf order {
+ type uint32;
+ description " Order of the VNF in VNFFG chain";
+ }
+ leaf service-function-type {
+ type string;
+ }
+ leaf nsh-aware {
+ type boolean;
+ }
+ leaf transport-type {
+ type string;
+ }
+ list vnfr-ids {
+ key "vnfr-id";
+ leaf vnfr-id {
+ type yang:uuid;
+ }
+ leaf vnfr-name {
+ type string;
+ }
+ leaf mgmt-address {
+ type inet:ip-address;
+ }
+ leaf mgmt-port {
+ type inet:port-number;
+ }
+ list vdu-list {
+ key "vm-id port-id";
+ leaf port-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf vm-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf name {
+ type string;
+ }
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ leaf sff-name {
+ description "SFF name useful for non OVS based SFF";
+ type string;
+ }
+ }
+ }
+ list sff {
+ rwpb:msg-new VNFFGSff;
+ key "name";
+ leaf name {
+ type string;
+ }
+ leaf function-type {
+ type string;
+ }
+ leaf mgmt-address {
+ type inet:ip-address;
+ }
+ leaf mgmt-port {
+ type inet:port-number;
+ }
+ list dp-endpoints {
+ key "name";
+ leaf name {
+ type string;
+ }
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ list vnfr-list {
+ key "vnfr-name";
+ leaf vnfr-name {
+ type string;
+ }
+ }
+ }
+ leaf classifier-name {
+ type string;
+ }
+ }
+ }
+ }
+
+ augment "/rw-project:project" {
+ container vnffg-rendered-paths {
+ rwpb:msg-new VNFFGRenderedPaths;
+ list vnffg-rendered-path {
+ key "name";
+ rwpb:msg-new VNFFGRenderedPath;
+ config false;
+ leaf name {
+ type string;
+ }
+ leaf path-id {
+ description
+ "Unique Identifier for the service path";
+ type uint32;
+ }
+ list rendered-path-hop {
+ key "hop-number";
+ leaf hop-number {
+ type uint8;
+ }
+ leaf service-index {
+ description
+ "Location within the service path";
+ type uint8;
+ }
+ leaf vnfr-name {
+ type string;
+ }
+ container service-function-forwarder {
+ leaf name {
+ description
+ "Service Function Forwarder name";
+ type string;
+ }
+ leaf ip-address {
+ description
+ "Service Function Forwarder Data Plane IP address";
+ type inet:ip-address;
+ }
+ leaf port {
+ description
+ "Service Function Forwarder Data Plane port";
+ type inet:port-number;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ augment "/rw-project:project" {
+ container vnffg-classifiers {
+ list vnffg-classifier {
+ key "name";
+ rwpb:msg-new VNFFGClassifier;
+
+ leaf name {
+ type string;
+ }
+ leaf rsp-name {
+ type string;
+ }
+ leaf rsp-id {
+ type yang:uuid;
+ }
+ leaf port-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf vm-id {
+ rwpb:field-inline "true";
+ rwpb:field-string-max 64;
+ type string;
+ }
+ leaf sff-name {
+ type string;
+ }
+ container vnffg-metadata {
+ leaf ctx1 {
+ type string;
+ }
+ leaf ctx2 {
+ type string;
+ }
+ leaf ctx3 {
+ type string;
+ }
+ leaf ctx4 {
+ type string;
+ }
+ }
+ list match-attributes {
+ description
+ "List of match attributes.";
+ key "name";
+ leaf name {
+ description
+ "Name for the Access list";
+ type string;
+ }
+
+ leaf ip-proto {
+ description
+ "IP Protocol.";
+ type uint8;
+ }
+
+ leaf source-ip-address {
+ description
+ "Source IP address.";
+ type inet:ip-prefix;
+ }
+
+ leaf destination-ip-address {
+ description
+ "Destination IP address.";
+ type inet:ip-prefix;
+ }
+
+ leaf source-port {
+ description
+ "Source port number.";
+ type inet:port-number;
+ }
+
+ leaf destination-port {
+ description
+ "Destination port number.";
+ type inet:port-number;
+ }
+ } //match-attributes
+ }
+ }
+ }
+
+}
+
+/* vim: set ts=2:sw=2: */
}
}
+ grouping copy-task-status {
+ leaf status {
+ description "The status of the copy task";
+ type task-status;
+ default QUEUED;
+ }
+ }
+
augment "/rw-project:project" {
container download-jobs {
rwpb:msg-new DownloadJobs;
uses download-task-status;
}
}
+
+ container copy-jobs {
+ rwpb:msg-new CopyJobs;
+ description "Copy jobs";
+ config false;
+
+ list job {
+ rwpb:msg-new CopyJob;
+ key "transaction-id";
+
+ leaf transaction-id {
+ description "Unique UUID";
+ type string;
+ }
+
+ uses copy-task-status;
+ }
+ }
}
rpc get-package-endpoint {
}
}
+ rpc package-copy {
+ description "Copies the package specified in input and returns the copied package id";
+
+ input {
+ uses package-identifer;
+
+ leaf package-name {
+ description "Name of destination package";
+ type string;
+ }
+
+ uses manotypes:rpc-project-name;
+ }
+
+ output {
+ leaf transaction-id {
+ description "Valid ID to track the status of the task";
+ type string;
+ }
+
+ uses package-identifer;
+ }
+ }
+
rpc get-package-schema {
description "Retrieves the schema for the package type";
prefix "rwcal";
}
- import rwsdn {
- prefix "rwsdn";
+ import rwsdnal {
+ prefix "rwsdnal";
}
import pytest
import gi
-gi.require_version('RwsdnYang', '1.0')
+gi.require_version('RwsdnalYang', '1.0')
-from gi.repository import RwsdnYang
+from gi.repository import RwsdnalYang
@pytest.mark.setup('sdn')
@pytest.mark.feature('sdn')
Asserts:
SDN name and accout type.
'''
- proxy = mgmt_session.proxy(RwsdnYang)
- sdn_account = RwsdnYang.SDNAccount(
+ proxy = mgmt_session.proxy(RwsdnalYang)
+ sdn_account = RwsdnalYang.SDNAccount(
name=sdn_account_name,
account_type=sdn_account_type)
xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
Asserts:
sdn_account.account_type is what was configured
'''
- proxy = mgmt_session.proxy(RwsdnYang)
+ proxy = mgmt_session.proxy(RwsdnalYang)
xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
sdn_account = proxy.get_config(xpath)
assert sdn_account.account_type == sdn_account_type
class TestSdnTeardown:
def test_delete_odl_sdn_account(self, mgmt_session, sdn_account_name):
'''Unconfigure sdn account'''
- proxy = mgmt_session.proxy(RwsdnYang)
+ proxy = mgmt_session.proxy(RwsdnalYang)
xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
proxy.delete_config(xpath)