Update from RIFT as of 696b75d2fe9fb046261b08c616f1bcf6c0b54a9b (second try)
Signed-off-by: Jeremy Mordkoff <Jeremy.Mordkoff@riftio.com>
diff --git a/common/python/rift/mano/cloud/accounts.py b/common/python/rift/mano/cloud/accounts.py
index d3aa860..13af56a 100644
--- a/common/python/rift/mano/cloud/accounts.py
+++ b/common/python/rift/mano/cloud/accounts.py
@@ -1,5 +1,5 @@
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +18,7 @@
import sys
import asyncio
from gi import require_version
+require_version('RwCal', '1.0')
require_version('RwcalYang', '1.0')
require_version('RwTypes', '1.0')
require_version('RwCloudYang', '1.0')
@@ -52,8 +53,8 @@
self._cal = self.plugin.get_interface("Cloud")
self._cal.init(rwlog_hdl)
-
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@@ -103,13 +104,13 @@
@property
def cal_account_msg(self):
- return RwcalYang.CloudAccount.from_dict(
+ return RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(
self.account_msg.as_dict(),
ignore_missing_keys=True,
)
def cloud_account_msg(self, account_dict):
- self._account_msg = RwCloudYang.CloudAccount.from_dict(account_dict)
+ self._account_msg = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account_dict)
@property
def account_type(self):
@@ -150,8 +151,9 @@
@asyncio.coroutine
def validate_cloud_account_credentials(self, loop):
- self._log.debug("Validating Cloud Account credentials %s", self._account_msg)
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._log.debug("Validating Cloud Account credentials for account %s",
+ self.name)
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus(
status="validating",
details="Cloud account connection validation in progress"
)
@@ -161,22 +163,24 @@
self.cal_account_msg,
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus(
status="failure",
details="Error when calling CAL validate cloud creds"
)
- self._log.info("Got cloud account validation response: %s", self._status)
+ if self._status.status == 'failure':
+ self._log.error("Cloud account validation failed. Acct: %s, response: %s",
+ self.name, self._status)
+ @asyncio.coroutine
def start_validate_credentials(self, loop):
if self._validate_task is not None:
self._validate_task.cancel()
self._validate_task = None
- self._validate_task = asyncio.ensure_future(
+ self._validate_task = yield from asyncio.ensure_future(
self.validate_cloud_account_credentials(loop),
loop=loop
)
-
diff --git a/common/python/rift/mano/cloud/config.py b/common/python/rift/mano/cloud/config.py
index 1b1847c..ca9d279 100644
--- a/common/python/rift/mano/cloud/config.py
+++ b/common/python/rift/mano/cloud/config.py
@@ -1,6 +1,6 @@
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,11 +21,14 @@
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
RwDts as rwdts,
ProtobufC,
+ RwCloudYang,
+ RwTypes
)
from . import accounts
@@ -38,32 +41,6 @@
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class CloudAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
@@ -103,10 +80,11 @@
class CloudAccountConfigSubscriber(object):
XPATH = "C,/rw-cloud:cloud/rw-cloud:account"
- def __init__(self, dts, log, rwlog_hdl, cloud_callbacks):
+ def __init__(self, dts, log, rwlog_hdl, project, cloud_callbacks):
self._dts = dts
self._log = log
self._rwlog_hdl = rwlog_hdl
+ self._project = project
self._reg = None
self.accounts = {}
@@ -144,9 +122,17 @@
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("Project {}: De-register cloud account handler".
+ format(self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ @asyncio.coroutine
def register(self):
@asyncio.coroutine
- def apply_config(dts, acg, xact, action, _):
+ def apply_config(dts, acg, xact, action, scratch):
self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
if xact.xact is None:
@@ -155,7 +141,9 @@
for cfg in curr_cfg:
self._log.debug("Cloud account being re-added after restart.")
if not cfg.has_field('account_type'):
- raise CloudAccountError("New cloud account must contain account_type field.")
+ self._log.error("New cloud account must contain account_type field.")
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
self.add_account(cfg)
else:
# When RIFT first comes up, an INSTALL is called with the current config
@@ -165,6 +153,13 @@
return
+ #Updating the account in case individual fields of cloud accounts are being deleted.
+ if self._reg:
+ for cfg in self._reg.get_xact_elements(xact):
+ if cfg.name in scratch.get('cloud_accounts', []):
+ self.update_account(cfg)
+ scratch.pop('cloud_accounts', None)
+
add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
dts_member_reg=self._reg,
xact=xact,
@@ -188,12 +183,16 @@
""" Prepare callback from DTS for Cloud Account """
action = xact_info.query_action
+
+ xpath = ks_path.to_xpath(RwCloudYang.get_schema())
+
self._log.debug("Cloud account on_prepare config received (action: %s): %s",
xact_info.query_action, msg)
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
if msg.name in self.accounts:
- self._log.debug("Cloud account already exists. Invoking update request")
+ self._log.debug("Cloud account {} already exists. " \
+ "Invoking update request".format(msg.name))
# Since updates are handled by a delete followed by an add, invoke the
# delete prepare callbacks to give clients an opportunity to reject.
@@ -202,7 +201,9 @@
else:
self._log.debug("Cloud account does not already exist. Invoking on_prepare add request")
if not msg.has_field('account_type'):
- raise CloudAccountError("New cloud account must contain account_type field.")
+ self._log.error("New cloud account must contain account_type field.")
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
account = accounts.CloudAccount(self._log, self._rwlog_hdl, msg)
yield from self._cloud_callbacks.on_add_prepare(account)
@@ -212,8 +213,13 @@
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
if fref.is_field_deleted():
- yield from self._cloud_callbacks.on_delete_prepare(msg.name)
-
+ try:
+ yield from self._cloud_callbacks.on_delete_prepare(msg.name)
+ except Exception as e:
+ err_msg = str(e)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, xpath, err_msg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
else:
fref.goto_proto_name(msg.to_pbcm(), "sdn_account")
if fref.is_field_deleted():
@@ -223,9 +229,9 @@
del dict_account["sdn_account"]
account.cloud_account_msg(dict_account)
else:
- self._log.error("Deleting individual fields for cloud account not supported")
- xact_info.respond_xpath(rwdts.XactRspCode.NACK)
- return
+ #Updating account in case individual fields are being deleted
+ cloud_accounts = scratch.setdefault('cloud_accounts', [])
+ cloud_accounts.append(msg.name)
else:
self._log.error("Action (%s) NOT SUPPORTED", action)
@@ -241,9 +247,10 @@
on_apply=apply_config,
)
+ xpath = self._project.add_project(CloudAccountConfigSubscriber.XPATH)
with self._dts.appconf_group_create(acg_handler) as acg:
self._reg = acg.register(
- xpath=CloudAccountConfigSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare,
)
diff --git a/common/python/rift/mano/cloud/operdata.py b/common/python/rift/mano/cloud/operdata.py
index 4878691..bcf0519 100644
--- a/common/python/rift/mano/cloud/operdata.py
+++ b/common/python/rift/mano/cloud/operdata.py
@@ -1,6 +1,6 @@
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,28 +16,38 @@
#
import asyncio
+import gi
import rift.tasklets
from gi.repository import(
RwCloudYang,
RwDts as rwdts,
+ RwTypes,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class CloudAccountNotFound(Exception):
pass
class CloudAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
+ self._regh = None
+ self._rpc = None
self.cloud_accounts = {}
def add_cloud_account(self, account):
self.cloud_accounts[account.name] = account
- account.start_validate_credentials(self._loop)
+ asyncio.ensure_future(
+ account.start_validate_credentials(self._loop),
+ loop=self._loop
+ )
def delete_cloud_account(self, account_name):
del self.cloud_accounts[account_name]
@@ -69,26 +79,26 @@
self._log.info("Notification called by creating dts query: %s", ac_status)
+ @asyncio.coroutine
def _register_show_status(self):
def get_xpath(cloud_name=None):
return "D,/rw-cloud:cloud/account{}/connection-status".format(
- "[name='%s']" % cloud_name if cloud_name is not None else ''
- )
+ "[name=%s]" % quoted_key(cloud_name) if cloud_name is not None else ''
+ )
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path)
+ path_entry = RwCloudYang.YangData_RwProject_Project_Cloud_Account.schema().keyspec_to_entry(ks_path)
cloud_account_name = path_entry.key00.name
- self._log.debug("Got show cloud connection status request: %s", ks_path.create_string())
try:
saved_accounts = self.get_saved_cloud_accounts(cloud_account_name)
for account in saved_accounts:
connection_status = account.connection_status
- self._log.debug("Responding to cloud connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
@@ -98,13 +108,15 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
+ xpath = self._project.add_project(get_xpath())
+ self._regh = yield from self._dts.register(
+ xpath=xpath,
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
flags=rwdts.Flag.PUBLISHER,
)
+ @asyncio.coroutine
def _register_validate_rpc(self):
def get_xpath():
return "/rw-cloud:update-cloud-status"
@@ -113,20 +125,28 @@
def on_prepare(xact_info, action, ks_path, msg):
if not msg.has_field("cloud_account"):
raise CloudAccountNotFound("Cloud account name not provided")
-
cloud_account_name = msg.cloud_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cloud_accounts[cloud_account_name]
except KeyError:
- raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name)
+ errmsg = "Cloud account name {} not found in project {}". \
+ format(cloud_account_name, self._project.name)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ get_xpath(),
+ errmsg)
+ raise CloudAccountNotFound(errmsg)
- account.start_validate_credentials(self._loop)
+ yield from account.start_validate_credentials(self._loop)
yield from self.create_notification(account)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._rpc = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare
@@ -136,5 +156,11 @@
@asyncio.coroutine
def register(self):
+ self._log.debug("Register cloud account for project %s", self._project.name)
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+ self._log.debug("De-register cloud account for project %s", self._project.name)
+ self._rpc.deregister()
+ self._regh.deregister()
diff --git a/common/python/rift/mano/config_agent/config.py b/common/python/rift/mano/config_agent/config.py
index 7500bac..e0f39d8 100644
--- a/common/python/rift/mano/config_agent/config.py
+++ b/common/python/rift/mano/config_agent/config.py
@@ -1,5 +1,5 @@
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,6 +21,7 @@
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
@@ -36,32 +37,6 @@
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class ConfigAgentCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
@@ -101,9 +76,10 @@
class ConfigAgentSubscriber(object):
XPATH = "C,/rw-config-agent:config-agent/account"
- def __init__(self, dts, log, config_callbacks):
+ def __init__(self, dts, log, project, config_callbacks):
self._dts = dts
self._log = log
+ self._project = project
self._reg = None
self.accounts = {}
@@ -139,15 +115,27 @@
self.delete_account(account_msg)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("De-register config agent handler for project {}".
+ format(self._project.name))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
def apply_config(dts, acg, xact, action, _):
self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action)
if xact.xact is None:
- # When RIFT first comes up, an INSTALL is called with the current config
- # Since confd doesn't actally persist data this never has any data so
- # skip this for now.
- self._log.debug("No xact handle. Skipping apply config")
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.info("Config Agent Account {} being re-added after restart.".
+ format(cfg.name))
+ self.add_account(cfg)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
return
add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
@@ -212,17 +200,17 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Config Account config using xpath: %s",
- ConfigAgentSubscriber.XPATH,
- )
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
+ xpath = self._project.add_project(ConfigAgentSubscriber.XPATH)
+ self._log.debug("Registering for Config Account config using xpath: %s",
+ xpath)
self._reg = acg.register(
- xpath=ConfigAgentSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
on_prepare=on_prepare,
)
diff --git a/common/python/rift/mano/config_agent/operdata.py b/common/python/rift/mano/config_agent/operdata.py
index fbf3c43..83912db 100644
--- a/common/python/rift/mano/config_agent/operdata.py
+++ b/common/python/rift/mano/config_agent/operdata.py
@@ -1,4 +1,4 @@
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,7 +16,11 @@
import asyncio
import concurrent.futures
+import gi
import time
+import gi
+
+gi.require_version('RwNsrYang', '1.0')
from gi.repository import (
NsrYang,
@@ -25,9 +29,10 @@
RwNsrYang,
RwConfigAgentYang,
RwDts as rwdts)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
-
import rift.mano.utils.juju_api as juju
@@ -48,7 +53,7 @@
def validate_account_creds(self):
- status = RwcalYang.CloudConnectionStatus()
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus()
try:
env = self._api._get_env()
except juju.JujuEnvError as e:
@@ -86,7 +91,7 @@
else:
self._cfg_agent_client_plugin = None
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@@ -117,13 +122,13 @@
def validate_cfg_agent_account_credentials(self, loop):
self._log.debug("Validating Config Agent Account %s, credential status %s", self._account_msg, self._status)
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="validating",
details="Config Agent account connection validation in progress"
)
if self._cfg_agent_client_plugin is None:
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="unknown",
details="Config Agent account does not support validation of account creds"
)
@@ -133,9 +138,9 @@
None,
self._cfg_agent_client_plugin.validate_account_creds
)
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus.from_dict(status.as_dict())
except Exception as e:
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="failure",
details="Error - " + str(e)
)
@@ -153,12 +158,15 @@
)
class CfgAgentDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self.cfg_agent_accounts = {}
+ self._show_reg = None
+ self._rpc_reg = None
def add_cfg_agent_account(self, account_msg):
account = ConfigAgentAccount(self._log, account_msg)
@@ -191,12 +199,12 @@
def _register_show_status(self):
def get_xpath(cfg_agent_name=None):
return "D,/rw-config-agent:config-agent/account{}/connection-status".format(
- "[name='%s']" % cfg_agent_name if cfg_agent_name is not None else ''
+ "[name=%s]" % quoted_key(cfg_agent_name) if cfg_agent_name is not None else ''
)
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwConfigAgentYang.ConfigAgentAccount.schema().keyspec_to_entry(ks_path)
+ path_entry = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account.schema().keyspec_to_entry(ks_path)
cfg_agent_account_name = path_entry.key00.name
self._log.debug("Got show cfg_agent connection status request: %s", ks_path.create_string())
@@ -205,9 +213,10 @@
for account in saved_accounts:
connection_status = account.connection_status
self._log.debug("Responding to config agent connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
@@ -217,12 +226,13 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare),
- flags=rwdts.Flag.PUBLISHER,
- )
+ xpath = self._project.add_project(get_xpath())
+ self._show_reg = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
def _register_validate_rpc(self):
def get_xpath():
@@ -234,6 +244,10 @@
raise ConfigAgentAccountNotFound("Config Agent account name not provided")
cfg_agent_account_name = msg.cfg_agent_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cfg_agent_accounts[cfg_agent_account_name]
except KeyError:
@@ -243,24 +257,29 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc_reg = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+ def deregister(self):
+ self._show_reg.deregister()
+ self._rpc_reg.deregister()
+
+
class ConfigAgentJob(object):
"""A wrapper over the config agent job object, providing some
convenience functions.
- YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains
+ YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob contains
||
==> VNFRS
||
@@ -274,17 +293,19 @@
"running" : "pending",
"failed" : "failure"}
- def __init__(self, nsr_id, job, tasks=None):
+ def __init__(self, nsr_id, job, project, tasks=None):
"""
Args:
nsr_id (uuid): ID of NSR record
- job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
+ job (YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
tasks: List of asyncio.tasks. If provided the job monitor will
use it to monitor the tasks instead of the execution IDs
"""
self._job = job
self.nsr_id = nsr_id
self.tasks = tasks
+ self._project = project
+
self._regh = None
@property
@@ -315,10 +336,10 @@
@property
def xpath(self):
"""Xpath of the job"""
- return ("D,/nsr:ns-instance-opdata" +
- "/nsr:nsr[nsr:ns-instance-config-ref='{}']" +
- "/nsr:config-agent-job[nsr:job-id='{}']"
- ).format(self.nsr_id, self.id)
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
+ "/nsr:nsr[nsr:ns-instance-config-ref={}]" +
+ "/nsr:config-agent-job[nsr:job-id={}]"
+ ).format(quoted_key(self.nsr_id), quoted_key(str(self.id))))
@property
def regh(self):
@@ -331,9 +352,9 @@
self._regh = hdl
@staticmethod
- def convert_rpc_input_to_job(nsr_id, rpc_output, tasks):
+ def convert_rpc_input_to_job(nsr_id, rpc_output, tasks, project):
"""A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive
- to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
+ to YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
Args:
nsr_id (uuid): NSR ID
@@ -344,10 +365,10 @@
ConfigAgentJob
"""
# Shortcuts to prevent the HUUGE names.
- CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob
- CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
- CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
- CfgAgentPrimitiveParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
+ CfgAgentJob = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob
+ CfgAgentVnfr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
+ CfgAgentPrimitive = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
+ CfgAgentPrimitiveParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
job = CfgAgentJob.from_dict({
"job_id": rpc_output.job_id,
@@ -370,7 +391,8 @@
vnf_primitive = CfgAgentPrimitive.from_dict({
"name": primitive.name,
"execution_status": ConfigAgentJob.STATUS_MAP[primitive.execution_status],
- "execution_id": primitive.execution_id
+ "execution_id": primitive.execution_id,
+ "execution_error_details": primitive.execution_error_details,
})
# Copy over the input param
@@ -385,7 +407,7 @@
job.vnfr.append(vnfr_job)
- return ConfigAgentJob(nsr_id, job, tasks)
+ return ConfigAgentJob(nsr_id, job, project, tasks)
class ConfigAgentJobMonitor(object):
@@ -448,13 +470,10 @@
registration_handle.update_element(self.job.xpath, self.job.job)
- def get_error_details(self):
+ def get_execution_details(self):
'''Get the error details from failed primitives'''
errs = ''
for vnfr in self.job.job.vnfr:
- if vnfr.vnf_job_status != "failure":
- continue
-
for primitive in vnfr.primitive:
if primitive.execution_status == "failure":
errs += '<error>'
@@ -463,7 +482,11 @@
else:
errs += '{}: Unknown error'.format(primitive.name)
errs += "</error>"
-
+ else:
+ if primitive.execution_error_details:
+ errs += '<{status}>{details}</{status}>'.format(
+ status=primitive.execution_status,
+ details=primitive.execution_error_details)
return errs
@asyncio.coroutine
@@ -514,14 +537,15 @@
if "failure" in job_status:
self.job.job_status = "failure"
- errs = self.get_error_details()
- if len(errs):
- self.job.job.job_status_details = errs
elif "pending" in job_status:
self.job.job_status = "pending"
else:
self.job.job_status = "success"
+ errs = self.get_execution_details()
+ if len(errs):
+ self.job.job.job_status_details = errs
+
# self.log.debug("Publishing job status: {} at {} for nsr id: {}".format(
# self.job.job_status,
# self.job.xpath,
@@ -529,6 +553,7 @@
registration_handle.update_element(self.job.xpath, self.job.job)
+ registration_handle.update_element(self.job.xpath, self.job.job)
except Exception as e:
self.log.exception(e)
@@ -551,6 +576,9 @@
for primitive in vnfr.primitive:
if primitive.execution_status != 'pending':
+ if primitive.execution_id == "":
+ # We may not have processed the status for these yet
+ job_status.append(primitive.execution_status)
continue
if primitive.execution_id == "":
@@ -558,7 +586,7 @@
job_status.append(primitive.execution_status)
continue
- elif primitive.execution_id == "config":
+ if primitive.execution_id == "config":
# Config job. Check if service is active
task = self.loop.create_task(self.get_service_status(vnfr.id, primitive))
@@ -668,7 +696,7 @@
self._nsm = nsm
self._regh = None
- self._nsr_regh = None
+ self._project = cfgm.project
@property
def regh(self):
@@ -685,11 +713,10 @@
""" Return the ConfigManager manager instance """
return self._cfgm
- @staticmethod
- def cfg_job_xpath(nsr_id, job_id):
- return ("D,/nsr:ns-instance-opdata" +
- "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" +
- "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id)
+ def cfg_job_xpath(self, nsr_id, job_id):
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
+ "/nsr:nsr[nsr:ns-instance-config-ref={}]" +
+ "/nsr:config-agent-job[nsr:job-id={}]").format(quoted_key(nsr_id), quoted_key(str(job_id))))
@asyncio.coroutine
def register(self):
@@ -700,7 +727,7 @@
""" prepare callback from dts """
xpath = ks_path.to_xpath(RwNsrYang.get_schema())
if action == rwdts.QueryAction.READ:
- schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema()
+ schema = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
nsr_id = path_entry.key00.ns_instance_config_ref
@@ -719,7 +746,7 @@
for job in jobs:
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.id),
+ self.cfg_job_xpath(nsr_id, job.id),
job.job)
except Exception as e:
@@ -731,17 +758,17 @@
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._project.add_project(
+ CfgAgentJobDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
- @asyncio.coroutine
def _terminate_nsr(self, nsr_id):
self._log.debug("NSR {} being terminated".format(nsr_id))
jobs = self.cfgm.get_job(nsr_id)
for job in jobs:
- path = CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.id)
+ path = self.cfg_job_xpath(nsr_id, job.id)
with self._dts.transaction() as xact:
self._log.debug("Deleting job: {}".format(path))
job.regh.delete_element(path)
@@ -752,40 +779,14 @@
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
- @asyncio.coroutine
- def register_for_nsr(self):
- """ Register for NSR changes """
-
- @asyncio.coroutine
- def on_prepare(xact_info, query_action, ks_path, msg):
- """ This NSR is created """
- self._log.debug("Received NSR instantiate on_prepare (%s:%s:%s)",
- query_action,
- ks_path,
- msg)
-
- if (query_action == rwdts.QueryAction.UPDATE or
- query_action == rwdts.QueryAction.CREATE):
- pass
- elif query_action == rwdts.QueryAction.DELETE:
- nsr_id = msg.ns_instance_config_ref
- asyncio.ensure_future(self._terminate_nsr(nsr_id), loop=self._loop)
- else:
- raise NotImplementedError(
- "%s action on cm-state not supported",
- query_action)
-
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
-
- try:
- handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- self._nsr_regh = yield from self._dts.register(self.nsr_xpath,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- handler=handler)
- except Exception as e:
- self._log.error("Failed to register for NSR changes as %s", str(e))
+ def deregister(self):
+ self._log.debug("De-register config agent job for project".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class ConfigAgentJobManager(object):
@@ -794,7 +795,7 @@
TODO: Needs to support multiple config agents.
"""
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
"""
Args:
dts : Dts handle
@@ -807,11 +808,12 @@
self.log = log
self.loop = loop
self.nsm = nsm
+ self.project = project
self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self)
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def add_job(self, rpc_output, tasks=None):
- """Once an RPC is trigger add a now job
+ """Once an RPC is triggered, add a new job
Args:
rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output
@@ -821,7 +823,8 @@
"""
nsr_id = rpc_output.nsr_id_ref
- job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks)
+ job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output,
+ tasks, self.project)
self.log.debug("Creating a job monitor for Job id: {}".format(
rpc_output.job_id))
@@ -840,6 +843,14 @@
ca = agent
break
+ def done_callback(fut):
+ e = fut.exception()
+ if e:
+ self.log.error("Exception on monitor job {}: {}".
+ format(rpc_output.job_id, e))
+ fut.print_stack()
+ self.log.debug("Monitor job done for {}".format(rpc_output.job_id))
+
# For every Job we will schedule a new monitoring process.
job_monitor = ConfigAgentJobMonitor(
self.dts,
@@ -850,6 +861,7 @@
ca
)
task = self.loop.create_task(job_monitor.publish_action_status())
+ task.add_done_callback(done_callback)
def get_job(self, nsr_id):
"""Get the job associated with the NSR Id, if present."""
@@ -866,4 +878,8 @@
@asyncio.coroutine
def register(self):
yield from self.handler.register()
- yield from self.handler.register_for_nsr()
+ # yield from self.handler.register_for_nsr()
+
+ def deregister(self):
+ self.handler.deregister()
+ self.handler = None
diff --git a/common/python/rift/mano/config_data/config.py b/common/python/rift/mano/config_data/config.py
index 63a2e48..a186324 100644
--- a/common/python/rift/mano/config_data/config.py
+++ b/common/python/rift/mano/config_data/config.py
@@ -1,4 +1,4 @@
-############################################################################
+###########################################################################
# Copyright 2016 RIFT.io Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
@@ -20,8 +20,8 @@
import os
import yaml
-from gi.repository import NsdYang
-from gi.repository import VnfdYang
+from gi.repository import ProjectNsdYang as NsdYang
+from gi.repository import ProjectVnfdYang as VnfdYang
class InitialConfigReadError(Exception):
@@ -117,23 +117,23 @@
super(VnfInitialConfigPrimitiveReader, self).__init__(primitive_input)
def get_initial_config_primitive(self, seq, name):
- return VnfdYang.InitialConfigPrimitive(seq=seq, name=name)
+ return VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive(seq=seq, name=name)
-class NsInitialConfigPrimitiveReader(InitialConfigPrimitiveReader):
+class NsInitialServicePrimitiveReader(InitialConfigPrimitiveReader):
'''Class to read the NS initial config primitives'''
def __init__(self, primitive_input):
- super(NsInitialConfigPrimitiveReader, self).__init__(primitive_input)
+ super(NsInitialServicePrimitiveReader, self).__init__(primitive_input)
def get_initial_config_primitive(self, seq, name):
- return NsdYang.NsdInitialConfigPrimitive(seq=seq, name=name)
+ return NsdYang.YangData_Nsd_NsdCatalog_Nsd_InitialServicePrimitive(seq=seq, name=name)
class ConfigPrimitiveConvertor(object):
PARAMETER = "parameter"
PARAMETER_GROUP = "parameter_group"
- CONFIG_PRIMITIVE = "service_primitive"
+ SERVICE_PRIMITIVE = "service_primitive"
INITIAL_CONFIG_PRIMITIVE = "initial_config_primitive"
def _extract_param(self, param, field="default_value"):
@@ -180,25 +180,25 @@
input_data = {}
if config_primitives:
- input_data[self.CONFIG_PRIMITIVE] = {}
+ input_data[self.SERVICE_PRIMITIVE] = {}
for config_primitive in config_primitives:
- input_data[self.CONFIG_PRIMITIVE][config_primitive.name] = {}
+ input_data[self.SERVICE_PRIMITIVE][config_primitive.name] = {}
self._extract_parameters(
config_primitive.parameter,
- input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+ input_data[self.SERVICE_PRIMITIVE][config_primitive.name])
try:
self._extract_parameter_group(
config_primitive.parameter_group,
- input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+ input_data[self.SERVICE_PRIMITIVE][config_primitive.name])
except AttributeError:
pass
- if not input_data[self.CONFIG_PRIMITIVE][config_primitive.name]:
- del input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+ if not input_data[self.SERVICE_PRIMITIVE][config_primitive.name]:
+ del input_data[self.SERVICE_PRIMITIVE][config_primitive.name]
- if not input_data[self.CONFIG_PRIMITIVE]:
- del input_data[self.CONFIG_PRIMITIVE]
+ if not input_data[self.SERVICE_PRIMITIVE]:
+ del input_data[self.SERVICE_PRIMITIVE]
if initial_configs:
@@ -238,7 +238,7 @@
initial_conf = None
try:
- initial_conf = nsd.initial_config_primitive
+ initial_conf = nsd.initial_service_primitive
except AttributeError:
pass
@@ -250,7 +250,7 @@
def extract_vnfd_config(self, vnfd, format="yaml"):
config_prim = None
try:
- config_prim = vnfd.vnf_configuration.service_primitive
+ config_prim = vnfd.vnf_configuration.config_primitive
except AttributeError:
pass
@@ -273,7 +273,7 @@
pass
def add_nsd_initial_config(self, nsd_init_cfg_prim_msg, input_data):
- """ Add initial config primitives from NS Initial Config Input Data
+ """ Add initial service primitives from NS Initial Config Input Data
Arguments:
nsd_init_cfg_prim_msg - manotypes:nsd/initial_config_primitive pb msg
@@ -285,38 +285,37 @@
if self.INITIAL_CONFIG_PRIMITIVE in input_data:
ns_input_data = input_data[self.INITIAL_CONFIG_PRIMITIVE]
- reader = NsInitialConfigPrimitiveReader(ns_input_data)
+ reader = NsInitialServicePrimitiveReader(ns_input_data)
for prim in reader.primitives:
nsd_init_cfg_prim_msg.append(prim)
def merge_nsd_initial_config(self, nsd, input_data):
try:
- for config_primitive in nsd.initial_config_primitive:
+ for service_primitive in nsd.initial_service_primitive:
for cfg in input_data[self.INITIAL_CONFIG_PRIMITIVE]:
- if cfg['name'] == config_primitive.name:
+ if cfg['name'] == service_primitive.name:
self.merge_params(
- config_primitive.parameter,
+ service_primitive.parameter,
cfg[self.PARAMETER],
field="value")
break
except AttributeError as e:
- self._log.debug("Did not find initial-config-primitive for NSD {}: {}".
+ self._log.debug("Did not find initial-service-primitive for NSD {}: {}".
format(nsd.name, e))
-
def merge_nsd_config(self, nsd, input_data):
- for config_primitive in nsd.service_primitive:
+ for service_primitive in nsd.service_primitive:
try:
- cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+ cfg = input_data[self.SERVICE_PRIMITIVE][service_primitive.name]
except KeyError:
continue
self.merge_params(
- config_primitive.parameter,
+ service_primitive.parameter,
cfg[self.PARAMETER])
- for param_group in config_primitive.parameter_group:
+ for param_group in service_primitive.parameter_group:
self.merge_params(
param_group.parameter,
cfg[self.PARAMETER_GROUP][param_group.name])
@@ -339,9 +338,9 @@
vnfd_init_cfg_prim_msg.append(prim)
def merge_vnfd_config(self, vnfd, input_data):
- for config_primitive in vnfd.vnf_configuration.service_primitive:
+ for config_primitive in vnfd.vnf_configuration.config_primitive:
try:
- cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+ cfg = input_data[self.SERVICE_PRIMITIVE][config_primitive.name]
except KeyError:
continue
@@ -352,7 +351,7 @@
class ConfigStore(object):
"""Convenience class that fetches all the instance related data from the
- $RIFT_ARTIFACTS/launchpad/libs directory.
+ $RIFT_VAR_ROOT/launchpad/libs directory.
"""
def __init__(self, log):
@@ -363,7 +362,7 @@
self._log = log
self.converter = ConfigPrimitiveConvertor()
- def merge_vnfd_config(self, nsd_id, vnfd, member_vnf_index):
+ def merge_vnfd_config(self, project_name, nsd_id, vnfd, member_vnf_index):
"""Merges the vnfd config from the config directory.
Args:
@@ -372,10 +371,11 @@
the member index ref.
"""
nsd_archive = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
- "launchpad/libs",
- nsd_id,
- "config")
+ os.getenv('RIFT_VAR_ROOT'),
+ "launchpad/packages/vnfd/",
+ project_name,
+ vnfd.id,
+ "vnf_config")
self._log.info("Looking for config from the archive {}".format(nsd_archive))
@@ -404,12 +404,13 @@
input_data = yaml.load(fh)
return input_data
- def merge_nsd_config(self, nsd):
+ def merge_nsd_config(self, nsd, project_name):
nsd_archive = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
- "launchpad/libs",
+ os.getenv('RIFT_VAR_ROOT'),
+ "launchpad/packages/nsd/",
+ project_name,
nsd.id,
- "config")
+ "ns_config")
self._log.info("Looking for config from the archive {}".format(nsd_archive))
diff --git a/common/python/rift/mano/config_data/test/test_converter.py b/common/python/rift/mano/config_data/test/test_converter.py
index 1bfd7d7..42521c0 100644
--- a/common/python/rift/mano/config_data/test/test_converter.py
+++ b/common/python/rift/mano/config_data/test/test_converter.py
@@ -17,20 +17,23 @@
import pytest
import uuid
-from gi.repository import NsdYang, VnfdYang
+from gi.repository import (
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
+ )
from ..config import ConfigPrimitiveConvertor
import yaml
@pytest.fixture(scope="function")
def nsd():
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
nsd = catalog.nsd.add()
nsd.id = str(uuid.uuid1())
return nsd
@pytest.fixture(scope="function")
def vnfd():
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = catalog.vnfd.add()
vnfd.id = str(uuid.uuid1())
return vnfd
@@ -222,14 +225,14 @@
],
})
- vnf_config.service_primitive.add().from_dict({
+ vnf_config.config_primitive.add().from_dict({
"name": "PE1",
"parameter": [
{"name": "Foo", "default_value": "Bar"}
]
})
- expected_yaml = """service_primitive:
+ expected_yaml = """config_primitive:
PE1:
parameter:
Foo: Bar
@@ -267,12 +270,12 @@
"parameter": [{"name": "cidr"}],
})
- vnf_config.service_primitive.add().from_dict({
+ vnf_config.config_primitive.add().from_dict({
"name": "PE1",
"parameter": [{"name": "Foo",}]
})
- ip_yaml = """service_primitive:
+ ip_yaml = """config_primitive:
PE1:
parameter:
Foo: Bar
@@ -287,7 +290,7 @@
cidr: 10.10.10.2/30
"""
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
expected_vnfd = catalog.vnfd.add()
vnf_config = expected_vnfd.vnf_configuration
expected_vnfd.id = vnfd.id
@@ -311,7 +314,7 @@
],
})
- vnf_config.service_primitive.add().from_dict({
+ vnf_config.config_primitive.add().from_dict({
"name": "PE1",
"parameter": [
{"name": "Foo", "default_value": "Bar"}
@@ -374,7 +377,7 @@
Vlan ID: '3000'
"""
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
expected_nsd = catalog.nsd.add()
expected_nsd.id = nsd.id
expected_nsd.service_primitive.add().from_dict(
diff --git a/common/python/rift/mano/dts/__init__.py b/common/python/rift/mano/dts/__init__.py
index e3ffbbb..2c44daa 100644
--- a/common/python/rift/mano/dts/__init__.py
+++ b/common/python/rift/mano/dts/__init__.py
@@ -25,6 +25,5 @@
NsdCatalogSubscriber,
NsInstanceConfigSubscriber)
from .subscriber.store import SubscriberStore
-from .subscriber.ro_account import ROAccountConfigSubscriber
from .rpc.core import AbstractRpcHandler
\ No newline at end of file
diff --git a/common/python/rift/mano/dts/core.py b/common/python/rift/mano/dts/core.py
index 4894e16..3a04945 100644
--- a/common/python/rift/mano/dts/core.py
+++ b/common/python/rift/mano/dts/core.py
@@ -25,7 +25,7 @@
"""A common class to hold the barebone objects to build a publisher or
subscriber
"""
- def __init__(self, log, dts, loop):
+ def __init__(self, log, dts, loop, project):
"""Constructor
Args:
@@ -34,7 +34,39 @@
loop : Asyncio event loop.
"""
# Reg handle
- self.reg = None
- self.log = log
- self.dts = dts
- self.loop = loop
+ self._reg = None
+ self._log = log
+ self._dts = dts
+ self._loop = loop
+ self._project = project
+
+ @property
+ def reg(self):
+ return self._reg
+
+ @reg.setter
+ def reg(self, val):
+ self._reg = val
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @property
+ def project(self):
+ return self._project
+
+ def deregister(self):
+ self._log.debug("De-registering DTS handler ({}) for project {}".
+ format(self.__class__.__name__, self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
diff --git a/common/python/rift/mano/dts/rpc/core.py b/common/python/rift/mano/dts/rpc/core.py
index dfa08bb..72016f1 100644
--- a/common/python/rift/mano/dts/rpc/core.py
+++ b/common/python/rift/mano/dts/rpc/core.py
@@ -36,8 +36,8 @@
class AbstractRpcHandler(DtsHandler):
"""Base class to simplify RPC implementation
"""
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project=None):
+ super().__init__(log, dts, loop, project)
if not asyncio.iscoroutinefunction(self.callback):
raise ValueError('%s has to be a coroutine' % (self.callback))
@@ -61,6 +61,9 @@
def on_prepare(self, xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+ if self.project and not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
rpc_op = yield from self.callback(ks_path, msg)
xact_info.respond_xpath(
@@ -76,6 +79,11 @@
@asyncio.coroutine
def register(self):
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project))
+ return
+
reg_event = asyncio.Event(loop=self.loop)
@asyncio.coroutine
@@ -94,6 +102,10 @@
yield from reg_event.wait()
+ def deregister(self):
+ self.reg.deregister()
+ self.reg = None
+
@abc.abstractmethod
@asyncio.coroutine
def callback(self, ks_path, msg):
diff --git a/common/python/rift/mano/dts/subscriber/core.py b/common/python/rift/mano/dts/subscriber/core.py
index dd2513e..53c2e8c 100644
--- a/common/python/rift/mano/dts/subscriber/core.py
+++ b/common/python/rift/mano/dts/subscriber/core.py
@@ -1,6 +1,6 @@
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -27,6 +27,9 @@
from gi.repository import (RwDts as rwdts, ProtobufC)
import rift.tasklets
+from rift.mano.utils.project import (
+ get_add_delete_update_cfgs,
+ )
from ..core import DtsHandler
@@ -35,11 +38,11 @@
"""A common class for all subscribers.
"""
@classmethod
- def from_tasklet(cls, tasklet, callback=None):
+ def from_project(cls, proj, callback=None):
"""Convenience method to build the object from tasklet
Args:
- tasklet (rift.tasklets.Tasklet): Tasklet
+ proj (rift.mano.utils.project.ManoProject): Project
callback (None, optional): Callable, which will be invoked on
subscriber changes.
@@ -48,20 +51,41 @@
msg: The Gi Object msg from DTS
action(rwdts.QueryAction): Action type
"""
- return cls(tasklet.log, tasklet.dts, tasklet.loop, callback=callback)
+ return cls(proj.log, proj.dts, proj.loop, proj, callback=callback)
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
self.callback = callback
+ @abc.abstractmethod
+ def get_xpath(self):
+ """
+ Returns:
+ str: xpath
+ """
+ pass
+
def get_reg_flags(self):
"""Default set of REG flags, can be over-ridden by sub classes.
-
+
Returns:
Set of rwdts.Flag types.
"""
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY|rwdts.Flag.CACHE
+ @asyncio.coroutine
+ def data(self):
+ itr = yield from self.dts.query_read(
+ self.get_xpath())
+
+ values = []
+ for res in itr:
+ result = yield from res
+ result = result.result
+ values.append(result)
+
+ return values
+
class AbstractOpdataSubscriber(SubscriberDtsHandler):
@@ -70,29 +94,32 @@
Opdata subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
- @abc.abstractmethod
- def get_xpath(self):
- """
- Returns:
- str: xpath
- """
- pass
@asyncio.coroutine
def register(self):
"""Triggers the registration
"""
+
+ if self._reg:
+ self._log.warning("Subscriber already registered for project {}".
+ format(self._project.name))
+ return
+
xacts = {}
def on_commit(xact_info):
- xact_id = xact_info.handle.get_xact().id
- if xact_id in xacts:
- msg, action = xacts.pop(xact_id)
+ try:
+ xact_id = xact_info.handle.get_xact().id
+ if xact_id in xacts:
+ msg, action = xacts.pop(xact_id)
- if self.callback:
- self.callback(msg, action)
+ if self.callback:
+ self.callback(msg, action)
+ except Exception as e:
+ self.log.error("Exception when committing data for registration:{} exception:{}".format(self.get_xpath(), e))
+ self.log.exception(e)
return rwdts.MemberRspCode.ACTION_OK
@@ -105,8 +132,11 @@
except Exception as e:
self.log.exception(e)
- finally:
+ try:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.warning("Reg handle is None during action {} for {}: {}".
+ format(action, self.__class__, e))
reg_event = asyncio.Event(loop=self.loop)
@@ -120,17 +150,14 @@
on_commit=on_commit
)
- self.reg = yield from self.dts.register(
- xpath=self.get_xpath(),
+ self._reg = yield from self.dts.register(
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
handler=handler)
# yield from reg_event.wait()
- assert self.reg is not None
-
- def deregister(self):
- self.reg.deregister()
+ assert self._reg is not None
class AbstractConfigSubscriber(SubscriberDtsHandler):
@@ -139,7 +166,7 @@
Config subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
KEY = "msgs"
@@ -151,42 +178,31 @@
def key_name(self):
pass
- def get_add_delete_update_cfgs(self, dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
- is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ try:
+ if self._reg:
+ for cfg in self._reg.elements:
+ if self.callback:
+ self.callback(cfg, rwdts.QueryAction.CREATE)
+ else:
+ self._log.error("Reg handle is None during action {} for {}".
+ format(action, self.__class__))
- add_cfgs, delete_cfgs, update_cfgs = self.get_add_delete_update_cfgs(
- dts_member_reg=self.reg,
+ except Exception as e:
+ self._log.exception("Adding config {} during restart failed: {}".
+ format(cfg, e))
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
xact=xact,
key_name=self.key_name())
@@ -202,14 +218,18 @@
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
""" on prepare callback """
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ self._log.debug("Subscriber DTS prepare for project %s: %s",
+ self.project, xact_info.query_action)
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.warning(
+ "Subscriber DTS prepare for project {}, action {} in class {} failed: {}".
+ format(self.project, xact_info.query_action, self.__class__, e))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self.dts.appconf_group_create(handler=acg_hdl) as acg:
- self.reg = acg.register(
- xpath=self.get_xpath(),
+ self._reg = acg.register(
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
on_prepare=on_prepare)
-
- def deregister(self):
- self.reg.deregister()
diff --git a/common/python/rift/mano/dts/subscriber/ns_subscriber.py b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
index c16f771..4258afa 100644
--- a/common/python/rift/mano/dts/subscriber/ns_subscriber.py
+++ b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
@@ -39,7 +39,7 @@
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
class NsdCatalogSubscriber(core.AbstractConfigSubscriber):
@@ -49,7 +49,7 @@
return "id"
def get_xpath(self):
- return "C,/nsd:nsd-catalog/nsd:nsd"
+ return self._project.add_project("C,/project-nsd:nsd-catalog/project-nsd:nsd")
class NsInstanceConfigSubscriber(core.AbstractConfigSubscriber):
@@ -59,4 +59,4 @@
return "id"
def get_xpath(self):
- return "C,/nsr:ns-instance-config/nsr:nsr"
+ return self._project.add_project("C,/nsr:ns-instance-config/nsr:nsr")
diff --git a/common/python/rift/mano/dts/subscriber/ro_account.py b/common/python/rift/mano/dts/subscriber/ro_account.py
deleted file mode 100644
index 575d649..0000000
--- a/common/python/rift/mano/dts/subscriber/ro_account.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-@file ro_account.py
-@author Varun Prasad (varun.prasad@riftio.com)
-@date 09-Jul-2016
-
-"""
-
-import gi
-gi.require_version('RwDts', '1.0')
-from gi.repository import RwDts as rwdts
-
-from . import core
-
-class ROAccountConfigSubscriber(core.AbstractConfigSubscriber):
-
- def key_name(self):
- return "name"
-
- def get_xpath(self):
- return("C,/rw-launchpad:resource-orchestrator")
\ No newline at end of file
diff --git a/common/python/rift/mano/dts/subscriber/store.py b/common/python/rift/mano/dts/subscriber/store.py
index 88cb79a..222d444 100644
--- a/common/python/rift/mano/dts/subscriber/store.py
+++ b/common/python/rift/mano/dts/subscriber/store.py
@@ -33,10 +33,10 @@
"""
KEY = enum.Enum('KEY', 'NSR NSD VNFD VNFR')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
- params = (self.log, self.dts, self.loop)
+ params = (self.log, self.dts, self.loop, self.project)
self._nsr_sub = ns_subscriber.NsrCatalogSubscriber(*params, callback=self.on_nsr_change)
self._nsrs = {}
@@ -92,6 +92,14 @@
yield from self._vnfr_sub.register()
yield from self._nsr_sub.register()
+ def deregister(self):
+ self._log.debug("De-register store for project {}".
+ format(self._project))
+ self._vnfd_sub.deregister()
+ self._nsd_sub.deregister()
+ self._vnfr_sub.deregister()
+ self._nsr_sub.deregister()
+
@asyncio.coroutine
def refresh_store(self, subsriber, store):
itr = yield from self.dts.query_read(subsriber.get_xpath())
diff --git a/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
index a69a00f..49c622c 100644
--- a/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
+++ b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
@@ -16,26 +16,27 @@
#
import asyncio
+import gi
import sys
import types
import unittest
import uuid
-
import rift.test.dts
import rift.mano.dts as store
-import gi
gi.require_version('RwDtsYang', '1.0')
from gi.repository import (
RwLaunchpadYang as launchpadyang,
RwDts as rwdts,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
RwNsrYang,
- RwNsdYang,
+ RwProjectNsdYang as RwNsdYang,
VnfrYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class DescriptorPublisher(object):
@@ -107,11 +108,11 @@
def test_vnfd_handler(self):
yield from self.store.register()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
mock_vnfd.id = str(uuid.uuid1())
- w_xpath = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
- xpath = "{}[vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
+ w_xpath = "C,/rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ xpath = "{}[project-vnfd:id={}]".format(w_xpath, quoted_key(mock_vnfd.id))
yield from self.publisher.publish(w_xpath, xpath, mock_vnfd)
yield from asyncio.sleep(5, loop=self.loop)
@@ -128,11 +129,11 @@
def test_vnfr_handler(self):
yield from self.store.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
+ xpath = "{}[vnfr:id={}]".format(w_xpath, quoted_key(mock_vnfr.id))
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
yield from asyncio.sleep(5, loop=self.loop)
@@ -151,12 +152,12 @@
def test_nsr_handler(self):
yield from self.store.register()
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
mock_nsr.ns_instance_config_ref = str(uuid.uuid1())
mock_nsr.name_ref = "Foo"
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
- xpath = "{}[nsr:ns-instance-config-ref='{}']".format(w_xpath, mock_nsr.ns_instance_config_ref)
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
+ xpath = "{}[nsr:ns-instance-config-ref={}]".format(w_xpath, quoted_key(mock_nsr.ns_instance_config_ref))
yield from self.publisher.publish(w_xpath, xpath, mock_nsr)
yield from asyncio.sleep(5, loop=self.loop)
@@ -175,11 +176,11 @@
def test_nsd_handler(self):
yield from self.store.register()
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
mock_nsd.id = str(uuid.uuid1())
- w_xpath = "C,/nsd:nsd-catalog/nsd:nsd"
- xpath = "{}[nsd:id='{}']".format(w_xpath, mock_nsd.id)
+ w_xpath = "C,/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd"
+ xpath = "{}[project-nsd:id={}]".format(w_xpath, quoted_key(mock_nsd.id))
yield from self.publisher.publish(w_xpath, xpath, mock_nsd)
yield from asyncio.sleep(2, loop=self.loop)
@@ -206,22 +207,22 @@
# publish
yield from vnf_handler.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
def mon_xpath(param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
- "/vnfr:vnfr[vnfr:id='{}']".format(mock_vnfr.id) +
+ return("D,/rw-project:project/vnfr:vnfr-catalog" +
+ "/vnfr:vnfr[vnfr:id={}]".format(quoted_key(mock_vnfr.id)) +
"/vnfr:monitoring-param" +
- ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+ ("[vnfr:id={}]".format(quoted_key(param_id)) if param_id else ""))
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
+ xpath = "{}[vnfr:id={}]".format(w_xpath, quoted_key(mock_vnfr.id))
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
- mock_param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ mock_param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
"id": "1"
})
mock_vnfr.monitoring_param.append(mock_param)
@@ -238,4 +239,4 @@
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/common/python/rift/mano/dts/subscriber/vnf_subscriber.py b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
index 76a58ab..524fb41 100644
--- a/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
+++ b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
@@ -38,7 +38,7 @@
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ return self.project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr")
class VnfdCatalogSubscriber(core.AbstractConfigSubscriber):
@@ -48,4 +48,4 @@
return "id"
def get_xpath(self):
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ return self.project.add_project("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd")
diff --git a/common/python/rift/mano/ncclient.py b/common/python/rift/mano/ncclient.py
index 9b87030..1c52215 100644
--- a/common/python/rift/mano/ncclient.py
+++ b/common/python/rift/mano/ncclient.py
@@ -49,7 +49,7 @@
self.loop = loop
self._nc_mgr = None
- self._model = RwYang.Model.create_libncx()
+ self._model = RwYang.Model.create_libyang()
@asyncio.coroutine
def connect(self, timeout=240):
diff --git a/common/python/rift/mano/ro_account/__init__.py b/common/python/rift/mano/ro_account/__init__.py
new file mode 100644
index 0000000..100cfd8
--- /dev/null
+++ b/common/python/rift/mano/ro_account/__init__.py
@@ -0,0 +1,29 @@
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .accounts import (
+ ROAccount
+ )
+
+from .config import (
+ ROAccountConfigSubscriber,
+ ROAccountConfigCallbacks
+ )
+
+from .operdata import (
+ ROAccountDtsOperdataHandler
+ )
diff --git a/common/python/rift/mano/ro_account/accounts.py b/common/python/rift/mano/ro_account/accounts.py
new file mode 100644
index 0000000..74e5ea2
--- /dev/null
+++ b/common/python/rift/mano/ro_account/accounts.py
@@ -0,0 +1,126 @@
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import asyncio
+
+from gi.repository import (
+ RwDts as rwdts,
+ RwRoAccountYang,
+ )
+
+import rift.mano.dts as mano_dts
+import rift.tasklets
+
+from rift.tasklets.rwnsmtasklet import openmano_nsm
+from rift.tasklets.rwnsmtasklet import rwnsmplugin
+
+class ROAccount(object):
+ """
+ RO Account Model class
+ """
+ DEFAULT_PLUGIN = rwnsmplugin.RwNsPlugin
+
+ def __init__(self, dts=None, log=None, loop=None, project=None, records_publisher=None, account_msg=None):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._project = project
+ self._records_publisher = records_publisher
+ self._account_msg = None
+ if account_msg is not None:
+ self._account_msg = account_msg.deep_copy()
+ self._name = self._account_msg.name
+
+ self._datacenters = []
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="unknown",
+ details="Connection status lookup not started"
+ )
+ self.live_instances = 0
+
+ if self._dts is None:
+ return
+
+ self._nsm_plugins = rwnsmplugin.NsmPlugins()
+ self._nsm_cls = self.DEFAULT_PLUGIN
+
+ try:
+ self._nsm_cls = self._nsm_plugins.class_by_plugin_name(
+ account_msg.ro_account_type
+ )
+ except KeyError as e:
+ self._log.warning(
+ "RO account nsm plugin not found: %s. Using standard rift nsm.",
+ account_msg.name
+ )
+
+ self._ro_plugin = self._create_plugin(self._nsm_cls, account_msg)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def account_msg(self):
+ return self._account_msg
+
+ @property
+ def ro_acccount_type(self):
+ return self._account_msg.ro_account_type if self._account_msg else 'rift'
+
+ @property
+ def ro_plugin(self):
+ return self._ro_plugin
+
+ @property
+ def connection_status(self):
+ return self._status
+
+ def _create_plugin(self, nsm_cls, account_msg):
+ self._log.debug("Instantiating new RO account using class: %s", nsm_cls)
+ nsm_instance = nsm_cls(self._dts, self._log, self._loop,
+ self._records_publisher, account_msg, self._project)
+ return nsm_instance
+
+ def check_ro_account_status(self):
+ self._log.debug("Checking RO Account Status. Acct: %s",
+ self.name)
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="validating",
+ details="RO account connection status check in progress"
+ )
+ try:
+ self._datacenters = []
+ for uuid, name in self._ro_plugin._cli_api.datacenter_list():
+ self._datacenters.append({
+ 'uuid':uuid,
+ 'name':name
+ }
+ )
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="success",
+ details="RO account connection status success"
+ )
+ except:
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="failure",
+ details="RO account connection status failure"
+ )
+ self._log.warning("RO account connection status failure, Acct:%s, status:%s",
+ self.name, self._status)
+
+ def start_validate_ro_account(self, loop):
+ loop.run_in_executor(None, self.check_ro_account_status)
diff --git a/common/python/rift/mano/ro_account/config.py b/common/python/rift/mano/ro_account/config.py
new file mode 100644
index 0000000..1eeed51
--- /dev/null
+++ b/common/python/rift/mano/ro_account/config.py
@@ -0,0 +1,171 @@
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
+
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ RwRoAccountYang,
+ )
+
+from . import accounts
+
+class ROAccountConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_delete_apply=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_delete_apply = on_delete_apply
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+class ROAccountConfigSubscriber(object):
+ XPATH = "C,/rw-ro-account:ro-account/rw-ro-account:account"
+
+ def __init__(self, dts, log, loop, project, records_publisher, ro_callbacks):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._project = project
+ self._records_publisher = records_publisher
+ self._ro_callbacks = ro_callbacks
+
+ self._reg = None
+ self.accounts = {}
+ self._log.debug("Inside RO Account Config Subscriber init")
+
+ def add_account(self, account_msg):
+ self._log.debug("adding ro account: {}".format(account_msg))
+
+ account = accounts.ROAccount(self._dts,
+ self._log,
+ self._loop,
+ self._project,
+ self._records_publisher,
+ account_msg)
+ self.accounts[account.name] = account
+ self._ro_callbacks.on_add_apply(account)
+
+ def delete_account(self, account_name):
+ self._log.debug("Deleting RO account: {}".format(account_name))
+ account = self.accounts[account_name]
+ del self.accounts[account_name]
+ self._ro_callbacks.on_delete_apply(account_name)
+
+ def deregister(self):
+ self._log.debug("Project {}: De-register ro account handler".
+ format(self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ def update_account(self, account):
+ """ Update an existing ro account
+
+ In order to simplify update, turn an update into a delete followed by
+ an add. The drawback to this approach is that we will not support
+ updates of an "in-use" ro account, but this seems like a
+ reasonable trade-off.
+
+ """
+ self._log.debug("updating ro account: {}".format(account))
+
+ self.delete_account(account.name)
+ self.add_account(account)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got ro account apply config (xact: %s) (action: %s)", xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("RO account being re-added after restart.")
+ self.add_account(cfg)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="name",
+ )
+
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_account(cfg.name)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.add_account(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_account(cfg)
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for RO Account """
+
+ self._log.debug("RO account on_prepare config received (action: %s): %s",
+ xact_info.query_action, msg)
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.error(
+ "Subscriber DTS prepare for project {}, action {} in class {} failed: {}".
+ format(self._project, xact_info.query_action, self.__class__, e))
+
+ self._log.debug("Registering for RO Account config using xpath: %s",
+ ROAccountConfigSubscriber.XPATH,
+ )
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ xpath = self._project.add_project(ROAccountConfigSubscriber.XPATH)
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
diff --git a/common/python/rift/mano/ro_account/operdata.py b/common/python/rift/mano/ro_account/operdata.py
new file mode 100644
index 0000000..c14a7c7
--- /dev/null
+++ b/common/python/rift/mano/ro_account/operdata.py
@@ -0,0 +1,329 @@
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+import gi
+import rift.mano.dts as mano_dts
+import rift.tasklets
+from . import accounts
+
+from gi.repository import(
+ RwRoAccountYang,
+ RwDts as rwdts,
+ RwTypes,
+ )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+class ROAccountNotFound(Exception):
+ pass
+
+class ROAccountDtsOperdataHandler(object):
+ def __init__(self, dts, log, loop, project):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._project = project
+
+ self._regh = None
+ self._rpc = None
+ self._rsic = None
+ self._rdcp = None
+ self.ro_accounts = {}
+ self._nsr_sub = mano_dts.NsInstanceConfigSubscriber(
+ self._log,
+ self._dts,
+ self._loop,
+ self._project,
+ callback=self.handle_nsr)
+
+ def handle_nsr(self, nsr, action):
+ if action == rwdts.QueryAction.CREATE:
+ try:
+ self.ro_accounts[nsr.resource_orchestrator].live_instances += 1
+ except KeyError as e:
+ self.ro_accounts['rift'].live_instances += 1
+ elif action == rwdts.QueryAction.DELETE:
+ try:
+ self.ro_accounts[nsr.resource_orchestrator].live_instances -= 1
+ except KeyError as e:
+ self.ro_accounts['rift'].live_instances -= 1
+
+ def get_xpath(self):
+ return "D,/rw-ro-account:ro-account-state/account"
+
+ def get_qualified_xpath(self, ro_account_name):
+ if ro_account_name is None:
+ raise Exception("Account name cannot be None")
+
+ return self._project.add_project("D,/rw-ro-account:ro-account-state/account{}".format(
+ "[name=%s]" % quoted_key(ro_account_name))
+ )
+
+ def add_rift_ro_account(self):
+ rift_acc = accounts.ROAccount()
+ rift_acc._name = 'rift'
+ rift_acc._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="success",
+ details="RO account connection status success"
+ )
+ self.ro_accounts[rift_acc.name] = rift_acc
+ rift_acc_state = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account(name=rift_acc.name)
+ self._regh.create_element(self.get_qualified_xpath(rift_acc.name), rift_acc_state)
+
+ def add_ro_account(self, account):
+ self.ro_accounts[account.name] = account
+ account.start_validate_ro_account(self._loop)
+
+ def delete_ro_account(self, account_name):
+ account = self.ro_accounts[account_name]
+ del self.ro_accounts[account_name]
+
+ def get_saved_ro_accounts(self, ro_account_name):
+ ''' Get RO Account corresponding to passed name, or all saved accounts if name is None'''
+ saved_ro_accounts = []
+
+ if ro_account_name is None or ro_account_name == "":
+ ro_accounts = list(self.ro_accounts.values())
+ saved_ro_accounts.extend(ro_accounts)
+ elif ro_account_name in self.ro_accounts:
+ account = self.ro_accounts[ro_account_name]
+ saved_ro_accounts.append(account)
+ else:
+ errstr = "RO account {} does not exist".format(ro_account_name)
+ raise KeyError(errstr)
+
+ return saved_ro_accounts
+
+ @asyncio.coroutine
+ def _register_show_status(self):
+ def get_xpath(ro_account_name):
+ return "D,/rw-ro-account:ro-account-state/account{}/connection-status".format(
+ "[name=%s]" % quoted_key(ro_account_name)
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ connection_status = account._status
+
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=account._status,
+ )
+ except Exception as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(self.get_xpath())
+ self._regh = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ #ATTN: TODO: Should ideally wait for
+ #on_ready callback to be called.
+ self.add_rift_ro_account()
+
+ @asyncio.coroutine
+ def _register_show_instance_count(self):
+ def get_xpath(ro_account_name=None):
+ return "D,/rw-ro-account:ro-account-state/account{}/instance-ref-count".format(
+ "[name=%s]" % quoted_key(ro_account_name) if ro_account_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ instance_count = account.live_instances
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_InstanceRefCount(count=instance_count)
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(get_xpath())
+ self._rsic = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def _register_validate_rpc(self):
+ def get_xpath():
+ return "/rw-ro-account:update-ro-account-status"
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ if not msg.has_field("ro_account"):
+ raise ROAccountNotFound("RO account name not provided")
+ ro_account_name = msg.ro_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
+ try:
+ account = self.ro_accounts[ro_account_name]
+ except KeyError:
+ errmsg = "RO account name {} not found in project {}". \
+ format(ro_account_name, self._project.name)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ get_xpath(),
+ errmsg)
+ raise ROAccountNotFound(errmsg)
+
+ if ro_account_name != 'rift':
+ account.start_validate_ro_account(self._loop)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._rpc = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def _register_data_center_publisher(self):
+ def get_xpath(ro_account_name=None):
+ return "D,/rw-ro-account:ro-account-state/account{}/datacenters".format(
+ "[name=%s]" % quoted_key(ro_account_name) if ro_account_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ datacenters = []
+ if account.name == 'rift':
+ datacenters = [{'name': cloud.name, 'datacenter_type': cloud.account_type}
+ for cloud in self._project.cloud_accounts]
+ else :
+ datacenters = account._datacenters
+
+ response = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_Datacenters()
+ response.from_dict({'datacenters': datacenters})
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=response
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(get_xpath())
+ self._rdcp = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def _register_config_data_publisher(self):
+ def get_xpath(ro_account_name=None):
+ return "D,/rw-ro-account:ro-account-state/account{}/config-data".format(
+ "[name=%s]" % quoted_key(ro_account_name) if ro_account_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ ro_acct_type = account.ro_acccount_type
+
+ response = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConfigData(ro_account_type=ro_acct_type)
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=response
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(get_xpath())
+ self._rcdp = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def register(self):
+ self._log.debug("Register RO account for project %s", self._project.name)
+ yield from self._register_show_status()
+ yield from self._register_validate_rpc()
+ yield from self._register_show_instance_count()
+ yield from self._register_data_center_publisher()
+ yield from self._register_config_data_publisher()
+ yield from self._nsr_sub.register()
+
+ def deregister(self):
+ self._log.debug("De-register RO account for project %s", self._project.name)
+ self._rpc.deregister()
+ self._regh.deregister()
+ self._rsic.deregister()
+ self._rdcp.deregister()
+ self._rcdp.deregister()
+ self._nsr_sub.deregister()
diff --git a/common/python/rift/mano/sdn/accounts.py b/common/python/rift/mano/sdn/accounts.py
index d539ead..fdd20f5 100644
--- a/common/python/rift/mano/sdn/accounts.py
+++ b/common/python/rift/mano/sdn/accounts.py
@@ -1,5 +1,5 @@
-#
+#
# Copyright 2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -52,7 +52,7 @@
self._sdn = self.plugin.get_interface("Topology")
self._sdn.init(rwlog_hdl)
- self._status = RwsdnalYang.SDNAccount_ConnectionStatus(
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@@ -102,13 +102,13 @@
@property
def sdnal_account_msg(self):
- return RwsdnalYang.SDNAccount.from_dict(
+ return RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList.from_dict(
self.account_msg.as_dict(),
ignore_missing_keys=True,
)
def sdn_account_msg(self, account_dict):
- self._account_msg = RwSdnYang.SDNAccount.from_dict(account_dict)
+ self._account_msg = RwSdnYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList.from_dict(account_dict)
@property
def account_type(self):
@@ -126,8 +126,9 @@
@asyncio.coroutine
def validate_sdn_account_credentials(self, loop):
- self._log.debug("Validating SDN Account credentials %s", self._account_msg)
- self._status = RwSdnYang.SDNAccount_ConnectionStatus(
+ self._log.debug("Validating SDN Account credentials %s",
+ self.name)
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus(
status="validating",
details="SDN account connection validation in progress"
)
@@ -137,14 +138,16 @@
self.sdnal_account_msg,
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwSdnYang.SDNAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwSdnYang.SDNAccount_ConnectionStatus(
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus(
status="failure",
details="Error when calling SDNAL validate SDN creds"
)
- self._log.info("Got SDN account validation response: %s", self._status)
+ if self._status.status == 'failure':
+ self._log.error("SDN account validation failed; Acct: %s status: %s",
+ self.name, self._status)
def start_validate_credentials(self, loop):
if self._validate_task is not None:
diff --git a/common/python/rift/mano/sdn/config.py b/common/python/rift/mano/sdn/config.py
index a9de01b..1ae65a5 100644
--- a/common/python/rift/mano/sdn/config.py
+++ b/common/python/rift/mano/sdn/config.py
@@ -26,6 +26,8 @@
ProtobufC,
)
+from rift.mano.utils.project import get_add_delete_update_cfgs
+
from . import accounts
@@ -37,32 +39,6 @@
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class SDNAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
@@ -102,9 +78,10 @@
class SDNAccountConfigSubscriber(object):
XPATH = "C,/rw-sdn:sdn/rw-sdn:account"
- def __init__(self, dts, log, rwlog_hdl, sdn_callbacks, acctstore):
+ def __init__(self, dts, log, project, rwlog_hdl, sdn_callbacks, acctstore):
self._dts = dts
self._log = log
+ self._project = project
self._rwlog_hdl = rwlog_hdl
self._reg = None
@@ -143,6 +120,11 @@
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
@asyncio.coroutine
def apply_config(dts, acg, xact, action, _):
@@ -224,8 +206,9 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ xpath = self._project.add_project(SDNAccountConfigSubscriber.XPATH)
self._log.debug("Registering for SDN Account config using xpath: %s",
- SDNAccountConfigSubscriber.XPATH,
+ xpath,
)
acg_handler = rift.tasklets.AppConfGroup.Handler(
@@ -234,7 +217,7 @@
with self._dts.appconf_group_create(acg_handler) as acg:
self._reg = acg.register(
- xpath=SDNAccountConfigSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare,
)
diff --git a/common/python/rift/mano/sdn/operdata.py b/common/python/rift/mano/sdn/operdata.py
index b29f100..3478bcf 100644
--- a/common/python/rift/mano/sdn/operdata.py
+++ b/common/python/rift/mano/sdn/operdata.py
@@ -16,12 +16,18 @@
#
import asyncio
+import gi
+
import rift.tasklets
from gi.repository import(
RwSdnYang,
+ RwsdnalYang,
RwDts as rwdts,
+ RwTypes,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class SDNAccountNotFound(Exception):
@@ -29,12 +35,15 @@
class SDNAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self.sdn_accounts = {}
+ self._oper = None
+ self._rpc = None
def add_sdn_account(self, account):
self.sdn_accounts[account.name] = account
@@ -59,23 +68,39 @@
return saved_sdn_accounts
+ @asyncio.coroutine
+ def create_notification(self, account):
+ xpath = "N,/rw-sdn:sdn-notif"
+ ac_status = RwSdnYang.YangNotif_RwSdn_SdnNotif()
+ ac_status.name = account.name
+ ac_status.message = account.connection_status.details
+
+ yield from self._dts.query_create(xpath, rwdts.XactFlag.ADVISE, ac_status)
+ self._log.info("Notification called by creating dts query: %s", ac_status)
+
+
+ @asyncio.coroutine
def _register_show_status(self):
+ self._log.debug("Registering for show for project {}".format(self._project))
def get_xpath(sdn_name=None):
- return "D,/rw-sdn:sdn/account{}/connection-status".format(
- "[name='%s']" % sdn_name if sdn_name is not None else ''
- )
+ return self._project.add_project("D,/rw-sdn:sdn/rw-sdn:account{}/rw-sdn:connection-status".
+ format(
+ "[rw-sdn:name=%s]" % quoted_key(sdn_name)
+ if sdn_name is not None else ''))
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- self._log.debug("Got show SDN connection status request: %s", ks_path.create_string())
- path_entry = RwSdnYang.SDNAccount.schema().keyspec_to_entry(ks_path)
+ xpath = ks_path.to_xpath(RwSdnYang.get_schema())
+ self._log.debug("Got show SDN connection status request: %s", xpath)
+ path_entry = RwSdnYang.YangData_RwProject_Project_Sdn_Account.schema().keyspec_to_entry(ks_path)
sdn_account_name = path_entry.key00.name
try:
saved_accounts = self.get_saved_sdn_accounts(sdn_account_name)
for account in saved_accounts:
connection_status = account.connection_status
- self._log.debug("Responding to SDN connection status request: %s", connection_status)
+ self._log.debug("Responding to SDN connection status request: %s",
+ connection_status)
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
xpath=get_xpath(account.name),
@@ -88,19 +113,26 @@
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._oper = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
flags=rwdts.Flag.PUBLISHER,
)
+ @asyncio.coroutine
def _register_validate_rpc(self):
+ self._log.debug("Registering for rpc for project {}".format(self._project))
def get_xpath():
return "/rw-sdn:update-sdn-status"
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+ if self._project and not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
+ self._log.debug("Got update SDN connection status request: %s", msg)
+
if not msg.has_field("sdn_account"):
raise SDNAccountNotFound("SDN account name not provided")
@@ -108,21 +140,39 @@
try:
account = self.sdn_accounts[sdn_account_name]
except KeyError:
- raise SDNAccountNotFound("SDN account name %s not found" % sdn_account_name)
+ errmsg = "SDN account name %s not found" % sdn_account_name
+ self._log.error(errmsg)
+ xpath = ks_path.to_xpath(RwSdnYang.get_schema())
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ xpath,
+ errmsg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
account.start_validate_credentials(self._loop)
+ yield from self.create_notification(account)
+
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+ if self._oper:
+ self._oper.deregister()
+ self._oper = None
+
+ if self._rpc:
+ self._rpc.deregister()
+ self._rpc = None
diff --git a/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml b/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml
index 4debc76..21e32f4 100644
--- a/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml
+++ b/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml
@@ -1,5 +1,5 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
-description: Toy NS
+description: Translated from Tosca
data_types:
tosca.datatypes.nfv.riftio.dashboard_params:
properties:
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
index aa6b83b..83e054f 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
@@ -42,7 +42,7 @@
# TODO(Philip): Harcoding for now, need to make this generic
def get_xpath(self):
- xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
+ xpath = '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:' + self.name
return xpath
def get_dict_output(self):
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
index d263e6f..76a1fcd 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
@@ -69,6 +69,8 @@
nsd.vendor = self.metadata['vendor']
nsd.short_name = self.metadata['name']
nsd.version = self.metadata['version']
+ if 'logo' in self.metadata:
+ nsd.logo = self.metadata['logo']
except Exception as e:
self.log.warning(_("Unable to use YANG GI to generate "
"descriptors, falling back to alternate "
@@ -91,10 +93,25 @@
if resource.type == 'vld':
resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+ vnf_type_duplicate = []
+ vnfd_resources = []
+ vnfd_duplicate_resource_list = []
for resource in self.resources:
- # Do the vnfds next
if resource.type == 'vnfd':
+ vnfd_resources.append(resource)
+
+ vnfd_resources.sort(key=lambda x: x.member_vnf_id, reverse=False)
+ vnf_type_to_vnf_id = {}
+ for resource in vnfd_resources:
+ if resource.vnf_type not in vnf_type_duplicate:
resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+ vnf_type_to_vnf_id[resource.vnf_type] = resource.id
+ vnf_type_duplicate.append(resource.vnf_type)
+ else:
+ vnfd_duplicate_resource_list.append(resource)
+
+ for resource in vnfd_duplicate_resource_list:
+ resource.generate_nsd_constiuent(nsd, vnf_type_to_vnf_id[resource.vnf_type])
for resource in self.resources:
# Do the other nodes
@@ -111,7 +128,7 @@
if use_gi:
for param in self.parameters:
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath=param.get_xpath(),
)
)
@@ -127,7 +144,7 @@
# Need to add support to get script names, charms, etc.
other_files = {}
for resource in self.resources:
- resource.get_supporting_files(other_files)
+ resource.get_supporting_files(other_files, desc_id=nsd_id)
for policy in self.policies:
policy.get_supporting_files(other_files, desc_id=nsd_id)
@@ -200,6 +217,7 @@
self.YANG: vnfd_pf,
}
+
if vnfd_id in other_files:
vnfd_out[self.FILES] = other_files[vnfd_id]
@@ -256,7 +274,8 @@
return {pf+'-catalog': {pf: [desc]}}
def get_yaml(self, module_list, desc):
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
+
for module in module_list:
model.load_module(module)
return desc.to_yaml(model)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
index 7938485..190a5b4 100755
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
@@ -101,12 +101,7 @@
format(self.name, tosca_props))
vdu_props = {}
for key, value in tosca_props.items():
- if key == 'cloud_init':
- vdu_props['cloud-init'] = value
- elif key == 'cloud-init-file':
- self._cloud_init = "../cloud_init/{}".format(value)
- else:
- vdu_props[key] = value
+ vdu_props[key] = value
if 'name' not in vdu_props:
vdu_props['name'] = self.name
@@ -290,8 +285,6 @@
self.properties['guest-epa'] = get_guest_epa(tosca_caps['numa_extension'], tosca_caps['nfv_compute'])
if 'monitoring_param' in tosca_caps:
self._monitor_param.append(get_monitor_param(tosca_caps['monitoring_param'], '1'))
- if 'monitoring_param_1' in tosca_caps:
- self._monitor_param.append(get_monitor_param(tosca_caps['monitoring_param_1'], '2'))
if 'mgmt_interface' in tosca_caps:
self._mgmt_interface = get_mgmt_interface(tosca_caps['mgmt_interface'])
if len(self._mgmt_interface) > 0:
@@ -303,7 +296,20 @@
prop['port'] = self._mgmt_interface['dashboard-params']['port']
self._http_endpoint = prop
+ mon_idx = 2
+ monitoring_param_name = 'monitoring_param_1'
+ while True:
+ if monitoring_param_name in tosca_caps:
+ self._monitor_param.append(get_monitor_param(tosca_caps[monitoring_param_name], str(mon_idx)))
+ mon_idx += 1
+ monitoring_param_name = 'monitoring_param_{}'.format(mon_idx)
+ else:
+ break
+        # This is a quick hack to remove monitor params without name
+ for mon_param in list(self._monitor_param):
+ if 'name' not in mon_param:
+ self._monitor_param.remove(mon_param)
def handle_artifacts(self):
if self.artifacts is None:
@@ -343,8 +349,8 @@
self.artifacts = arts
def handle_interfaces(self):
- # Currently, we support only create operation
- operations_deploy_sequence = ['create']
+ # Currently, we support the following:
+ operations_deploy_sequence = ['create', 'configure']
operations = ManoResource._get_all_operations(self.nodetemplate)
@@ -357,7 +363,11 @@
self.operations[operation.name] = operation.implementation
for name, details in self.artifacts.items():
if name == operation.implementation:
- self._image = details['file']
+ if operation.name == 'create':
+ self._image = details['file']
+ elif operation.name == 'configure':
+ self._cloud_init = details['file']
+ break
except KeyError as e:
self.log.exception(e)
return None
@@ -412,6 +422,9 @@
if self._image_cksum:
self.properties['image-checksum'] = self._image_cksum
+ if self._cloud_init:
+ self.properties['cloud-init-file'] = os.path.basename(self._cloud_init)
+
for key in ToscaCompute.IGNORE_PROPS:
if key in self.properties:
self.properties.pop(key)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
index 9b7cd03..262c11a 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
@@ -53,8 +53,7 @@
self.log.debug(_("{0} with tosca properties: {1}").
format(self, tosca_props))
self.properties['name'] = tosca_props['name']
- self.properties['seq'] = \
- tosca_props['seq']
+ self.properties['seq'] = int(tosca_props['seq'])
self.properties['user-defined-script'] = \
tosca_props['user_defined_script']
self.scripts.append('../scripts/{}'. \
@@ -62,12 +61,11 @@
if 'parameter' in tosca_props:
self.properties['parameter'] = []
- for name, value in tosca_props['parameter'].items():
+ for parameter in tosca_props['parameter']:
self.properties['parameter'].append({
- 'name': name,
- 'value': value,
+ 'name': parameter['name'],
+ 'value': str(parameter['value']),
})
-
self.log.debug(_("{0} properties: {1}").format(self, self.properties))
def get_policy_props(self):
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
index a9f9c77..88a8a31 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
@@ -91,8 +91,8 @@
ip_profile_param['ip-version'] = 'ipv' + str(specs['ip_version'])
if 'cidr' in specs:
ip_profile_param['subnet-address'] = specs['cidr']
+ ip_profile_prop['ip-profile-params'] = ip_profile_param
- ip_profile_prop['ip-profile-params'] = ip_profile_param
return ip_profile_prop
tosca_props = self.get_tosca_props()
self._vld = get_vld_props(tosca_props)
@@ -128,7 +128,8 @@
ip_profile_props = convert_keys_to_python(self._ip_profile)
try:
nsd.vld.add().from_dict(vld_props)
- nsd.ip_profiles.add().from_dict(ip_profile_props)
+ if len(ip_profile_props) > 1:
+ nsd.ip_profiles.add().from_dict(ip_profile_props)
except Exception as e:
err_msg = _("{0} Exception vld from dict {1}: {2}"). \
format(self, props, e)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
index e4e045e..8f8bbbd 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
@@ -26,7 +26,7 @@
import gi
gi.require_version('RwVnfdYang', '1.0')
- from gi.repository import RwVnfdYang
+ from gi.repository import RwVnfdYang as RwVnfdYang
except ImportError:
pass
except ValueError:
@@ -61,7 +61,9 @@
self._policies = []
self._cps = []
self.vnf_type = nodetemplate.type
+ self.member_vnf_id = None
self._reqs = {}
+ self.logo = None
def map_tosca_name_to_mano(self, name):
new_name = super().map_tosca_name_to_mano(name)
@@ -120,6 +122,7 @@
if key == 'id':
self._const_vnfd['member-vnf-index'] = int(value)
self._const_vnfd['vnfd-id-ref'] = self.id
+ self.member_vnf_id = int(value)
elif key == 'vnf_configuration':
self._vnf_config = get_vnf_config(value)
else:
@@ -145,6 +148,8 @@
vnf_props.pop('start_by_default')
if 'logo' in self.metadata:
vnf_props['logo'] = self.metadata['logo']
+ self.logo = self.metadata['logo']
+
self.log.debug(_("VNF {0} with constituent vnf: {1}").
format(self.name, self._const_vnfd))
@@ -295,6 +300,12 @@
nsd['constituent-vnfd'] = []
nsd['constituent-vnfd'].append(self._const_vnfd)
+ def generate_nsd_constiuent(self, nsd, vnf_id):
+ self._const_vnfd['vnfd-id-ref'] = vnf_id
+ props = convert_keys_to_python(self._const_vnfd)
+ nsd.constituent_vnfd.add().from_dict(props)
+
+
def get_member_vnf_index(self):
return self._const_vnfd['member-vnf-index']
@@ -311,3 +322,10 @@
'type': 'cloud_init',
'name': vdu.cloud_init,
},)
+ if self.logo is not None:
+ files[desc_id] = []
+ file_location = "../icons/{}".format(self.logo)
+ files[desc_id].append({
+ 'type': 'icons',
+ 'name': file_location,
+ },)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
index 7f427f3..e745988 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
@@ -99,7 +99,7 @@
conf = {}
if _validate_action(value):
conf['trigger'] = action
- conf['ns-config-primitive-name-ref'] = value
+ conf['ns-service-primitive-name-ref'] = value
self.properties['scaling-config-action'].append(conf)
else:
err_msg = _("{0}: Did not find the action {1} in "
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
index f90c187..bba2d73 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
@@ -57,13 +57,13 @@
self._vnf_id = vnf_node.id
self.properties["vnf-configuration"] = {}
prop = {}
- prop["config-attributes"] = {}
+ #prop["config-attributes"] = {}
prop["script"] = {}
if 'config' in tosca_props:
- if 'config_delay' in tosca_props['config']:
- prop["config-attributes"]['config-delay'] = tosca_props['config']['config_delay']
- if 'config_priority' in tosca_props['config']:
- prop["config-attributes"]['config-priority'] = tosca_props['config']['config_priority']
+ # if 'config_delay' in tosca_props['config']:
+ # prop["config-attributes"]['config-delay'] = tosca_props['config']['config_delay']
+ # if 'config_priority' in tosca_props['config']:
+ # prop["config-attributes"]['config-priority'] = tosca_props['config']['config_priority']
if 'config_template' in tosca_props['config']:
prop["config-template"] = tosca_props['config']['config_template']
if 'config_details' in tosca_props['config']:
@@ -71,15 +71,16 @@
prop["script"]["script-type"] = tosca_props['config']['config_details']['script_type']
if 'initial_config' in tosca_props:
prop['initial-config-primitive'] = []
- #print("Weleek " + str(tosca_props['initial_config']))
for init_config in tosca_props['initial_config']:
if 'parameter' in init_config:
parameters = init_config.pop('parameter')
init_config['parameter'] = []
- for key, value in parameters.items():
- init_config['parameter'].append({'name': key, 'value': str(value)})
- if 'user_defined_script' in init_config:
- self.scripts.append('../scripts/{}'. \
+ for parameter in parameters:
+ for key, value in parameter.items():
+ init_config['parameter'].append({'name': key, 'value': str(value)})
+
+ if 'user_defined_script' in init_config:
+ self.scripts.append('../scripts/{}'. \
format(init_config['user_defined_script']))
prop['initial-config-primitive'].append(init_config)
@@ -111,10 +112,10 @@
return
if self._vnf_id not in files:
- files[desc_id] = []
+ files[self._vnf_id] = []
for script in self.scripts:
files[self._vnf_id].append({
'type': 'script',
'name': script,
- },)
\ No newline at end of file
+ },)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_ns_service_primitive.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_ns_service_primitive.py
new file mode 100644
index 0000000..eef4a9e
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_ns_service_primitive.py
@@ -0,0 +1,119 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaVnfNSServiceConfiguration'
+
+
+class ToscaVnfNSServiceConfiguration(ManoResource):
+    '''Translate TOSCA policy type tosca.policies.nfv.riftio.ns_service_primitives.'''
+
+ toscatype = 'tosca.policies.nfv.riftio.ns_service_primitives'
+
+ IGNORE_PROPS = []
+ VALUE_TYPE_CONVERSION_MAP = {
+ 'integer': 'INTEGER',
+ 'string':'STRING',
+ 'float':'DECIMAL',
+ 'INTEGER': 'INTEGER',
+ 'FLOAT':'DECIMAL'
+
+ }
+
+ def __init__(self, log, policy, metadata=None, vnf_name = None):
+ self.log = log
+ self.name = policy.name
+ self.type_ = 'place-grp'
+ self.metadata = metadata
+ self.linked_to_vnf = False
+ self.policy = policy
+ self.service_primitive = None
+ self.properties = {}
+ self.scripts = []
+
+ def __str__(self):
+ return "%s(%s)" % (self.name, self.type)
+
+ def handle_properties(self, nodes, groups):
+ tosca_props = self.get_policy_props()
+ service_primitive = {}
+ if 'name' in tosca_props:
+ service_primitive['name'] = tosca_props['name']
+ if 'user_defined_script' in tosca_props:
+ service_primitive['user_defined_script'] = tosca_props['user_defined_script']
+ self.scripts.append('../scripts/{}'. \
+ format(tosca_props['user_defined_script']))
+
+
+ if 'parameter' in tosca_props:
+ service_primitive['parameter'] = []
+ for parameter in tosca_props['parameter']:
+ prop = {}
+ if 'name' in parameter:
+ prop['name'] = parameter['name']
+ if 'hidden' in parameter:
+ prop['hidden'] = parameter['hidden']
+ if 'mandatory' in parameter:
+ prop['mandatory'] = parameter['mandatory']
+ if 'data_type' in parameter:
+ prop['data_type'] = ToscaVnfNSServiceConfiguration.VALUE_TYPE_CONVERSION_MAP[parameter['data_type']]
+ if 'default_value' in parameter:
+ prop['default_value'] = str(parameter['default_value'])
+ service_primitive['parameter'].append(prop)
+
+ self.service_primitive = service_primitive
+
+
+
+
+ #self.properties = prop
+
+ def generate_yang_submodel_gi(self, vnfd):
+ pass
+
+ def generate_yang_model(self, nsd, vnfds, use_gi):
+ if self.service_primitive is not None:
+ nsd.service_primitive.add().from_dict(self.service_primitive)
+
+ def get_policy_props(self):
+ tosca_props = {}
+
+ for prop in self.policy.get_properties_objects():
+ if isinstance(prop.value, GetInput):
+ tosca_props[prop.name] = {'get_param': prop.value.input_name}
+ else:
+ tosca_props[prop.name] = prop.value
+ return tosca_props
+ def get_supporting_files(self, files, desc_id=None):
+ if not len(self.scripts):
+ return
+ if desc_id not in files:
+ return
+ for script in self.scripts:
+ files[desc_id].append({
+ 'type': 'script',
+ 'name': script,
+ },)
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
index 2d6c3e1..ef36e56 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
@@ -167,6 +167,7 @@
self.log.debug(_("Metadata {0}").format(metadata))
self.metadata = metadata
+
def _recursive_handle_properties(self, resource):
'''Recursively handle the properties of the depends_on_nodes nodes.'''
# Use of hashtable (dict) here should be faster?
@@ -227,8 +228,9 @@
vnf_type_to_vdus_map[vnf_type].append(node.name)
for policy in template.policies:
policies.append(policy.name)
- for req in template.substitution_mappings.requirements:
- vnf_type_substitution_mapping[template.substitution_mappings.node_type].append(req)
+ if template.substitution_mappings.requirements:
+ for req in template.substitution_mappings.requirements:
+ vnf_type_substitution_mapping[template.substitution_mappings.node_type].append(req)
if template.substitution_mappings.capabilities:
for capability in template.substitution_mappings.capabilities:
sub_list = template.substitution_mappings.capabilities[capability]
@@ -273,7 +275,6 @@
metadata=self.metadata)
mano_node.vnf_type = vnf_type
self.mano_resources.append(mano_node)
- print("Adding a new node")
for node in self.tosca.nodetemplates:
if 'VDU' in node.type:
@@ -317,6 +318,7 @@
vnf_name=vnf_node)
self.mano_policies.append(policy_node)
+ vnfd_resources = []
for node in self.mano_resources:
self.log.debug(_("Handle properties for {0} of type {1}").
format(node.name, node.type_))
@@ -338,6 +340,22 @@
format(node.name, node.type_))
node.update_image_checksum(self.tosca.path)
+ for node in list(self.mano_resources):
+ if node.type == "vnfd":
+ vnfd_resources.append(node)
+ self.mano_resources.remove(node)
+
+ vnfd_resources.sort(key=lambda x: x.member_vnf_id, reverse=True)
+ vnf_type_duplicate_map = {}
+ for node in reversed(vnfd_resources):
+ if node.vnf_type in vnf_type_duplicate_map:
+ for policy in self.mano_policies:
+ if hasattr(policy, '_vnf_name') and policy._vnf_name == node.name:
+ policy._vnf_name = vnf_type_duplicate_map[node.vnf_type]
+ continue
+ vnf_type_duplicate_map[node.vnf_type] = node.name
+
+ self.mano_resources.extend(vnfd_resources)
for node in self.mano_resources:
# Handle vnf and vdu dependencies first
if node.type == "vnfd":
diff --git a/common/python/rift/mano/tosca_translator/shell.py b/common/python/rift/mano/tosca_translator/shell.py
index 9221c79..b5529f0 100644
--- a/common/python/rift/mano/tosca_translator/shell.py
+++ b/common/python/rift/mano/tosca_translator/shell.py
@@ -364,6 +364,8 @@
dest = os.path.join(output_dir, 'images')
elif ty == 'script':
dest = os.path.join(output_dir, 'scripts')
+ elif ty == 'icons':
+ dest = os.path.join(output_dir, 'icons')
elif ty == 'cloud_init':
dest = os.path.join(output_dir, 'cloud_init')
else:
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml
index 9a68023..c74a782 100644
--- a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml
@@ -7,21 +7,8 @@
data_types:
tosca.datatypes.network.riftio.vnf_configuration:
properties:
- config_delay:
- constraints:
- - greater_or_equal: 0
- default: 0
- required: no
- type: integer
config_details:
type: map
- config_priority:
- constraints:
- - greater_than: 0
- type: integer
- config_template:
- required: no
- type: string
config_type:
type: string
capability_types:
@@ -196,21 +183,7 @@
vnf_configuration:
config_delay: 0
config_details:
- script_type: bash
- config_priority: 2
- config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
- ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
- \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
- \ to configure VNF\ncurl -D /dev/stdout \\\n -H \"Accept: application/vnd.yang.data+xml\"\
- \ \\\n -H \"Content-Type: application/vnd.yang.data+json\" \\\n \
- \ -X POST \\\n -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
- \":$server_port}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set server info for\
- \ ping!\"\n exit $rc\nfi\n\ncurl -D /dev/stdout \\\n -H \"Accept:\
- \ application/vnd.yang.data+xml\" \\\n -H \"Content-Type: application/vnd.yang.data+json\"\
- \ \\\n -X POST \\\n -d \"{\\\"rate\\\":$ping_rate}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set ping rate!\"\n\
- \ exit $rc\nfi\n\nexit 0\n"
+ script_type: rift
config_type: script
capabilities:
http_endpoint:
@@ -313,18 +286,7 @@
vnf_configuration:
config_delay: 60
config_details:
- script_type: bash
- config_priority: 1
- config_template: "\n#!/bin/bash\n\n# Rest API configuration\npong_mgmt_ip=<rw_mgmt_ip>\n\
- pong_mgmt_port=18889\n# username=<rw_username>\n# password=<rw_password>\n\
- \n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
- \ pong_vnfd/cp0>\nserver_port=5555\n\n# Make Rest API calls to configure\
- \ VNF\ncurl -D /dev/stdout \\\n -H \"Accept: application/vnd.yang.data+xml\"\
- \ \\\n -H \"Content-Type: application/vnd.yang.data+json\" \\\n \
- \ -X POST \\\n -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
- \":$server_port}\" \\\n http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set server(own) info\
- \ for pong!\"\n exit $rc\nfi\n\nexit 0\n"
+ script_type: rift
config_type: script
capabilities:
http_endpoint:
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa.zip b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa.zip
new file mode 100644
index 0000000..5ca9064
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa.zip
Binary files differ
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/ping_pong_nsd.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/ping_pong_nsd.yaml
new file mode 100644
index 0000000..05bfd91
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/ping_pong_nsd.yaml
@@ -0,0 +1,57 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: Toy NS
+metadata:
+ ID: ping_pong_nsd
+ vendor: RIFT.io
+ version: 1.0
+imports:
+- "ping_vnfd.yaml"
+- "pong_vnfd.yaml"
+topology_template:
+ policies:
+ - initial_config_primitive:
+ properties:
+ name: start traffic
+ seq: 1
+ user_defined_script: start_traffic.py
+ targets: [pong_vnfd]
+ type: tosca.policies.nfv.riftio.initial_config_primitive
+ - placement_0:
+ properties:
+ name: Orcus
+ requirement: Place this VM on the Kuiper belt object Orcus
+ strategy: COLOCATION
+ targets: [ping_vnfd, pong_vnfd]
+ type: tosca.policies.nfv.riftio.placement
+ - placement_1:
+ properties:
+ name: Quaoar
+ requirement: Place this VM on the Kuiper belt object Quaoar
+ strategy: COLOCATION
+ targets: [ping_vnfd, pong_vnfd]
+ type: tosca.policies.nfv.riftio.placement
+ node_templates:
+ pong_vnfd:
+ type: tosca.nodes.nfv.riftio.pongvnfdVNF
+ properties:
+ id: 2
+ vendor: RIFT.io
+ version: 1.0
+ requirements:
+ - virtualLink1: ping_pong_vld
+ ping_pong_vld:
+ type: tosca.nodes.nfv.riftio.ELAN
+ properties:
+ cidr: 31.31.31.0/24
+ description: Toy VL
+ gateway_ip: 31.31.31.210
+ ip_version: 4
+ vendor: RIFT.io
+ ping_vnfd:
+ type: tosca.nodes.nfv.riftio.pingvnfdVNF
+ properties:
+ id: 1
+ vendor: RIFT.io
+ version: 1.0
+ requirements:
+ - virtualLink1: ping_pong_vld
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/ping_vnfd.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/ping_vnfd.yaml
new file mode 100644
index 0000000..5bf52ee
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/ping_vnfd.yaml
@@ -0,0 +1,121 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: This is an example RIFT.ware VNF
+metadata:
+ ID: ping_vnfd
+ vendor: RIFT.io
+ version: 1.0
+imports:
+- riftiotypes.yaml
+node_types:
+ tosca.nodes.nfv.riftio.pingvnfdVNF:
+ derived_from: tosca.nodes.nfv.riftio.VNF1
+ requirements:
+ - virtualLink1:
+ type: tosca.nodes.nfv.VL
+topology_template:
+ policies:
+ - configuration:
+ properties:
+ config:
+ config_details:
+ script_type: rift
+ config_type: script
+ initial_config_primitive:
+ - name: set ping rate
+ parameter:
+ - rate: 5
+ seq: 1
+ user_defined_script: ping_set_rate.py
+ targets: [ping_vnfd_iovdu_0]
+ type: tosca.policies.nfv.riftio.vnf_configuration
+ substitution_mappings:
+ node_type: tosca.nodes.nfv.riftio.pingvnfdVNF
+ requirements:
+ - virtualLink1: [ping_vnfd_cp0, virtualLink]
+ node_templates:
+ ping_vnfd_iovdu_0:
+ type: tosca.nodes.nfv.riftio.VDU1
+ properties:
+ cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+ ssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl,\
+ \ enable, ping.service ]\n - [ systemctl, start, --no-block, ping.service\
+ \ ]\n - [ ifup, eth1 ]\n"
+ count: 1
+ capabilities:
+ hypervisor_epa:
+ properties:
+ type: PREFER_KVM
+ version: 1
+ mgmt_interface:
+ properties:
+ dashboard_params:
+ path: api/v1/ping/stats
+ port: 18888
+ port: 18888
+ protocol: tcp
+ monitoring_param:
+ properties:
+ description: no of ping requests
+ json_query_method: namekey
+ name: ping-request-tx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/ping/stats
+ monitoring_param_1:
+ properties:
+ description: no of ping responses
+ json_query_method: namekey
+ name: ping-response-rx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/ping/stats
+ nfv_compute:
+ properties:
+ cpu_allocation:
+ cpu_affinity: dedicated
+ thread_allocation: prefer
+ disk_size: 4 GB
+ mem_page_size: normal
+ mem_size: 1024 MB
+ num_cpus: 4
+ numa_extension:
+ properties:
+ mem_policy: STRICT
+ node:
+ - id: 0
+ mem_size: 512 MB
+ vcpus:
+ - 0
+ - 1
+ - id: 1
+ mem_size: 512 MB
+ vcpus:
+ - 2
+ - 3
+ node_cnt: 2
+ vswitch_epa:
+ properties:
+ ovs_acceleration: DISABLED
+ ovs_offload: DISABLED
+ artifacts:
+ ping_vnfd_iovdu_0_vm_image:
+ file: ../images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+ image_checksum: a6ffaa77f949a9e4ebb082c6147187cf
+ type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+ interfaces:
+ Standard:
+ create: ping_vnfd_iovdu_0_vm_image
+ ping_vnfd_cp0:
+ type: tosca.nodes.nfv.riftio.CP1
+ properties:
+ cp_type: VPORT
+ name: ping_vnfd/cp0
+ vdu_intf_name: eth0
+ vdu_intf_type: VIRTIO
+ requirements:
+ - virtualBinding:
+ node: ping_vnfd_iovdu_0
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/pong_vnfd.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/pong_vnfd.yaml
new file mode 100644
index 0000000..46500b4
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/pong_vnfd.yaml
@@ -0,0 +1,115 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: This is an example RIFT.ware VNF
+metadata:
+ ID: pong_vnfd
+ vendor: RIFT.io
+ version: 1.0
+imports:
+- riftiotypes.yaml
+node_types:
+ tosca.nodes.nfv.riftio.pongvnfdVNF:
+ derived_from: tosca.nodes.nfv.riftio.VNF1
+ requirements:
+ - virtualLink1:
+ type: tosca.nodes.nfv.VL
+topology_template:
+ policies:
+ - configuration:
+ properties:
+ config:
+ config_details:
+ script_type: rift
+ config_type: script
+ targets: [pong_vnfd_iovdu_0]
+ type: tosca.policies.nfv.riftio.vnf_configuration
+ substitution_mappings:
+ node_type: tosca.nodes.nfv.riftio.pongvnfdVNF
+ requirements:
+ - virtualLink1: [pong_vnfd_cp0, virtualLink]
+ node_templates:
+ pong_vnfd_iovdu_0:
+ type: tosca.nodes.nfv.riftio.VDU1
+ properties:
+ cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+ ssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl,\
+ \ enable, pong.service ]\n - [ systemctl, start, --no-block, pong.service\
+ \ ]\n - [ ifup, eth1 ]\n"
+ count: 1
+ capabilities:
+ hypervisor_epa:
+ properties:
+ type: PREFER_KVM
+ version: 1
+ mgmt_interface:
+ properties:
+ dashboard_params:
+ path: api/v1/pong/stats
+ port: 18889
+ port: 18889
+ protocol: tcp
+ monitoring_param:
+ properties:
+ description: no of ping requests
+ json_query_method: namekey
+ name: ping-request-rx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/pong/stats
+ monitoring_param_1:
+ properties:
+ description: no of ping responses
+ json_query_method: namekey
+ name: ping-response-tx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/pong/stats
+ nfv_compute:
+ properties:
+ cpu_allocation:
+ cpu_affinity: dedicated
+ thread_allocation: prefer
+ disk_size: 4 GB
+ mem_page_size: normal
+ mem_size: 1024 MB
+ num_cpus: 4
+ numa_extension:
+ properties:
+ mem_policy: STRICT
+ node:
+ - id: 0
+ mem_size: 512 MB
+ vcpus:
+ - 0
+ - 1
+ - id: 1
+ mem_size: 512 MB
+ vcpus:
+ - 2
+ - 3
+ node_cnt: 2
+ vswitch_epa:
+ properties:
+ ovs_acceleration: DISABLED
+ ovs_offload: DISABLED
+ artifacts:
+ pong_vnfd_iovdu_0_vm_image:
+ file: ../images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+ image_checksum: 977484d95575f80ef8399c9cf1d45ebd
+ type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+ interfaces:
+ Standard:
+ create: pong_vnfd_iovdu_0_vm_image
+ pong_vnfd_cp0:
+ type: tosca.nodes.nfv.riftio.CP1
+ properties:
+ cp_type: VPORT
+ name: pong_vnfd/cp0
+ vdu_intf_name: eth0
+ vdu_intf_type: VIRTIO
+ requirements:
+ - virtualBinding:
+ node: pong_vnfd_iovdu_0
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/riftiotypes.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/riftiotypes.yaml
new file mode 100644
index 0000000..18a0728
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/Definitions/riftiotypes.yaml
@@ -0,0 +1,1493 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: Extended types
+
+
+data_types:
+ tosca.datatypes.nfv.riftio.dashboard_params:
+ properties:
+ path:
+ type: string
+ description: >-
+ The HTTP path for the dashboard
+ port:
+ type: tosca.datatypes.network.PortDef
+ description: >-
+ The HTTP port for the dashboard
+ default: 80
+ https:
+ type: boolean
+ description: >-
+ Pick HTTPS instead of HTTP , Default is false
+ default: false
+ required: false
+ tosca.datatypes.nfv.riftio.monitoring_param_ui:
+ properties:
+ description:
+ type: string
+ required: false
+ group_tag:
+ type: string
+ description: >-
+ A simple tag to group monitoring parameters
+ required: false
+ widget_type:
+ type: string
+ description: >-
+ Type of the widget
+ default: counter
+ constraints:
+ - valid_values:
+ - histogram
+ - bar
+ - gauge
+ - slider
+ - counter
+ - textbox
+ units:
+ type: string
+ required: false
+ tosca.datatypes.nfv.riftio.monitoring_param_value:
+ properties:
+ value_type:
+ type: string
+ default: integer
+ constraints:
+ - valid_values:
+ - integer
+ - float
+ - string
+ numeric_min:
+ type: integer
+ description: >-
+ Minimum value for the parameter
+ required: false
+ numeric_max:
+ type: integer
+ description: >-
+ Maximum value for the parameter
+ required: false
+ string_min:
+ type: integer
+ description: >-
+ Minimum string length for the parameter
+ required: false
+ constraints:
+ - greater_or_equal: 0
+ string_max:
+ type: integer
+ description: >-
+ Maximum string length for the parameter
+ required: false
+ constraints:
+ - greater_or_equal: 0
+ tosca.datatypes.compute.Container.Architecture.CPUAllocation:
+ derived_from: tosca.datatypes.Root
+ properties:
+ cpu_affinity:
+ type: string
+ required: false
+ constraints:
+ - valid_values: [shared, dedicated, any]
+ thread_allocation:
+ type: string
+ required: false
+ constraints:
+ - valid_values: [avoid, separate, isolate, prefer]
+ socket_count:
+ type: integer
+ required: false
+ core_count:
+ type: integer
+ required: false
+ thread_count:
+ type: integer
+ required: false
+
+ tosca.datatypes.compute.Container.Architecture.NUMA:
+ derived_from: tosca.datatypes.Root
+ properties:
+ id:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ vcpus:
+ type: list
+ entry_schema:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ mem_size:
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ tosca.datatypes.nfv.riftio.paired_thread_map:
+ properties:
+ thread_a:
+ type: integer
+ required: true
+ constraints:
+ - greater_or_equal: 0
+ thread_b:
+ type: integer
+ required: true
+ constraints:
+ - greater_or_equal: 0
+
+ tosca.datatypes.nfv.riftio.paired_threads:
+ properties:
+ num_paired_threads:
+ type: integer
+ constraints:
+ - greater_or_equal: 1
+ paired_thread_ids:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.riftio.paired_thread_map
+ constraints:
+ - max_length: 16
+ required: false
+
+ tosca.datatypes.compute.riftio.numa:
+ properties:
+ id:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ vcpus:
+ type: list
+ entry_schema:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ mem_size:
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ required: false
+ om_numa_type:
+ type: string
+ description: Openmano Numa type selection
+ constraints:
+ - valid_values: [cores, paired-threads, threads]
+ required: false
+ num_cores:
+ type: integer
+ description: Use when om_numa_type is cores
+ constraints:
+ - greater_or_equal: 1
+ required: false
+ paired_threads:
+ type: tosca.datatypes.nfv.riftio.paired_threads
+ description: Use when om_numa_type is paired-threads
+ required: false
+ num_threads:
+ type: integer
+ description: Use when om_numa_type is threads
+ constraints:
+ - greater_or_equal: 1
+ required: false
+
+ tosca.nfv.datatypes.pathType:
+ properties:
+ forwarder:
+ type: string
+ required: true
+ capability:
+ type: string
+ required: true
+
+ tosca.nfv.datatypes.aclType:
+ properties:
+ eth_type:
+ type: string
+ required: false
+ eth_src:
+ type: string
+ required: false
+ eth_dst:
+ type: string
+ required: false
+ vlan_id:
+ type: integer
+ constraints:
+ - in_range: [ 1, 4094 ]
+ required: false
+ vlan_pcp:
+ type: integer
+ constraints:
+ - in_range: [ 0, 7 ]
+ required: false
+ mpls_label:
+ type: integer
+ constraints:
+ - in_range: [ 16, 1048575]
+ required: false
+ mpls_tc:
+ type: integer
+ constraints:
+ - in_range: [ 0, 7 ]
+ required: false
+ ip_dscp:
+ type: integer
+ constraints:
+ - in_range: [ 0, 63 ]
+ required: false
+ ip_ecn:
+ type: integer
+ constraints:
+ - in_range: [ 0, 3 ]
+ required: false
+ ip_src_prefix:
+ type: string
+ required: false
+ ip_dst_prefix:
+ type: string
+ required: false
+ ip_proto:
+ type: integer
+ constraints:
+ - in_range: [ 1, 254 ]
+ required: false
+ destination_port_range:
+ type: integer
+ required: false
+ source_port_range:
+ type: integer
+ required: false
+ network_src_port_id:
+ type: string
+ required: false
+ network_dst_port_id:
+ type: string
+ required: false
+ network_id:
+ type: string
+ required: false
+ network_name:
+ type: string
+ required: false
+ tenant_id:
+ type: string
+ required: false
+ icmpv4_type:
+ type: integer
+ constraints:
+ - in_range: [ 0, 254 ]
+ required: false
+ icmpv4_code:
+ type: integer
+ constraints:
+ - in_range: [ 0, 15 ]
+ required: false
+ arp_op:
+ type: integer
+ constraints:
+ - in_range: [ 1, 25 ]
+ required: false
+ arp_spa:
+ type: string
+ required: false
+ arp_tpa:
+ type: string
+ required: false
+ arp_sha:
+ type: string
+ required: false
+ arp_tha:
+ type: string
+ required: false
+ ipv6_src:
+ type: string
+ required: false
+ ipv6_dst:
+ type: string
+ required: false
+ ipv6_flabel:
+ type: integer
+ constraints:
+ - in_range: [ 0, 1048575]
+ required: false
+ icmpv6_type:
+ type: integer
+ constraints:
+ - in_range: [ 0, 255]
+ required: false
+ icmpv6_code:
+ type: integer
+ constraints:
+ - in_range: [ 0, 7]
+ required: false
+ ipv6_nd_target:
+ type: string
+ required: false
+ ipv6_nd_sll:
+ type: string
+ required: false
+ ipv6_nd_tll:
+ type: string
+ required: false
+
+
+ tosca.datatypes.nfv.riftio.vnf_configuration:
+ properties:
+ config_type:
+ type: string
+ description: >-
+ Type of the configuration agent to use
+ constraints:
+ - valid_values: [script, netconf, rest, juju]
+ config_details:
+ type: map
+ description: >-
+ Specify the details for the config agent, like
+ script type, juju charm to use, etc.
+ config_template:
+ required: false
+ type: string
+ config_delay:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ default: 0
+ required: false
+ config_priority:
+ type: integer
+ constraints:
+ - greater_than: 0
+
+ tosca.datatypes.nfv.riftio.parameter_value:
+ properties:
+ name:
+ type: string
+ description: Name of the parameter
+ value:
+ type: string
+ description: Value of the parameter
+
+ tosca.datatypes.nfv.riftio.config_primitive:
+ properties:
+ name:
+ type: string
+ seq:
+ type: integer
+ description: >-
+ Order in which to apply, when multiple ones are defined
+ default: 0
+ constraints:
+ - greater_or_equal: 0
+ parameter:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.riftio.parameter_value
+ user_defined_script:
+ type: string
+ tosca.datatypes.nfv.riftio.primitive_parameter:
+ properties:
+ data_type:
+ type: string
+ description: >-
+ Data type associated with the name
+ constraints:
+ - valid_values: [string, integer, boolean]
+ mandatory:
+ type: boolean
+ description: >-
+ If this field is mandatory
+ default: false
+ required: false
+ default_value:
+ type: string
+ description: >-
+ The default value for this field
+ required: false
+ parameter_pool:
+ type: string
+ description: >-
+ Parameter pool name to use for this parameter
+ required: false
+ read_only:
+ type: boolean
+ description: >-
+ The value should be greyed out by the UI.
+ Only applies to parameters with default values.
+ required: false
+ default: false
+ hidden:
+ type: boolean
+ description: >-
+ The field should be hidden by the UI.
+ Only applies to parameters with default values.
+ required: false
+ default: false
+ tosca.datatypes.nfv.riftio.primitive_parameter_group:
+ properties:
+ name:
+ type: string
+ description: >-
+ Name of the parameter group
+ mandatory:
+ type: boolean
+ description: >-
+ If this group is mandatory
+ default: false
+ required: false
+ parameter:
+ type: map
+ description: >-
+ List of parameters for the service primitive
+ entry_schema: tosca.datatypes.nfv.riftio.primitive_parameter
+
+ tosca.datatypes.nfv.riftio.vnf_primitive_group:
+ properties:
+ vnf_name:
+ type: string
+ description: >-
+ Name of the VNF in the NS
+ primitive:
+ type: map
+ entry_schema:
+ type: string
+ description: >-
+ Index and name of the primitive
+
+
+capability_types:
+ tosca.capabilities.nfv.riftio.mgmt_interface:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ static_ip:
+ type: string
+ required: false
+ description: >-
+ Specifies the static IP address for managing the VNF
+ connection_point:
+ type: string
+ required: false
+ description: >-
+ Use the ip address associated with this connection point
+ dashboard_params:
+ type: tosca.datatypes.nfv.riftio.dashboard_params
+ required: false
+ description: >-
+ Parameters for the VNF dashboard
+ tosca.capabilities.nfv.riftio.monitoring_param:
+ derived_from: tosca.capabilities.nfv.Metric
+ properties:
+ name:
+ type: string
+ required: false
+ description:
+ type: string
+ required: false
+ protocol:
+ type: string
+ default: http
+ constraints:
+ - equal: http
+ polling_interval:
+ type: scalar-unit.time
+ description: >-
+ The HTTP polling interval in seconds
+ default: 2 s
+ username:
+ type: string
+ description: >-
+ The HTTP basic auth username
+ required: false
+ password:
+ type: string
+ description: >-
+ The HTTP basic auth password
+ required: false
+ method:
+ type: string
+ description: >-
+ This is the method to be performed at the uri.
+ GET by default for action
+ default: get
+ constraints:
+ - valid_values: [post, put, get, delete, options, patch]
+ headers:
+ type: map
+ entry_schema:
+ type: string
+ description: >-
+ Custom HTTP headers to put on HTTP request
+ required: false
+ json_query_method:
+ type: string
+ description: >-
+ The method to extract a value from a JSON response
+ namekey - Use the name as the key for a non-nested value.
+ jsonpath - Use jsonpath-rw implementation to extract a value.
+ objectpath - Use objectpath implementation to extract a value.
+ constraints:
+ - valid_values: [namekey, jsonpath, objectpath]
+ default: namekey
+ json_query_path:
+ type: string
+ description: >-
+ The json path to use to extract value from JSON structure
+ required: false
+ json_object_path:
+ type: string
+ description: >-
+ The object path to use to extract value from JSON structure
+ required: false
+ ui_data:
+ type: tosca.datatypes.nfv.riftio.monitoring_param_ui
+ required: false
+ constraints:
+ type: tosca.datatypes.nfv.riftio.monitoring_param_value
+ required: false
+ tosca.capabilities.nfv.riftio.numa_extension:
+ derived_from: tosca.capabilities.Root
+ properties:
+ node_cnt:
+ type: integer
+ description: >-
+ The number of numa nodes to expose to the VM
+ constraints:
+ - greater_or_equal: 0
+ mem_policy:
+ type: string
+ description: >-
+ This policy specifies how the memory should
+ be allocated in a multi-node scenario.
+ STRICT - The memory must be allocated
+ strictly from the memory attached
+ to the NUMA node.
+ PREFERRED - The memory should be allocated
+ preferentially from the memory
+ attached to the NUMA node
+ constraints:
+ - valid_values: [strict, preferred, STRICT, PREFERRED]
+ node:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.compute.riftio.numa
+ tosca.capabilities.nfv.riftio.vswitch_epa:
+ derived_from: tosca.capabilities.Root
+ properties:
+ ovs_acceleration:
+ type: string
+ description: |-
+ Specifies Open vSwitch acceleration mode.
+ MANDATORY - OVS acceleration is required
+ PREFERRED - OVS acceleration is preferred
+ constraints:
+ - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+ ovs_offload:
+ type: string
+ description: |-
+ Specifies Open vSwitch hardware offload mode.
+ MANDATORY - OVS offload is required
+ PREFERRED - OVS offload is preferred
+ constraints:
+ - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+
+ tosca.capabilities.nfv.riftio.hypervisor_epa:
+ derived_from: tosca.capabilities.Root
+ properties:
+ type:
+ type: string
+ description: |-
+ Specifies the type of hypervisor.
+ constraints:
+ - valid_values: [prefer_kvm, require_kvm, PREFER_KVM, REQUIRE_KVM]
+ version:
+ type: string
+
+ tosca.capabilities.nfv.riftio.host_epa:
+ derived_from: tosca.capabilities.Root
+ properties:
+ cpu_model:
+ type: string
+ description: >-
+ Host CPU model. Examples include SandyBridge,
+ IvyBridge, etc.
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_westmere
+ - require_westmere
+ - prefer_sandbridge
+ - require_sandybridge
+ - prefer_ivybridge
+ - require_ivybridge
+ - prefer_haswell
+ - require_haswell
+ - prefer_broadwell
+ - require_broadwell
+ - prefer_nehalem
+ - require_nehalem
+ - prefer_penryn
+ - require_penryn
+ - prefer_conroe
+ - require_conroe
+ - prefer_core2duo
+ - require_core2duo
+ - PREFER_WESTMERE
+ - REQUIRE_WESTMERE
+ - PREFER_SANDBRIDGE
+ - REQUIRE_SANDYBRIDGE
+ - PREFER_IVYBRIDGE
+ - REQUIRE_IVYBRIDGE
+ - PREFER_HASWELL
+ - REQUIRE_HASWELL
+ - PREFER_BROADWELL
+ - REQUIRE_BROADWELL
+ - PREFER_NEHALEM
+ - REQUIRE_NEHALEM
+ - PREFER_PENRYN
+ - REQUIRE_PENRYN
+ - PREFER_CONROE
+ - REQUIRE_CONROE
+ - PREFER_CORE2DUO
+ - REQUIRE_CORE2DUO
+ cpu_arch:
+ type: string
+ description: >-
+ Host CPU architecture
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_x86
+ - require_x86
+ - prefer_x86_64
+ - require_x86_64
+ - prefer_i686
+ - require_i686
+ - prefer_ia64
+ - require_ia64
+ - prefer_armv7
+ - require_armv7
+ - prefer_armv8
+ - require_armv8
+ - PREFER_X86
+ - REQUIRE_X86
+ - PREFER_X86_64
+ - REQUIRE_X86_64
+ - PREFER_I686
+ - REQUIRE_I686
+ - PREFER_IA64
+ - REQUIRE_IA64
+ - PREFER_ARMV7
+ - REQUIRE_ARMV7
+ - PREFER_ARMV8
+ - REQUIRE_ARMV8
+ cpu_vendor:
+ type: string
+ description: >-
+ Host CPU vendor
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_intel
+ - require_intel
+ - prefer_amd
+ - requie_amd
+ - PREFER_INTEL
+ - REQUIRE_INTEL
+ - PREFER_AMD
+ - REQUIE_AMD
+ cpu_socket_count:
+ type: integer
+ description: >-
+ Number of sockets on the host
+ required: false
+ constraints:
+ - greater_than : 0
+ cpu_core_count:
+ type: integer
+ description: >-
+ Number of cores on the host
+ required: false
+ constraints:
+ - greater_than : 0
+ cpu_core_thread_count:
+ type: integer
+ description: >-
+ Number of threads per core on the host
+ required: false
+ constraints:
+ - greater_than : 0
+ cpu_feature:
+ type: list
+ entry_schema:
+ type: string
+ description: |-
+ Enumeration for CPU features.
+
+ AES- CPU supports advanced instruction set for
+ AES (Advanced Encryption Standard).
+
+ CAT- Cache Allocation Technology (CAT) allows
+ an Operating System, Hypervisor, or similar
+ system management agent to specify the amount
+ of L3 cache (currently the last-level cache
+ in most server and client platforms) space an
+ application can fill (as a hint to hardware
+ functionality, certain features such as power
+ management may override CAT settings).
+
+ CMT- Cache Monitoring Technology (CMT) allows
+ an Operating System, Hypervisor, or similar
+ system management agent to determine the
+ usage of cache based on applications running
+ on the platform. The implementation is
+ directed at L3 cache monitoring (currently
+ the last-level cache in most server and
+ client platforms).
+
+ DDIO- Intel Data Direct I/O (DDIO) enables
+ Ethernet server NICs and controllers talk
+ directly to the processor cache without a
+ detour via system memory. This enumeration
+ specifies if the VM requires a DDIO
+ capable host.
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_aes
+ - require_aes
+ - prefer_cat
+ - require_cat
+ - prefer_cmt
+ - require_cmt
+ - prefer_ddio
+ - require_ddio
+ - prefer_vme
+ - require_vme
+ - prefer_de
+ - require_de
+ - prefer_pse
+ - require_pse
+ - prefer_tsc
+ - require_tsc
+ - prefer_msr
+ - require_msr
+ - prefer_pae
+ - require_pae
+ - prefer_mce
+ - require_mce
+ - prefer_cx8
+ - require_cx8
+ - prefer_apic
+ - require_apic
+ - prefer_sep
+ - require_sep
+ - prefer_mtrr
+ - require_mtrr
+ - prefer_pge
+ - require_pge
+ - prefer_mca
+ - require_mca
+ - prefer_cmov
+ - require_cmov
+ - prefer_pat
+ - require_pat
+ - prefer_pse36
+ - require_pse36
+ - prefer_clflush
+ - require_clflush
+ - prefer_dts
+ - require_dts
+ - prefer_acpi
+ - require_acpi
+ - prefer_mmx
+ - require_mmx
+ - prefer_fxsr
+ - require_fxsr
+ - prefer_sse
+ - require_sse
+ - prefer_sse2
+ - require_sse2
+ - prefer_ss
+ - require_ss
+ - prefer_ht
+ - require_ht
+ - prefer_tm
+ - require_tm
+ - prefer_ia64
+ - require_ia64
+ - prefer_pbe
+ - require_pbe
+ - prefer_rdtscp
+ - require_rdtscp
+ - prefer_pni
+ - require_pni
+ - prefer_pclmulqdq
+ - require_pclmulqdq
+ - prefer_dtes64
+ - require_dtes64
+ - prefer_monitor
+ - require_monitor
+ - prefer_ds_cpl
+ - require_ds_cpl
+ - prefer_vmx
+ - require_vmx
+ - prefer_smx
+ - require_smx
+ - prefer_est
+ - require_est
+ - prefer_tm2
+ - require_tm2
+ - prefer_ssse3
+ - require_ssse3
+ - prefer_cid
+ - require_cid
+ - prefer_fma
+ - require_fma
+ - prefer_cx16
+ - require_cx16
+ - prefer_xtpr
+ - require_xtpr
+ - prefer_pdcm
+ - require_pdcm
+ - prefer_pcid
+ - require_pcid
+ - prefer_dca
+ - require_dca
+ - prefer_sse4_1
+ - require_sse4_1
+ - prefer_sse4_2
+ - require_sse4_2
+ - prefer_x2apic
+ - require_x2apic
+ - prefer_movbe
+ - require_movbe
+ - prefer_popcnt
+ - require_popcnt
+ - prefer_tsc_deadline_timer
+ - require_tsc_deadline_timer
+ - prefer_xsave
+ - require_xsave
+ - prefer_avx
+ - require_avx
+ - prefer_f16c
+ - require_f16c
+ - prefer_rdrand
+ - require_rdrand
+ - prefer_fsgsbase
+ - require_fsgsbase
+ - prefer_bmi1
+ - require_bmi1
+ - prefer_hle
+ - require_hle
+ - prefer_avx2
+ - require_avx2
+ - prefer_smep
+ - require_smep
+ - prefer_bmi2
+ - require_bmi2
+ - prefer_erms
+ - require_erms
+ - prefer_invpcid
+ - require_invpcid
+ - prefer_rtm
+ - require_rtm
+ - prefer_mpx
+ - require_mpx
+ - prefer_rdseed
+ - require_rdseed
+ - prefer_adx
+ - require_adx
+ - prefer_smap
+ - require_smap
+ - PREFER_AES
+ - REQUIRE_AES
+ - PREFER_CAT
+ - REQUIRE_CAT
+ - PREFER_CMT
+ - REQUIRE_CMT
+ - PREFER_DDIO
+ - REQUIRE_DDIO
+ - PREFER_VME
+ - REQUIRE_VME
+ - PREFER_DE
+ - REQUIRE_DE
+ - PREFER_PSE
+ - REQUIRE_PSE
+ - PREFER_TSC
+ - REQUIRE_TSC
+ - PREFER_MSR
+ - REQUIRE_MSR
+ - PREFER_PAE
+ - REQUIRE_PAE
+ - PREFER_MCE
+ - REQUIRE_MCE
+ - PREFER_CX8
+ - REQUIRE_CX8
+ - PREFER_APIC
+ - REQUIRE_APIC
+ - PREFER_SEP
+ - REQUIRE_SEP
+ - PREFER_MTRR
+ - REQUIRE_MTRR
+ - PREFER_PGE
+ - REQUIRE_PGE
+ - PREFER_MCA
+ - REQUIRE_MCA
+ - PREFER_CMOV
+ - REQUIRE_CMOV
+ - PREFER_PAT
+ - REQUIRE_PAT
+ - PREFER_PSE36
+ - REQUIRE_PSE36
+ - PREFER_CLFLUSH
+ - REQUIRE_CLFLUSH
+ - PREFER_DTS
+ - REQUIRE_DTS
+ - PREFER_ACPI
+ - REQUIRE_ACPI
+ - PREFER_MMX
+ - REQUIRE_MMX
+ - PREFER_FXSR
+ - REQUIRE_FXSR
+ - PREFER_SSE
+ - REQUIRE_SSE
+ - PREFER_SSE2
+ - REQUIRE_SSE2
+ - PREFER_SS
+ - REQUIRE_SS
+ - PREFER_HT
+ - REQUIRE_HT
+ - PREFER_TM
+ - REQUIRE_TM
+ - PREFER_IA64
+ - REQUIRE_IA64
+ - PREFER_PBE
+ - REQUIRE_PBE
+ - PREFER_RDTSCP
+ - REQUIRE_RDTSCP
+ - PREFER_PNI
+ - REQUIRE_PNI
+ - PREFER_PCLMULQDQ
+ - REQUIRE_PCLMULQDQ
+ - PREFER_DTES64
+ - REQUIRE_DTES64
+ - PREFER_MONITOR
+ - REQUIRE_MONITOR
+ - PREFER_DS_CPL
+ - REQUIRE_DS_CPL
+ - PREFER_VMX
+ - REQUIRE_VMX
+ - PREFER_SMX
+ - REQUIRE_SMX
+ - PREFER_EST
+ - REQUIRE_EST
+ - PREFER_TM2
+ - REQUIRE_TM2
+ - PREFER_SSSE3
+ - REQUIRE_SSSE3
+ - PREFER_CID
+ - REQUIRE_CID
+ - PREFER_FMA
+ - REQUIRE_FMA
+ - PREFER_CX16
+ - REQUIRE_CX16
+ - PREFER_XTPR
+ - REQUIRE_XTPR
+ - PREFER_PDCM
+ - REQUIRE_PDCM
+ - PREFER_PCID
+ - REQUIRE_PCID
+ - PREFER_DCA
+ - REQUIRE_DCA
+ - PREFER_SSE4_1
+ - REQUIRE_SSE4_1
+ - PREFER_SSE4_2
+ - REQUIRE_SSE4_2
+ - PREFER_X2APIC
+ - REQUIRE_X2APIC
+ - PREFER_MOVBE
+ - REQUIRE_MOVBE
+ - PREFER_POPCNT
+ - REQUIRE_POPCNT
+ - PREFER_TSC_DEADLINE_TIMER
+ - REQUIRE_TSC_DEADLINE_TIMER
+ - PREFER_XSAVE
+ - REQUIRE_XSAVE
+ - PREFER_AVX
+ - REQUIRE_AVX
+ - PREFER_F16C
+ - REQUIRE_F16C
+ - PREFER_RDRAND
+ - REQUIRE_RDRAND
+ - PREFER_FSGSBASE
+ - REQUIRE_FSGSBASE
+ - PREFER_BMI1
+ - REQUIRE_BMI1
+ - PREFER_HLE
+ - REQUIRE_HLE
+ - PREFER_AVX2
+ - REQUIRE_AVX2
+ - PREFER_SMEP
+ - REQUIRE_SMEP
+ - PREFER_BMI2
+ - REQUIRE_BMI2
+ - PREFER_ERMS
+ - REQUIRE_ERMS
+ - PREFER_INVPCID
+ - REQUIRE_INVPCID
+ - PREFER_RTM
+ - REQUIRE_RTM
+ - PREFER_MPX
+ - REQUIRE_MPX
+ - PREFER_RDSEED
+ - REQUIRE_RDSEED
+ - PREFER_ADX
+ - REQUIRE_ADX
+ - PREFER_SMAP
+ - REQUIRE_SMAP
+ om_cpu_model_string:
+ type: string
+ description: >-
+ Openmano CPU model string
+ required: false
+ om_cpu_feature:
+ type: list
+ entry_schema:
+ type: string
+ description: >-
+ List of openmano CPU features
+ required: false
+
+ tosca.capabilities.nfv.riftio.sfc:
+ derived_from: tosca.capabilities.Root
+ description: >-
+ Service Function Chaining support on this VDU
+ properties:
+ sfc_type:
+ type: string
+ description: >-
+ Type of node in Service Function Chaining Architecture
+ constraints:
+ - valid_values: [unaware, classifier, sf, sff, UNAWARE, CLASSIFIER, SF, SFF]
+ default: unaware
+ sf_type:
+ type: string
+ description: >-
+ Type of Service Function.
+ NOTE- This needs to map with Service Function Type in ODL to
+ support VNFFG. Service Function Type is mandatory param in ODL
+ SFC.
+ required: false
+ tosca.capabilities.Compute.Container.Architecture:
+ derived_from: tosca.capabilities.Container
+ properties:
+ mem_page_size:
+ type: string
+ description: >-
+ Memory page allocation size. If a VM requires
+ hugepages, it should choose huge or size_2MB
+ or size_1GB. If the VM prefers hugepages, it
+ should chose prefer_huge.
+ huge/large - Require hugepages (either 2MB or 1GB)
+ normal - Does not require hugepages
+ size_2MB - Requires 2MB hugepages
+ size_1GB - Requires 1GB hugepages
+ prefer_huge - Application prefers hugepages
+ NOTE - huge and normal is only defined in standards as of
+ now.
+ required: false
+ constraints:
+ - valid_values: [normal, large, huge, size_2MB, size_1GB, prefer_huge, NORMAL,LARGE, HUGE, SIZE_2MB, SIZE_1GB, PREFER_HUGE]
+ cpu_allocation:
+ type: tosca.datatypes.compute.Container.Architecture.CPUAllocation
+ required: false
+ numa_nodes:
+ type: map
+ required: false
+ entry_schema:
+ type: tosca.datatypes.compute.Container.Architecture.NUMA
+
+
+node_types:
+ tosca.nodes.nfv.riftio.VDU1:
+ derived_from: tosca.nodes.nfv.VDU
+ properties:
+ description:
+ type: string
+ required: false
+ image:
+ description: >-
+ If an image is specified here, it is assumed that the image
+ is already present in the RO or VIM and not in the package.
+ type: string
+ required: false
+ image_checksum:
+ type: string
+ description: >-
+ Image checksum for the image in RO or VIM.
+ required: false
+ cloud_init:
+ description: >-
+ Inline cloud-init specification
+ required: false
+ type: string
+ count:
+ default: 1
+ type: integer
+ capabilities:
+ virtualLink:
+ type: tosca.capabilities.nfv.VirtualLinkable
+ monitoring_param_1:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ mgmt_interface:
+ type: tosca.capabilities.nfv.riftio.mgmt_interface
+ monitoring_param:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ numa_extension:
+ type: tosca.capabilities.nfv.riftio.numa_extension
+ vswitch_epa:
+ type: tosca.capabilities.nfv.riftio.vswitch_epa
+ hypervisor_epa:
+ type: tosca.capabilities.nfv.riftio.hypervisor_epa
+ host_epa:
+ type: tosca.capabilities.nfv.riftio.host_epa
+ tosca.nodes.nfv.riftio.CP1:
+ derived_from: tosca.nodes.nfv.CP
+ properties:
+ cp_type:
+ description: Type of the connection point
+ type: string
+ default: VPORT
+ constraints:
+ - valid_values: [VPORT]
+ name:
+ description: Name of the connection point
+ type: string
+ required: false
+ vdu_intf_name:
+ description: Name of the interface on VDU
+ type: string
+ vdu_intf_type:
+ description: >-
+ Specifies the type of virtual interface
+ between VM and host.
+ VIRTIO - Use the traditional VIRTIO interface.
+ PCI-PASSTHROUGH - Use PCI-PASSTHROUGH interface.
+ SR-IOV - Use SR-IOV interface.
+ E1000 - Emulate E1000 interface.
+ RTL8139 - Emulate RTL8139 interface.
+ PCNET - Emulate PCNET interface.
+ OM-MGMT - Used to specify openmano mgmt external-connection type
+ type: string
+ constraints:
+ - valid_values: [OM-MGMT, VIRTIO, E1000, SR-IOV]
+ bandwidth:
+ type: integer
+ description: Aggregate bandwidth of the NIC
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ vpci:
+ type: string
+ description: >-
+ Specifies the virtual PCI address. Expressed in
+ the following format dddd:dd:dd.d. For example
+ 0000:00:12.0. This information can be used to
+ pass as metadata during the VM creation.
+ required: false
+ capabilities:
+ sfc:
+ type: tosca.capabilities.nfv.riftio.sfc
+ tosca.nodes.nfv.riftio.VNF1:
+ derived_from: tosca.nodes.nfv.VNF
+ properties:
+ member_index:
+ type: integer
+ constraints:
+ - greater_or_equal: 1
+ description: Index of the VNF in the NS
+ required: false
+ start_by_default:
+ type: boolean
+ default: true
+ description: Start this VNF on NS instantiate
+ logo:
+ type: string
+ description: >-
+ Logo to display with the VNF in the orchestrator
+ required: false
+ capabilities:
+ mgmt_interface:
+ type: tosca.capabilities.nfv.riftio.mgmt_interface
+ monitoring_param:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ sfc:
+ type: tosca.capabilities.nfv.riftio.sfc
+ tosca.nodes.nfv.riftio.ELAN:
+ derived_from: tosca.nodes.nfv.VL.ELAN
+ properties:
+ description:
+ type: string
+ required: false
+ network_name:
+ type: string
+ description: >-
+ Name of network in VIM account. This is used to indicate
+ pre-provisioned network name in cloud account.
+ required: false
+ root_bandwidth:
+ type: integer
+ description: >-
+ This is the aggregate bandwidth
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ leaf_bandwidth:
+ type: integer
+ description: >-
+ This is the bandwidth of branches
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ tosca.nodes.nfv.riftio.FP1:
+ derived_from: tosca.nodes.nfv.FP
+ properties:
+ id:
+ type: integer
+ required: false
+ policy:
+ type: tosca.nfv.datatypes.policyType
+ required: true
+ description: policy to use to match traffic for this FP
+ path:
+ type: list
+ required: true
+ entry_schema:
+ type: tosca.nfv.datatypes.pathType
+ cp:
+ type: tosca.nfv.datatypes.pathType
+ required: true
+
+
+
+artifact_types:
+ tosca.artifacts.Deployment.riftio.cloud_init_file:
+ derived_from: tosca.artifacts.Deployment
+ file:
+ type: string
+
+ tosca.artifacts.Deployment.Image.riftio.QCOW2:
+ derived_from: tosca.artifacts.Deployment.Image.VM.QCOW2
+ image_checksum:
+ required: false
+ type: string
+
+group_types:
+ tosca.groups.nfv.VNFFG:
+ derived_from: tosca.groups.Root
+ properties:
+ vendor:
+ type: string
+ required: true
+ description: name of the vendor who generate this VNFFG
+ version:
+ type: string
+ required: true
+ description: version of this VNFFG
+ number_of_endpoints:
+ type: integer
+ required: true
+ description: count of the external endpoints included in this VNFFG
+ dependent_virtual_link:
+ type: list
+ entry_schema:
+ type: string
+ required: true
+ description: Reference to a VLD used in this Forwarding Graph
+ connection_point:
+ type: list
+ entry_schema:
+ type: string
+ required: true
+ description: Reference to Connection Points forming the VNFFG
+ constituent_vnfs:
+ type: list
+ entry_schema:
+ type: string
+ required: true
+ description: Reference to a list of VNFD used in this VNF Forwarding Graph
+ members: [ tosca.nodes.nfv.FP ]
+
+ tosca.groups.nfv.riftio.scaling:
+ derived_from: tosca.groups.Root
+ properties:
+ name:
+ type: string
+ min_instances:
+ type: integer
+ description: >-
+ Minimum instances of the scaling group which are allowed.
+ These instances are created by default when the network service
+ is instantiated.
+ max_instances:
+ type: integer
+ description: >-
+ Maximum instances of this scaling group that are allowed
+ in a single network service. The network service scaling
+ will fail, when the number of service group instances
+ exceed the max-instance-count specified.
+ cooldown_time:
+ type: integer
+ description: >-
+ The duration after a scaling-in/scaling-out action has been
+ triggered, for which there will be no further optional scaling actions
+ ratio:
+ type: map
+ entry_schema:
+ type: integer
+ description: >-
+ Specify the number of instances of each VNF to instantiate
+ for a scaling action
+ members: [tosca.nodes.nfv.VNF]
+ interfaces:
+ action:
+ type: tosca.interfaces.nfv.riftio.scaling.action
+
+interface_types:
+ tosca.interfaces.nfv.riftio.scaling.action:
+ pre_scale_in:
+ description: Operation to execute before a scale in
+ post_scale_in:
+ description: Operation to execute after a scale in
+ pre_scale_out:
+ description: Operation to execute before a scale out
+ post_scale_out:
+ description: Operation to execute after a scale out
+
+policy_types:
+ tosca.policies.nfv.riftio.placement:
+ derived_from: tosca.policies.Placement
+ properties:
+ name:
+ type: string
+ description: >-
+ Place group construct to define the compute resource placement strategy
+ in cloud environment
+ requirement:
+ type: string
+ description: >-
+ This is free text space used to describe the intent/rationale
+ behind this placement group. This is for human consumption only
+ strategy:
+ type: string
+ description: >-
+ Strategy associated with this placement group
+ Following values are possible
+ COLOCATION - Colocation strategy imply intent to share the physical
+ infrastructure (hypervisor/network) among all members
+ of this group.
+ ISOLATION - Isolation strategy imply intent to not share the physical
+ infrastructure (hypervisor/network) among the members
+ of this group.
+ constraints:
+ - valid_values:
+ - COLOCATION
+ - ISOLATION
+ tosca.policies.nfv.riftio.vnf_configuration:
+ derived_from: tosca.policies.Root
+ properties:
+ config:
+ type: tosca.datatypes.nfv.riftio.vnf_configuration
+ initial_config:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.riftio.config_primitive
+ tosca.policies.nfv.riftio.vnf_service_primitives:
+ derived_from: tosca.policies.Root
+ properties:
+ parameter:
+ type: map
+ entry_schema:
+ type: primitive_parameter
+ tosca.policies.nfv.riftio.ns_service_primitives:
+ derived_from: tosca.policies.Root
+ properties:
+ parameter:
+ type: map
+ entry_schema:
+ type: primitive_parameter
+ parameter_group:
+ type: tosca.datatypes.nfv.riftio.primitive_parameter_group
+ description: >-
+ Grouping of parameters which are logically grouped in UI
+ required: false
+ vnf_primitive_group:
+ type: tosca.datatypes.nfv.riftio.vnf_primitive_group
+ description: >-
+ List of service primitives grouped by VNF
+ required: false
+ user_defined_script:
+ type: string
+ description: >-
+ A user defined script
+ required: false
+ tosca.policies.nfv.riftio.initial_config_primitive:
+ derived_from: tosca.policies.Root
+ properties:
+ name:
+ type: string
+ seq:
+ type: integer
+ description: >-
+ Order in which to apply, when multiple ones are defined
+ default: 0
+ constraints:
+ - greater_or_equal: 0
+ parameter:
+ type: map
+ entry_schema:
+ type: string
+ user_defined_script:
+ type: string
+ tosca.policies.nfv.riftio.users:
+ derived_from: tosca.policies.Root
+ description: >-
+ Specify list of public keys to be injected as
+ part of NS instantitation. Use default as entry,
+ to specify the key pairs for default user.
+ properties:
+ user_info:
+ type: string
+ description: >-
+ The user's real name
+ required: false
+ key_pairs:
+ type: map
+ description: >-
+ List of public keys for the user
+ entry_schema:
+ type: string
+ required: true
+ tosca.policies.nfv.riftio.dependency:
+ derived_from: tosca.policies.Root
+ description: >-
+ Map dependency between VDUs or VNFs
+ properties:
+ parameter:
+ type: map
+ entry_schema:
+ type: string
+ description: >-
+ Parameter and value for the config
+ tosca.nfv.datatypes.policyType:
+ properties:
+ type:
+ type: string
+ required: false
+ constraints:
+ - valid_values: [ ACL ]
+ criteria:
+ type: list
+ required: true
+ entry_schema:
+ type: tosca.nfv.datatypes.aclType
+
+
+
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/TOSCA-Metadata/TOSCA.meta b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/TOSCA-Metadata/TOSCA.meta
new file mode 100644
index 0000000..2351efd
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/TOSCA-Metadata/TOSCA.meta
@@ -0,0 +1,4 @@
+TOSCA-Meta-File-Version: 1.0
+CSAR-Version: 1.1
+Created-By: RIFT.io
+Entry-Definitions: Definitions/ping_pong_nsd.yaml
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/scripts/ping_set_rate.py b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/scripts/ping_set_rate.py
new file mode 100755
index 0000000..54629e8
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/scripts/ping_set_rate.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def ping_set_rate(yaml_cfg, logger):
+ '''Use curl and set traffic rate on ping vnf'''
+
+ def set_rate(mgmt_ip, port, rate):
+ curl_cmd = '''curl -D /dev/stdout \
+ -H "Accept: application/vnd.yang.data+xml" \
+ -H "Content-Type: application/vnd.yang.data+json" \
+ -X POST \
+ -d "{{ \\"rate\\":{ping_rate} }}" \
+ http://{ping_mgmt_ip}:{ping_mgmt_port}/api/v1/ping/rate
+'''.format(ping_mgmt_ip=mgmt_ip,
+ ping_mgmt_port=port,
+ ping_rate=rate)
+
+ logger.debug("Executing cmd: %s", curl_cmd)
+ subprocess.check_call(curl_cmd, shell=True)
+
+ # Get the ping rate
+ rate = yaml_cfg['parameter']['rate']
+
+ # Set ping rate
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+        # Check if it is ping vnf
+ if 'ping_vnfd' in vnfr['name']:
+ vnf_type = 'ping'
+ port = 18888
+ set_rate(vnfr['mgmt_ip_address'], port, rate)
+ break
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_set_rate-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger()
+
+ except Exception as e:
+ print("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+ try:
+ yaml_str = args.yaml_cfg_file.read()
+ # logger.debug("Input YAML file:\n{}".format(yaml_str))
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ ping_set_rate(yaml_cfg, logger)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+if __name__ == "__main__":
+ main()
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/scripts/start_traffic.py b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/scripts/start_traffic.py
new file mode 100755
index 0000000..22de542
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_ping_pong_epa/scripts/start_traffic.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def start_traffic(yaml_cfg, logger):
+ '''Use curl and set admin status to enable on pong and ping vnfs'''
+
+ def enable_service(mgmt_ip, port, vnf_type):
+ curl_cmd = 'curl -D /dev/null -H "Accept: application/vnd.yang.data' \
+ '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+ '-X POST -d "{{\\"enable\\":true}}" http://{mgmt_ip}:' \
+ '{mgmt_port}/api/v1/{vnf_type}/adminstatus/state'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=port,
+ vnf_type=vnf_type)
+
+ logger.debug("Executing cmd: %s", curl_cmd)
+ subprocess.check_call(curl_cmd, shell=True)
+
+ # Enable pong service first
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+ # Check if it is pong vnf
+ if 'pong_vnfd' in vnfr['name']:
+ vnf_type = 'pong'
+ port = 18889
+ enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ break
+
+    # Add a delay to allow the pong port to come up
+ time.sleep(0.1)
+
+ # Enable ping service next
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+        # Check if it is ping vnf
+ if 'ping_vnfd' in vnfr['name']:
+ vnf_type = 'ping'
+ port = 18888
+ enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ break
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_pong_start_traffic-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger()
+
+ except Exception as e:
+ print("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+ try:
+ yaml_str = args.yaml_cfg_file.read()
+ # logger.debug("Input YAML file:\n{}".format(yaml_str))
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ start_traffic(yaml_cfg, logger)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+if __name__ == "__main__":
+ main()
diff --git a/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py b/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py
index 1b5b156..1dc43d0 100755
--- a/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py
+++ b/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py
@@ -66,7 +66,7 @@
tosca_helloworld = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
- "data/tosca_helloworld.yaml")
+ "data/tosca_helloworld_nfv.yaml")
template_file = '--template-file=' + tosca_helloworld
template_validation = "--validate-only"
debug="--debug"
@@ -109,6 +109,7 @@
(self.template_file,
'--parameters=key'))
+ @unittest.skip
def test_valid_template(self):
try:
shell.main([self.template_file])
@@ -116,6 +117,7 @@
self.log.exception(e)
self.fail(self.failure_msg)
+ @unittest.skip
def test_validate_only(self):
try:
shell.main([self.template_file,
@@ -213,7 +215,7 @@
test_base_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'data')
template_file = os.path.join(test_base_dir,
- "ping_pong_csar/Definitions/ping_pong_nsd.yaml")
+ "tosca_ping_pong_epa/Definitions/ping_pong_nsd.yaml")
template = '--template-file='+template_file
temp_dir = tempfile.mkdtemp()
output_dir = "--output-dir=" + temp_dir
@@ -233,12 +235,13 @@
shutil.rmtree(temp_dir)
else:
self.log.warn("Generated desc in {}".format(temp_dir))
+
def test_input_csar(self):
test_base_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data')
- template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+ template_file = os.path.join(test_base_dir, "tosca_ping_pong_epa.zip")
template = '--template-file='+template_file
temp_dir = tempfile.mkdtemp()
output_dir = "--output-dir=" + temp_dir
@@ -259,12 +262,13 @@
shutil.rmtree(temp_dir)
else:
self.log.warn("Generated desc in {}".format(temp_dir))
-
+
+ @unittest.skip
def test_input_csar_no_gi(self):
test_base_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data')
- template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+ template_file = os.path.join(test_base_dir, "tosca_ping_pong_epa.zip")
template = '--template-file='+template_file
temp_dir = tempfile.mkdtemp()
output_dir = "--output-dir=" + temp_dir
diff --git a/common/python/rift/mano/utils/project.py b/common/python/rift/mano/utils/project.py
new file mode 100644
index 0000000..6e43177
--- /dev/null
+++ b/common/python/rift/mano/utils/project.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python3
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import asyncio
+import logging
+from time import sleep
+
+import gi
+gi.require_version('RwProjectManoYang', '1.0')
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+ RwProjectManoYang,
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+)
+
+import rift.tasklets
+
+
+class ManoProjectError(Exception):
+ pass
+
+
+class ManoProjNameSetErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathNoProjErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathKeyErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathNotRootErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathPresentErr(ManoProjectError):
+ pass
+
+
+NS = 'rw-project'
+PROJECT = 'project'
+NS_PROJECT = '{}:{}'.format(NS, PROJECT)
+XPATH = '/{}'.format(NS_PROJECT)
+XPATH_LEN = len(XPATH)
+
+NAME = 'name'
+NAME_LEN = len(NAME)
+NS_NAME = '{}:{}'.format(NS, NAME)
+
+DEFAULT_PROJECT = 'default'
+DEFAULT_PREFIX = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ DEFAULT_PROJECT)
+
+
+class ManoProject(object):
+ '''Class to handle the project name'''
+
+ log = None
+
+ @classmethod
+ def instance_from_xpath(cls, xpath, log):
+ name = cls.from_xpath(xpath, log)
+ if name is None:
+ return None
+
+ proj = ManoProject(log, name=name)
+ return proj
+
+ @classmethod
+ def from_xpath(cls, xpath, log):
+ log.debug("Get project name from {}".format(xpath));
+
+ if XPATH in xpath:
+ idx = xpath.find(XPATH)
+ if idx == -1:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ sub = xpath[idx+XPATH_LEN:].strip()
+ if (len(sub) < NAME_LEN) or (sub[0] != '['):
+ msg = "Project name not found in XPath: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ sub = sub[1:].strip()
+ idx = sub.find(NS_NAME)
+ if idx == -1:
+ idx = sub.find(NAME)
+ if idx != 0:
+ msg = "Project name not found in XPath: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ idx = sub.find(']')
+ if idx == -1:
+ msg = "XPath is invalid: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ sub = sub[:idx].strip()
+ try:
+ log.debug("Key and value found: {}".format(sub))
+ k, n = sub.split("=", 2)
+ name = n.strip(' \'"')
+ if name is None:
+ msg = "Project name is empty in XPath".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr (msg)
+
+ log.debug("Found project name {} from XPath {}".
+ format(name, xpath))
+ return name
+
+ except ValueError as e:
+ msg = "Project name not found in XPath: {}, exception: {}" \
+ .format(xpath, e)
+ log.exception(msg)
+ raise ManoProjXpathKeyErr(msg)
+ else:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ @classmethod
+ def get_log(cls):
+ if not cls.log:
+ cls.log = logging.getLogger('rw-mano-log.rw-project')
+ cls.log.setLevel(logging.ERROR)
+
+ @classmethod
+ def prefix_project(cls, xpath, project=None, log=None):
+ if log is None:
+ log = cls.get_log()
+
+ if project is None:
+ project = DEFAULT_PROJECT
+ proj_prefix = DEFAULT_PREFIX
+ else:
+ proj_prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ project)
+
+ prefix = ''
+ suffix = xpath
+ idx = xpath.find('C,/')
+ if idx == -1:
+ idx = xpath.find('D,/')
+
+ suffix = xpath
+ if idx != -1:
+ prefix = xpath[:2]
+ suffix = xpath[2:]
+
+ if suffix[0] != '/':
+ msg = "Non-rooted xpath provided: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNotRootErr(msg)
+
+ idx = suffix.find(XPATH)
+ if idx == 0:
+ name = cls.from_xpath(xpath, log)
+ if name == project:
+ log.debug("Project already in the XPATH: {}".format(xpath))
+ return xpath
+
+ else:
+ msg = "Different project {} already in XPATH {}". \
+ format(name, xpath)
+ log.error(msg)
+ raise ManoProjXpathPresentErr(msg)
+
+ ret = prefix + proj_prefix + suffix
+ return ret
+
+
+ def __init__(self, log, name=None, tasklet=None):
+ self._log = log
+ self._name = None
+ self._prefix = None
+ self._pbcm = None
+ self._tasklet = None
+ self._dts = None
+ self._loop = None
+ self._log_hdl = None
+
+ # Track if the apply config was received
+ self._apply = False
+
+ if name:
+ self.name = name
+
+ def update(self, tasklet):
+ # Store the commonly used properties from a tasklet
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def pbcm(self):
+ return self._pbcm
+
+ @property
+ def config(self):
+ return self._pbcm.project_config
+
+ @property
+ def tasklet(self):
+ return self._tasklet
+
+ @property
+ def log_hdl(self):
+ return self._log_hdl
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @name.setter
+ def name(self, value):
+ if self._name is None:
+ self._name = value
+ self._prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ self._name)
+ self._pbcm = RwProjectManoYang.YangData_RwProject_Project(
+ name=self._name)
+
+ elif self._name == value:
+ self._log.debug("Setting the same name again for project {}".
+ format(value))
+ else:
+ msg = "Project name already set to {}".format(self._name)
+ self._log.error(msg)
+ raise ManoProjNameSetErr(msg)
+
+ def set_from_xpath(self, xpath):
+ self.name = ManoProject.from_xpath(xpath, self._log)
+
+ def add_project(self, xpath):
+ return ManoProject.prefix_project(xpath, log=self._log, project=self._name)
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def delete_prepare(self):
+ self._log.debug("Delete prepare for project {}".format(self._name))
+ return (True, "True")
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def register(self):
+ msg = "Register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ @abc.abstractmethod
+ def deregister(self):
+ msg = "De-register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ def rpc_check(self, msg, xact_info=None):
+ '''Check if the rpc is for this project'''
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ project = DEFAULT_PROJECT
+
+ if project != self.name:
+ self._log.debug("Project {}: RPC is for different project {}".
+ format(self.name, project))
+ if xact_info:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return False
+
+ return True
+
+ @asyncio.coroutine
+ def create_project(self, dts):
+ proj_xpath = "C,{}/config".format(self.prefix)
+ self._log.info("Creating project: {} with {}".
+ format(proj_xpath, self.config.as_dict()))
+
+ yield from dts.query_create(proj_xpath,
+ rwdts.XactFlag.ADVISE,
+ self.config)
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+ #TODO: Check why this is getting called during project delete
+ if not dts_member_reg:
+ return [], [], []
+
+    # Unfortunately, it is currently difficult to figure out what has exactly
+ # changed in this xact without Pbdelta support (RIFT-4916)
+ # As a workaround, we can fetch the pre and post xact elements and
+ # perform a comparison to figure out adds/deletes/updates
+ xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+ curr_cfgs = list(dts_member_reg.elements)
+
+ xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+ curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+ # Find Adds
+ added_keys = set(xact_key_map) - set(curr_key_map)
+ added_cfgs = [xact_key_map[key] for key in added_keys]
+
+ # Find Deletes
+ deleted_keys = set(curr_key_map) - set(xact_key_map)
+ deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+ # Find Updates
+ updated_keys = set(curr_key_map) & set(xact_key_map)
+ updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+ return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ProjectConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_add_prepare=None,
+ on_delete_apply=None, on_delete_prepare=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_add_prepare = on_add_prepare
+ self.on_delete_apply = on_delete_apply
+ self.on_delete_prepare = on_delete_prepare
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+ for f in ('on_add_prepare', 'on_delete_prepare'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, prepare_noop)
+ continue
+
+ if not asyncio.iscoroutinefunction(ref):
+ raise ValueError("%s must be a coroutine" % f)
+
+
+class ProjectDtsHandler(object):
+ XPATH = "C,{}/project-config".format(XPATH)
+
+ def __init__(self, dts, log, callbacks, sub_config=True):
+ self._dts = dts
+ self._log = log
+ self._callbacks = callbacks
+
+ if sub_config:
+ self.xpath = ProjectDtsHandler.XPATH
+ self._key = 'name_ref'
+ else:
+ self.xpath = "C,{}".format(XPATH)
+ self._key = 'name'
+
+ self.reg = None
+ self.projects = []
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def add_project(self, name):
+ self._log.info("Adding project: {}".format(name))
+
+ if name not in self.projects:
+ self._callbacks.on_add_apply(name)
+ self.projects.append(name)
+ else:
+ self._log.error("Project already present: {}".
+ format(name))
+
+ def delete_project(self, name):
+ self._log.info("Deleting project: {}".format(name))
+ if name in self.projects:
+ self._callbacks.on_delete_apply(name)
+ self.projects.remove(name)
+ else:
+ self._log.error("Unrecognized project: {}".
+ format(name))
+
+ def update_project(self, name):
+ """ Update an existing project
+
+ Currently, we do not take any action on MANO for this,
+ so no callbacks are defined
+
+ Arguments:
+            name - The name of the project
+ """
+ self._log.info("Updating project: {}".format(name))
+ if name in self.projects:
+ pass
+ else:
+ self.add_project(name)
+
+ def register(self):
+ def on_init(acg, xact, scratch):
+ self._log.debug("on_init")
+ scratch["projects"] = {
+ "added": [],
+ "deleted": [],
+ "updated": [],
+ }
+ return scratch
+
+ def readd_projects(xact):
+ self._log.info("Re-add projects")
+
+ for cfg, ks in self._reg.get_xact_elements(xact, include_keyspec=True):
+ xpath = ks.to_xpath(RwProjectManoYang.get_schema())
+ self._log.debug("Got ks {} for cfg {}".format(xpath, cfg.as_dict()))
+ name = ManoProject.from_xpath(xpath, self._log)
+ self._log.debug("Project to add: {}".format(name))
+ self.add_project(name)
+
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got project apply config (xact: %s) (action: %s): %s",
+ xact, action, scratch)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ readd_projects(xact)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ try:
+ add_cfgs = scratch["projects"]["added"]
+ except KeyError:
+ add_cfgs = []
+
+ try:
+ del_cfgs = scratch["projects"]["deleted"]
+ except KeyError:
+ del_cfgs = []
+
+ try:
+ update_cfgs = scratch["projects"]["updated"]
+ except KeyError:
+ update_cfgs = []
+
+
+ # Handle Deletes
+ for name in del_cfgs:
+ self.delete_project(name)
+
+ # Handle Adds
+ for name, msg in add_cfgs:
+ self.add_project(name)
+
+ # Handle Updates
+ for name, msg in update_cfgs:
+ self.update_project(name)
+
+ try:
+ del scratch["projects"]
+ except KeyError:
+ pass
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+ xpath = ks_path.to_xpath(RwProjectManoYang.get_schema())
+ self._log.debug("Project xpath: {}".format(xpath))
+ name = ManoProject.from_xpath(xpath, self._log)
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ name, xact_info.query_action, msg)
+
+ if action == rwdts.QueryAction.CREATE:
+ if name in self.projects:
+ self._log.debug("Project {} already exists. Ignore request".
+ format(name))
+ else:
+ yield from self._callbacks.on_add_prepare(name)
+ scratch["projects"]["added"].append((name, msg))
+
+ elif action == rwdts.QueryAction.UPDATE:
+ if name in self.projects:
+ scratch["projects"]["updated"].append((name, msg))
+ else:
+ self._log.debug("Project {}: Invoking on_prepare add request".
+ format(name))
+ yield from self._callbacks.on_add_prepare(name)
+ scratch["projects"]["added"].append((name, msg))
+
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire project got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if name in self.projects:
+ rc, delete_msg = yield from self._callbacks.on_delete_prepare(name)
+ if not rc:
+ self._log.error("Project {} should not be deleted. Reason : {}".
+ format(name, delete_msg))
+
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ ProjectDtsHandler.XPATH,
+ delete_msg)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ scratch["projects"]["deleted"].append(name)
+ else:
+ self._log.warning("Delete on unknown project: {}".
+ format(name))
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for project config using xpath: %s",
+ ProjectDtsHandler.XPATH,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ on_init=on_init)
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=ProjectDtsHandler.XPATH,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
+
+
+class ProjectHandler(object):
+ def __init__(self, tasklet, project_class, **kw):
+ self._tasklet = tasklet
+ self._log = tasklet.log
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+ self._class = project_class
+ self._kw = kw
+
+ self._log.debug("Creating project config handler")
+ self.project_cfg_handler = ProjectDtsHandler(
+ self._dts, self._log,
+ ProjectConfigCallbacks(
+ on_add_apply=self.on_project_added,
+ on_add_prepare=self.on_add_prepare,
+ on_delete_apply=self.on_project_deleted,
+ on_delete_prepare=self.on_delete_prepare,
+ )
+ )
+
+ def _get_tasklet_name(self):
+ return self._tasklet.tasklet_info.instance_name
+
+ def _get_project(self, name):
+ try:
+ proj = self._tasklet.projects[name]
+ except Exception as e:
+ self._log.exception("Project {} ({})not found for tasklet {}: {}".
+ format(name, list(self._tasklet.projects.keys()),
+ self._get_tasklet_name(), e))
+ raise e
+
+ return proj
+
+ def on_project_deleted(self, name):
+ self._log.debug("Project {} deleted".format(name))
+ try:
+ self._get_project(name).deregister()
+ except Exception as e:
+ self._log.exception("Project {} deregister for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ proj = self._tasklet.projects.pop(name)
+ del proj
+ except Exception as e:
+ self._log.exception("Project {} delete for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ def on_project_added(self, name):
+ if name not in self._tasklet.projects:
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet, **(self._kw))
+ task = asyncio.ensure_future(self._get_project(name).register(),
+ loop=self._loop)
+
+ self._log.debug("Project {} register: {}".format(name, str(task)))
+
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+ raise e
+
+ self._log.debug("Project {} added to tasklet {}".
+ format(name, self._get_tasklet_name()))
+ self._get_project(name)._apply = True
+
+ @asyncio.coroutine
+ def on_add_prepare(self, name):
+ self._log.debug("Project {} to be added to {}".
+ format(name, self._get_tasklet_name()))
+ if name in self._tasklet.projects:
+ self._log.error("Project {} already exists for {}".
+ format(name, self._get_tasklet_name()))
+ return
+
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet, **(self._kw))
+ yield from self._get_project(name).register()
+
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+ raise e
+
+ @asyncio.coroutine
+ def on_delete_prepare(self, name):
+ self._log.debug("Project {} being deleted for tasklet {}".
+ format(name, self._get_tasklet_name()))
+ rc, delete_msg = yield from self._get_project(name).delete_prepare()
+ return rc, delete_msg
+
+ def register(self):
+ self.project_cfg_handler.register()
diff --git a/common/python/rift/mano/utils/ssh_keys.py b/common/python/rift/mano/utils/ssh_keys.py
new file mode 100644
index 0000000..6453f88
--- /dev/null
+++ b/common/python/rift/mano/utils/ssh_keys.py
@@ -0,0 +1,147 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import argparse
+import logging
+import os
+import socket
+import stat
+import sys
+import tempfile
+
+from Crypto.PublicKey import RSA
+
+
+class ManoSshKey(object):
+ '''
+ Generate a SSH key pair and store them in a file
+ '''
+
+ def __init__(self, log, size=2048):
+ self._log = log
+ self._size = size
+
+ self._key = None
+ self._key_pem = None
+ self._pub_ssh = None
+ self._key_file = None
+ self._pub_file = None
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def size(self):
+ return self._size
+
+ @property
+ def private_key(self):
+ if self._key is None:
+ self._gen_keys()
+ return self._key_pem
+
+ @property
+ def public_key(self):
+ if self._key is None:
+ self._gen_keys()
+ return self._pub_ssh
+
+ @property
+ def private_key_file(self):
+ return self._key_file
+
+ @property
+ def public_key_file(self):
+ return self._pub_file
+
+ def _gen_keys(self):
+ if self._key:
+ return
+
+ self.log.info("Generating key of size: {}".format(self.size))
+
+ self._key = RSA.generate(self.size, os.urandom)
+ self._key_pem = self._key.exportKey('PEM').decode('utf-8')
+ self.log.debug("Private key PEM: {}".format(self._key_pem))
+
+ # Public key export as 'OpenSSH' has a bug
+ # (https://github.com/dlitz/pycrypto/issues/99)
+
+ username = None
+ try:
+ username = os.getlogin()
+ hostname = socket.getfqdn()
+ except OSError:
+ pass
+
+ pub = self._key.publickey().exportKey('OpenSSH').decode('utf-8')
+ if username:
+ self._pub_ssh = '{} {}@{}'.format(pub, username, hostname)
+ else:
+ self._pub_ssh = pub
+ self.log.debug("Public key SSH: {}".format(self._pub_ssh))
+
+ def write_to_disk(self,
+ name="id_rsa",
+ directory="."):
+ if self._key is None:
+ self._gen_keys()
+
+ path = os.path.abspath(directory)
+ self._pub_file = "{}/{}.pub".format(path, name)
+ self._key_file = "{}/{}.key".format(path, name)
+
+ with open(self._key_file, 'w') as content_file:
+ content_file.write(self.private_key)
+ os.chmod(self._key_file, stat.S_IREAD|stat.S_IWRITE)
+
+ with open(self._pub_file, 'w') as content_file:
+ content_file.write(self.public_key)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Generate SSH key pair')
+ parser.add_argument("-s", "--size", type=int, default=2048, help="Key size")
+ parser.add_argument("-d", "--directory", help="Directory to store the keys")
+ parser.add_argument("-n", "--name", help="Name for the key file")
+ parser.add_argument("--debug", help="Enable debug logging",
+ action="store_true")
+ args = parser.parse_args()
+
+ fmt = logging.Formatter(
+ '%(asctime)-23s %(levelname)-5s (%(name)s@%(process)d:' \
+ '%(filename)s:%(lineno)d) - %(message)s')
+ stderr_handler = logging.StreamHandler(stream=sys.stderr)
+ stderr_handler.setFormatter(fmt)
+ if args.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+ log = logging.getLogger('rw-mano-ssh-keys')
+ log.addHandler(stderr_handler)
+
+ log.info("Args passed: {}".format(args))
+ if args.directory:
+ path = args.directory
+ else:
+ path = tempfile.mkdtemp()
+
+ kp = ManoSshKey(log, size=args.size)
+ kp.write_to_disk(directory=path)
+ log.info("Private Key: {}".format(kp.private_key))
+ log.info("Public key: {}".format(kp.public_key))
+ log.info("Key file: {}, Public file: {}".format(kp.private_key_file,
+ kp.public_key_file))
diff --git a/common/python/rift/mano/yang_translator/riftiotypes.yaml b/common/python/rift/mano/yang_translator/riftiotypes.yaml
index 7e4158b..7388d32 100644
--- a/common/python/rift/mano/yang_translator/riftiotypes.yaml
+++ b/common/python/rift/mano/yang_translator/riftiotypes.yaml
@@ -1148,6 +1148,20 @@
type: tosca.capabilities.nfv.riftio.hypervisor_epa
host_epa:
type: tosca.capabilities.nfv.riftio.host_epa
+ monitoring_param_2:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_3:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_4:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_5:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_6:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_7:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_8:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
tosca.nodes.nfv.riftio.CP1:
derived_from: tosca.nodes.nfv.CP
properties:
@@ -1428,6 +1442,11 @@
description: >-
A user defined script
required: false
+ name:
+ type: string
+ description: >-
+ Name of primitive
+ required: false
tosca.policies.nfv.riftio.initial_config_primitive:
derived_from: tosca.policies.Root
properties:
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
index 57b0a31..9204134 100644
--- a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
@@ -23,8 +23,8 @@
# from REQUIRED_FIELDS below
NAME = 'name'
- REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID) = \
- ('description', 'version', 'vendor', 'id')
+ REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID, LOGO) = \
+ ('description', 'version', 'vendor', 'id', 'logo')
COMMON_FIELDS = (PATH, PORT, HOST, XPATH, TYPE, COUNT, FILE,
NFV_COMPUTE, HOST_EPA, VSWITCH_EPA, HYPERVISOR_EPA, GUEST_EPA) = \
@@ -98,6 +98,7 @@
T_ELAN,
T_VNFFG,
T_FP,
+ T_NS_PRIMITIVE,
) = \
('tosca.policies.nfv.riftio.vnf_configuration',
'tosca.capabilities.riftio.http_endpoint_type',
@@ -116,13 +117,14 @@
'tosca.nodes.nfv.riftio.ELAN',
'tosca.groups.nfv.VNFFG',
'tosca.nodes.nfv.riftio.FP1',
+ 'tosca.policies.nfv.riftio.ns_service_primitives',
)
SUPPORT_FILES = ( SRC, DEST, EXISTING) = \
('source', 'destination', 'existing')
- SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR, CLOUD_INIT_DIR) = \
- ('images', 'scripts','cloud_init')
+ SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR, CLOUD_INIT_DIR, ICON_DIR) = \
+ ('images', 'scripts','cloud_init', 'icons')
def __init__(self,
log,
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
index 95f2cb2..039648f 100644
--- a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
@@ -138,7 +138,7 @@
# Add all types
types_list = [ToscaResource.DATA_TYPES, ToscaResource.CAPABILITY_TYPES,
- ToscaResource.NODE_TYPES,
+ ToscaResource.NODE_TYPES, ToscaResource.ARTIFACT_TYPES,
ToscaResource.GROUP_TYPES, ToscaResource.POLICY_TYPES]
for typ in types_list:
if typ in tosca:
diff --git a/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
index 707ab7f..2023db5 100644
--- a/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
+++ b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
@@ -148,26 +148,31 @@
for nsd in self.yangs[self.NSD]:
self.log.debug(_("Translate descriptor of type nsd: {}").
format(nsd))
+ node_name = nsd.pop(ToscaResource.NAME).replace(' ','_')
+ node_name = node_name if node_name.endswith('nsd') else ''.join([node_name, '_nsd'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.NSD](
self.log,
- nsd.pop(ToscaResource.NAME),
+ node_name,
self.NSD,
nsd,
self.vnfd_files)
self.tosca_resources.append(tosca_node)
+ vnfd_name_list = []
if self.VNFD in self.yangs:
for vnfd in self.yangs[self.VNFD]:
- self.log.debug(_("Translate descriptor of type vnfd: {}").
- format(vnfd))
- tosca_node = TranslateDescriptors. \
- YANG_TO_TOSCA_TYPE[self.VNFD](
- self.log,
- vnfd.pop(ToscaResource.NAME),
- self.VNFD,
- vnfd)
- self.tosca_resources.append(tosca_node)
+ if vnfd['name'] not in vnfd_name_list:
+ self.log.debug(_("Translate descriptor of type vnfd: {}").
+ format(vnfd))
+ vnfd_name_list.append(vnfd['name'])
+ tosca_node = TranslateDescriptors. \
+ YANG_TO_TOSCA_TYPE[self.VNFD](
+ self.log,
+ vnfd.pop(ToscaResource.NAME),
+ self.VNFD,
+ vnfd)
+ self.tosca_resources.append(tosca_node)
# First translate VNFDs
for node in self.tosca_resources:
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
index d28b3e1..63d8157 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
@@ -22,6 +22,7 @@
import ToscaResource
from rift.mano.yang_translator.rwmano.yang.yang_vld import YangVld
from collections import OrderedDict
+import re
TARGET_CLASS_NAME = 'YangNsd'
@@ -40,11 +41,11 @@
INITIAL_CFG,) = \
('scaling_group_descriptor', 'service_primitive',
'user_defined_script', 'scaling_config_action',
- 'trigger', 'ns_config_primitive_name_ref',
+ 'trigger', 'ns_service_primitive_name_ref',
'constituent_vnfd', 'vnfd_member',
'min_instance_count', 'max_instance_count',
'input_parameter_xpath', 'config_actions',
- 'initial_config_primitive',)
+ 'initial_config_primitive', )
def __init__(self,
log,
@@ -63,6 +64,7 @@
self.conf_prims = []
self.scale_grps = []
self.initial_cfg = []
+ self.service_primitive = []
self.placement_groups = []
self.vnf_id_to_vnf_map = {}
self.vnfd_files = vnfd_files
@@ -73,6 +75,7 @@
self.forwarding_paths = {}
self.substitution_mapping_forwarder = []
self.vnfd_sfc_map = None
+ self.duplicate_vnfd_name_list = []
def handle_yang(self, vnfds):
self.log.debug(_("Process NSD desc {0}: {1}").
@@ -85,7 +88,7 @@
self.inputs.append({
self.NAME:
self.map_yang_name_to_tosca(
- val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
+ val.replace('/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:', ''))})
if len(param):
self.log.warn(_("{0}, Did not process the following for "
"input param {1}: {2}").
@@ -155,12 +158,12 @@
if key in dic:
icp[key] = dic.pop(key)
- params = {}
+ params = []
if self.PARAM in dic:
for p in dic.pop(self.PARAM):
if (self.NAME in p and
self.VALUE in p):
- params[p[self.NAME]] = p[self.VALUE]
+ params.append({self.NAME: p[self.NAME], self.VALUE:p[self.VALUE]})
else:
# TODO (pjoseph): Need to add support to read the
# config file and get the value from that
@@ -175,6 +178,31 @@
self.log.debug(_("{0}, Initial config {1}").format(self, icp))
self.initial_cfg.append({self.PROPERTIES : icp})
+ def process_service_primitive(dic):
+ prop = {}
+ params = []
+ for key in [self.NAME, self.USER_DEF_SCRIPT]:
+ if key in dic:
+ prop[key] = dic.pop(key)
+
+ if self.PARAM in dic:
+ for p in dic.pop(self.PARAM):
+ p_entry = {}
+ for name, value in p.items():
+ p_entry[name] = value
+ params.append(p_entry)
+
+ if len(params):
+ prop[self.PARAM] = params
+
+ conf_prim = {self.NAME: prop[self.NAME], self.DESC : 'TestDescription'}
+ if self.USER_DEF_SCRIPT in prop:
+ conf_prim[self.USER_DEF_SCRIPT] = prop[self.USER_DEF_SCRIPT]
+ self.conf_prims.append(conf_prim)
+
+ self.service_primitive.append({self.PROPERTIES : prop})
+
+
def process_vld(vld, dic):
vld_conf = {}
vld_prop = {}
@@ -415,14 +443,22 @@
dic = deepcopy(self.yang)
try:
for key in self.REQUIRED_FIELDS:
- self.props[key] = dic.pop(key)
+ if key in dic:
+ self.props[key] = dic.pop(key)
self.id = self.props[self.ID]
# Process constituent VNFDs
+
+ vnfd_name_list = []
+ member_vnf_index_list = []
if self.CONST_VNFD in dic:
for cvnfd in dic.pop(self.CONST_VNFD):
- process_const_vnfd(cvnfd)
+ if cvnfd[self.VNFD_ID_REF] not in member_vnf_index_list:
+ member_vnf_index_list.append(cvnfd[self.VNFD_ID_REF])
+ process_const_vnfd(cvnfd)
+ else:
+ self.duplicate_vnfd_name_list.append(self.vnf_id_to_vnf_map[cvnfd[self.VNFD_ID_REF]])
# Process VLDs
if self.VLD in dic:
@@ -435,33 +471,23 @@
process_vnffgd(dic[self.VNFFGD], dic)
- #if self.
-
- # Process config primitives
- if self.CONF_PRIM in dic:
- for cprim in dic.pop(self.CONF_PRIM):
- conf_prim = {self.NAME: cprim.pop(self.NAME), self.DESC : 'TestDescription'}
- if self.USER_DEF_SCRIPT in cprim:
- conf_prim[self.USER_DEF_SCRIPT] = \
- cprim.pop(self.USER_DEF_SCRIPT)
- self.conf_prims.append(conf_prim)
- else:
- err_msg = (_("{0}, Only user defined script supported "
- "in config-primitive for now {}: {}").
- format(self, conf_prim, cprim))
- self.log.error(err_msg)
- raise ValidationError(message=err_msg)
-
- # Process scaling group
- if self.SCALE_GRP in dic:
- for sg_dic in dic.pop(self.SCALE_GRP):
- process_scale_grp(sg_dic)
+
# Process initial config primitives
if self.INITIAL_CFG in dic:
for icp_dic in dic.pop(self.INITIAL_CFG):
process_initial_config(icp_dic)
+            # NS service primitive
+ if self.CONF_PRIM in dic:
+ for icp_dic in dic.pop(self.CONF_PRIM):
+ process_service_primitive(icp_dic)
+
+ # Process scaling group
+ if self.SCALE_GRP in dic:
+ for sg_dic in dic.pop(self.SCALE_GRP):
+ process_scale_grp(sg_dic)
+
# Process the input params
if self.INPUT_PARAM_XPATH in dic:
for param in dic.pop(self.INPUT_PARAM_XPATH):
@@ -556,10 +582,13 @@
self.VENDOR: self.props[self.VENDOR],
self.VERSION: self.props[self.VERSION],
}
+ if self.LOGO in self.props:
+ tosca[self.METADATA][self.LOGO] = self.props[self.LOGO]
+
if len(self.vnfd_files) > 0:
tosca[self.IMPORT] = []
imports = []
- for vnfd_file in self.vnfd_files:
+ for vnfd_file in set(self.vnfd_files):
tosca[self.IMPORT].append('"{0}.yaml"'.format(vnfd_file))
tosca[self.TOPOLOGY_TMPL] = {}
@@ -578,6 +607,7 @@
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL] = {}
# Add the VNFDs and VLDs
+ vnf_type_vld_list = []
for idx, vnfd in self.vnfds.items():
#vnfd.generate_vnf_template(tosca, idx)
node = {
@@ -591,28 +621,44 @@
if vnfd.name in self.vnf_to_vld_map:
vld_list = self.vnf_to_vld_map[vnfd.name]
node[self.REQUIREMENTS] = []
+
for vld_idx in range(0, len(vld_list)):
- vld_link_name = "{0}{1}".format("virtualLink", vld_idx + 1)
- vld_prop = {}
- vld_prop[vld_link_name] = vld_list[vld_idx]
- node[self.REQUIREMENTS].append(vld_prop)
- if vnfd.name in self._vnf_vld_conn_point_map:
- vnf_vld_list = self._vnf_vld_conn_point_map[vnfd.name]
- for vnf_vld in vnf_vld_list:
- vnfd.generate_vld_link(vld_link_name, vnf_vld[1])
+ if vnfd.vnf_type not in vnf_type_vld_list:
+ vld_link_name = "{0}{1}".format("virtualLink", vld_idx + 1)
+ vld_prop = {}
+ vld_prop[vld_link_name] = vld_list[vld_idx]
+ node[self.REQUIREMENTS].append(vld_prop)
+ if vnfd.vnf_type not in vnf_type_vld_list:
+ vnf_type_vld_list.append(vnfd.vnf_type)
+ if vnfd.name in self._vnf_vld_conn_point_map:
+ vnf_vld_list = set(self._vnf_vld_conn_point_map[vnfd.name])
+ for vnf_vld in vnf_vld_list:
+ vnfd.generate_vld_link(vld_link_name, vnf_vld[1])
for sub_mapping in self.substitution_mapping_forwarder:
if sub_mapping[0] == vnfd.name:
vnfd.generate_forwarder_sub_mapping(sub_mapping)
- for vnfd_name, cp_name in self.vnfd_sfc_map.items():
- if vnfd.name == vnfd_name:
- vnfd.generate_sfc_link(cp_name)
+ if self.vnfd_sfc_map:
+ for vnfd_name, cp_name in self.vnfd_sfc_map.items():
+ if vnfd.name == vnfd_name:
+ vnfd.generate_sfc_link(cp_name)
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vnfd.name] = node
+ v_idx = len(self.vnfds) + 1 + len(self.duplicate_vnfd_name_list)
+ for vnfd_name in self.duplicate_vnfd_name_list:
+ node = tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vnfd_name]
+ new_node = deepcopy(node)
+            st = re.sub(r'\d+$', '', re.sub(r'_vnfd$', '', vnfd_name))
+
+ new_node[self.PROPERTIES][self.ID] = v_idx
+ node_name = "{}{}_vnfd".format(st, v_idx)
+ tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][node_name] = new_node
+ v_idx += 1
+
for vld_node_name in self.vlds:
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vld_node_name] = self.vlds[vld_node_name]
@@ -672,6 +718,23 @@
self.INITIAL_CFG: icpt
})
+ if len(self.service_primitive) > 0:
+ if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
+ tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
+
+ for icp in self.service_primitive:
+ if len(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL]) > 0:
+ node_name = list(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL].keys())[0]
+ icpt = {
+ self.TYPE: self.T_NS_PRIMITIVE,
+ self.TARGETS : "[{0}]".format(node_name)
+ }
+ icpt.update(icp)
+ tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
+ 'ns_service_primitives': icpt
+ })
+
+
if len(self.placement_groups) > 0:
if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
@@ -701,6 +764,25 @@
self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
})
+ for prim in self.service_primitive:
+ if 'properties' in prim:
+ if 'user_defined_script' in prim['properties']:
+ script = os.path.basename(prim['properties']['user_defined_script'])
+ files.append({
+ self.TYPE: 'script',
+ self.NAME: script,
+ self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
+ })
+
+ if 'logo' in self.props:
+ icon = os.path.basename(self.props['logo'])
+ files.append({
+ self.TYPE: 'icons',
+ self.NAME: icon,
+ self.DEST: "{}/{}".format(self.ICON_DIR, icon),
+ })
+
+
# TODO (pjoseph): Add support for config scripts,
# charms, etc
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
index ec21e3c..84fdf22 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
@@ -35,6 +35,15 @@
('mgmt_interface', 'http_endpoint', 'monitoring_param')
vnf_prefix_type = 'tosca.nodes.nfv.riftio.'
+    VALUE_TYPE_CONVERSION_MAP = {
+        'INTEGER' : 'integer',
+        'INT' : 'integer',
+        'STRING' : 'string',
+        'STR' : 'string',
+        'DECIMAL' : 'float',
+        'FLOAT' : 'float'
+
+    }
def __init__(self,
log,
@@ -100,7 +109,7 @@
for parameter in init_conf_prim['parameter']:
init_conf['parameter'].append({parameter['name']: parameter['value']})
init_config_prims.append(init_conf)
- vnf_conf['initial_config_primitive'] = init_config_prims
+ vnf_conf['initial_config'] = init_config_prims
self.vnf_configuration = vnf_conf
@@ -168,6 +177,9 @@
mon_param['url_path'] = param['http_endpoint_ref']
if 'json_query_method' in param:
mon_param['json_query_method'] = param['json_query_method'].lower()
+ #if 'value_type' in param:
+ # mon_param['constraints'] = {}
+ # mon_param['constraints']['value_type'] = YangVnfd.VALUE_TYPE_CONVERSION_MAP[param['value_type'].upper()]
if 'group_tag' in param:
ui_param['group_tag'] = param['group_tag']
if 'widget_type' in param:
@@ -210,7 +222,8 @@
dic = deepcopy(self.yang)
try:
for key in self.REQUIRED_FIELDS:
- self.props[key] = dic.pop(key)
+ if key in dic:
+ self.props[key] = dic.pop(key)
self.id = self.props[self.ID]
@@ -292,11 +305,13 @@
mon_param = {}
mon_param['properties'] = self.mon_param[0]
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['monitoring_param'] = mon_param #TEST
- if len(self.mon_param) == 2:
- mon_param = {}
- mon_param = {}
- mon_param['properties'] = self.mon_param[1]
- tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['monitoring_param_1'] = mon_param
+ if len(self.mon_param) > 1:
+ for idx in range(1, len(self.mon_param)):
+ monitor_param_name = "monitoring_param_{}".format(idx)
+                    # one capability entry per additional monitoring param
+                    mon_param = {}
+ mon_param['properties'] = self.mon_param[idx]
+ tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES][monitor_param_name] = mon_param
node = {}
node[self.TYPE] = self.T_VNF1
@@ -363,13 +378,17 @@
for vdu in self.vdus:
if conn_point in vdu.cp_name_to_cp_node:
conn_point_node_name = vdu.cp_name_to_cp_node[conn_point]
- self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS].\
- append({virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")})
+ if {virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")} not in \
+ self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS]:
+ self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS].\
+ append({virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")})
if self.REQUIREMENTS not in self.tosca[self.NODE_TYPES][self.vnf_type]:
self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS] = []
- self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS].append({virtualLink : {
- "type": "tosca.nodes.nfv.VL"}})
+ if {virtualLink : {"type": "tosca.nodes.nfv.VL"}} not in self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS]:
+ self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS].append({virtualLink : {
+ "type": "tosca.nodes.nfv.VL"}})
+
def generate_forwarder_sub_mapping(self, sub_link):
if self.CAPABILITIES not in self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]:
self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.CAPABILITIES] = {}
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang_translator.py b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
index 0919494..a62bff3 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
@@ -32,7 +32,7 @@
import rift.package.cloud_init
import rift.package.script
import rift.package.store
-
+import rift.package.icon
class YangTranslator(object):
'''Invokes translation methods.'''
@@ -59,10 +59,10 @@
self.get_yangs()
else:
if 'nsd' in self.yangs:
- self.output_files['nsd'].append(self.yangs['nsd'][0]['short_name'])
+ self.output_files['nsd'].append(self.yangs['nsd'][0]['short_name'].replace(' ','_'))
if 'vnfd' in self.yangs:
for yang_vnfd in self.yangs['vnfd']:
- self.output_files['vnfd'].append(yang_vnfd['short_name'])
+ self.output_files['vnfd'].append(yang_vnfd['short_name'].replace(' ','_'))
self.node_translator = TranslateDescriptors(self.log,
self.yangs,
@@ -133,9 +133,9 @@
raise ValidationError(message="No NSD or VNFD uploaded")
else:
if 'nsd' in self.yangs:
- sub_folder_name = self.yangs['nsd'][0]['short_name']
+ sub_folder_name = self.yangs['nsd'][0]['short_name'].replace(' ','_')
elif 'vnfd' in self.yangs:
- sub_folder_name = self.yangs['vnfd'][0]['short_name']
+ sub_folder_name = self.yangs['vnfd'][0]['short_name'].replace(' ','_')
subdir = os.path.join(output_dir, sub_folder_name)
@@ -147,14 +147,20 @@
def_dir = os.path.join(subdir, 'Definitions')
os.makedirs(def_dir)
shutil.copy2(riftio_src_file, def_dir + "/riftiotypes.yaml")
+ tosca_meta_entry_file = None
for tmpl_key in tmpl_out:
tmpl = tmpl_out[tmpl_key]
- entry_file = os.path.join(def_dir, tmpl_key+'.yaml')
+ file_name = tmpl_key.replace(' ','_')
+ entry_file = os.path.join(def_dir, file_name+'.yaml')
+ if file_name.endswith('nsd'):
+ tosca_meta_entry_file = file_name
self.log.debug(_("Writing file {0}").
format(entry_file))
with open(entry_file, 'w+') as f:
f.write(tmpl[ToscaTemplate.TOSCA])
+ if tosca_meta_entry_file is None:
+ tosca_meta_entry_file = sub_folder_name
# Create the Tosca meta
meta_dir = os.path.join(subdir, 'TOSCA-Metadata')
os.makedirs(meta_dir)
@@ -162,7 +168,7 @@
CSAR-Version: 1.1
Created-By: RIFT.io
Entry-Definitions: Definitions/'''
- meta_data = "{}{}".format(meta, sub_folder_name+'.yaml')
+ meta_data = "{}{}".format(meta, tosca_meta_entry_file+'.yaml')
meta_file = os.path.join(meta_dir, 'TOSCA.meta')
self.log.debug(_("Writing file {0}:\n{1}").
format(meta_file, meta_data))
@@ -213,6 +219,15 @@
pkg.extract_file(script_file_map[fname],
dest_path)
break
+ elif ftype == 'icons':
+ icon_file_map = \
+ rift.package.icon.PackageIconExtractor.package_icon_files(pkg)
+ if fname in icon_file_map:
+                        self.log.debug(_("Extracting icon {0} to {1}").
+ format(fname, dest_path))
+ pkg.extract_file(icon_file_map[fname],
+ dest_path)
+ break
else:
self.log.warn(_("Unknown file type {0}: {1}").
@@ -226,7 +241,7 @@
os.chdir(subdir)
try:
- zip_file = key + '.zip'
+ zip_file = sub_folder_name + '.zip'
zip_path = os.path.join(output_dir, zip_file)
self.log.debug(_("Creating zip file {0}").format(zip_path))
zip_cmd = "zip -r {}.partial ."
diff --git a/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml b/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml
index 9a35a7e..a7fea0b 100644
--- a/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml
+++ b/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml
@@ -7,21 +7,8 @@
data_types:
tosca.datatypes.network.riftio.vnf_configuration:
properties:
- config_delay:
- constraints:
- - greater_or_equal: 0
- default: 0
- required: no
- type: integer
config_details:
type: map
- config_priority:
- constraints:
- - greater_than: 0
- type: integer
- config_template:
- required: no
- type: string
config_type:
type: string
capability_types:
@@ -194,23 +181,8 @@
vendor: RIFT.io
version: 1.0
vnf_configuration:
- config_delay: 0
config_details:
script_type: bash
- config_priority: 2
- config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
- ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
- \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
- \ to configure VNF\ncurl -D /dev/stdout \\\n -H \"Accept: application/vnd.yang.data+xml\"\
- \ \\\n -H \"Content-Type: application/vnd.yang.data+json\" \\\n \
- \ -X POST \\\n -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
- \":$server_port}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set server info for\
- \ ping!\"\n exit $rc\nfi\n\ncurl -D /dev/stdout \\\n -H \"Accept:\
- \ application/vnd.yang.data+xml\" \\\n -H \"Content-Type: application/vnd.yang.data+json\"\
- \ \\\n -X POST \\\n -d \"{\\\"rate\\\":$ping_rate}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set ping rate!\"\n\
- \ exit $rc\nfi\n\nexit 0\n"
config_type: script
capabilities:
http_endpoint: