FILES
rift/downloader/__init__.py
rift/downloader/base.py
+ rift/downloader/local_file.py
rift/downloader/url.py
COMPONENT ${PKG_LONG_NAME}
PYTHON3_ONLY
--- /dev/null
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Taken from http://stackoverflow.com/a/27786580
+
+
+import logging
+import requests
+import os
+from urllib.parse import urlparse
+
+
+class LocalFileAdapter(requests.adapters.BaseAdapter):
+ """Protocol Adapter to allow Requests to GET file:// URLs
+
+ @todo: Properly handle non-empty hostname portions.
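+
+    Usage sketch (the URL downloader in this package mounts this adapter on
+    its requests session; the path below is just an illustration):
+
+        session = requests.Session()
+        session.mount("file://", LocalFileAdapter())
+        response = session.get("file:///tmp/example.txt")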
+ """
+
+ @staticmethod
+ def _chkpath(method, path):
+ """Return an HTTP status for the given filesystem path."""
+ if method.lower() in ('put', 'delete'):
+ return 501, "Not Implemented" # TODO
+ elif method.lower() not in ('get', 'head'):
+ return 405, "Method Not Allowed"
+ elif os.path.isdir(path):
+ return 400, "Path Not A File"
+ elif not os.path.isfile(path):
+ return 404, "File Not Found"
+ elif not os.access(path, os.R_OK):
+ return 403, "Access Denied"
+ else:
+ return 200, "OK"
+
+ def send(self, req, **kwargs): # pylint: disable=unused-argument
+ """Return the file specified by the given request
+
+ @type req: C{PreparedRequest}
+ @todo: Should I bother filling `response.headers` and processing
+ If-Modified-Since and friends using `os.stat`?
+ """
+
+ log = logging.getLogger('rw-mano-log')
+ log.debug("Request: {}".format(req))
+
+ url = urlparse(req.path_url)
+ path = os.path.normcase(os.path.normpath(url.path))
+ response = requests.Response()
+
+ response.status_code, response.reason = self._chkpath(req.method, path)
+ log.debug("Response {}: {}".format(response.status_code, response.reason))
+ if response.status_code == 200 and req.method.lower() != 'head':
+ try:
+ response.raw = open(path, 'rb')
+ except (OSError, IOError) as err:
+ response.status_code = 500
+ response.reason = str(err)
+
+ if isinstance(req.url, bytes):
+ response.url = req.url.decode('utf-8')
+ else:
+ response.url = req.url
+
+ response.request = req
+ response.connection = self
+
+ log.debug("Response {}: {}".format(response.status_code, response))
+ return response
+
+ def close(self):
+ pass
from gi.repository import RwPkgMgmtYang
from . import base
+from .local_file import LocalFileAdapter as LocalFileAdapter
class UrlDownloader(base.AbstractDownloader):
retries = Retry(total=5, backoff_factor=1)
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
+ session.mount("file://", LocalFileAdapter())
return session
self._cal = self.plugin.get_interface("Cloud")
self._cal.init(rwlog_hdl)
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@asyncio.coroutine
def validate_cloud_account_credentials(self, loop):
self._log.debug("Validating Cloud Account credentials %s", self._account_msg)
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus(
status="validating",
details="Cloud account connection validation in progress"
)
self.cal_account_msg,
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.CloudAcc_ConnectionStatus(
status="failure",
details="Error when calling CAL validate cloud creds"
)
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
RwDts as rwdts,
ProtobufC,
+ RwCloudYang,
)
from . import accounts
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class CloudAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
class CloudAccountConfigSubscriber(object):
- XPATH = "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account"
+ XPATH = "C,/rw-cloud:cloud/rw-cloud:account"
- def __init__(self, dts, log, rwlog_hdl, cloud_callbacks):
+ def __init__(self, dts, log, rwlog_hdl, project, cloud_callbacks):
self._dts = dts
self._log = log
self._rwlog_hdl = rwlog_hdl
+ self._project = project
self._reg = None
self.accounts = {}
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("Project {}: De-register cloud account handler".
+ format(self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
@asyncio.coroutine
- def apply_config(dts, acg, xact, action, _):
+ def apply_config(dts, acg, xact, action, scratch):
self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
if xact.xact is None:
""" Prepare callback from DTS for Cloud Account """
action = xact_info.query_action
+
+ xpath = ks_path.to_xpath(RwCloudYang.get_schema())
+
self._log.debug("Cloud account on_prepare config received (action: %s): %s",
xact_info.query_action, msg)
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
if msg.name in self.accounts:
- self._log.debug("Cloud account already exists. Invoking update request")
+ self._log.debug("Cloud account {} already exists. " \
+ "Invoking update request".format(msg.name))
# Since updates are handled by a delete followed by an add, invoke the
# delete prepare callbacks to give clients an opportunity to reject.
on_apply=apply_config,
)
+ xpath = self._project.add_project(CloudAccountConfigSubscriber.XPATH)
with self._dts.appconf_group_create(acg_handler) as acg:
self._reg = acg.register(
- xpath=CloudAccountConfigSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare,
)
from gi.repository import(
RwCloudYang,
RwDts as rwdts,
+ RwTypes,
)
class CloudAccountNotFound(Exception):
class CloudAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
+ self._regh = None
+ self._rpc = None
self.cloud_accounts = {}
def add_cloud_account(self, account):
def _register_show_status(self):
def get_xpath(cloud_name=None):
- return "D,/rw-project:project/rw-cloud:cloud/account{}/connection-status".format(
- "[name='%s']" % cloud_name if cloud_name is not None else ''
- )
+ return "D,/rw-cloud:cloud/account{}/connection-status".format(
+ "[name='%s']" % cloud_name if cloud_name is not None else ''
+ )
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path)
+ path_entry = RwCloudYang.CloudAcc.schema().keyspec_to_entry(ks_path)
cloud_account_name = path_entry.key00.name
self._log.debug("Got show cloud connection status request: %s", ks_path.create_string())
for account in saved_accounts:
connection_status = account.connection_status
self._log.debug("Responding to cloud connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
+ xpath = self._project.add_project(get_xpath())
+ self._regh = yield from self._dts.register(
+ xpath=xpath,
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
flags=rwdts.Flag.PUBLISHER,
def on_prepare(xact_info, action, ks_path, msg):
if not msg.has_field("cloud_account"):
raise CloudAccountNotFound("Cloud account name not provided")
-
cloud_account_name = msg.cloud_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cloud_accounts[cloud_account_name]
except KeyError:
- raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name)
+ errmsg = "Cloud account name {} not found in project {}". \
+ format(cloud_account_name, self._project.name)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ get_xpath(),
+ errmsg)
+ raise CloudAccountNotFound(errmsg)
account.start_validate_credentials(self._loop)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._rpc = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+        self._rpc.deregister()
+        self._regh.deregister()
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class ConfigAgentCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
class ConfigAgentSubscriber(object):
XPATH = "C,/rw-config-agent:config-agent/account"
- def __init__(self, dts, log, config_callbacks):
+ def __init__(self, dts, log, project, config_callbacks):
self._dts = dts
self._log = log
+ self._project = project
self._reg = None
self.accounts = {}
self.delete_account(account_msg)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("De-register config agent handler for project {}".
+ format(self._project.name))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
def apply_config(dts, acg, xact, action, _):
self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Config Account config using xpath: %s",
- ConfigAgentSubscriber.XPATH,
- )
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
+ xpath = self._project.add_project(ConfigAgentSubscriber.XPATH)
+ self._log.debug("Registering for Config Account config using xpath: %s",
+ xpath)
self._reg = acg.register(
- xpath=ConfigAgentSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
on_prepare=on_prepare,
)
RwDts as rwdts)
import rift.tasklets
-
import rift.mano.utils.juju_api as juju
)
class CfgAgentDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self.cfg_agent_accounts = {}
+ self._show_reg = None
+ self._rpc_reg = None
def add_cfg_agent_account(self, account_msg):
account = ConfigAgentAccount(self._log, account_msg)
for account in saved_accounts:
connection_status = account.connection_status
self._log.debug("Responding to config agent connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare),
- flags=rwdts.Flag.PUBLISHER,
- )
+ xpath = self._project.add_project(get_xpath())
+ self._show_reg = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
def _register_validate_rpc(self):
def get_xpath():
raise ConfigAgentAccountNotFound("Config Agent account name not provided")
cfg_agent_account_name = msg.cfg_agent_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cfg_agent_accounts[cfg_agent_account_name]
except KeyError:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc_reg = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+ def deregister(self):
+ self._show_reg.deregister()
+ self._rpc_reg.deregister()
+
+
class ConfigAgentJob(object):
"""A wrapper over the config agent job object, providing some
convenience functions.
- YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains
+ YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob contains
||
==> VNFRS
||
"running" : "pending",
"failed" : "failure"}
- def __init__(self, nsr_id, job, tasks=None):
+ def __init__(self, nsr_id, job, project, tasks=None):
"""
Args:
nsr_id (uuid): ID of NSR record
- job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
+ job (YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
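+            project: project instance that owns this job; used to build the
+                project-rooted job xpath (see the xpath property)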
tasks: List of asyncio.tasks. If provided the job monitor will
use it to monitor the tasks instead of the execution IDs
"""
self._job = job
self.nsr_id = nsr_id
self.tasks = tasks
+ self._project = project
+
self._regh = None
@property
@property
def xpath(self):
"""Xpath of the job"""
- return ("D,/nsr:ns-instance-opdata" +
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
"/nsr:nsr[nsr:ns-instance-config-ref='{}']" +
"/nsr:config-agent-job[nsr:job-id='{}']"
- ).format(self.nsr_id, self.id)
+ ).format(self.nsr_id, self.id))
@property
def regh(self):
@staticmethod
-    def convert_rpc_input_to_job(nsr_id, rpc_output, tasks):
+    def convert_rpc_input_to_job(nsr_id, rpc_output, tasks, project):
"""A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive
- to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
+ to YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
Args:
nsr_id (uuid): NSR ID
ConfigAgentJob
"""
# Shortcuts to prevent the HUUGE names.
- CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob
- CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
- CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
- CfgAgentPrimitiveParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
+ CfgAgentJob = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob
+ CfgAgentVnfr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
+ CfgAgentPrimitive = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
+ CfgAgentPrimitiveParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
job = CfgAgentJob.from_dict({
"job_id": rpc_output.job_id,
job.vnfr.append(vnfr_job)
- return ConfigAgentJob(nsr_id, job, tasks)
+ return ConfigAgentJob(nsr_id, job, project, tasks)
class ConfigAgentJobMonitor(object):
self._regh = None
self._nsr_regh = None
+ self._project = cfgm.project
@property
def regh(self):
-    @staticmethod
-    def cfg_job_xpath(nsr_id, job_id):
+    def cfg_job_xpath(self, nsr_id, job_id):
- return ("D,/nsr:ns-instance-opdata" +
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
"/nsr:nsr[nsr:ns-instance-config-ref = '{}']" +
- "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id)
+ "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id))
@asyncio.coroutine
def register(self):
""" prepare callback from dts """
xpath = ks_path.to_xpath(RwNsrYang.get_schema())
if action == rwdts.QueryAction.READ:
- schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema()
+ schema = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
nsr_id = path_entry.key00.ns_instance_config_ref
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._project.add_project(
+ CfgAgentJobDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
@asyncio.coroutine
def register_for_nsr(self):
except Exception as e:
self._log.error("Failed to register for NSR changes as %s", str(e))
+ def deregister(self):
+ self._log.debug("De-register config agent job for project".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+ if self._nsr_regh:
+ self._nsr_regh.deregister()
+ self._nsr_regh = None
+
class ConfigAgentJobManager(object):
"""A central class that manager all the Config Agent related data,
TODO: Needs to support multiple config agents.
"""
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
"""
Args:
dts : Dts handle
self.log = log
self.loop = loop
self.nsm = nsm
+ self.project = project
self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self)
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def add_job(self, rpc_output, tasks=None):
- """Once an RPC is trigger add a now job
+ """Once an RPC is triggered, add a new job
Args:
rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output
"""
nsr_id = rpc_output.nsr_id_ref
- job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks)
+ job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output,
+ tasks, self.project)
self.log.debug("Creating a job monitor for Job id: {}".format(
rpc_output.job_id))
def register(self):
yield from self.handler.register()
yield from self.handler.register_for_nsr()
+
+ def deregister(self):
+        self.handler.deregister()
@pytest.fixture(scope="function")
def nsd():
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
nsd = catalog.nsd.add()
nsd.id = str(uuid.uuid1())
return nsd
@pytest.fixture(scope="function")
def vnfd():
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = catalog.vnfd.add()
vnfd.id = str(uuid.uuid1())
return vnfd
cidr: 10.10.10.2/30
"""
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
expected_vnfd = catalog.vnfd.add()
vnf_config = expected_vnfd.vnf_configuration
expected_vnfd.id = vnfd.id
Vlan ID: '3000'
"""
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
expected_nsd = catalog.nsd.add()
expected_nsd.id = nsd.id
expected_nsd.service_primitive.add().from_dict(
"""A common class to hold the barebone objects to build a publisher or
subscriber
"""
- def __init__(self, log, dts, loop):
+ def __init__(self, log, dts, loop, project):
"""Constructor
Args:
loop : Asyncio event loop.
"""
# Reg handle
- self.reg = None
- self.log = log
- self.dts = dts
- self.loop = loop
+ self._reg = None
+ self._log = log
+ self._dts = dts
+ self._loop = loop
+ self._project = project
+
+ @property
+ def reg(self):
+ return self._reg
+
+ @reg.setter
+ def reg(self, val):
+ self._reg = val
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @property
+ def project(self):
+ return self._project
+
+ def deregister(self):
+ self._log.debug("De-registering DTS handler ({}) for project {}".
+ format(self.__class__.__name__, self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
class AbstractRpcHandler(DtsHandler):
"""Base class to simplify RPC implementation
"""
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project=None):
+ super().__init__(log, dts, loop, project)
if not asyncio.iscoroutinefunction(self.callback):
raise ValueError('%s has to be a coroutine' % (self.callback))
def on_prepare(self, xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+ if self.project and not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
rpc_op = yield from self.callback(ks_path, msg)
xact_info.respond_xpath(
@asyncio.coroutine
def register(self):
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
reg_event = asyncio.Event(loop=self.loop)
@asyncio.coroutine
yield from reg_event.wait()
+ def deregister(self):
+ self.reg.deregister()
+ self.reg = None
+
@abc.abstractmethod
@asyncio.coroutine
def callback(self, ks_path, msg):
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from gi.repository import (RwDts as rwdts, ProtobufC)
import rift.tasklets
+from rift.mano.utils.project import (
+ get_add_delete_update_cfgs,
+ )
from ..core import DtsHandler
"""A common class for all subscribers.
"""
@classmethod
- def from_tasklet(cls, tasklet, callback=None):
+ def from_project(cls, proj, callback=None):
"""Convenience method to build the object from tasklet
Args:
- tasklet (rift.tasklets.Tasklet): Tasklet
+ proj (rift.mano.utils.project.ManoProject): Project
callback (None, optional): Callable, which will be invoked on
subscriber changes.
msg: The Gi Object msg from DTS
action(rwdts.QueryAction): Action type
"""
- return cls(tasklet.log, tasklet.dts, tasklet.loop, callback=callback)
+ return cls(proj.log, proj.dts, proj.loop, proj, callback=callback)
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
self.callback = callback
def get_reg_flags(self):
"""Default set of REG flags, can be over-ridden by sub classes.
-
+
Returns:
Set of rwdts.Flag types.
"""
Opdata subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
@abc.abstractmethod
def get_xpath(self):
def register(self):
"""Triggers the registration
"""
+
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
xacts = {}
def on_commit(xact_info):
)
self.reg = yield from self.dts.register(
- xpath=self.get_xpath(),
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
handler=handler)
assert self.reg is not None
- def deregister(self):
- self.reg.deregister()
-
class AbstractConfigSubscriber(SubscriberDtsHandler):
"""Abstract class that simplifies the process of creating subscribers
Config subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
KEY = "msgs"
def key_name(self):
pass
- def get_add_delete_update_cfgs(self, dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
- add_cfgs, delete_cfgs, update_cfgs = self.get_add_delete_update_cfgs(
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
dts_member_reg=self.reg,
xact=xact,
key_name=self.key_name())
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self.dts.appconf_group_create(handler=acg_hdl) as acg:
self.reg = acg.register(
- xpath=self.get_xpath(),
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
on_prepare=on_prepare)
-
- def deregister(self):
- self.reg.deregister()
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
class NsdCatalogSubscriber(core.AbstractConfigSubscriber):
return "id"
def get_xpath(self):
- return "C,/nsd:nsd-catalog/nsd:nsd"
+ return self._project.add_project("C,/nsd:nsd-catalog/nsd:nsd")
class NsInstanceConfigSubscriber(core.AbstractConfigSubscriber):
return "id"
def get_xpath(self):
- return "C,/nsr:ns-instance-config/nsr:nsr"
+ return self._project.add_project("C,/nsr:ns-instance-config/nsr:nsr")
return "name"
def get_xpath(self):
- return("C,/rw-launchpad:resource-orchestrator")
\ No newline at end of file
+ return self._project.add_project("C,/rw-launchpad:resource-orchestrator")
"""
KEY = enum.Enum('KEY', 'NSR NSD VNFD VNFR')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
- params = (self.log, self.dts, self.loop)
+ params = (self.log, self.dts, self.loop, self.project)
self._nsr_sub = ns_subscriber.NsrCatalogSubscriber(*params, callback=self.on_nsr_change)
self._nsrs = {}
yield from self._vnfr_sub.register()
yield from self._nsr_sub.register()
+ def deregister(self):
+ self._log.debug("De-register store for project {}".
+ format(self._project))
+ self._vnfd_sub.deregister()
+ self._nsd_sub.deregister()
+ self._vnfr_sub.deregister()
+ self._nsr_sub.deregister()
+
@asyncio.coroutine
def refresh_store(self, subsriber, store):
itr = yield from self.dts.query_read(subsriber.get_xpath())
def test_vnfd_handler(self):
yield from self.store.register()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
mock_vnfd.id = str(uuid.uuid1())
- w_xpath = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ w_xpath = "C,/rw-project:project/vnfd:vnfd-catalog/vnfd:vnfd"
xpath = "{}[vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
yield from self.publisher.publish(w_xpath, xpath, mock_vnfd)
def test_vnfr_handler(self):
yield from self.store.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
def test_nsr_handler(self):
yield from self.store.register()
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
mock_nsr.ns_instance_config_ref = str(uuid.uuid1())
mock_nsr.name_ref = "Foo"
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
xpath = "{}[nsr:ns-instance-config-ref='{}']".format(w_xpath, mock_nsr.ns_instance_config_ref)
yield from self.publisher.publish(w_xpath, xpath, mock_nsr)
def test_nsd_handler(self):
yield from self.store.register()
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
mock_nsd.id = str(uuid.uuid1())
- w_xpath = "C,/nsd:nsd-catalog/nsd:nsd"
+ w_xpath = "C,/rw-project:project/nsd:nsd-catalog/nsd:nsd"
xpath = "{}[nsd:id='{}']".format(w_xpath, mock_nsd.id)
yield from self.publisher.publish(w_xpath, xpath, mock_nsd)
# publish
yield from vnf_handler.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
def mon_xpath(param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
+ return("D,/rw-project:project/vnfr:vnfr-catalog" +
"/vnfr:vnfr[vnfr:id='{}']".format(mock_vnfr.id) +
"/vnfr:monitoring-param" +
("[vnfr:id='{}']".format(param_id) if param_id else ""))
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
- mock_param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ mock_param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
"id": "1"
})
mock_vnfr.monitoring_param.append(mock_param)
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ return self.project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr")
class VnfdCatalogSubscriber(core.AbstractConfigSubscriber):
return "id"
def get_xpath(self):
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ return self.project.add_project("C,/vnfd:vnfd-catalog/vnfd:vnfd")
# TODO(Philip): Harcoding for now, need to make this generic
def get_xpath(self):
- xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
+ xpath = '/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
return xpath
def get_dict_output(self):
if use_gi:
try:
- nsd_cat = RwNsdYang.YangData_Nsd_NsdCatalog()
+ nsd_cat = RwNsdYang.YangData_RwProject_Project_NsdCatalog()
nsd = nsd_cat.nsd.add()
nsd.id = nsd_id
nsd.name = self.metadata['name']
if use_gi:
for param in self.parameters:
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath=param.get_xpath(),
)
)
format(self.name, self.properties))
def generate_yang_model_gi(self, nsd, vnfds):
- vnfd_cat = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+ vnfd_cat = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = vnfd_cat.vnfd.add()
props = convert_keys_to_python(self.properties)
try:
# limitations under the License.
#
+import abc
+import asyncio
+import logging
+
+import gi
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+ RwProjectYang,
+ RwDts as rwdts,
+ ProtobufC,
+)
+
+import rift.tasklets
+
class ManoProjectError(Exception):
pass
pass
+class ManoProjXpathNotRootErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathPresentErr(ManoProjectError):
+ pass
+
+
+NS = 'rw-project'
+PROJECT = 'project'
+NS_PROJECT = '{}:{}'.format(NS, PROJECT)
+XPATH = '/{}'.format(NS_PROJECT)
+XPATH_LEN = len(XPATH)
+
+NAME = 'name'
+NAME_LEN = len(NAME)
+NS_NAME = '{}:{}'.format(NS, NAME)
+
+DEFAULT_PROJECT = 'default'
+DEFAULT_PREFIX = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ DEFAULT_PROJECT)
+
+
class ManoProject(object):
'''Class to handle the project name'''
- NS = 'rw-project'
- XPATH = '/{}:project'.format(NS)
- XPATH_LEN = len(XPATH)
-
- NAME = 'name'
- NAME_LEN = len(NAME)
- NS_NAME = '{}:{}'.format(NS, NAME)
+ log = None
@classmethod
- def create_from_xpath(cls, xpath, log):
+ def instance_from_xpath(cls, xpath, log):
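+        """Create a ManoProject instance from an xpath that carries the
+        project key (e.g. /rw-project:project[rw-project:name='...']/...).
+
+        Returns None if no project name could be extracted.
+        """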
name = cls.from_xpath(xpath, log)
if name is None:
return None
def from_xpath(cls, xpath, log):
log.debug("Get project name from {}".format(xpath));
- if cls.XPATH in xpath:
- idx = xpath.find(cls.XPATH) + cls.XPATH_LEN
+ if XPATH in xpath:
+ idx = xpath.find(XPATH)
if idx == -1:
msg = "Project not found in XPATH: {}".format(xpath)
log.error(msg)
raise ManoProjXpathNoProjErr(msg)
- sub = xpath[idx:]
- sub = sub.strip()
- if (len(sub) < cls.NAME_LEN) or (sub[0] != '['):
+ sub = xpath[idx+XPATH_LEN:].strip()
+ if (len(sub) < NAME_LEN) or (sub[0] != '['):
msg = "Project name not found in XPath: {}".format(xpath)
log.error(msg)
raise ManoProjXpathKeyErr(msg)
sub = sub[1:].strip()
- idx = sub.find(cls.NS_NAME)
+ idx = sub.find(NS_NAME)
if idx == -1:
- idx = sub.find(cls.NAME)
+ idx = sub.find(NAME)
if idx != 0:
msg = "Project name not found in XPath: {}".format(xpath)
log.error(msg)
log.error(msg)
raise ManoProjXpathKeyErr(msg)
- sub = sub[:idx-1].strip()
+ sub = sub[:idx].strip()
try:
+ log.debug("Key and value found: {}".format(sub))
k, n = sub.split("=", 2)
- name = n.strip()
+ name = n.strip(' \'"')
if name is None:
msg = "Project name is empty in XPath".format(xpath)
log.error(msg)
.format(xpath, e)
log.exception(msg)
raise ManoProjXpathKeyErr(msg)
+ else:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ @classmethod
+ def get_log(cls):
+        if not cls.log:
+            cls.log = logging.getLogger('rw-mano-log.rw-project')
+            cls.log.setLevel(logging.ERROR)
+        return cls.log
+
+ @classmethod
+ def prefix_project(cls, xpath, project=None, log=None):
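+        """Return the xpath prefixed with the project key (a sketch of the
+        intended transformation, based on the logic below):
+
+            'C,/rw-cloud:cloud/account'  with project 'default' becomes
+            "C,/rw-project:project[rw-project:name='default']/rw-cloud:cloud/account"
+
+        Raises ManoProjXpathNotRootErr for non-rooted xpaths and
+        ManoProjXpathPresentErr if a different project is already present.
+        """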
+ if log is None:
+ log = cls.get_log()
+
+ if project is None:
+ project = DEFAULT_PROJECT
+ proj_prefix = DEFAULT_PREFIX
+ else:
+ proj_prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ project)
+
+ log.debug("Add project {} to {}".format(project, xpath))
+
+ prefix = ''
+ suffix = xpath
+ idx = xpath.find('C,/')
+ if idx == -1:
+ idx = xpath.find('D,/')
+
+ if idx != -1:
+ prefix = xpath[:2]
+ suffix = xpath[2:]
+
+ if suffix[0] != '/':
+ msg = "Non-rooted xpath provided: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNotRootErr(msg)
+
+ idx = suffix.find(XPATH)
+ if idx == 0:
+ name = cls.from_xpath(xpath, log)
+ if name == project:
+ log.warning("Project already in the XPATH: {}".format(xpath))
+ return xpath
+
+ else:
+ msg = "Different project {} already in XPATH {}". \
+ format(name, xpath)
+ log.error(msg)
+ raise ManoProjXpathPresentErr(msg)
+
+ ret = prefix + proj_prefix + suffix
+ log.debug("XPath with project: {}".format(ret))
+ return ret
+
- def __init__(self, log, name=None):
+ def __init__(self, log, name=None, tasklet=None):
self._log = log
- self._name = name
+ self._name = None
+ self._prefix = None
+ self._pbcm = None
+ self._tasklet = None
+ self._dts = None
+ self._loop = None
+ self._log_hdl = None
+
+ # Track if the apply config was received
+ self._apply = False
+
+ if name:
+ self.name = name
+
+ def update(self, tasklet):
+ # Store the commonly used properties from a tasklet
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
@property
def name(self):
return self._name
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def pbcm(self):
+ return self._pbcm
+
+ @property
+ def config(self):
+ return self._pbcm.project_config
+
+ @property
+ def tasklet(self):
+ return self._tasklet
+
+ @property
+ def log_hdl(self):
+ return self._log_hdl
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
@name.setter
def name(self, value):
if self._name is None:
self._name = value
+ self._prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ self._name)
+ self._pbcm = RwProjectYang.YangData_RwProject_Project(
+ name=self._name)
+
+ elif self._name == value:
+ self._log.debug("Setting the same name again for project {}".
+ format(value))
else:
msg = "Project name already set to {}".format(self._name)
self._log.error(msg)
raise ManoProjNameSetErr(msg)
def set_from_xpath(self, xpath):
- self.name = ManoProject.get_from_xpath(xpath, self._log)
+ self.name = ManoProject.from_xpath(xpath, self._log)
+
+ def add_project(self, xpath):
+ return ManoProject.prefix_project(xpath, log=self._log, project=self._name)
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def delete_prepare(self):
+ self._log.debug("Delete prepare for project {}".format(self._name))
+ return True
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def register(self):
+ msg = "Register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ @abc.abstractmethod
+ def deregister(self):
+ msg = "De-register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ def rpc_check(self, msg, xact_info=None):
+ '''Check if the rpc is for this project'''
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ project = DEFAULT_PROJECT
+
+ if project != self.name:
+ self._log.debug("Project {}: RPC is for different project {}".
+ format(self.name, project))
+ if xact_info:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return False
+
+ return True
+
+ @asyncio.coroutine
+ def create_project(self, dts):
+ proj_xpath = "C,{}/project-config".format(self.prefix)
+ self._log.info("Creating project: {} with {}".
+ format(proj_xpath, self.config.as_dict()))
+
+ yield from dts.query_create(proj_xpath,
+ rwdts.XactFlag.ADVISE,
+ self.config)
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+ #TODO: Check why this is getting called during project delete
+ if not dts_member_reg:
+ return [], [], []
+
+    # Unfortunately, it is currently difficult to figure out exactly what has
+ # changed in this xact without Pbdelta support (RIFT-4916)
+ # As a workaround, we can fetch the pre and post xact elements and
+ # perform a comparison to figure out adds/deletes/updates
+ xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+ curr_cfgs = list(dts_member_reg.elements)
+
+ xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+ curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+ # Find Adds
+ added_keys = set(xact_key_map) - set(curr_key_map)
+ added_cfgs = [xact_key_map[key] for key in added_keys]
+
+ # Find Deletes
+ deleted_keys = set(curr_key_map) - set(xact_key_map)
+ deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+ # Find Updates
+ updated_keys = set(curr_key_map) & set(xact_key_map)
+ updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+ return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ProjectConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_add_prepare=None,
+ on_delete_apply=None, on_delete_prepare=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_add_prepare = on_add_prepare
+ self.on_delete_apply = on_delete_apply
+ self.on_delete_prepare = on_delete_prepare
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+ for f in ('on_add_prepare', 'on_delete_prepare'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, prepare_noop)
+ continue
+
+ if not asyncio.iscoroutinefunction(ref):
+ raise ValueError("%s must be a coroutine" % f)
+
+
+class ProjectDtsHandler(object):
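+    """DTS config subscriber for project create/delete/update.
+
+    Registers at C,/rw-project:project/project-config and maps config
+    transactions onto the ProjectConfigCallbacks supplied by the caller
+    (typically a ProjectHandler).
+    """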
+ XPATH = "C,{}/project-config".format(XPATH)
+
+ def __init__(self, dts, log, callbacks):
+ self._dts = dts
+ self._log = log
+ self._callbacks = callbacks
+
+        self._reg = None
+ self.projects = []
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def add_project(self, name):
+ self.log.info("Adding project: {}".format(name))
+
+ if name not in self.projects:
+ self._callbacks.on_add_apply(name)
+ self.projects.append(name)
+ else:
+ self.log.error("Project already present: {}".
+ format(name))
+
+ def delete_project(self, name):
+ self._log.info("Deleting project: {}".format(name))
+ if name in self.projects:
+ self._callbacks.on_delete_apply(name)
+ self.projects.remove(name)
+ else:
+ self.log.error("Unrecognized project: {}".
+ format(name))
+
+ def update_project(self, name):
+ """ Update an existing project
+
+ Currently, we do not take any action on MANO for this,
+ so no callbacks are defined
+
+ Arguments:
+ msg - The project config message
+ """
+ self._log.info("Updating project: {}".format(name))
+ if name in self.projects:
+ pass
+ else:
+ self.log.error("Unrecognized project: {}".
+ format(name))
+
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got project apply config (xact: %s) (action: %s)", xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("Project being re-added after restart.")
+                        self.add_project(cfg.name_ref)
+ else:
+ # When RIFT first comes up, an INSTALL is called with the current config
+                    # Since confd doesn't actually persist data, this never has any data, so
+ # skip this for now.
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="name_ref",
+ )
+
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_project(cfg.name_ref)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.add_project(cfg.name_ref)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_project(cfg.name_ref)
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ # xpath = ks_path.to_xpath(RwProjectYang.get_schema())
+ # name = ManoProject.from_xpath(xpath, self._log)
+ # if not name:
+ # self._log.error("Did not find the project name in ks: {}".
+ # format(xpath))
+ # xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ # return
+
+ action = xact_info.query_action
+ name = msg.name_ref
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ name, xact_info.query_action, msg)
+
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ if name in self.projects:
+ self._log.debug("Project {} already exists. Ignore request".
+ format(name))
+
+ else:
+ self._log.debug("Project {}: Invoking on_prepare add request".
+ format(name))
+ yield from self._callbacks.on_add_prepare(name)
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire project got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if name in self.projects:
+ rc = yield from self._callbacks.on_delete_prepare(name)
+ if not rc:
+ self._log.error("Project {} should not be deleted".
+ format(name))
+                            xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                            return
+ else:
+ self._log.warning("Delete on unknown project: {}".
+ format(name))
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for project config using xpath: %s",
+ ProjectDtsHandler.XPATH,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=ProjectDtsHandler.XPATH,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
+
+class ProjectHandler(object):
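+    """Per-tasklet project bookkeeping.
+
+    Creates one instance of project_class for every configured project
+    (kept in tasklet.projects) and drives its register()/deregister()
+    through the project config callbacks defined below.
+    """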
+ def __init__(self, tasklet, project_class, **kw):
+ self._tasklet = tasklet
+ self._log = tasklet.log
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+ self._class = project_class
+ self._kw = kw
+
+ self._log.debug("Creating project config handler")
+ self.project_cfg_handler = ProjectDtsHandler(
+ self._dts, self._log,
+ ProjectConfigCallbacks(
+ on_add_apply=self.on_project_added,
+ on_add_prepare=self.on_add_prepare,
+ on_delete_apply=self.on_project_deleted,
+ on_delete_prepare=self.on_delete_prepare,
+ )
+ )
+
+ def _get_tasklet_name(self):
+ return self._tasklet.tasklet_info.instance_name
+
+ def _get_project(self, name):
+ try:
+ proj = self._tasklet.projects[name]
+ except Exception as e:
+ self._log.exception("Project {} ({})not found for tasklet {}: {}".
+ format(name, list(self._tasklet.projects.keys()),
+ self._get_tasklet_name(), e))
+ raise e
+
+ return proj
+
+ def on_project_deleted(self, name):
+ self._log.debug("Project {} deleted".format(name))
+ try:
+ self._get_project(name).deregister()
+ except Exception as e:
+ self._log.exception("Project {} deregister for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ proj = self._tasklet.projects.pop(name)
+ del proj
+ except Exception as e:
+ self._log.exception("Project {} delete for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ def on_project_added(self, name):
+ self._log.debug("Project {} added to tasklet {}".
+ format(name, self._get_tasklet_name()))
+ self._get_project(name)._apply = True
+
+ @asyncio.coroutine
+ def on_add_prepare(self, name):
+ self._log.debug("Project {} to be added to {}".
+ format(name, self._get_tasklet_name()))
+
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet, **(self._kw))
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+                                format(name, self._get_tasklet_name(), e))
+
+ try:
+ yield from self._get_project(name).register()
+ except Exception as e:
+ self._log.exception("Project {} register for tasklet {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ @asyncio.coroutine
+ def on_delete_prepare(self, name):
+ self._log.debug("Project {} being deleted for tasklet {}".
+ format(name, self._get_tasklet_name()))
+ rc = yield from self._get_project(name).delete_prepare()
+ return rc
+
+ def register(self):
+ self.project_cfg_handler.register()
self.inputs.append({
self.NAME:
self.map_yang_name_to_tosca(
- val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
+ val.replace('/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
if len(param):
self.log.warn(_("{0}, Did not process the following for "
"input param {1}: {2}").
--- /dev/null
+#!/usr/bin/env python3
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import unittest
+import xmlrunner
+
+from rift.mano.utils import project
+
+NAME = 'test'
+XPATH = "/rw-project:project[rw-project:name='{}']".format(NAME)
+
+class TestCase(unittest.TestCase):
+ log = None
+
+ @classmethod
+ def set_logger(cls, log):
+ cls.log = log
+
+ def setUp(self):
+ if not TestCase.log:
+            log = logging.getLogger()
+            log.setLevel(logging.ERROR)
+            TestCase.set_logger(log)
+
+ def test_create_from_xpath(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+        proj = project.ManoProject.instance_from_xpath(XPATH, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+        obj = project.ManoProject.instance_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log, name=NAME)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+        obj = project.ManoProject.instance_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create_update(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. Set project name later
+ 3. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log)
+ assert proj
+ assert None == proj.name
+
+ proj.name = NAME
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ try:
+ proj.name = 'testing'
+ except project.ManoProjNameSetErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+        obj = project.ManoProject.instance_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_update_from_xpath(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. Update from XPATH
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log)
+ assert proj
+ assert proj.name is None
+
+ proj.set_from_xpath(XPATH)
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ try:
+ proj.set_from_xpath(XPATH)
+ except project.ManoProjNameSetErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+        obj = project.ManoProject.instance_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create_from_xpath1(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ xpath = XPATH + '/rw:project/rw-project:project/rw-project:project/rw-project:project/rw-project:project/nsd:nsd-catalog/nsd:nsd[id=\'1232334\']'
+        proj = project.ManoProject.instance_from_xpath(xpath, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ def test_create_from_xpath2(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ xpath = '/rw-project:project [ name = "{}" ]'.format(NAME)
+        proj = project.ManoProject.instance_from_xpath(xpath, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ def test_create_from_xpath_invalid(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/'
+ try:
+            proj = project.ManoProject.instance_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathNoProjErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def test_create_from_xpath_invalid1(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/rw-project:project/{}'.format(NAME)
+ try:
+            proj = project.ManoProject.instance_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathKeyErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def test_create_from_xpath_invalid2(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/rw-project:project[id=test]'
+ try:
+            proj = project.ManoProject.instance_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathKeyErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def tearDown(self):
+ pass
+
+
+def main(argv=sys.argv[1:]):
+ logging.basicConfig(format='TEST %(message)s')
+
+ runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true')
+ parser.add_argument('-n', '--no-runner', action='store_true')
+
+ args, unknown = parser.parse_known_args(argv)
+ if args.no_runner:
+ runner = None
+
+ # Set the global logging level
+ log = logging.getLogger()
+ log.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+ TestCase.set_logger(log)
+
+ # The unittest framework requires a program name, so use the name of this
+ # file instead (we do not want to have to pass a fake program name to main
+ # when this is called from the interpreter).
+ unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+ main()
descr = None
if descr_type == "nsd":
- descr = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descr = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
else:
- descr = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descr = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
if input_format == 'json':
json_str = open(infile).read()
def configure_vld(proxy, vld_xml_hdl):
vld_xml = vld_xml_hdl.read()
logger.debug("Attempting to deserialize XML into VLD protobuf: %s", vld_xml)
- vld = VldYang.YangData_Vld_VldCatalog_Vld()
+ vld = VldYang.YangData_RwProject_Project_VldCatalog_Vld()
vld.from_xml_v2(model, vld_xml)
logger.debug("Sending VLD to netconf: %s", vld)
def configure_vnfd(proxy, vnfd_xml_hdl):
vnfd_xml = vnfd_xml_hdl.read()
logger.debug("Attempting to deserialize XML into VNFD protobuf: %s", vnfd_xml)
- vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ vnfd = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
vnfd.from_xml_v2(model, vnfd_xml)
logger.debug("Sending VNFD to netconf: %s", vnfd)
def configure_nsd(proxy, nsd_xml_hdl):
nsd_xml = nsd_xml_hdl.read()
logger.debug("Attempting to deserialize XML into NSD protobuf: %s", nsd_xml)
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
nsd.from_xml_v2(model, nsd_xml)
logger.debug("Sending NSD to netconf: %s", nsd)
if output_format == 'json':
with open('%s/%s.json' % (outdir, self.name), "w") as fh:
fh.write(simplejson.dumps(desc, indent=4))
- elif output_format.strip() == 'yaml':
+ elif output_format == 'yaml':
with open('%s/%s.yaml' % (outdir, self.name), "w") as fh:
fh.write(yaml.dump(desc, default_flow_style=False))
- elif output_format.strip() == 'xml':
+ elif output_format == 'xml':
# Converting from dict to xml does not provide the
# required output. So using the PBCM to_xml and then
# printing only from the catalog tag.
internal_cp.name = cp_name + "/icp{}".format(i)
internal_cp.id = cp_name + "/icp{}".format(i)
internal_cp.type_yang = 'VPORT'
- ivld_cp = internal_vlds[i].internal_connection_point_ref.add()
+ ivld_cp = internal_vlds[i].internal_connection_point.add()
ivld_cp.id_ref = internal_cp.id
internal_interface = vdu.internal_interface.add()
group_desc.min_instance_count = scale_group.min_count
for vnfd, count in scale_group.vnfd_count_map.items():
member = group_desc.vnfd_member.add()
- member.member_vnf_index_ref = vnfd_index_map[vnfd]
+ member.member_vnf_index_ref = str(vnfd_index_map[vnfd])
member.count = count
for trigger in scale_group.config_action:
@classmethod
def from_xml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
descriptor.from_xml_v2(RiftNSD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_yaml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
descriptor.from_yaml(RiftNSD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_dict(cls, nsd_dict):
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(nsd_dict)
+ descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict(nsd_dict)
return cls(descriptor)
@classmethod
def from_xml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
descriptor.from_xml_v2(RiftVNFD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_yaml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
descriptor.from_yaml(RiftVNFD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_dict(cls, vnfd_dict):
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(vnfd_dict)
+ descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict(vnfd_dict)
return cls(descriptor)
}
leaf http-endpoint-ref {
- type leafref {
- path "../../http-endpoint/path";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../http-endpoint/path";
+ // }
+ type string;
}
leaf json-query-method {
leaf member-vnf-index-ref {
description "Reference to member-vnf within constituent-vnfds";
- type leafref {
- path "../../../constituent-vnfd/member-vnf-index";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../../constituent-vnfd/member-vnf-index";
+ // }
+ type uint64;
}
leaf vnfd-id-ref {
leaf vnfd-connection-point-ref {
description "A reference to a connection point name";
- type leafref {
- path "../../../../../../vnfd:vnfd-catalog/vnfd:vnfd" +
- "[vnfd:id = current()/../vnfd-id-ref]/" +
- "vnfd:connection-point/vnfd:name";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../../../../../vnfd:vnfd-catalog/vnfd:vnfd" +
+ // "[vnfd:id = current()/../vnfd-id-ref]/" +
+ // "vnfd:connection-point/vnfd:name";
+ // }
+ type string;
}
}
}
leaf vnfd-id-ref {
description
"Identifier for the VNFD.";
- type leafref {
- path "../../../../../vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../../../../vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ // }
+ type string;
}
leaf start-by-default {
leaf member-vnf-index-ref {
description "member VNF index of this member VNF";
- type leafref {
- path "../../../constituent-vnfd/member-vnf-index";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../../constituent-vnfd/member-vnf-index";
+ // }
+ type uint64;
}
leaf vnfd-id-ref {
description
"Identifier for the VNFD.";
- type leafref {
- path "../../../../../../vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
- }
+ // TODO (Philip): Fix this
+ // type leafref {
+ // path "../../../../../../vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ // }
+ type string;
}
}
}
namespace "http://riftio.com/ns/riftware-1.0/rw-project";
prefix "rw-project";
+ import rw-pb-ext {
+ prefix "rw-pb-ext";
+ }
+
revision 2017-02-08 {
description
"Initial revision. This YANG file defines the
description "Project management related configuration.";
+ leaf name-ref {
+ type leafref {
+ path "../../name";
+ }
+ mandatory true;
+ }
+
// list user {
// description
// "The list of Users who have been assigned Roles within this
"""
Implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
"""
- def __init__(self, dts, log, loop, account):
- riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+ def __init__(self, dts, log, loop, project, account):
+ riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log,
+ loop, project, account)
self._name = account.name
self._type = riftcm_config_plugin.DEFAULT_CAP_TYPE
self._rift_install_dir = os.environ['RIFT_INSTALL']
GET_NS_CONF_XPATH = "I,/nsr:get-ns-service-primitive-values"
GET_NS_CONF_O_XPATH = "O,/nsr:get-ns-service-primitive-values"
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsm = nsm
self._ns_regh = None
self._vnf_regh = None
self._get_ns_conf_regh = None
- self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop, nsm)
+ self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop,
+ project, nsm)
self._rift_install_dir = os.environ['RIFT_INSTALL']
self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
""" Return the NS manager instance """
return self._nsm
+ def deregister(self):
+ self._log.debug("De-register conman rpc handlers for project {}".
+ format(self._project))
+ for reg in self.reghs:
+ if reg:
+ reg.deregister()
+ reg = None
+
+ self.job_manager.deregister()
+
def prepare_meta(self, rpc_ip):
try:
def on_ns_config_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts exec-ns-service-primitive"""
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
rpc_ip = msg
rpc_op = NsrYang.YangOutput_Nsr_ExecNsServicePrimitive.from_dict({
"triggered_by": rpc_ip.triggered_by,
@asyncio.coroutine
def on_get_ns_config_values_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
nsr_id = msg.nsr_id_ref
cfg_prim_name = msg.name
try:
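Both prepare callbacks above gate on the project before acting on the RPC. A minimal, standalone sketch of what such a guard can look like, assuming rpc_check() simply compares an optional project_name field on the RPC input against the owning project and rejects mismatches; the real rift.mano.utils.project implementation may differ.

    # Illustrative only -- a stand-in for the assumed behaviour of ManoProject.rpc_check().
    class ProjectRpcGuard(object):
        DEFAULT_PROJECT = "default"

        def __init__(self, name, log):
            self.name = name
            self._log = log

        def rpc_check(self, msg, xact_info=None):
            """Return True when the RPC targets this project, False otherwise."""
            # RPCs without an explicit project are assumed to target the default project.
            requested = getattr(msg, "project_name", None) or self.DEFAULT_PROJECT
            if requested != self.name:
                self._log.debug("Ignoring RPC for project %s (this handler owns %s)",
                                requested, self.name)
                # A real handler would NACK the transaction here, e.g.:
                # xact_info.respond_xpath(rwdts.XactRspCode.NACK)
                return False
            return True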
"""
Juju implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
"""
- def __init__(self, dts, log, loop, account):
- riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+ def __init__(self, dts, log, loop, project, account):
+ riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop,
+ project, account)
self._name = account.name
self._type = 'juju'
self._ip_address = account.juju.ip_address
Agent class for NSR
created for Agents to use objects from NSR
'''
- def __init__(self, nsr_dict, cfg):
+ def __init__(self, nsr_dict, cfg, project):
self._nsr = nsr_dict
self._cfg = cfg
+ self._project = project
self._vnfrs = []
self._vnfrs_msg = []
self._vnfr_ids = {}
if vnfr['id'] in self._vnfr_ids.keys():
agent_vnfr = self._vnfr_ids[vnfr['id']]
else:
- agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg)
+ agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg, self._project)
self._vnfrs.append(agent_vnfr)
self._vnfrs_msg.append(vnfr_msg)
self._vnfr_ids[agent_vnfr.id] = agent_vnfr
'''
Agent base class for VNFR processing
'''
- def __init__(self, nsr_name, vnfr_dict, vnfr_msg):
+ def __init__(self, nsr_name, vnfr_dict, vnfr_msg, project):
self._vnfr = vnfr_dict
self._vnfr_msg = vnfr_msg
self._nsr_name = nsr_name
self._configurable = False
+ self._project = project
@property
def nsr_name(self):
@property
def xpath(self):
""" VNFR xpath """
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+ return self._project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".
+ format(self.id))
def set_to_configurable(self):
self._configurable = True
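The xpath rewrite above is the same add_project() call used throughout this patch. A rough sketch of the transformation it appears to perform, with the keyspec prefixes and predicate syntax taken as assumptions from the project-scoped xpaths used elsewhere in the diff:

    # Sketch of the assumed add_project() rewrite; not the actual rift.mano.utils.project code.
    def add_project(xpath, project_name):
        """Insert the rw-project:project container right after the D,/C,/I,/O, keyspec prefix."""
        prefix = "/rw-project:project[rw-project:name='{}']".format(project_name)
        for keyspec in ("D,", "C,", "I,", "O,"):
            if xpath.startswith(keyspec):
                return keyspec + prefix + xpath[len(keyspec):]
        return prefix + xpath

    # add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id='42']", "default")
    # -> "D,/rw-project:project[rw-project:name='default']/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id='42']"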
There will be single instance of this plugin for each plugin type.
"""
- def __init__(self, dts, log, loop, config_agent):
+ def __init__(self, dts, log, loop, project, config_agent):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._config_agent = config_agent
@property
class ConfigAccountHandler(object):
- def __init__(self, dts, log, loop, on_add_config_agent, on_delete_config_agent):
+ def __init__(self, dts, log, loop, project, on_add_config_agent, on_delete_config_agent):
self._log = log
self._dts = dts
self._loop = loop
+ self._project = project
self._on_add_config_agent = on_add_config_agent
self._on_delete_config_agent = on_delete_config_agent
self._log.debug("creating config account handler")
self.cloud_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
- self._dts, self._log,
+ self._dts, self._log, self._project,
rift.mano.config_agent.ConfigAgentCallbacks(
on_add_apply=self.on_config_account_added,
on_delete_apply=self.on_config_account_deleted,
def register(self):
self.cloud_cfg_handler.register()
+ def deregister(self):
+ self.cloud_cfg_handler.deregister()
+
+
class RiftCMConfigPlugins(object):
""" NSM Config Agent Plugins """
def __init__(self):
self._config_plugins = RiftCMConfigPlugins()
self._config_handler = ConfigAccountHandler(
- self._dts, self._log, self._loop, self._on_config_agent, self._on_config_agent_delete)
+ self._dts, self._log, self._loop, parent._project,
+ self._on_config_agent, self._on_config_agent_delete)
self._plugin_instances = {}
self._default_account_added = False
else:
# Otherwise, instantiate a new plugin using the config agent account
self._log.debug("Instantiting new config agent using class: %s", cap_inst)
- new_instance = cap_inst(self._dts, self._log, self._loop, config_agent)
+ new_instance = cap_inst(self._dts, self._log, self._loop,
+ self._ConfigManagerConfig._project, config_agent)
self._plugin_instances[cap_name] = new_instance
# TODO (pjoseph): See why this was added, as this deletes the
for account in config_agents:
self._on_config_agent(account)
+ def deregister(self):
+ self._log.debug("De-registering config agent nsm plugin manager".
+ format(self._ConfigManagerConfig._project))
+ self._config_handler.deregister()
+
def set_config_agent(self, nsr, vnfr, method):
if method == 'juju':
agent_type = 'juju'
self._log = log
self._loop = loop
self._parent = parent
+ self._project = parent._project
+
self._nsr_dict = {}
self.pending_cfg = {}
self.terminate_cfg = {}
self.pending_tasks = [] # Used for NSR id get retry
# (mainly exercised in the restart case)
- self._config_xpath = "C,/cm-config"
- self._opdata_xpath = "D,/rw-conman:cm-state"
+
+ self._config_xpath = self._project.add_project("C,/rw-conman:cm-config")
+ self._opdata_xpath = self._project.add_project("D,/rw-conman:cm-state")
self.cm_config = conmanY.SoConfig()
# RO specific configuration
self.cm_state['states'] = "Initialized"
# Initialize objects to register
- self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts)
+ self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts, self._project)
self._config_agent_mgr = conagent.RiftCMConfigAgent(
self._dts,
self._log,
self.reg_handles = [
self.cmdts_obj,
self._config_agent_mgr,
- RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop,
+ RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop, self._project,
PretendNsm(
self._dts, self._log, self._loop, self)),
]
+ self._op_reg = None
def is_nsr_valid(self, nsr_id):
if nsr_id in self._nsr_dict:
# Initialize all handles that needs to be registered
for reg in self.reg_handles:
yield from reg.register()
-
+
+ def deregister(self):
+ # De-register all reg handles
+ self._log.debug("De-register ConfigManagerConfig for project {}".
+ format(self._project))
+
+ for reg in self.reg_handles:
+ reg.deregister()
+ reg = None
+
+ self._op_reg.delete_element(self._opdata_xpath)
+ self._op_reg.deregister()
+
@asyncio.coroutine
def register_cm_state_opdata(self):
try:
handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- yield from self._dts.register(xpath=self._opdata_xpath,
- handler=handler,
- flags=rwdts.Flag.PUBLISHER)
+ self._op_reg = yield from self._dts.register(xpath=self._opdata_xpath,
+ handler=handler,
+ flags=rwdts.Flag.PUBLISHER)
self._log.info("Successfully registered for opdata(%s)", self._opdata_xpath)
except Exception as e:
self._log.error("Failed to register for opdata as (%s)", e)
if method in vnf_config:
return method
return None
-
+
def get_cfg_file_extension(method, configuration_options):
ext_dict = {
"netconf" : "xml",
try:
if id not in nsr_dict:
- nsr_obj = ConfigManagerNSR(self._log, self._loop, self, id)
+ nsr_obj = ConfigManagerNSR(self._log, self._loop, self, self._project, id)
nsr_dict[id] = nsr_obj
else:
self._log.info("NSR(%s) is already initialized!", id)
# Create Agent NSR class
nsr_config = yield from cmdts_obj.get_nsr_config(id)
self._log.debug("NSR {} config: {}".format(id, nsr_config))
- nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config)
+ nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config, self._project)
try:
yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED)
# return
nsr_obj.set_config_dir(self)
-
+
for const_vnfr in nsr['constituent_vnfr_ref']:
self._log.debug("Fetching VNFR (%s)", const_vnfr['vnfr_id'])
vnfr_msg = yield from cmdts_obj.get_vnfr(const_vnfr['vnfr_id'])
def terminate_NSR(self, id):
nsr_dict = self._nsr_dict
if id not in nsr_dict:
- self._log.error("NSR(%s) does not exist!", id)
+ self._log.debug("NSR(%s) does not exist!", id)
return
else:
- # Remove this NSR if we have it on pending task list
- for task in self.pending_tasks:
- if task['nsrid'] == id:
- self.del_from_pending_tasks(task)
+ try:
+ # Remove this NSR if we have it on pending task list
+ for task in self.pending_tasks:
+ if task['nsrid'] == id:
+ self.del_from_pending_tasks(task)
- # Remove this object from global list
- nsr_obj = nsr_dict.pop(id, None)
+ # Remove this object from global list
+ nsr_obj = nsr_dict.pop(id, None)
- # Remove this NS cm-state from global status list
- self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
+ # Remove this NS cm-state from global status list
+ self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
- # Also remove any scheduled configuration event
- for nsr_obj_p in self._parent.pending_cfg:
- if nsr_obj_p == nsr_obj:
- assert id == nsr_obj_p._nsr_id
- #self._parent.pending_cfg.remove(nsr_obj_p)
- # Mark this as being deleted so we do not try to configure it if we are in cfg_delay (will wake up and continue to process otherwise)
- nsr_obj_p.being_deleted = True
- self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name)
+ # Also remove any scheduled configuration event
+ for nsr_obj_p in self._parent.pending_cfg:
+ if nsr_obj_p == nsr_obj:
+ assert id == nsr_obj_p._nsr_id
+ #self._parent.pending_cfg.remove(nsr_obj_p)
+ # Mark this as being deleted so we do not try to configure
+ # it if we are in cfg_delay (will wake up and continue to process otherwise)
+ nsr_obj_p.being_deleted = True
+ self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name)
- self._parent.remove_nsr_obj(id)
+ self._parent.remove_nsr_obj(id)
- # Call Config Agent to clean up for each VNF
- for agent_vnfr in nsr_obj.agent_nsr.vnfrs:
- yield from self._config_agent_mgr.invoke_config_agent_plugins(
- 'notify_terminate_vnfr',
- nsr_obj.agent_nsr,
- agent_vnfr)
+ # Call Config Agent to clean up for each VNF
+ for agent_vnfr in nsr_obj.agent_nsr.vnfrs:
+ yield from self._config_agent_mgr.invoke_config_agent_plugins(
+ 'notify_terminate_vnfr',
+ nsr_obj.agent_nsr,
+ agent_vnfr)
- # publish delete cm-state (cm-nsr)
- yield from nsr_obj.delete_cm_nsr()
+ # publish delete cm-state (cm-nsr)
+ yield from nsr_obj.delete_cm_nsr()
- #####################TBD###########################
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
+ #####################TBD###########################
+ # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
- self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
+ self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
+
+ except Exception as e:
+ self._log.exception("Terminate NSR exception: {}".format(e))
@asyncio.coroutine
def process_initial_config(self, nsr_obj, conf, script, vnfr_name=None):
class ConfigManagerNSR(object):
- def __init__(self, log, loop, parent, id):
+ def __init__(self, log, loop, parent, project, id):
self._log = log
self._loop = loop
self._rwcal = None
self._cp_dict = {}
self._nsr_id = id
self._parent = parent
+ self._project = project
self._log.info("Instantiated NSR entry for id=%s", id)
self.nsr_cfg_config_attributes_dict = {}
self.vnf_config_attributes_dict = {}
@property
def nsr_opdata_xpath(self):
''' Returns full xpath for this NSR cm-state opdata '''
- return(
- "D,/rw-conman:cm-state" +
- "/rw-conman:cm-nsr[rw-conman:id='{}']"
- ).format(self._nsr_id)
+ return self._project.add_project((
+ "D,/rw-conman:cm-state/rw-conman:cm-nsr[rw-conman:id='{}']"
+ ).format(self._nsr_id))
@property
def vnfrs(self):
class ConfigManagerDTS(object):
''' This class either reads from DTS or publishes to DTS '''
- def __init__(self, log, loop, parent, dts):
+ def __init__(self, log, loop, parent, dts, project):
self._log = log
self._loop = loop
self._parent = parent
self._dts = dts
+ self._project = project
@asyncio.coroutine
- def _read_dts(self, xpath, do_trace=False):
+ def _read_dts(self, path, do_trace=False):
+ xpath = self._project.add_project(path)
self._log.debug("_read_dts path = %s", xpath)
flags = rwdts.XactFlag.MERGE
res_iter = yield from self._dts.query_read(
return cfgagentl
@asyncio.coroutine
- def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update a cm-state (cm-nsr) record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating cm-state %s:%s dts_pub_hdl = %s", path, msg, self.dts_pub_hdl)
self.dts_pub_hdl.update_element(path, msg, flags)
self._log.debug("Updated cm-state, %s:%s", path, msg)
@asyncio.coroutine
- def delete(self, path):
+ def delete(self, xpath):
"""
Delete cm-nsr record in DTS with the path only
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting cm-nsr %s dts_pub_hdl = %s", path, self.dts_pub_hdl)
self.dts_pub_hdl.delete_element(path)
self._log.debug("Deleted cm-nsr, %s", path)
def register(self):
yield from self.register_to_publish()
yield from self.register_for_nsr()
-
+
+ def deregister(self):
+ self._log.debug("De-registering conman config for project {}".
+ format(self._project.name))
+ if self.dts_reg_hdl:
+ self.dts_reg_hdl.deregister()
+ self.dts_reg_hdl = None
+
+ if self.dts_pub_hdl:
+ self.dts_pub_hdl.deregister()
+ self.dts_pub_hdl = None
+
@asyncio.coroutine
def register_to_publish(self):
''' Register to DTS for publishing cm-state opdata '''
- xpath = "D,/rw-conman:cm-state/rw-conman:cm-nsr"
+ xpath = self._project.add_project("D,/rw-conman:cm-state/rw-conman:cm-nsr")
self._log.debug("Registering to publish cm-state @ %s", xpath)
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
@asyncio.coroutine
def register_for_nsr(self):
else:
log_vnf += "{}/".format(vnf_cfg[item])
return log_vnf
-
+
+
class ConfigManagerROifConnectionError(Exception):
pass
+
+
class ScriptError(Exception):
pass
self._log = log
self._loop = loop
self._parent = parent
- self._nsr_xpath = "/cm-state/cm-nsr"
+ self._nsr_xpath = parent._project.add_project("/cm-state/cm-nsr")
@asyncio.coroutine
def register(self):
pass
+ def deregister(self):
+ pass
+
@asyncio.coroutine
def update_vnf_state(self, vnf_cfg, state):
nsr_obj = vnf_cfg['nsr_obj']
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
'''
This file - ConfigManagerTasklet()
|
++
+|
+ConfigManagerProject()
+|
+--|--> ConfigurationManager()
|
+--> rwconman_config.py - ConfigManagerConfig()
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import rwconman_config as Config
from . import rwconman_events as Event
return log_vnf
class ConfigurationManager(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self._log = log
self._loop = loop
self._dts = dts
+ self._project = project
+
self.cfg_sleep = True
self.cfg_dir = os.path.join(os.environ["RIFT_INSTALL"], "etc/conman")
self._config = Config.ConfigManagerConfig(self._dts, self._log, self._loop, self)
self.pending_cfg = []
self.pending_tasks = {}
self._nsr_objs = {}
+ self._task = None # The configuration_handler task
self._handlers = [
self._config,
yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED)
return ret_status
- # Basically, this loop will never end.
- while True:
- # Check the pending tasks are complete
- # Store a list of tasks that are completed and
- # remove from the pending_tasks list outside loop
- ids = []
- for nsr_id, task in self.pending_tasks.items():
- if task.done():
- ids.append(nsr_id)
- e = task.exception()
- if e:
- self._log.error("Exception in configuring nsr {}: {}".
- format(nsr_id, e))
- nsr_obj = self.get_nsr_obj(nsr_id)
- if nsr_obj:
- yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
-
+ try:
+ # Basically, this loop will never end.
+ while True:
+ # Check the pending tasks are complete
+ # Store a list of tasks that are completed and
+ # remove from the pending_tasks list outside loop
+ ids = []
+ for nsr_id, task in self.pending_tasks.items():
+ if task.done():
+ ids.append(nsr_id)
+ e = task.exception()
+ if e:
+ self._log.error("Exception in configuring nsr {}: {}".
+ format(nsr_id, e))
+ nsr_obj = self.get_nsr_obj(nsr_id)
+ if nsr_obj:
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
+
+ else:
+ rc = task.result()
+ self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
else:
- rc = task.result()
- self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
- else:
- self._log.debug("NSR {} still configuring".format(nsr_id))
-
- # Remove the completed tasks
- for nsr_id in ids:
- self.pending_tasks.pop(nsr_id)
-
- # TODO (pjoseph): Fix this
- # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
- # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
- # wrongfully 10 seconds in advance?
- yield from asyncio.sleep(10, loop=self._loop)
-
- if self.pending_cfg:
- # get first NS, pending_cfg is nsr_obj list
- nsr_obj = self.pending_cfg[0]
- nsr_done = False
- if nsr_obj.being_deleted is False:
- # Process this NS, returns back same obj is successfull or exceeded retries
- try:
- self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
-
- # Check if we already have a task running for this NSR
- # Case where we are still configuring and terminate is called
- if nsr_obj.nsr_id in self.pending_tasks:
- self._log.error("NSR {} in state {} has a configure task running.".
- format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
- # Terminate the task for this NSR
- self.pending_tasks[nsr_obj.nsr_id].cancel()
-
- yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
-
- # Call in a separate thread
- self.pending_tasks[nsr_obj.nsr_id] = \
- self._loop.create_task(
- process_nsr_obj(nsr_obj)
- )
-
- # Remove this nsr_obj
- self.pending_cfg.remove(nsr_obj)
-
- except Exception as e:
- self._log.error("Failed to process NSR as %s", str(e))
- self._log.exception(e)
-
+ self._log.debug("NSR {} still configuring".format(nsr_id))
+
+ # Remove the completed tasks
+ for nsr_id in ids:
+ self.pending_tasks.pop(nsr_id)
+
+ # TODO (pjoseph): Fix this
+ # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
+ # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
+ # wrongfully 10 seconds in advance?
+ yield from asyncio.sleep(10, loop=self._loop)
+
+ if self.pending_cfg:
+ # get first NS, pending_cfg is nsr_obj list
+ nsr_obj = self.pending_cfg[0]
+ nsr_done = False
+ if nsr_obj.being_deleted is False:
+ # Process this NS; returns the same obj back if successful or retries are exceeded
+ try:
+ self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
+
+ # Check if we already have a task running for this NSR
+ # Case where we are still configuring and terminate is called
+ if nsr_obj.nsr_id in self.pending_tasks:
+ self._log.error("NSR {} in state {} has a configure task running.".
+ format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
+ # Terminate the task for this NSR
+ self.pending_tasks[nsr_obj.nsr_id].cancel()
+
+ yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
+
+ # Call in a separate thread
+ self.pending_tasks[nsr_obj.nsr_id] = \
+ self._loop.create_task(
+ process_nsr_obj(nsr_obj)
+ )
+
+ # Remove this nsr_obj
+ self.pending_cfg.remove(nsr_obj)
+
+ except Exception as e:
+ self._log.error("Failed to process NSR as %s", str(e))
+ self._log.exception(e)
+
+ except asyncio.CancelledError as e:
+ self._log.debug("Stopped configuration handler for project {}".format(self._project))
@asyncio.coroutine
def register(self):
for reg in self._handlers:
yield from reg.register()
- asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+ self._task = asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+
+ def deregister(self):
+ self._log.debug("De-register conman for project {}".format(self._project.name))
+ self._task.cancel()
+
+ for reg in self._handlers:
+ reg.deregister()
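register() and deregister() above now own the lifetime of the configuration_handler coroutine through the saved asyncio task. The same pattern in isolation, using plain asyncio and the coroutine style of the surrounding code (names here are invented for illustration, no DTS involved):

    # Standalone illustration of the cancellable-background-task pattern used above.
    import asyncio

    class PeriodicWorker(object):
        def __init__(self, loop, interval=1.0):
            self._loop = loop
            self._interval = interval
            self._task = None

        @asyncio.coroutine
        def _run(self):
            try:
                while True:
                    # ... do one unit of work here ...
                    yield from asyncio.sleep(self._interval, loop=self._loop)
            except asyncio.CancelledError:
                pass  # deregister() cancelled us; exit quietly

        def register(self):
            self._task = asyncio.ensure_future(self._run(), loop=self._loop)

        def deregister(self):
            if self._task is not None:
                self._task.cancel()
                self._task = None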
+
+
+class ConfigManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ConfigManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._con_man = None
+
+ @asyncio.coroutine
+ def register(self):
+ self._log.info("Initializing the Configuration-Manager tasklet")
+ self._con_man = ConfigurationManager(self.log,
+ self.loop,
+ self._dts,
+ self,)
+ yield from self._con_man.register()
+
+ def deregister(self):
+ self._log.debug("De-register project {}".format(self.name))
+ self._con_man.deregister()
+
class ConfigManagerTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
self.rwlog.set_category("rw-conman-log")
self._dts = None
- self._con_man = None
+
+ self.project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(ConfigManagerTasklet, self).start()
@asyncio.coroutine
def init(self):
- self._log.info("Initializing the Configuration-Manager tasklet")
- self._con_man = ConfigurationManager(self.log,
- self.loop,
- self._dts)
- yield from self._con_man.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, ConfigManagerProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
self.log.debug("STARTING - test_create_resource_pools")
tinfo = self.new_tinfo('poolconfig')
dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
- pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_records_xpath = "D,/rw-project:project/rw-resource-mgr:resource-pool-records"
account_xpath = "C,/rw-launchpad:cloud-account"
- compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
- network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+ compute_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+ network_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
@asyncio.coroutine
def configure_cloud_account():
log,
dts,
loop,
+ project,
nsr_id,
monp_id,
scaling_criteria,
self.log,
self.dts,
self.loop,
+ project,
self.nsr_id,
self.monp_id,
callback=self.add_value)
log,
dts,
loop,
+ project,
nsr_id,
nsd_id,
scaling_group_name,
self.loop = loop
self.log = log
self.dts = dts
+ self.project = project
self.nsd_id = nsd_id
self.nsr_id = nsr_id
self.scaling_group_name = scaling_group_name
self.log,
self.dts,
self.loop,
+ self.project,
self.nsr_id,
callback=self.handle_nsr_monp)
self.log,
self.dts,
self.loop,
+ self.project,
self.nsr_id,
monp.id,
cri,
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import rift.mano.cloud
import rift.mano.dts as subscriber
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
+class AutoScalerProject(ManoProject, engine.ScalingPolicy.Delegate):
-class AutoScalerTasklet(rift.tasklets.Tasklet, engine.ScalingPolicy.Delegate):
- """The main task of this Tasklet is to listen for NSR changes and once the
- NSR is configured, ScalingPolicy is created.
- """
- def __init__(self, *args, **kwargs):
+ def __init__(self, name, tasklet, **kw):
+ super(AutoScalerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
- try:
- super().__init__(*args, **kwargs)
- self.store = None
- self.monparam_store = None
+ self.store = None
+ self.monparam_store = None
+ self.nsr_sub = None
+ self.nsr_monp_subscribers = {}
+ self.instance_id_store = collections.defaultdict(list)
- self.nsr_sub = None
- self.nsr_monp_subscribers = {}
- self.instance_id_store = collections.defaultdict(list)
-
- except Exception as e:
- self.log.exception(e)
+ self.store = subscriber.SubscriberStore.from_project(self)
+ self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop,
+ self, self.handle_nsr)
- def start(self):
- super().start()
+ def deregister(self):
+ self.log.debug("De-register project {}".format(self.name))
+ self.nsr_sub.deregister()
+ self.store.deregister()
- self.log.debug("Registering with dts")
-
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.store = subscriber.SubscriberStore.from_tasklet(self)
- self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop, self.handle_nsr)
-
- self.log.debug("Created DTS Api GI Object: %s", self.dts)
-
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
@asyncio.coroutine
- def init(self):
+ def register(self):
self.log.debug("creating vnfr subscriber")
yield from self.store.register()
yield from self.nsr_sub.register()
- @asyncio.coroutine
- def run(self):
- pass
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
-
def scale_in(self, scaling_group_name, nsr_id):
"""Delegate callback
# Trigger an rpc
rpc_ip = NsrYang.YangInput_Nsr_ExecScaleIn.from_dict({
+ 'project_name': self.name,
'nsr_id_ref': nsr_id,
'instance_id': instance_id,
'scaling_group_name_ref': scaling_group_name})
def _scale_out():
# Trigger an rpc
rpc_ip = NsrYang.YangInput_Nsr_ExecScaleOut.from_dict({
+ 'project_name': self.name,
'nsr_id_ref': nsr_id ,
'scaling_group_name_ref': scaling_group_name})
NS that moves to config state.
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): Ns Opdata
action (rwdts.QueryAction): Action type of the change.
"""
def nsr_create():
for scaling_group in nsd.scaling_group_descriptor:
for policy_cfg in scaling_group.scaling_policy:
policy = engine.ScalingPolicy(
- self.log, self.dts, self.loop,
+ self.log, self.dts, self.loop, self,
nsr.ns_instance_config_ref,
nsr.nsd_ref,
scaling_group.name,
nsr_create()
elif action == rwdts.QueryAction.DELETE:
nsr_delete()
+
+
+class AutoScalerTasklet(rift.tasklets.Tasklet):
+ """The main task of this Tasklet is to listen for NSR changes and once the
+ NSR is configured, ScalingPolicy is created.
+ """
+ def __init__(self, *args, **kwargs):
+
+ try:
+ super().__init__(*args, **kwargs)
+
+ self._project_handler = None
+ self.projects = {}
+
+ except Exception as e:
+ self.log.exception(e)
+
+ def start(self):
+ super().start()
+
+ self.log.debug("Registering with dts")
+
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, AutoScalerProject)
+ self.project_handler.register()
+
+ @asyncio.coroutine
+ def run(self):
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
class NsrMonParamSubscriber(mano_dts.AbstractOpdataSubscriber):
"""Registers for NSR monitoring parameter changes.
-
+
Attributes:
monp_id (str): Monitoring Param ID
nsr_id (str): NSR ID
"""
- def __init__(self, log, dts, loop, nsr_id, monp_id=None, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, nsr_id, monp_id=None, callback=None):
+ super().__init__(log, dts, loop, project, callback)
self.nsr_id = nsr_id
self.monp_id = monp_id
def get_xpath(self):
- return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ return self.project.add_project(("D,/nsr:ns-instance-opdata/nsr:nsr" +
"[nsr:ns-instance-config-ref='{}']".format(self.nsr_id) +
"/nsr:monitoring-param" +
- ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else ""))
-
-
-
+ ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else "")))
)
-ScalingCriteria = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
-ScalingPolicy = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
+ScalingCriteria = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
+ScalingPolicy = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
class MockDelegate(engine.ScalingCriteria.Delegate):
def __call__(self):
store = mock.MagicMock()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
'id': "1",
'monitoring_param': [
{'description': 'no of ping requests',
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': '1'})
- mock_vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({'id': '1'})
+ mock_vnfr.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict({
'ns_instance_config_ref': "1",
'name_ref': "Foo",
'nsd_ref': '1',
scale_in_val = 100
scale_out_val = 200
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict({
'id': '1',
'monitoring_param': (monp_cfg if not self.legacy else []),
'constituent_vnfd': [{'member_vnf_index': 1,
def _populate_mock_values(self, criterias, nsr_id, floor, ceil):
# Mock publish
# Verify Scale in AND operator
- NsMonParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+ NsMonParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam
publisher = rift.test.dts.DescriptorPublisher(self.log, self.dts, self.loop)
for criteria in criterias:
monp_id = criteria.ns_monitoring_param_ref
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
w_xpath = w_xpath + "[nsr:ns-instance-config-ref='{}']/nsr:monitoring-param".format(nsr_id)
xpath = w_xpath + "[nsr:id ='{}']".format(monp_id)
self._loop = loop
self._dts = dts
- def create_job(self, image_name, image_checksum, cloud_account_names=None):
+ def create_job(self, image_name, image_checksum, project, cloud_account_names=None):
""" Create an image upload_job and return an UploadJob instance
Arguments:
"""
create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
"onboarded_image": {
+ "project_name": project.name,
"image_name": image_name,
"image_checksum": image_checksum,
}
job_id = rpc_result.job_id
- return UploadJob(self._log, self._loop, self._dts, job_id)
+ return UploadJob(self._log, self._loop, self._dts, job_id, project)
- def create_job_threadsafe(self, image_name, image_checksum, cloud_account_names=None):
+ def create_job_threadsafe(self, image_name, image_checksum, project, cloud_account_names=None):
""" A thread-safe, syncronous wrapper for create_job """
future = concurrent.futures.Future()
def add_task():
task = self._loop.create_task(
- self.create_job(image_name, image_checksum, cloud_account_names)
+ self.create_job(image_name, image_checksum, project, cloud_account_names)
)
task.add_done_callback(on_done)
class UploadJob(object):
""" A handle for a image upload job """
- def __init__(self, log, loop, dts, job_id):
+ def __init__(self, log, loop, dts, job_id, project):
self._log = log
self._loop = loop
self._dts = dts
self._job_id = job_id
+ self._project = project
@asyncio.coroutine
def wait_until_complete(self):
UploadJobCancelled: The upload job was cancelled
"""
self._log.debug("waiting for upload job %s to complete", self._job_id)
+ xpath = self._project.add_project("D,/rw-image-mgmt:upload-jobs/" +
+ "rw-image-mgmt:job[rw-image-mgmt:id='{}']".
+ format(self._job_id))
while True:
- query_iter = yield from self._dts.query_read(
- "D,/rw-image-mgmt:upload-jobs/rw-image-mgmt:job[rw-image-mgmt:id='{}']".format(
- self._job_id
- )
- )
+ query_iter = yield from self._dts.query_read(xpath)
job_status_msg = None
for fut_resp in query_iter:
job_status_msg = (yield from fut_resp).result
import rift.tasklets
import rift.mano.cloud
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectConfigCallbacks,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from . import glance_proxy_server
from . import glance_client
class CloudAccountDtsHandler(object):
- def __init__(self, log, dts, log_hdl):
+ def __init__(self, log, dts, log_hdl, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._cloud_cfg_subscriber = None
+ self._project = project
def register(self, on_add_apply, on_delete_apply):
- self._log.debug("creating cloud account config handler")
+ self._log.debug("Project {}: creating cloud account config handler".
+ format(self._project.name))
self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, self._log_hdl, self._project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=on_add_apply,
on_delete_apply=on_delete_apply,
)
self._cloud_cfg_subscriber.register()
+ def deregister(self):
+ self._log.debug("Project {}: Removing cloud account config handler".
+ format(self._project.name))
+ self._cloud_cfg_subscriber.deregister()
+
def openstack_image_to_image_info(openstack_image):
"""Convert the OpenstackImage to a ImageInfo protobuf message
class ImageDTSShowHandler(object):
""" A DTS publisher for the upload-jobs data container """
- def __init__(self, log, loop, dts, job_controller):
+ def __init__(self, log, loop, dts, job_controller, project):
self._log = log
self._loop = loop
self._dts = dts
self._job_controller = job_controller
+ self._project = project
self._subscriber = None
+ def get_xpath(self):
+ return self._project.add_project("D,/rw-image-mgmt:upload-jobs")
+
@asyncio.coroutine
def register(self):
""" Register as a publisher and wait for reg_ready to complete """
- def get_xpath():
- return "D,/rw-image-mgmt:upload-jobs"
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
- xpath=get_xpath(),
+ xpath=self.get_xpath(),
msg=jobs_pb_msg,
)
reg_event.set()
self._subscriber = yield from self._dts.register(
- xpath=get_xpath(),
+ xpath=self.get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare,
on_ready=on_ready,
yield from reg_event.wait()
+ def deregister(self):
+ self._log.debug("Project {}: De-register show image handler".
+ format(self._project.name))
+ if self._subscriber:
+ self._subscriber.delete_element(self.get_xpath())
+ self._subscriber.deregister()
+ self._subscriber = None
+
class ImageDTSRPCHandler(object):
""" A DTS publisher for the upload-job RPC's """
- def __init__(self, log, loop, dts, accounts, glance_client, upload_task_creator, job_controller):
+ def __init__(self, log, loop, dts, accounts, glance_client,
+ upload_task_creator, job_controller, project):
self._log = log
self._loop = loop
self._dts = dts
self._glance_client = glance_client
self._upload_task_creator = upload_task_creator
self._job_controller = job_controller
+ self._project = project
- self._subscriber = None
+ self._create = None
+ self._cancel = None
@asyncio.coroutine
def _register_create_upload_job(self):
create_msg = msg
account_names = create_msg.cloud_account
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
# If cloud accounts were not specified, upload image to all cloud account
if not account_names:
account_names = list(self._accounts.keys())
def on_ready(_, status):
reg_event.set()
- self._subscriber = yield from self._dts.register(
- xpath="I," + get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare,
- on_ready=on_ready,
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._create = yield from self._dts.register(
+ xpath="I," + get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare,
+ on_ready=on_ready,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
yield from reg_event.wait()
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
if not msg.has_field("job_id"):
self._log.error("cancel-upload-job missing job-id field.")
xact_info.respond_xpath(rwdts.XactRspCode.NACK)
def on_ready(_, status):
reg_event.set()
- self._subscriber = yield from self._dts.register(
- xpath="I," + get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare,
- on_ready=on_ready,
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._cancel = yield from self._dts.register(
+ xpath="I," + get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare,
+ on_ready=on_ready,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
yield from reg_event.wait()
yield from self._register_create_upload_job()
yield from self._register_cancel_upload_job()
+ def deregister(self):
+ self._log.debug("Project {}: Deregister image rpc handlers".
+ format(self._project.name))
+ if self._create:
+ self._create.deregister()
+ self._create = None
+
+ if self._cancel:
+ self._cancel.deregister()
+ self._cancel = None
+
class GlanceClientUploadTaskCreator(object):
""" This class creates upload tasks using configured cloud accounts and
configured image catalog glance client """
- def __init__(self, log, loop, accounts, glance_client):
+ def __init__(self, log, loop, accounts, glance_client, project):
self._log = log
self._loop = loop
self._accounts = accounts
self._glance_client = glance_client
+ self._project = project
@asyncio.coroutine
def create_tasks(self, account_names, image_id=None, image_name=None, image_checksum=None):
create_msg.image_checksum if "image_checksum" in create_msg else None)
)
+class ImageMgrProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ImageMgrProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ try:
+ self.glance_client = kw['client']
+ except KeyError as e:
+ self._log.exception("kw {}: {}".format(kw, e))
+
+ self.cloud_cfg_subscriber = None
+ self.job_controller = None
+ self.task_creator = None
+ self.rpc_handler = None
+ self.show_handler = None
+
+ self.cloud_accounts = {}
+
+ @asyncio.coroutine
+ def register(self):
+ try:
+ self.log.debug("creating cloud account handler")
+ self.cloud_cfg_subscriber = CloudAccountDtsHandler(self._log,
+ self._dts,
+ self._log_hdl,
+ self)
+ self.cloud_cfg_subscriber.register(
+ self.on_cloud_account_create,
+ self.on_cloud_account_delete
+ )
+
+ self.job_controller = upload.ImageUploadJobController(
+ self.log, self.loop, self
+ )
+
+ self.task_creator = GlanceClientUploadTaskCreator(
+ self.log, self.loop, self.cloud_accounts,
+ self.glance_client, self
+ )
+
+ self.rpc_handler = ImageDTSRPCHandler(
+ self.log, self.loop, self.dts, self.cloud_accounts,
+ self.glance_client, self.task_creator,
+ self.job_controller, self
+ )
+ yield from self.rpc_handler.register()
+
+ self.show_handler = ImageDTSShowHandler(
+ self.log, self.loop, self.dts, self.job_controller, self
+ )
+ yield from self.show_handler.register()
+ except Exception as e:
+ self.log.exception("Error during project {} register: e".
+ format(self.name, e))
+
+ def deregister(self):
+ self.log.debug("De-register handlers for project: {}".format(self.name))
+ self.rpc_handler.deregister()
+ self.show_handler.deregister()
+ self.cloud_cfg_subscriber.deregister()
+
+ def on_cloud_account_create(self, account):
+ self.log.debug("adding cloud account: %s", account.name)
+ self.cloud_accounts[account.name] = account
+
+ def on_cloud_account_delete(self, account_name):
+ self.log.debug("deleting cloud account: %s", account_name)
+ if account_name not in self.cloud_accounts:
+ self.log.warning("cloud account not found: %s", account_name)
+ else:
+ del self.cloud_accounts[account_name]
+
class ImageManagerTasklet(rift.tasklets.Tasklet):
"""
super().__init__(*args, **kwargs)
self.rwlog.set_category("rw-mano-log")
- self.cloud_cfg_subscriber = None
self.http_proxy = None
self.proxy_server = None
self.dts = None
- self.job_controller = None
- self.cloud_accounts = {}
self.glance_client = None
- self.task_creator = None
- self.rpc_handler = None
- self.show_handler = None
+ self.project_handler = None
+
+ self.projects = {}
def start(self):
super().start()
@asyncio.coroutine
def init(self):
try:
- self.log.debug("creating cloud account handler")
- self.cloud_cfg_subscriber = CloudAccountDtsHandler(self.log, self.dts, self.log_hdl)
- self.cloud_cfg_subscriber.register(
- self.on_cloud_account_create,
- self.on_cloud_account_delete
- )
-
self.log.debug("creating http proxy server")
self.http_proxy = glance_proxy_server.QuickProxyServer(self.log, self.loop)
)
self.proxy_server.start()
- self.job_controller = upload.ImageUploadJobController(
- self.log, self.loop
- )
-
self.glance_client = glance_client.OpenstackGlanceClient.from_token(
self.log, "127.0.0.1", "9292", "test"
)
- self.task_creator = GlanceClientUploadTaskCreator(
- self.log, self.loop, self.cloud_accounts, self.glance_client
- )
-
- self.rpc_handler = ImageDTSRPCHandler(
- self.log, self.loop, self.dts, self.cloud_accounts, self.glance_client, self.task_creator,
- self.job_controller
- )
- yield from self.rpc_handler.register()
-
- self.show_handler = ImageDTSShowHandler(
- self.log, self.loop, self.dts, self.job_controller
- )
- yield from self.show_handler.register()
+ self.log.debug("Creating project handler")
+ self.project_handler = ProjectHandler(self, ImageMgrProject,
+ client=self.glance_client)
+ self.project_handler.register()
except Exception as e:
self.log.exception("error during init")
- def on_cloud_account_create(self, account):
- self.log.debug("adding cloud account: %s", account.name)
- self.cloud_accounts[account.name] = account
-
- def on_cloud_account_delete(self, account_name):
- self.log.debug("deleting cloud account: %s", account_name)
- if account_name not in self.cloud_accounts:
- self.log.warning("cloud account not found: %s", account_name)
-
- del self.cloud_accounts[account_name]
-
@asyncio.coroutine
def run(self):
pass
""" This class starts and manages ImageUploadJobs """
MAX_COMPLETED_JOBS = 20
- def __init__(self, log, loop, max_completed_jobs=MAX_COMPLETED_JOBS):
+ def __init__(self, log, loop, project, max_completed_jobs=MAX_COMPLETED_JOBS):
self._log = log
self._loop = loop
+ self._project = project
self._job_id_gen = itertools.count(1)
self._max_completed_jobs = max_completed_jobs
)
query_iter = yield from self.dts_c.query_read(
- "D,/rw-image-mgmt:upload-jobs",
+ "D,/rw-project:project/rw-image-mgmt:upload-jobs",
)
for fut_resp in query_iter:
#
import json
+import logging
import os
import tempfile
+import yaml
import gi
gi.require_version('RwNsdYang', '1.0')
RwYang,
)
+from rift.mano.utils.project import NS_PROJECT
+
class UnknownExtensionError(Exception):
pass
@property
def yang_class(self):
- """ The Protobuf's GI class (e.g. RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd) """
+ """ The Protobuf's GI class (e.g. RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd) """
return self._yang_pb_cls
@property
return self.yang_class.from_json(self.model, decode(json), strict=False)
def _from_yaml_file_hdl(self, file_hdl):
- yaml = file_hdl.read()
+ yml = file_hdl.read()
- return self.yang_class.from_yaml(self.model, decode(yaml), strict=False)
+ # Need to prefix project on to the descriptor and then
+ # convert to yang pb
+ # TODO: See if there is a better way to do this
+ desc = {NS_PROJECT: []}
+ desc[NS_PROJECT].append(yaml.safe_load(decode(yml)))
+ # log = logging.getLogger('rw-mano-log')
+ # log.error("Desc from yaml: {}".format(desc))
+ return self.yang_class.from_yaml(self.model, yaml.dump(desc), strict=False)
def to_json_string(self, pb_msg):
""" Serialize a protobuf message into JSON
try:
json_str = pb_msg.to_json(self.model)
+ # Remove rw-project:project top level element
+ dic = json.loads(json_str)
+ jstr = json.dumps(dic[NS_PROJECT][0])
except Exception as e:
raise SerializationError(e)
- return json_str
+ log = logging.getLogger('rw-mano-log')
+ log.debug("Descriptor JSON with project stripped: {}".format(jstr))
+ return jstr
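The YAML path above wraps the descriptor under the project list before model conversion, and the JSON path strips the same wrapper again after serialization. In plain-dict terms, and assuming NS_PROJECT resolves to the "rw-project:project" key implied by the JSON handling:

    # Plain-dict illustration of the wrap/strip round trip; the NS_PROJECT value is an assumption.
    import json

    NS_PROJECT = "rw-project:project"

    def wrap_in_project(descriptor_dict):
        """Nest a catalog descriptor under the project list before model conversion."""
        return {NS_PROJECT: [descriptor_dict]}

    def strip_project(json_str):
        """Drop the top-level rw-project:project element after serialization."""
        return json.dumps(json.loads(json_str)[NS_PROJECT][0], indent=2)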
def to_yaml_string(self, pb_msg):
""" Serialize a protobuf message into YAML
class VnfdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the VNFD descriptor"""
def __init__(self):
- super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+ super().__init__(VnfdYang, VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd)
class NsdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the NSD descriptor"""
def __init__(self):
- super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd)
+ super().__init__(NsdYang, NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd)
class RwVnfdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the VNFD descriptor"""
def __init__(self):
- super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+ super().__init__(RwVnfdYang, RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd)
class RwNsdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the NSD descriptor"""
def __init__(self):
- super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd)
+ super().__init__(RwNsdYang, RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd)
@property
def root_dir(self):
return self._root_dir
-
def _get_package_dir(self, package_id):
+ self._log.debug("Package dir {}, {}".format(self._root_dir, package_id))
return os.path.join(self._root_dir, package_id)
def _get_package_files(self, package_id):
XPATH = "D,/rw-launchpad:datacenters"
- def __init__(self, log, dts, loop):
+ def __init__(self, log, dts, loop, project):
"""Creates an instance of a DataCenterPublisher
Arguments:
tasklet - the tasklet that this publisher is registered for
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self._ro_sub = mano_dts.ROAccountConfigSubscriber(
self.log,
self.dts,
self.loop,
+ self.project,
callback=self.on_ro_account_change
)
self.ro_accounts = {}
elif action == RwDts.QueryAction.DELETE and ro_account.name in self.ro_accounts:
del self.ro_accounts[ro_account.name]
+ def deregister(self):
+ self._log.debug("De-register datacenter handler for project {}".
+ format(self.project.name))
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
+ self._ro_sub.deregister()
+
@asyncio.coroutine
def register(self):
"""Registers the publisher with DTS"""
xact_info.respond_xpath(
RwDts.XactRspCode.MORE,
- 'D,/rw-launchpad:datacenters',
+ self.project.add_project(DataCenterPublisher.XPATH),
datacenters,
)
with self.dts.group_create() as group:
self.reg = group.register(
- xpath=DataCenterPublisher.XPATH,
+ xpath=self.project.add_project(DataCenterPublisher.XPATH),
handler=handler,
flags=RwDts.Flag.PUBLISHER,
)
class ExportRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application, store_map, exporter, catalog_map):
+ def __init__(self, application, catalog_map):
"""
Args:
application: UploaderApplication
- store_map: dict containing VnfdStore & NsdStore
- exporter : DescriptorPackageArchiveExporter
catalog_map: Dict containing Vnfds and Nsd onboarding.
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
- self.store_map = store_map
- self.exporter = exporter
+ self.store_map = application.package_store_map
+ self.exporter = application.exporter
self.catalog_map = catalog_map
- self.log = log
@property
def xpath(self):
# Parse the IDs
desc_id = msg.package_id
- catalog = self.catalog_map[desc_type]
+ catalog = self.catalog_map[desc_type](project=msg.project_name)
if desc_id not in catalog:
raise ValueError("Unable to find package ID: {}".format(desc_id))
except Exception as e:
raise ImageUploadError("Failed to upload image to catalog: %s" % str(e)) from e
- def upload_image_to_cloud_accounts(self, image_name, image_checksum, cloud_accounts=None):
+ def upload_image_to_cloud_accounts(self, image_name, image_checksum, project, cloud_accounts=None):
self._log.debug("uploading image %s to all cloud accounts", image_name)
- upload_job = self._client.create_job_threadsafe(image_name, image_checksum, cloud_accounts)
+ upload_job = self._client.create_job_threadsafe(image_name, image_checksum, project, cloud_accounts)
try:
upload_job.wait_until_complete_threadsafe()
except client.UploadJobError as e:
import requests
+from rift.mano.utils.project import DEFAULT_PROJECT
from rift.package import convert
from gi.repository import (
NsdYang,
class DescriptorOnboarder(object):
""" This class is responsible for onboarding descriptors using Restconf"""
DESC_ENDPOINT_MAP = {
- NsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
- RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
- VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
- RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: "nsd-catalog/nsd",
+ RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: "nsd-catalog/nsd",
+ VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
}
DESC_SERIALIZER_MAP = {
- NsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.NsdSerializer(),
- RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.RwNsdSerializer(),
- VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
- RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: convert.NsdSerializer(),
+ RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: convert.RwNsdSerializer(),
+ VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
+ RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
}
HEADERS = {"content-type": "application/vnd.yang.data+json"}
return headers
- def _get_url(self, descriptor_msg):
+ def _get_url(self, descriptor_msg, project=None):
if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
raise TypeError("Invalid descriptor message type")
+ if project is None:
+ project = DEFAULT_PROJECT
+
endpoint = DescriptorOnboarder.DESC_ENDPOINT_MAP[type(descriptor_msg)]
+ ep = "project/{}/{}".format(project, endpoint)
url = "{}://{}:{}/api/config/{}".format(
"https" if self._use_ssl else "http",
self._host,
self.port,
- endpoint,
+ ep,
)
return url
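For illustration (not part of the patch), the URL returned by _get_url() now carries the project segment. Host, port and scheme below are placeholder values; only the path shape comes from the code above:

    host, port, use_ssl = "127.0.0.1", 8008, False    # placeholders
    endpoint = "nsd-catalog/nsd"                      # from DESC_ENDPOINT_MAP
    project = "default"                               # DEFAULT_PROJECT fallback
    ep = "project/{}/{}".format(project, endpoint)
    url = "{}://{}:{}/api/config/{}".format(
        "https" if use_ssl else "http", host, port, ep)
    # -> http://127.0.0.1:8008/api/config/project/default/nsd-catalog/nsd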
- def _make_request_args(self, descriptor_msg, auth=None):
+ def _make_request_args(self, descriptor_msg, auth=None, project=None):
if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
raise TypeError("Invalid descriptor message type")
serializer = DescriptorOnboarder.DESC_SERIALIZER_MAP[type(descriptor_msg)]
json_data = serializer.to_json_string(descriptor_msg)
- url = self._get_url(descriptor_msg)
+ url = self._get_url(descriptor_msg, project=project)
request_args = dict(
url=url,
self._log.error(msg)
raise UpdateError(msg) from e
- def onboard(self, descriptor_msg, auth=None):
+ def onboard(self, descriptor_msg, auth=None, project=None):
""" Onboard the descriptor config
Arguments:
OnboardError - The descriptor config update failed
"""
- request_args = self._make_request_args(descriptor_msg, auth)
+ request_args = self._make_request_args(descriptor_msg, auth, project)
try:
response = requests.post(**request_args)
response.raise_for_status()
except requests.exceptions.ConnectionError as e:
msg = "Could not connect to restconf endpoint: %s" % str(e)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
except requests.exceptions.HTTPError as e:
msg = "POST request to %s error: %s" % (request_args["url"], response.text)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
except requests.exceptions.Timeout as e:
msg = "Timed out connecting to restconf endpoint: %s", str(e)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
import rift.tasklets
import rift.mano.cloud
import rift.mano.config_agent
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from rift.package import store
from . import uploader
MAX_BODY_SIZE = 1 * MB # Max. size loaded into memory!
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
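This helper now comes from rift.mano.utils.project (see the import added above). For illustration only, a sketch of how a catalog handler's apply callback is expected to consume it; the keyword names are assumed to match the removed local version, and the NSD handler methods are the ones shown below:

    # Illustrative apply-callback fragment; `reg` and `xact` come from the
    # AppConf handler, `handler` is an NsdCatalogDtsHandler instance.
    def apply_nsd_config(handler, reg, xact):
        add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
            dts_member_reg=reg, xact=xact, key_name="id")
        for cfg in add_cfgs:
            handler.add_nsd(cfg)
        for cfg in delete_cfgs:
            handler.delete_nsd(cfg.id)
        for cfg in update_cfgs:
            handler.update_nsd(cfg)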
+class LaunchpadError(Exception):
+ pass
+class LpProjectNotFound(Exception):
+ pass
class CatalogDtsHandler(object):
- def __init__(self, tasklet, app):
+ def __init__(self, project, app):
self.app = app
self.reg = None
- self.tasklet = tasklet
+ self.project = project
@property
def log(self):
- return self.tasklet.log
+ return self.project.log
@property
def dts(self):
- return self.tasklet.dts
+ return self.project.dts
class NsdCatalogDtsHandler(CatalogDtsHandler):
def add_nsd(self, nsd):
self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id))
- if nsd.id not in self.tasklet.nsd_catalog:
- self.tasklet.nsd_catalog[nsd.id] = nsd
+ if nsd.id not in self.project.nsd_catalog:
+ self.project.nsd_catalog[nsd.id] = nsd
else:
self.log.error("nsd already in catalog: {}".format(nsd.id))
def update_nsd(self, nsd):
self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id))
- if nsd.id in self.tasklet.nsd_catalog:
- self.tasklet.nsd_catalog[nsd.id] = nsd
+ if nsd.id in self.project.nsd_catalog:
+ self.project.nsd_catalog[nsd.id] = nsd
else:
self.log.error("unrecognized NSD: {}".format(nsd.id))
def delete_nsd(self, nsd_id):
self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id))
- if nsd_id in self.tasklet.nsd_catalog:
- del self.tasklet.nsd_catalog[nsd_id]
+ if nsd_id in self.project.nsd_catalog:
+ del self.project.nsd_catalog[nsd_id]
else:
self.log.error("unrecognized NSD: {}".format(nsd_id))
try:
- self.tasklet.nsd_package_store.delete_package(nsd_id)
+ self.project.tasklet.nsd_package_store.delete_package(nsd_id)
except store.PackageStoreError as e:
self.log.warning("could not delete package from store: %s", str(e))
for cfg in update_cfgs:
self.update_nsd(cfg)
- self.log.debug("Registering for NSD catalog")
+        self.log.debug("Registering for NSD catalog in project {}".
+                       format(self.project.name))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self.dts.appconf_group_create(acg_handler) as acg:
+ xpath = self.project.add_project(NsdCatalogDtsHandler.XPATH)
self.reg = acg.register(
- xpath=NsdCatalogDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
)
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
class VnfdCatalogDtsHandler(CatalogDtsHandler):
XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
def add_vnfd(self, vnfd):
self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id))
- if vnfd.id not in self.tasklet.vnfd_catalog:
- self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+ if vnfd.id not in self.project.vnfd_catalog:
+ self.project.vnfd_catalog[vnfd.id] = vnfd
else:
self.log.error("VNFD already in catalog: {}".format(vnfd.id))
def update_vnfd(self, vnfd):
self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id))
- if vnfd.id in self.tasklet.vnfd_catalog:
- self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+ if vnfd.id in self.project.vnfd_catalog:
+ self.project.vnfd_catalog[vnfd.id] = vnfd
else:
self.log.error("unrecognized VNFD: {}".format(vnfd.id))
def delete_vnfd(self, vnfd_id):
self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id))
- if vnfd_id in self.tasklet.vnfd_catalog:
- del self.tasklet.vnfd_catalog[vnfd_id]
+ if vnfd_id in self.project.vnfd_catalog:
+ del self.project.vnfd_catalog[vnfd_id]
else:
self.log.error("unrecognized VNFD: {}".format(vnfd_id))
try:
- self.tasklet.vnfd_package_store.delete_package(vnfd_id)
+ self.project.tasklet.vnfd_package_store.delete_package(vnfd_id)
except store.PackageStoreError as e:
self.log.warning("could not delete package from store: %s", str(e))
for cfg in update_cfgs:
self.update_vnfd(cfg)
- self.log.debug("Registering for VNFD catalog")
+ self.log.debug("Registering for VNFD catalog in project {}".
+ format(self.project.name))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self.dts.appconf_group_create(acg_handler) as acg:
+ xpath = self.project.add_project(VnfdCatalogDtsHandler.XPATH)
self.reg = acg.register(
- xpath=VnfdCatalogDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
)
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
class CfgAgentAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._log.debug("creating config agent account config handler")
self.cfg_agent_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
- self._dts, self._log,
+ self._dts, self._log, self._project,
rift.mano.config_agent.ConfigAgentCallbacks(
on_add_apply=self.on_cfg_agent_account_added,
on_delete_apply=self.on_cfg_agent_account_deleted,
self._log.debug("creating config agent account opdata handler")
self.cfg_agent_operdata_handler = rift.mano.config_agent.CfgAgentDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, self._project
)
def on_cfg_agent_account_deleted(self, account):
self.cfg_agent_cfg_handler.register()
yield from self.cfg_agent_operdata_handler.register()
+ def deregister(self):
+ self.cfg_agent_operdata_handler.deregister()
+ self.cfg_agent_cfg_handler.deregister()
+
+
class CloudAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, loop, app):
+ def __init__(self, dts, log, log_hdl, loop, app, project):
self._log = log
self._log_hdl = log_hdl
self._dts = dts
self._loop = loop
self._app = app
+ self._project = project
- self._log.debug("creating cloud account config handler")
+ self._log.debug("Creating cloud account config handler for project {}".
+ format(project.name))
self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, self._log_hdl, self._project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=self.on_cloud_account_added,
on_delete_apply=self.on_cloud_account_deleted,
- )
+ ),
)
self._log.debug("creating cloud account opdata handler")
self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, self._project,
)
def on_cloud_account_deleted(self, account_name):
self._log.debug("cloud account deleted")
- self._app.accounts.clear()
- self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+ self._app.accounts[self._project.name] = \
+ list(self.cloud_cfg_handler.accounts.values())
self.cloud_operdata_handler.delete_cloud_account(account_name)
def on_cloud_account_added(self, account):
self._log.debug("cloud account added")
- self._app.accounts.clear()
- self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+ self._app.accounts[self._project.name] = \
+ list(self.cloud_cfg_handler.accounts.values())
self._log.debug("accounts: %s", self._app.accounts)
self.cloud_operdata_handler.add_cloud_account(account)
self.cloud_cfg_handler.register()
yield from self.cloud_operdata_handler.register()
+ def deregister(self):
+ self.cloud_cfg_handler.deregister()
+        self.cloud_operdata_handler.deregister()
-class LaunchpadTasklet(rift.tasklets.Tasklet):
- UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
- UPLOAD_MAX_BUFFER_SIZE = MAX_BUFFER_SIZE
- UPLOAD_PORT = "4567"
- def __init__(self, *args, **kwargs):
- super(LaunchpadTasklet, self).__init__(*args, **kwargs)
- self.rwlog.set_category("rw-mano-log")
- self.rwlog.set_subcategory("launchpad")
+class LaunchpadProject(ManoProject):
- self.app = None
- self.server = None
+ def __init__(self, name, tasklet, **kw):
+ super(LaunchpadProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ self._app = kw['app']
- self.account_handler = None
self.config_handler = None
self.nsd_catalog_handler = None
self.vld_catalog_handler = None
self.cloud_handler = None
self.datacenter_handler = None
self.lp_config_handler = None
-
- self.vnfd_package_store = store.VnfdPackageFilesystemStore(self.log)
- self.nsd_package_store = store.NsdPackageFilesystemStore(self.log)
+ self.account_handler = None
self.nsd_catalog = dict()
self.vld_catalog = dict()
self.vnfd_catalog = dict()
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @asyncio.coroutine
+ def register(self):
+ self.log.debug("creating NSD catalog handler for project {}".format(self.name))
+ self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self._app)
+ yield from self.nsd_catalog_handler.register()
+
+ self.log.debug("creating VNFD catalog handler for project {}".format(self.name))
+ self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self._app)
+ yield from self.vnfd_catalog_handler.register()
+
+ self.log.debug("creating datacenter handler for project {}".format(self.name))
+ self.datacenter_handler = datacenters.DataCenterPublisher(self.log, self.dts,
+ self.loop, self)
+ yield from self.datacenter_handler.register()
+
+ self.log.debug("creating cloud account handler for project {}".format(self.name))
+ self.cloud_handler = CloudAccountHandlers(self.dts, self.log, self.log_hdl,
+ self.loop, self._app, self)
+ yield from self.cloud_handler.register()
+
+ self.log.debug("creating config agent handler for project {}".format(self.name))
+ self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl,
+ self.loop, self)
+ yield from self.config_handler.register()
+
+ def deregister(self):
+ self.log.debug("De-register handlers for project: {}".format(self.name))
+ self.config_handler.deregister()
+ self.cloud_handler.deregister()
+ self.datacenter_handler.deregister()
+ self.vnfd_catalog_handler.deregister()
+ self.nsd_catalog_handler.deregister()
+
+ @asyncio.coroutine
+ def delete_prepare(self):
+ if self.nsd_catalog or self.vnfd_catalog or self.vld_catalog:
+ return False
+ return True
+
@property
def cloud_accounts(self):
if self.cloud_handler is None:
return list(self.cloud_handler.cloud_cfg_handler.accounts.values())
+
+class LaunchpadTasklet(rift.tasklets.Tasklet):
+ UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
+ UPLOAD_MAX_BUFFER_SIZE = MAX_BUFFER_SIZE
+ UPLOAD_PORT = "4567"
+
+ def __init__(self, *args, **kwargs):
+ super(LaunchpadTasklet, self).__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+ self.rwlog.set_subcategory("launchpad")
+
+ self.dts = None
+ self.project_handler = None
+
+ self.vnfd_package_store = store.VnfdPackageFilesystemStore(self.log)
+ self.nsd_package_store = store.NsdPackageFilesystemStore(self.log)
+
+ self.app = None
+ self.server = None
+ self.projects = {}
+        self.log.debug("LP Tasklet init")
+
+    def _get_project(self, project=None):
+ if project is None:
+ project = DEFAULT_PROJECT
+
+ if project in self.projects:
+ return self.projects[project]
+
+ msg = "Project {} not found".format(project)
+ self._log.error(msg)
+ raise LpProjectNotFound(msg)
+
+ def nsd_catalog_get(self, project=None):
+ return self._get_project(project=project).nsd_catalog
+
+ def vnfd_catalog_get(self, project=None):
+ return self._get_project(project=project).vnfd_catalog
+
+ def get_cloud_accounts(self, project=None):
+ return self._get_project(project=project).cloud_accounts
+
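Illustrative usage of the project-aware accessors above (not part of the patch); callers resolve the per-project LaunchpadProject first and get LpProjectNotFound for unknown names:

    # Hypothetical caller; `tasklet` is a running LaunchpadTasklet instance.
    try:
        nsds = tasklet.nsd_catalog_get(project="default")
        accounts = tasklet.get_cloud_accounts()   # falls back to DEFAULT_PROJECT
    except LpProjectNotFound:
        nsds, accounts = {}, []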
def start(self):
super(LaunchpadTasklet, self).start()
self.log.info("Starting LaunchpadTasklet")
self.log.exception("Caught Exception in LP stop")
raise
+ def get_vnfd_catalog(self, project):
+ return self.projects[project].vnfd_catalog
+
+ def get_nsd_catalog(self, project):
+ return self.projects[project].nsd_catalog
+
@asyncio.coroutine
def init(self):
- io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.app = uploader.UploaderApplication.from_tasklet(self)
- yield from self.app.register()
-
- manifest = self.tasklet_info.get_pb_manifest()
- ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
- ssl_key = manifest.bootstrap_phase.rwsecurity.key
- ssl_options = {
+ try:
+ io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+ self.app = uploader.UploaderApplication.from_tasklet(self)
+ yield from self.app.register()
+
+ manifest = self.tasklet_info.get_pb_manifest()
+ ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+ ssl_key = manifest.bootstrap_phase.rwsecurity.key
+ ssl_options = {
"certfile": ssl_cert,
"keyfile": ssl_key,
- }
-
- if manifest.bootstrap_phase.rwsecurity.use_ssl:
- self.server = tornado.httpserver.HTTPServer(
- self.app,
- max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
- io_loop=io_loop,
- ssl_options=ssl_options,
- )
-
- else:
- self.server = tornado.httpserver.HTTPServer(
- self.app,
- max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
- io_loop=io_loop,
- )
-
- self.log.debug("creating NSD catalog handler")
- self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app)
- yield from self.nsd_catalog_handler.register()
-
- self.log.debug("creating VNFD catalog handler")
- self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app)
- yield from self.vnfd_catalog_handler.register()
-
- self.log.debug("creating datacenter handler")
- self.datacenter_handler = datacenters.DataCenterPublisher(self.log, self.dts, self.loop)
- yield from self.datacenter_handler.register()
+ }
+
+ if manifest.bootstrap_phase.rwsecurity.use_ssl:
+ self.server = tornado.httpserver.HTTPServer(
+ self.app,
+ max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+ io_loop=io_loop,
+ ssl_options=ssl_options,
+ )
- self.log.debug("creating cloud account handler")
- self.cloud_handler = CloudAccountHandlers(
- self.dts, self.log, self.log_hdl, self.loop, self.app
+ else:
+ self.server = tornado.httpserver.HTTPServer(
+ self.app,
+ max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+ io_loop=io_loop,
)
- yield from self.cloud_handler.register()
- self.log.debug("creating config agent handler")
- self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl, self.loop)
- yield from self.config_handler.register()
+            self.log.debug("Registering project handler")
+ self.project_handler = ProjectHandler(self, LaunchpadProject,
+ app=self.app)
+ self.project_handler.register()
+
+ except Exception as e:
+ self.log.error("Exception : {}".format(e))
+ self.log.exception(e)
@asyncio.coroutine
def run(self):
class UploadRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application):
+ def __init__(self, application):
"""
Args:
application: UploaderApplication
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
@property
log = self.application.get_logger(transaction_id)
log.message(OnboardStart())
+ self.log.debug("Package create RPC: {}".format(msg))
auth = None
if msg.username is not None:
auth = (msg.username, msg.password)
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ self._log.warning("Did not get project name in RPC: {}".
+ format(msg.as_dict()))
+ project = rift.mano.utils.project.DEFAULT_PROJECT
+
self.application.onboard(
msg.external_url,
transaction_id,
- auth=auth
+ auth=auth,
+ project=project,
)
rpc_op = RPC_PACKAGE_CREATE_ENDPOINT.from_dict({
- "transaction_id": transaction_id})
+ "transaction_id": transaction_id,
+ "project_name": project,
+ })
return rpc_op
class UpdateRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application):
+ def __init__(self, application):
"""
Args:
application: UploaderApplication
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
@property
self.application.update(
msg.external_url,
transaction_id,
- auth=auth
+ auth=auth,
+ project=msg.project_name,
)
rpc_op = RPC_PACKAGE_UPDATE_ENDPOINT.from_dict({
- "transaction_id": transaction_id})
+ "transaction_id": transaction_id,
+ "project_name": msg.project_name,
+ })
return rpc_op
class UpdatePackage(downloader.DownloaderProtocol):
- def __init__(self, log, loop, url, auth,
+ def __init__(self, log, loop, project, url, auth,
onboarder, uploader, package_store_map):
super().__init__()
self.log = log
self.loop = loop
+ self.project = project
self.url = url
self.auth = auth
self.onboarder = onboarder
)
try:
self.uploader.upload_image(image_name, image_checksum, image_hdl)
- self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)
+ self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum, self.project)
except image.ImageUploadError as e:
self.log.exception("Failed to upload image: %s", image_name)
self.log.message(UpdateDescriptorUpdate())
try:
- self.onboarder.update(descriptor_msg)
+ self.onboarder.update(descriptor_msg, project=self.project)
except onboard.UpdateError as e:
raise MessageException(UpdateDescriptorError(package.descriptor_file)) from e
class OnboardPackage(downloader.DownloaderProtocol):
- def __init__(self, log, loop, url, auth,
+ def __init__(self, log, loop, project, url, auth,
onboarder, uploader, package_store_map):
self.log = log
self.loop = loop
+ self.project = project
self.url = url
self.auth = auth
self.onboarder = onboarder
self.uploader = uploader
self.package_store_map = package_store_map
def _onboard_package(self, packages):
# Extract package could return multiple packages if
self.log.message(OnboardDescriptorOnboard())
try:
- self.onboarder.onboard(descriptor_msg)
+ self.onboarder.onboard(descriptor_msg, project=self.project)
except onboard.OnboardError as e:
raise MessageException(OnboardDescriptorError(package.descriptor_file)) from e
ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
ssl_key = manifest.bootstrap_phase.rwsecurity.key
return cls(
- tasklet.log,
- tasklet.dts,
- tasklet.loop,
- ssl=(ssl_cert, ssl_key),
- vnfd_store=tasklet.vnfd_package_store,
- nsd_store=tasklet.nsd_package_store,
- vnfd_catalog=tasklet.vnfd_catalog,
- nsd_catalog=tasklet.nsd_catalog)
+ tasklet,
+ ssl=(ssl_cert, ssl_key),
+ vnfd_store=tasklet.vnfd_package_store,
+ nsd_store=tasklet.nsd_package_store)
def __init__(
self,
- log,
- dts,
- loop,
+ tasklet,
ssl=None,
vnfd_store=None,
- nsd_store=None,
- vnfd_catalog=None,
- nsd_catalog=None):
+ nsd_store=None):
- self.log = log
- self.loop = loop
- self.dts = dts
+ self.log = tasklet.log
+ self.loop = tasklet.loop
+ self.dts = tasklet.dts
+
+ self.accounts = {}
self.use_ssl = False
self.ssl_cert, self.ssl_key = None, None
if not nsd_store:
nsd_store = rift.package.store.NsdPackageFilesystemStore(self.log)
- self.accounts = []
self.messages = collections.defaultdict(list)
self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports')
self.exporter = export.DescriptorPackageArchiveExporter(self.log)
self.loop.create_task(export.periodic_export_cleanup(self.log, self.loop, self.export_dir))
- self.vnfd_catalog = vnfd_catalog
- self.nsd_catalog = nsd_catalog
+ self.get_vnfd_catalog = tasklet.get_vnfd_catalog
+ self.get_nsd_catalog = tasklet.get_nsd_catalog
catalog_map = {
- "vnfd": self.vnfd_catalog,
- "nsd": self.nsd_catalog
+ "vnfd": self.get_vnfd_catalog,
+ "nsd": self.get_nsd_catalog
}
- self.upload_handler = UploadRpcHandler(self.log, self.dts, self.loop, self)
- self.update_handler = UpdateRpcHandler(self.log, self.dts, self.loop, self)
- self.export_handler = export.ExportRpcHandler(
- self.log,
- self.dts,
- self.loop,
- self,
- store_map=self.package_store_map,
- exporter=self.exporter,
- catalog_map=catalog_map
- )
+ self.upload_handler = UploadRpcHandler(self)
+ self.update_handler = UpdateRpcHandler(self)
+ self.export_handler = export.ExportRpcHandler(self, catalog_map)
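For illustration (not part of the patch): because catalog_map now holds getter functions rather than catalog dicts, the export RPC resolves the catalog per request, as in the ExportRpcHandler lookup shown earlier. A hedged sketch with placeholder values:

    # Illustrative only; "nsd", the project name and the package ID are placeholders.
    get_catalog = catalog_map["nsd"]              # i.e. tasklet.get_nsd_catalog
    catalog = get_catalog(project="default")      # {descriptor-id: descriptor}
    descriptor = catalog.get("some-package-id")   # None if not on-boarded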
attrs = dict(log=self.log, loop=self.loop)
def get_logger(self, transaction_id):
return message.Logger(self.log, self.messages[transaction_id])
- def onboard(self, url, transaction_id, auth=None):
+ def onboard(self, url, transaction_id, auth=None, project=None):
log = message.Logger(self.log, self.messages[transaction_id])
onboard_package = OnboardPackage(
log,
self.loop,
+ project,
url,
auth,
self.onboarder,
self.loop.run_in_executor(None, onboard_package.download_package)
- def update(self, url, transaction_id, auth=None):
+ def update(self, url, transaction_id, auth=None, project=None):
log = message.Logger(self.log, self.messages[transaction_id])
update_package = UpdatePackage(
log,
self.loop,
+ project,
url,
auth,
self.onboarder,
self._log = log
self._args = args
+ self._project = args.project
+
self._pkgs = None
self._service_name = None
user=self._user,
passwd=self._password,
api_server_ip=self._api_server_ip)
+
self._upload_url = "curl -k https://{ip}:{port}/api/upload". \
format(ip=self._ip,
port=self._uport)
self._headers = '-H "accept: application/json"' + \
' -H "content-type: application/json"'
- self._conf_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/config". \
+
+ self._conf_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/config/project/{project}". \
format(header=self._headers,
user=self._user,
passwd=self._password,
ip=self._ip,
- port=self._rport)
- self._oper_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/operational". \
+ port=self._rport,
+ project=self._project)
+
+ self._oper_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/operational/project/{project}". \
format(header=self._headers,
user=self._user,
passwd=self._password,
ip=self._ip,
- port=self._rport)
+ port=self._rport,
+ project=self._project)
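For reference (not part of the patch), with placeholder address, port and credentials the project-scoped config URL built above expands to something like the following; list_nsds() then appends "/nsd-catalog/nsd" before executing the request:

    # Placeholder values; only the URL shape comes from self._conf_url above.
    example_conf_url = (
        'curl -k -H "accept: application/json" -H "content-type: application/json" '
        '--user "admin:admin" https://192.0.2.10:8008/api/config/project/default')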
@property
def log(self):
return self._log
def validate_args(self):
+ args = self._args
if args.upload_pkg is not None:
self._pkgs = args.upload_pkg
self.log.debug("Packages to upload: {}".format(self._pkgs))
uuid.UUID(args.datacenter)
self._dc = args.datacenter
except ValueError as e:
- raise OnboardPkgInvalidDescId("Invalid UUID for datacenter: {}".
- format(args.datacenter))
+ raise OnboardPkgInvalidDescId("Invalid UUID for datacenter {}: {}".
+ format(args.datacenter, e))
elif args.vim_account:
self._account = args.vim_account
self._service_name,
self._account))
- if (self._pkgs is None) and (self._nsd_id is None):
- raise OnboardPkgInputError("Need to specify either upload-pkg or instantiate options")
+ if (self._pkgs is None) and (self._nsd_id is None) and (not args.list_nsds):
+ raise OnboardPkgInputError("Need to specify either upload-pkg or instantiate or list options")
# Validate the port numbers are correct
def valid_port(port):
format(self._nsd_id,
js['error']))
- nsd = js['nsd:nsd']
+ try:
+ nsd = js['nsd:nsd']
+ except KeyError as e:
+ raise OnboardPkgNsdError("NSD ID {} provided is not valid".
+ format(self._nsd_id))
+
self.log.debug("NSD to instantiate: {}".format(nsd))
# Generate a UUID for NS
self.log.info("Successfully initiated instantiation of NS as {} ({})".
format(self._service_name, ns_id))
+ def list_nsds(self):
+ if self._args.list_nsds:
+ self.log.debug("Check NSDS at {}:{}, with credentials {}:{}".
+ format(self._ip, self._rport, self._user, self._password))
+
+ rest_url = self._conf_url+"/nsd-catalog/nsd"
+ try:
+ output = self._exec_cmd(rest_url)
+ self.log.debug("Output of NSD list: {}".
+ format(output))
+ if output:
+ js = json.loads(output)
+ if "error" in js:
+ raise OnboardPkgRcConnError("SO Restconf connect error: {}".
+ format(js["error"]))
+ else:
+ print("No NSDs found on SO")
+ return
+
+ self.log.debug("NSD list: {}".format(js))
+ print('List of NSDs on SO:\nName\tID')
+ for nsd in js['nsd:nsd']:
+ print('{}\t{}'.format(nsd['name'], nsd['id']))
+
+ except OnboardPkgCmdError as e:
+ self.log.error("SO restconf connect failed: {}".format(e))
+ raise OnboardPkgRcConnError("SO Restconf connect error: {}".
+ format(e))
+
def process(self):
self.validate_args()
self.validate_connectivity()
self.upload_packages()
self.instantiate()
+ self.list_nsds()
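A possible invocation of the extended CLI (for illustration only; the script name and any connection flags not shown in this hunk are placeholders), listing the NSDs on-boarded under the 'default' project:

    # Hypothetical command line; --list-nsds and --project are the options added below.
    #   python3 onboard_pkg.py --list-nsds --project default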
if __name__ == "__main__":
help="Descriptor packages to upload. " + \
"If multiple descriptors are provided, they are uploaded in the same sequence.")
+ parser.add_argument("-l", "--list-nsds", action='store_true',
+ help="List available network service descriptors")
+
parser.add_argument("-i", "--instantiate",
help="Instantiate a network service with the name")
parser.add_argument("-d", "--nsd-id",
parser.add_argument("-c", "--vim-account",
help="Cloud/VIM account to instantiate on")
+ parser.add_argument("--project", default='default',
+ help="Project to use, default 'default'")
parser.add_argument("-o", "--onboard-port", default=8443, type=int,
help="Onboarding port number - node port number, default 8443")
parser.add_argument("-p", "--upload-port", default=4567, type=int,
self._vnfd_serializer = rift.package.convert.VnfdSerializer()
def test_create_archive(self):
- rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+ rw_vnfd_msg = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd(
id="new_id", name="new_name", description="new_description"
)
json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
self.assertEqual(package.descriptor_msg, rw_vnfd_msg)
def test_export_package(self):
- rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+ rw_vnfd_msg = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd(
id="new_id", name="new_name", description="new_description",
meta="THIS FIELD IS NOT IN REGULAR VNFD"
)
- vnfd_msg = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ vnfd_msg = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
vnfd_msg.from_dict(rw_vnfd_msg.as_dict(), ignore_missing_keys=True)
self.assertNotEqual(rw_vnfd_msg, vnfd_msg)
@rift.test.dts.async_test
def test_onboard_nsd(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
self.assertEqual(self._handler_info.last_request_message, nsd_msg)
self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
@rift.test.dts.async_test
def test_update_nsd(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
self.assertEqual(self._handler_info.last_request_message, nsd_msg)
self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
@rift.test.dts.async_test
def test_bad_descriptor_type(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_msg = NsdYang.YangData_RwProject_Project_NsdCatalog()
with self.assertRaises(TypeError):
yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
# Use a port not used by the instantiated server
new_port = self._port - 1
self._onboarder.port = new_port
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
with self.assertRaises(onboard.OnboardError):
yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
# Set the timeout to something minimal to speed up test
self._onboarder.timeout = .1
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
# Force the request to timeout by running the call synchronously so the
with self.assertRaises(onboard.OnboardError):
mock_vnfd_catalog = mock.MagicMock()
self.uid, path = self.create_mock_package()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
"id": self.uid
})
mock_vnfd_catalog = {self.uid: mock_vnfd}
self._account = account
self._plugin = plugin
self._timestamp = 0
- self._metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ self._metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
self._vdur = vdur
self._vim_id = vdur.vim_id
self._updating = None
try:
# Create uninitialized metric structure
- vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ vdu_metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
# VCPU
vdu_metrics.vcpu.total = self.vdur.vm_flavor.vcpu_count
different sub-systems that are used to monitor the NFVI.
"""
- def __init__(self, loop, log, config):
+ def __init__(self, loop, log, config, project):
"""Create a Monitor object
Arguments:
- loop - an event loop
- log - the logger used by this object
- config - an instance of InstanceConfiguration
+ loop - an event loop
+ log - the logger used by this object
+ config - an instance of InstanceConfiguration
+ project - an instance of the project
"""
self._loop = loop
self._log = log
+ self._project = project
self._cloud_accounts = dict()
self._nfvi_plugins = NfviMetricsPluginManager(log)
"""The event log used by this object"""
return self._log
+ @property
+ def project(self):
+ return self._project
+
@property
def cache(self):
"""The NFVI metrics cache"""
import rift.tasklets
import rift.mano.cloud
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import core
class DtsHandler(object):
- def __init__(self, tasklet):
+ def __init__(self, project):
self.reg = None
- self.tasklet = tasklet
+ self.project = project
@property
def log(self):
- return self.tasklet.log
+ return self.project._log
@property
def log_hdl(self):
- return self.tasklet.log_hdl
+ return self.project._log_hdl
@property
def dts(self):
- return self.tasklet.dts
+ return self.project._dts
@property
def loop(self):
- return self.tasklet.loop
+ return self.project._loop
@property
def classname(self):
with self.dts.group_create() as group:
group.register(
- xpath=VnfrCatalogSubscriber.XPATH,
+ xpath=self.project.add_project(VnfrCatalogSubscriber.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
handler=handler,
)
with self.dts.appconf_group_create(acg_handler) as acg:
self.reg = acg.register(
- xpath=NsInstanceConfigSubscriber.XPATH,
+ xpath=self.project.add_project(NsInstanceConfigSubscriber.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
)
class CloudAccountDtsHandler(DtsHandler):
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._cloud_cfg_subscriber = None
def register(self):
self.log.debug("creating cloud account config handler")
self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
- self.dts, self.log, self.log_hdl,
+ self.dts, self.log, self.log_hdl, self.project,
rift.mano.cloud.CloudAccountConfigCallbacks(
-                on_add_apply=self.tasklet.on_cloud_account_create,
-                on_delete_apply=self.tasklet.on_cloud_account_delete,
+                on_add_apply=self.project.on_cloud_account_create,
+                on_delete_apply=self.project.on_cloud_account_delete,
# before timing out, the current data will be published instead.
TIMEOUT = 2.0
- def __init__(self, tasklet, vnfr, vdur):
+ def __init__(self, project, vnfr, vdur):
"""Create an instance of VdurNvfiPublisher
Arguments:
vdur - the VDUR of the VDU whose metrics are published
"""
- super().__init__(tasklet)
+ super().__init__(project)
self._vnfr = vnfr
self._vdur = vdur
self._handle = None
- self._xpath = VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id)
+ self._xpath = project.add_project(VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id))
self._deregistered = asyncio.Event(loop=self.loop)
with self.dts.appconf_group_create(acg_handler) as acg:
self.reg = acg.register(
- xpath="C,/rw-launchpad:launchpad-config",
+ xpath=self.project.add_project("C,/rw-launchpad:launchpad-config"),
flags=rwdts.Flag.SUBSCRIBER,
)
them on to the tasklet.
"""
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._handle = None
@asyncio.coroutine
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
try:
+
+ if not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
response = VnfrYang.YangOutput_Vnfr_CreateAlarm()
-                    response.alarm_id = yield from self.tasklet.on_create_alarm(
+                    response.alarm_id = yield from self.project.on_create_alarm(
msg.cloud_account,
them on to the tasklet.
"""
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._handle = None
@asyncio.coroutine
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
try:
+ if not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
-                    yield from self.tasklet.on_destroy_alarm(
+                    yield from self.project.on_destroy_alarm(
msg.cloud_account,
msg.alarm_id,
])
-class MonitorTasklet(rift.tasklets.Tasklet):
- """
- The MonitorTasklet provides a interface for DTS to interact with an
- instance of the Monitor class. This allows the Monitor class to remain
- independent of DTS.
- """
-
- DEFAULT_POLLING_PERIOD = 1.0
-
- def __init__(self, *args, **kwargs):
- try:
- super().__init__(*args, **kwargs)
- self.rwlog.set_category("rw-monitor-log")
-
- self.vnfr_subscriber = VnfrCatalogSubscriber(self)
- self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
- self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
- self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
+class MonitorProject(ManoProject):
- self.config = core.InstanceConfiguration()
- self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
+ def __init__(self, name, tasklet, **kw):
+        super(MonitorProject, self).__init__(tasklet.log, name)
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
- self.monitor = core.Monitor(self.loop, self.log, self.config)
- self.vdur_handlers = dict()
+ self.vnfr_subscriber = VnfrCatalogSubscriber(self)
+ self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
+ self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
+ self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
- self.webhooks = None
- self.create_alarm_rpc = CreateAlarmRPC(self)
- self.destroy_alarm_rpc = DestroyAlarmRPC(self)
+ self.config = core.InstanceConfiguration()
+ self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
+ self.monitor = core.Monitor(self.loop, self.log, self.config, self)
+ self.vdur_handlers = dict()
- except Exception as e:
- self.log.exception(e)
-
- @property
- def polling_period(self):
- return self.config.polling_period
-
- @property
- def public_ip(self):
- """The public IP of the launchpad"""
- return self.config.public_ip
-
- def start(self):
- super().start()
- self.log.info("Starting MonitoringTasklet")
-
- self.log.debug("Registering with dts")
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.log.debug("Created DTS Api GI Object: %s", self.dts)
-
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
+ self.create_alarm_rpc = CreateAlarmRPC(self)
+ self.destroy_alarm_rpc = DestroyAlarmRPC(self)
@asyncio.coroutine
- def init(self):
+    def register(self):
self.log.debug("creating cloud account handler")
self.cloud_cfg_subscriber.register()
self.log.debug("creating destroy-alarm rpc handler")
yield from self.destroy_alarm_rpc.register()
- self.log.debug("creating webhook server")
- loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.webhooks = WebhookApplication(self)
- self.server = tornado.httpserver.HTTPServer(
- self.webhooks,
- io_loop=loop,
- )
-
- @asyncio.coroutine
- def on_public_ip(self, ip):
- """Store the public IP of the launchpad
- Arguments:
- ip - a string containing the public IP address of the launchpad
+ @property
+ def polling_period(self):
+ return self.config.polling_period
- """
- self.config.public_ip = ip
+ @property
+ def public_ip(self):
+ """The public IP of the launchpad"""
+ return self.config.public_ip
def on_ns_instance_config_update(self, config):
"""Update configuration information
def on_cloud_account_delete(self, account_name):
self.monitor.remove_cloud_account(account_name)
- @asyncio.coroutine
- def run(self):
- self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
-
- def on_instance_started(self):
- self.log.debug("Got instance started callback")
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
-
def on_vnfr_create(self, vnfr):
if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
msg = "NFVI metrics unavailable for {}"
"""
yield from self.monitor.destroy_alarm(account, alarm_id)
+
+
+class MonitorTasklet(rift.tasklets.Tasklet):
+ """
+ The MonitorTasklet provides a interface for DTS to interact with an
+ instance of the Monitor class. This allows the Monitor class to remain
+ independent of DTS.
+ """
+
+ DEFAULT_POLLING_PERIOD = 1.0
+
+ def __init__(self, *args, **kwargs):
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-monitor-log")
+
+ self._project_handler = None
+ self.projects = {}
+
+ self.webhooks = None
+
+ except Exception as e:
+ self.log.exception(e)
+
+ def start(self):
+ super().start()
+ self.log.info("Starting MonitoringTasklet")
+
+ self.log.debug("Registering with dts")
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+    def init(self):
+        self.log.debug("creating project handler")
+        self._project_handler = ProjectHandler(self, MonitorProject)
+        self._project_handler.register()
+
+        self.log.debug("creating webhook server")
+ loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+ self.webhooks = WebhookApplication(self)
+ self.server = tornado.httpserver.HTTPServer(
+ self.webhooks,
+ io_loop=loop,
+ )
+
+ @asyncio.coroutine
+ def on_public_ip(self, ip):
+ """Store the public IP of the launchpad
+
+ Arguments:
+ ip - a string containing the public IP address of the launchpad
+
+ """
+ self.config.public_ip = ip
+
+ @asyncio.coroutine
+ def run(self):
+ self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
+
+ def on_instance_started(self):
+ self.log.debug("Got instance started callback")
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
monp_id (str): Monitoring Param ID
vnfr_id (str): VNFR ID
"""
- def __init__(self, log, dts, loop, vnfr_id, monp_id, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, vnfr_id, monp_id, callback=None):
+ super().__init__(log, dts, loop, project, callback)
self.vnfr_id = vnfr_id
self.monp_id = monp_id
def get_xpath(self):
- return("D,/vnfr:vnfr-catalog" +
+ return self.project.add_project(("D,/vnfr:vnfr-catalog" +
"/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id) +
"/vnfr:monitoring-param" +
- "[vnfr:id='{}']".format(self.monp_id))
+ "[vnfr:id='{}']".format(self.monp_id)))
class NsrMonitoringParam():
"""Class that handles NS Mon-param data.
"""
- MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+ MonParamMsg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam
MISSING = None
DEFAULT_AGGREGATION_TYPE = "AVERAGE"
"""Convenience class that constructs NSMonitoringParam objects
Args:
- nsd (RwNsdYang.YangData_Nsd_NsdCatalog_Nsd): Nsd object
+ nsd (RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd): Nsd object
constituent_vnfrs (list): List of constituent vnfr objects of NSR
store (SubscriberStore): Store object instance
Also handles legacy NSD descriptor which has no mon-param defines. In
such cases the mon-params are created from VNFD's mon-param config.
"""
- MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
-
mon_params = []
for mon_param_msg in nsd.monitoring_param:
mon_params.append(NsrMonitoringParam(
def from_handler(cls, handler, monp, callback):
"""Convenience class to build NsrMonitoringParamPoller object.
"""
- return cls(handler.log, handler.dts, handler.loop, monp, callback)
+ return cls(handler.log, handler.dts, handler.loop, handler.project,
+ monp, callback)
- def __init__(self, log, dts, loop, monp, callback=None):
+ def __init__(self, log, dts, loop, project, monp, callback=None):
"""
Args:
monp (NsrMonitoringParam): Param object
callback (None, optional): Callback to be triggered after value has
been aggregated.
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.monp = monp
self.subscribers = []
for vnfr_id, monp_id in self.monp.vnfr_ids:
callback = functools.partial(self.update_value, vnfr_id=vnfr_id)
self.subscribers.append(VnfrMonitoringParamSubscriber(
- self.loop, self.dts, self.loop, vnfr_id, monp_id, callback=callback))
+            self.log, self.dts, self.loop, self.project,
+ vnfr_id, monp_id, callback=callback))
@asyncio.coroutine
def start(self):
class NsrMonitorDtsHandler(mano_dts.DtsHandler):
""" NSR monitoring class """
- def __init__(self, log, dts, loop, nsr, constituent_vnfrs, store):
+ def __init__(self, log, dts, loop, project, nsr, constituent_vnfrs, store):
"""
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): NSR object
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): NSR object
constituent_vnfrs (list): list of VNFRs in NSR
store (SubscriberStore): Store instance
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.nsr = nsr
self.store = store
self.mon_params_pollers = []
def xpath(self, param_id=None):
- return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ return self.project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr" +
"[nsr:ns-instance-config-ref='{}']".format(self.nsr.ns_instance_config_ref) +
"/nsr:monitoring-param" +
("[nsr:id='{}']".format(param_id) if param_id else ""))
import rift.mano.cloud
import rift.mano.dts as subscriber
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import vnfr_core
from . import nsr_core
-class MonitoringParameterTasklet(rift.tasklets.Tasklet):
- """The main task of this Tasklet is to listen for VNFR changes and once the
- VNFR hits the running state, triggers the monitor.
- """
- def __init__(self, *args, **kwargs):
- try:
- super().__init__(*args, **kwargs)
- self.rwlog.set_category("rw-monitor-log")
- except Exception as e:
- self.log.exception(e)
+class MonParamProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(MonParamProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
self.vnfr_subscriber = None
self.store = None
# Needs to be moved to store once the DTS bug is resolved
self.vnfrs = {}
- def start(self):
- super().start()
-
- self.log.info("Starting MonitoringParameterTasklet")
- self.log.debug("Registering with dts")
-
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_tasklet(
+ self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_project(
self,
callback=self.handle_vnfr)
- self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_tasklet(
+ self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_project(
self,
callback=self.handle_nsr)
- self.store = subscriber.SubscriberStore.from_tasklet(self)
+ self.store = subscriber.SubscriberStore.from_project(self)
self.log.debug("Created DTS Api GI Object: %s", self.dts)
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
-
@asyncio.coroutine
- def init(self):
+    def register(self):
self.log.debug("creating vnfr subscriber")
yield from self.store.register()
yield from self.vnfr_subscriber.register()
yield from self.nsr_subsriber.register()
- @asyncio.coroutine
- def run(self):
- pass
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
+ def deregister(self):
+ self.log.debug("De-register vnfr project {}".format(self.name))
+ #TODO:
def handle_vnfr(self, vnfr, action):
"""Starts a monitoring parameter job for every VNFR that reaches
"""
def vnfr_create():
- # if vnfr.operational_status == "running" and vnfr.id not in self.vnfr_monitors:
if vnfr.config_status == "configured" and vnfr.id not in self.vnfr_monitors:
vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
NS that moves to config state.
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): Ns Opdata
action (rwdts.QueryAction): Action type of the change.
"""
def nsr_create():
self.log,
self.dts,
self.loop,
+ self,
nsr,
list(self.vnfrs.values()),
self.store
@asyncio.coroutine
def task():
- yield from nsr_mon.register()
- yield from nsr_mon.start()
+ try:
+ yield from nsr_mon.register()
+ yield from nsr_mon.start()
+ except Exception as e:
+ self.log.exception("NSR {} monparam task failed: {}".
+ format(nsr.name_ref, e))
self.loop.create_task(task())
nsr_create()
elif action == rwdts.QueryAction.DELETE:
nsr_delete()
+
+
+class MonitoringParameterTasklet(rift.tasklets.Tasklet):
+ """The main task of this Tasklet is to listen for VNFR changes and once the
+ VNFR hits the running state, triggers the monitor.
+ """
+ def __init__(self, *args, **kwargs):
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-monitor-log")
+ except Exception as e:
+ self.log.exception(e)
+
+ self._project_handler = None
+ self.projects = {}
+
+ def start(self):
+ super().start()
+
+ self.log.info("Starting MonitoringParameterTasklet")
+ self.log.debug("Registering with dts")
+
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating project handler")
+        self._project_handler = ProjectHandler(self, MonParamProject)
+        self._project_handler.register()
+
+ @asyncio.coroutine
+ def run(self):
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param"
@classmethod
- def from_vnf_data(cls, tasklet, vnfr_msg, vnfd_msg):
- handler = cls(tasklet.log, tasklet.dts, tasklet.loop,
+ def from_vnf_data(cls, project, vnfr_msg, vnfd_msg):
+ handler = cls(project.log, project.dts, project.loop, project,
vnfr_msg.id, vnfr_msg.mgmt_interface.ip_address,
vnfd_msg.monitoring_param, vnfd_msg.http_endpoint)
return handler
- def __init__(self, log, dts, loop, vnfr_id, mgmt_ip, params, endpoints):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, vnfr_id, mgmt_ip, params, endpoints):
+ super().__init__(log, dts, loop, project)
self._mgmt_ip = mgmt_ip
self._vnfr_id = vnfr_id
mon_params = []
for mon_param in params:
- param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
+ param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
mon_param.as_dict()
)
mon_params.append(param)
http_endpoints = []
for endpoint in endpoints:
- endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
+ endpoint = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
endpoint.as_dict()
)
http_endpoints.append(endpoint)
def xpath(self, param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
+ return self.project.add_project(("D,/vnfr:vnfr-catalog" +
"/vnfr:vnfr[vnfr:id='{}']".format(self._vnfr_id) +
"/vnfr:monitoring-param" +
- ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+ ("[vnfr:id='{}']".format(param_id) if param_id else "")))
@property
def msg(self):
'ping-response-rx-count': 10
}
- mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+ mon_param_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam()
mon_param_msg.from_dict({
'id': '1',
'name': 'ping-request-tx-count',
'units': 'packets'
})
- endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+ endpoint_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint()
endpoint_msg.from_dict({
'path': ping_path,
'polling_interval_secs': 1,
'ping-response-rx-count': 10
}
- mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+ mon_param_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam()
mon_param_msg.from_dict({
'id': '1',
'name': 'ping-request-tx-count',
'units': 'packets'
})
- endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+ endpoint_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint()
endpoint_msg.from_dict({
'path': ping_path,
'https': 'true',
class MonParamMsgGenerator(object):
def __init__(self, num_messages=1):
ping_path = r"/api/v1/ping/stats"
- self._endpoint_msg = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
+ self._endpoint_msg = vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
'path': ping_path,
'https': 'true',
'polling_interval_secs': 1,
self._mon_param_msgs = []
for i in range(1, num_messages):
- self._mon_param_msgs.append(vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ self._mon_param_msgs.append(vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
'id': '%s' % i,
'name': 'param_num_%s' % i,
'json_query_method': "NAMEKEY",
def setup_mock_store(self, aggregation_type, monps, legacy=False):
store = mock.MagicMock()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
'id': "1",
'monitoring_param': [
{'description': 'no of ping requests',
})
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({
'id': '1',
'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
})
- mock_vnfr.vnfd = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+ mock_vnfr.vnfd = vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict({
'ns_instance_config_ref': "1",
'name_ref': "Foo",
'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
'vnfd_monitoring_param_ref': '2'}]
}]
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict({
'id': str(uuid.uuid1()),
'monitoring_param': (monp if not legacy else [])
})
class CloudAccountConfigSubscriber:
- def __init__(self, log, dts, log_hdl):
+ def __init__(self, log, dts, log_hdl, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
+ self._project = project
self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
self._dts,
self._log,
self._log_hdl,
+ self._project,
rift.mano.cloud.CloudAccountConfigCallbacks())
def get_cloud_account_sdn_name(self, account_name):
def register(self):
self._cloud_sub.register()
+ def deregister(self):
+ self._cloud_sub.deregister()
+
class ROAccountPluginSelector(object):
"""
"""
DEFAULT_PLUGIN = RwNsPlugin
- def __init__(self, dts, log, loop, records_publisher):
+ def __init__(self, dts, log, loop, project, records_publisher):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._records_publisher = records_publisher
self._nsm_plugins = NsmPlugins()
self._log,
self._dts,
self._loop,
+ self._project,
callback=self.on_ro_account_change
)
self._nsr_sub = mano_dts.NsrCatalogSubscriber(
self._log,
self._dts,
self._loop,
+ self._project,
self.handle_nsr)
# The default plugin will be RwNsPlugin
def register(self):
yield from self._ro_sub.register()
yield from self._nsr_sub.register()
+
+ def deregister(self):
+ self._log.debug("Project {} de-register".format(self._project.name))
+ self._ro_sub.deregister()
+ self._nsr_sub.deregister()
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
self._nsr._nsr_uuid,
self._vdur_id
)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
if console_url:
vdur_console.console_url = console_url
except openmano_client.InstanceStatusError as e:
self._log.error("Could not get NS instance console URL: %s",
str(e))
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
def vnfr_uptime_update(self, vnfr):
try:
- vnfr_ = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': vnfr.id})
+ vnfr_ = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({'id': vnfr.id})
while True:
vnfr_.uptime = int(time.time()) - vnfr._create_time
yield from self._publisher.publish_vnfr(None, vnfr_)
""" The network service op data DTS handler """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@property
@asyncio.coroutine
def register(self):
""" Register for Nsr op data publisher registration"""
- self._log.debug("Registering Nsr op data path %s as publisher",
- NsrOpDataDtsHandler.XPATH)
+ if self._regh:
+ return
+
+ xpath = self._project.add_project(NsrOpDataDtsHandler.XPATH)
+ self._log.debug("Registering Nsr op data path {} as publisher".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg)
self.regh.create_element(path, msg)
self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xact, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh)
self.regh.update_element(path, msg, flags)
self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
- Update an NS record in DTS with the path and message
+ Delete an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting NSR xact:%s, path:%s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted NSR xact:%s, path:%s", xact, path)
class VnfrPublisherDtsHandler(object):
- """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
+ """ Registers 'D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@asyncio.coroutine
def register(self):
""" Register for Vvnfr create/update/delete/read requests from dts """
+ if self._regh:
+ return
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
"%s action on VirtualNetworkFunctionRecord not supported",
action)
- self._log.debug("Registering for VNFR using xpath: %s",
- VnfrPublisherDtsHandler.XPATH,)
+ xpath = self._project.add_project(VnfrPublisherDtsHandler.XPATH)
+ self._log.debug("Registering for VNFR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
class VlrPublisherDtsHandler(object):
- """ registers 'D,/vlr:vlr-catalog/vlr:vlr """
+ """ registers 'D,/rw-project:project/vlr:vlr-catalog/vlr:vlr """
XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
def register(self):
""" Register for vlr create/update/delete/read requests from dts """
+ if self._regh:
+ return
+
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts """
"%s action on VirtualLinkRecord not supported",
action)
- self._log.debug("Registering for VLR using xpath: %s",
- VlrPublisherDtsHandler.XPATH,)
+ xpath = self._project.add_project(VlrPublisherDtsHandler.XPATH)
+ self._log.debug("Registering for VLR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
HEADERS = {"content-type": "application/vnd.yang.data+json"}
- def __init__(self, use_ssl, ssl_cert, ssl_key, loop):
+ def __init__(self, use_ssl, ssl_cert, ssl_key, loop, project):
self.use_ssl = use_ssl
self.ssl_cert = ssl_cert
self.ssl_key = ssl_key
+ self._project = project
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self.loop = loop
scheme = "https" if self.use_ssl else "http"
- url = "{}://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}"
+ url = "{}://127.0.0.1:8008/api/config/project/{}/vnfd-catalog/vnfd/{}"
model = RwYang.Model.create_libncx()
model.load_module("rw-vnfd")
options["cert"] = (self.ssl_cert, self.ssl_key)
response = requests.put(
- url.format(scheme, vnfd.id),
+ url.format(scheme, self._project.name, vnfd.id),
**options
)
self._loop = loop
self._dts = dts
self.nsm = parent
+ self.project = parent._project
self._log.debug("Initialized ROConfigManager")
def is_ready(self):
@property
def cm_state_xpath(self):
- return ("/rw-conman:cm-state/rw-conman:cm-nsr")
+ return self.project.add_project("/rw-conman:cm-state/rw-conman:cm-nsr")
@classmethod
def map_config_status(cls, status):
import rift.mano.ncclient
import rift.mano.config_data.config
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from . import rwnsm_conman as conman
from . import cloud
"sdn_account": self._sdn_account_name,
"operational_status": 'init',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
elif self._vnffgr_state == VnffgRecordState.TERMINATED:
vnffgr_dict = {"id": self._vnffgr_id,
"vnffgd_id_ref": self._vnffgd_msg.id,
"sdn_account": self._sdn_account_name,
"operational_status": 'terminated',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
else:
try:
vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id)
"sdn_account": self._sdn_account_name,
"operational_status": 'failed',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
return vnffgr
"vnffgd_name_ref": self._vnffgd_msg.name,
"sdn_account": self._sdn_account_name,
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
for rsp in self._vnffgd_msg.rsp:
vnffgr_rsp = vnffgr.rsp.add()
vnffgr_rsp.id = str(uuid.uuid4())
vnfd = [vnfr.vnfd for vnfr in self._nsr.vnfrs.values() if vnfr.vnfd.id == rsp_cp_ref.vnfd_id_ref]
self._log.debug("VNFD message during VNFFG instantiation is %s",vnfd)
if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'):
- self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
+ self._log.debug("Service Function Type for VNFD ID %s is %s",
+ rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
else:
- self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref)
+ self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",
+ rsp_cp_ref.vnfd_id_ref)
continue
vnfr_cp_ref = vnffgr_rsp.vnfr_connection_point_ref.add()
self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
if vnfr.operational_status == 'failed':
self._log.error("Fetching VNFR for %s failed", vnfr.id)
- raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+ raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" %
+ (self.id, vnfr.id))
yield from asyncio.sleep(2, loop=self._loop)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
rsp_id_ref = _rsp[0].id
rsp_name = _rsp[0].name
else:
- self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
+ self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",
+ vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
continue
vnffgr_classifier = vnffgr.classifier.add()
vnffgr_classifier.id = vnffgd_classifier.id
self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
if vnfr.operational_status == 'failed':
self._log.error("Fetching VNFR for %s failed", vnfr.id)
- raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+ raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" %
+ (self.id, vnfr.id))
yield from asyncio.sleep(2, loop=self._loop)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
for ext_intf in vdu.external_interface:
if ext_intf.name == vnffgr_classifier.vnfr_connection_point_ref:
vnffgr_classifier.vm_id = vdu.vim_id
- self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
- vnfr_cp_ref.connection_point_params.vm_id)
+ self._log.debug("VIM ID for CP %s in VNFR %s is %s",
+ cp.name,nsr_vnfr.id,
+ vnfr_cp_ref.connection_point_params.vm_id)
break
self._log.info("VNFFGR msg to be sent is %s", vnffgr)
XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
@staticmethod
@asyncio.coroutine
- def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id, restart_mode=False):
+ def create_record(dts, log, loop, project, nsr_name, vld_msg,
+ cloud_account_name, om_datacenter, ip_profile,
+ nsr_id, restart_mode=False):
"""Creates a new VLR object based on the given data.
If restart mode is enabled, then we look for existing records in the
dts,
log,
loop,
+ project,
nsr_name,
vld_msg,
cloud_account_name,
if restart_mode:
res_iter = yield from dts.query_read(
- "D,/vlr:vlr-catalog/vlr:vlr",
+ project.add_project("D,/vlr:vlr-catalog/vlr:vlr"),
rwdts.XactFlag.MERGE)
for fut in res_iter:
return vlr_obj
- def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id):
+ def __init__(self, dts, log, loop, project, nsr_name, vld_msg,
+ cloud_account_name, om_datacenter, ip_profile, nsr_id):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_name = nsr_name
self._vld_msg = vld_msg
self._cloud_account_name = cloud_account_name
@property
def xpath(self):
""" path for this object """
- return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id)
+ return self._project.add_project("D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".
+ format(self._vlr_id))
@property
def id(self):
vlr_dict['ip_profile_params' ] = self._ip_profile.ip_profile_params.as_dict()
vlr_dict.update(vld_copy_dict)
- vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+ vlr = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict(vlr_dict)
return vlr
def reset_id(self, vlr_id):
def create_nsr_vlr_msg(self, vnfrs):
""" The VLR message"""
- nsr_vlr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr()
+ nsr_vlr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vlr()
nsr_vlr.vlr_ref = self._vlr_id
nsr_vlr.assigned_subnet = self.assigned_subnet
nsr_vlr.cloud_account = self.cloud_account_name
@staticmethod
@asyncio.coroutine
- def create_record(dts, log, loop, vnfd, const_vnfd_msg, nsd_id, nsr_name,
+ def create_record(dts, log, loop, project, vnfd, const_vnfd_msg, nsd_id, nsr_name,
cloud_account_name, om_datacenter_name, nsr_id, group_name, group_instance_id,
placement_groups, restart_mode=False):
"""Creates a new VNFR object based on the given data.
dts,
log,
loop,
+ project,
vnfd,
const_vnfd_msg,
nsd_id,
if restart_mode:
res_iter = yield from dts.query_read(
- "D,/vnfr:vnfr-catalog/vnfr:vnfr",
+ project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr"),
rwdts.XactFlag.MERGE)
for fut in res_iter:
dts,
log,
loop,
+ project,
vnfd,
const_vnfd_msg,
nsd_id,
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._vnfd = vnfd
self._const_vnfd_msg = const_vnfd_msg
self._nsd_id = nsd_id
@property
def xpath(self):
""" VNFR xpath """
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+ return self._project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']"
+ .format(self.id))
@property
def vnfr_msg(self):
@property
def const_vnfr_msg(self):
""" VNFR message """
- return RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConstituentVnfrRef(vnfr_id=self.id,cloud_account=self.cloud_account_name,om_datacenter=self._om_datacenter_name)
+ return RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConstituentVnfrRef(
+ vnfr_id=self.id, cloud_account=self.cloud_account_name,
+ om_datacenter=self._om_datacenter_name)
@property
def vnfd(self):
- @staticmethod
- def vnfr_xpath(vnfr):
+ def vnfr_xpath(self, vnfr):
""" Get the VNFR path from VNFR """
- return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id)
+ return (self._project.add_project(VirtualNetworkFunctionRecord.XPATH) +
+ "[vnfr:id = '{}']").format(vnfr.id)
@property
def config_type(self):
}
vnfr_dict.update(vnfd_copy_dict)
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
- vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict(),
- ignore_missing_keys=True)
- vnfr.member_vnf_index_ref = self.member_vnf_index
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd. \
+ from_dict(self.vnfd.as_dict(),
+ ignore_missing_keys=True)
+ vnfr.member_vnf_index_ref = str(self.member_vnf_index)
vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())
if self._vnfd.mgmt_interface.has_field("port"):
format(self.name, self.vnfr_msg))
yield from self._dts.query_update(
self.xpath,
- rwdts.XactFlag.TRACE,
+ 0, #rwdts.XactFlag.TRACE,
self.vnfr_msg
)
# For every connection point in the VNFD fill in the identifier
for conn_p in self._vnfd.connection_point:
- cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint()
+ cpr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint()
cpr.name = conn_p.name
cpr.type_yang = conn_p.type_yang
if conn_p.has_field('port_security_enabled'):
event_list = []
idx = 1
for entry in self._events:
- event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents()
+ event = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_OperationalEvents()
event.id = idx
idx += 1
event.timestamp, event.event, event.description, event.details = entry
""" Network service record """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False,
+ def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg,
+ sdn_account_name, key_pairs, project, restart_mode=False,
vlr_handler=None):
self._dts = dts
self._log = log
self._nsm_plugin = nsm_plugin
self._sdn_account_name = sdn_account_name
self._vlr_handler = vlr_handler
+ self._project = project
self._nsd = None
self._nsr_msg = None
self.set_state(NetworkServiceRecordState.INIT)
- self.substitute_input_parameters = InputParameterSubstitution(self._log)
+ self.substitute_input_parameters = InputParameterSubstitution(self._log, self._project)
@property
def nsm_plugin(self):
for group_info in self._nsr_cfg_msg.nsd_placement_group_maps:
if group_info.placement_group_ref == input_group.name:
- group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+ group = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_PlacementGroupsInfo()
group_dict = {k:v for k,v in
group_info.as_dict().items() if k != 'placement_group_ref'}
for param in copy_dict:
def vlr_uptime_update(self, vlr):
try:
- vlr_ = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict({'id': vlr.id})
+ vlr_ = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict({'id': vlr.id})
while True:
vlr_.uptime = int(time.time()) - vlr._create_time
- yield from self._vlr_handler.update(None, VirtualLinkRecord.vlr_xpath(vlr), vlr_)
+ xpath = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
+ yield from self._vlr_handler.update(None, xpath, vlr_)
yield from asyncio.sleep(2, loop=self._loop)
except asyncio.CancelledError:
self._log.debug("Received cancellation request for vlr_uptime_update task")
- yield from self._vlr_handler.delete(None, VirtualLinkRecord.vlr_xpath(vlr))
+ xpath = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
+ yield from self._vlr_handler.delete(None, xpath)
@asyncio.coroutine
self._dts,
self._log,
self._loop,
+ self._project,
self.name,
vld,
cloud_account,
vnfr = yield from VirtualNetworkFunctionRecord.create_record(self._dts,
self._log,
self._loop,
+ self._project,
vnfd_msg,
const_vnfd,
self.nsd_id,
@property
def nsr_xpath(self):
""" Returns the xpath associated with this NSR """
- return(
+ return self._project.add_project((
"D,/nsr:ns-instance-opdata" +
"/nsr:nsr[nsr:ns-instance-config-ref = '{}']"
- ).format(self.id)
+ ).format(self.id))
- @staticmethod
- def xpath_from_nsr(nsr):
+ def xpath_from_nsr(self, nsr):
""" Returns the xpath associated with this NSR op data"""
- return (NetworkServiceRecord.XPATH +
- "[nsr:ns-instance-config-ref = '{}']").format(nsr.id)
+ return self._project.add_project((NetworkServiceRecord.XPATH +
+ "[nsr:ns-instance-config-ref = '{}']").format(nsr.id))
@property
def nsd_xpath(self):
""" Return NSD config xpath."""
- return(
+ return self._project.add_project((
"C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']"
- ).format(self.nsd_id)
+ ).format(self.nsd_id))
@asyncio.coroutine
def instantiate(self, config_xact):
def create_msg(self):
""" The network serice record as a message """
nsr_dict = {"ns_instance_config_ref": self.id}
- nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
#nsr.cloud_account = self.cloud_account_name
nsr.sdn_account = self._sdn_account_name
nsr.name_ref = self.name
nsr.uptime = int(time.time()) - self._create_time
for cfg_prim in self.nsd_msg.service_primitive:
- cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
+ cfg_prim = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
cfg_prim.as_dict())
nsr.service_primitive.append(cfg_prim)
This class is responsible for substituting input parameters into an NSD.
"""
- def __init__(self, log):
+ def __init__(self, log, project):
"""Create an instance of InputParameterSubstitution
Arguments:
"""
self.log = log
+ self.project = project
def __call__(self, nsd, nsr_config):
"""Substitutes input parameters from the NSR config into the NSD
# to be modified
optional_input_parameters = set()
for input_parameter in nsd.input_parameter_xpath:
- optional_input_parameters.add(input_parameter.xpath)
+ optional_input_parameters.add(self.project.add_project(input_parameter.xpath))
# Apply the input parameters to the descriptor
if nsr_config.input_parameter:
- @staticmethod
- def path_for_id(nsd_id):
+ def path_for_id(self, nsd_id):
""" Return path for the passed nsd_id"""
- return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}'".format(nsd_id)
+ return self._nsm._project.add_project(
+ "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']".
+ format(nsd_id))
def path(self):
""" Return the message associated with this NetworkServiceDescriptor"""
self._nsm = nsm
self._regh = None
+ self._project = nsm._project
@property
def regh(self):
def register(self):
""" Register for Nsd create/update/delete/read requests from dts """
+ if self._regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
except Exception as e:
self._log.error("Exception in cleaning up NSD libs {}: {}".
format(nsd_id, e))
- self._log.excpetion(e)
+ self._log.exception(e)
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
# Need a list in scratch to store NSDs to create/update later
# acg._scratch['nsds'] = list()
self._regh = acg.register(
- xpath=NsdDtsHandler.XPATH,
+ xpath=self._project.add_project(NsdDtsHandler.XPATH),
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare)
+ def deregister(self):
+ self._log.debug("De-register NSD handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
self._loop = loop
self._nsm = nsm
self._regh = None
+ self._project = nsm._project
@property
def regh(self):
def register(self):
""" Register for VNFD configuration"""
+ if self._regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ xpath = self._project.add_project(VnfdDtsHandler.XPATH)
self._log.debug(
- "Registering for VNFD config using xpath: %s",
- VnfdDtsHandler.XPATH,
- )
+ "Registering for VNFD config using xpath {} for project {}"
+ .format(xpath, self._project.name))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
# Need a list in scratch to store VNFDs to create/update later
# acg._scratch['vnfds'] = list()
# acg._scratch['deleted_vnfds'] = list()
self._regh = acg.register(
- xpath=VnfdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
on_prepare=on_prepare)
+ def deregister(self):
+ self._log.debug("De-register VNFD handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+
class NsrRpcDtsHandler(object):
""" The network service instantiation RPC DTS handler """
EXEC_NSR_CONF_XPATH = "I,/nsr:start-network-service"
self._ns_regh = None
self._manager = None
- self._nsr_config_url = NsrRpcDtsHandler.REST_BASE_V2_URL + 'config/ns-instance-config'
+ self._nsr_config_url = NsrRpcDtsHandler.REST_BASE_V2_URL + \
+ 'config/project/{}/ns-instance-config'. \
+ format(self._nsm._project.name)
self._model = RwYang.Model.create_libncx()
self._model.load_schema_ypbc(RwNsrYang.get_schema())
def _apply_ns_instance_config(self,payload_dict):
#self._log.debug("At apply NS instance config with payload %s",payload_dict)
- req_hdr= {'accept':'application/vnd.yang.data+json','content-type':'application/vnd.yang.data+json'}
- response=requests.post(self._nsr_config_url, headers=req_hdr, auth=('admin', 'admin'),data=payload_dict,verify=False)
+ req_hdr = {'accept': 'application/vnd.yang.data+json',
+ 'content-type': 'application/vnd.yang.data+json'}
+ response = requests.post(self._nsr_config_url, headers=req_hdr,
+ auth=('admin', 'admin'), data=payload_dict, verify=False)
return response
@asyncio.coroutine
def register(self):
""" Register for NS monitoring read from dts """
+ if self._ns_regh:
+ self._log.warning("RPC already registered for project {}".
+ format(self._nsm._project.name))
+ return
+
@asyncio.coroutine
def on_ns_config_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts start-network-service"""
assert action == rwdts.QueryAction.RPC
rpc_ip = msg
+
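+ # Process the RPC only if it passes this project's rpc_check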
+ if not self._nsm._project.rpc_check(msg, xact_info=xact_info):
+ return
+
rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
- "nsr_id":str(uuid.uuid4())
- })
+ "nsr_id":str(uuid.uuid4()),
+ "project_name": msg.prject_name,
+ })
- if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
- self._log.error("Mandatory parameters name or nsd_ref or cloud account not found in start-network-service {}".format(rpc_ip))
+ if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and
+ ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
+ self._log.error("Mandatory parameters name or nsd_ref or " +
+ "cloud account not found in start-network-service {}".
+ format(rpc_ip))
self._log.debug("start-network-service RPC input: {}".format(rpc_ip))
ns_instance_config_dict = {"id":rpc_op.nsr_id, "admin_status":"ENABLED"}
ns_instance_config_copy_dict = {k:v for k, v in rpc_ip.as_dict().items()
- if k in RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr().fields}
+ if k in RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr().fields}
ns_instance_config_dict.update(ns_instance_config_copy_dict)
- ns_instance_config = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
- ns_instance_config.nsd = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ ns_instance_config = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
+ ns_instance_config.nsd = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
ns_instance_config.nsd.from_dict(nsd_copy.msg.as_dict())
payload_dict = ns_instance_config.to_json(self._model)
flags=rwdts.Flag.PUBLISHER,
)
+ def deregister(self):
+ self._log.debug("De-register NSR RPC for project {}".
+ format(self._nsm._project.name))
+ if self._ns_regh:
+ self._ns_regh.deregister()
+ self._ns_regh = None
+
class NsrDtsHandler(object):
""" The network service DTS handler """
self._log = log
self._loop = loop
self._nsm = nsm
+ self._project = self._nsm._project
self._nsr_regh = None
self._scale_regh = None
def register(self):
""" Register for Nsr create/update/delete/read requests from dts """
+ if self._nsr_regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
for vld in vl_delta["deleted"]:
yield from self._nsm.nsr_terminate_vl(nsr_id, vld)
- def get_add_delete_update_cfgs(dts_member_reg, xact, key_name, scratch):
- # Unfortunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys
- if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
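+ # get_add_delete_update_cfgs() is now provided by rift.mano.utils.project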
def get_nsr_key_pairs(dts_member_reg, xact):
key_pairs = {}
for instance_cfg, keyspec in dts_member_reg.get_xact_elements(xact, include_keyspec=True):
def begin_instantiation(nsr):
# Begin instantiation
self._log.info("Beginning NS instantiation: %s", nsr.id)
- yield from self._nsm.instantiate_ns(nsr.id, xact)
+ try:
+ yield from self._nsm.instantiate_ns(nsr.id, xact)
+ except Exception as e:
+ self._log.exception("NS instantiation: {}".format(e))
+ raise e
self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
xact, action, scratch)
(added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh,
xact,
- "id",
- scratch)
+ "id")
self._log.debug("Added: %s, Deleted: %s, Updated: %s", added_msgs,
deleted_msgs, updated_msgs)
acg.handle.prepare_complete_ok(xact_info.handle)
- self._log.debug("Registering for NSR config using xpath: %s",
- NsrDtsHandler.NSR_XPATH)
+ xpath = self._project.add_project(NsrDtsHandler.NSR_XPATH)
+ self._log.debug("Registering for NSR config using xpath: {}".
+ format(xpath))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
- self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
- on_prepare=on_prepare)
+ self._nsr_regh = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare
+ )
self._scale_regh = acg.register(
- xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
- )
+ xpath=self._project.add_project(NsrDtsHandler.SCALE_INSTANCE_XPATH),
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
+ )
self._key_pair_regh = acg.register(
- xpath=NsrDtsHandler.KEY_PAIR_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
- )
+ xpath=self._project.add_project(NsrDtsHandler.KEY_PAIR_XPATH),
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ )
+
+ def deregister(self):
+ self._log.debug("De-register NSR config for project {}".
+ format(self._project.name))
+ if self._nsr_regh:
+ self._nsr_regh.deregister()
+ self._nsr_regh = None
+ if self._scale_regh:
+ self._scale_regh.deregister()
+ self._scale_regh = None
+ if self._key_pair_regh:
+ self._key_pair_regh.deregister()
+ self._key_pair_regh = None
class NsrOpDataDtsHandler(object):
self._log = log
self._loop = loop
self._nsm = nsm
+
+ self._project = nsm._project
self._regh = None
@property
@asyncio.coroutine
def register(self):
""" Register for Nsr op data publisher registration"""
- self._log.debug("Registering Nsr op data path %s as publisher",
- NsrOpDataDtsHandler.XPATH)
+ if self._regh:
+ self._log.warning("NSR op data handler already registered for project {}".
+ format(self._project.name))
+ return
+
+ xpath = self._project.add_project(NsrOpDataDtsHandler.XPATH)
+ self._log.debug("Registering Nsr op data path {} as publisher".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
handlers = rift.tasklets.Group.Handler()
with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE)
+ def deregister(self):
+ self._log.debug("De-register NSR opdata for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
- def create(self, path, msg):
+ def create(self, xpath, msg):
"""
Create an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating NSR %s:%s", path, msg)
self.regh.create_element(path, msg)
self._log.debug("Created NSR, %s:%s", path, msg)
@asyncio.coroutine
- def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating NSR, %s:%s regh = %s", path, msg, self.regh)
self.regh.update_element(path, msg, flags)
self._log.debug("Updated NSR, %s:%s", path, msg)
@asyncio.coroutine
- def delete(self, path):
+ def delete(self, xpath):
"""
- Update an NS record in DTS with the path and message
+ Delete an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting NSR path:%s", path)
self.regh.delete_element(path)
self._log.debug("Deleted NSR path:%s", path)
@asyncio.coroutine
def register(self):
""" Register for vnfr create/update/delete/ advises from dts """
+ if self._regh:
+ self._log.warning("VNFR DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def on_commit(xact_info):
""" The transaction has been committed """
xact_info, action, ks_path, msg
)
- schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
if path_entry.key00.id not in self._nsm._vnfrs:
- self._log.error("%s request for non existent record path %s",
- action, xpath)
+ # Check if this is a monitoring param xpath
+ if 'vnfr:monitoring-param' not in xpath:
+ self._log.error("%s request for non existent record path %s",
+ action, xpath)
xact_info.respond_xpath(rwdts.XactRspCode.NA, xpath)
return
- self._log.debug("Deleting VNFR with id %s", path_entry.key00.id)
if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
yield from self._nsm.update_vnfr(msg)
elif action == rwdts.QueryAction.DELETE:
hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._nsm._project.add_project(
+ VnfrDtsHandler.XPATH),
handler=hdl,
flags=(rwdts.Flag.SUBSCRIBER),)
+ def deregister(self):
+ self._log.debug("De-register VNFR for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class NsdRefCountDtsHandler(object):
""" The NSD Ref Count DTS handler """
@asyncio.coroutine
def register(self):
""" Register for NSD ref count read from dts """
+ if self._regh:
+ self._log.warning("NSD ref DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
xpath = ks_path.to_xpath(RwNsrYang.get_schema())
if action == rwdts.QueryAction.READ:
- schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount.schema()
+ schema = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_NsdRefCount.schema()
path_entry = schema.keyspec_to_entry(ks_path)
nsd_list = yield from self._nsm.get_nsd_refcount(path_entry.key00.nsd_id_ref)
for xpath, msg in nsd_list:
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=NsdRefCountDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._nsm._project.add_project(
+ NsdRefCountDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,)
+ def deregister(self):
+ self._log.debug("De-register NSD Ref count for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
class NsManager(object):
""" The Network Service Manager class"""
- def __init__(self, dts, log, loop,
+ def __init__(self, dts, log, loop, project,
nsr_handler, vnfr_handler, vlr_handler, ro_plugin_selector,
vnffgmgr, vnfd_pub_handler, cloud_account_handler):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_handler = nsr_handler
self._vnfr_pub_handler = vnfr_handler
self._vlr_pub_handler = vlr_handler
VnfrDtsHandler(dts, log, loop, self),
NsdRefCountDtsHandler(dts, log, loop, self),
NsrDtsHandler(dts, log, loop, self),
- ScalingRpcHandler(log, dts, loop, self.scale_rpc_callback),
- NsrRpcDtsHandler(dts,log,loop,self),
+ ScalingRpcHandler(log, dts, loop, self._project,
+ self.scale_rpc_callback),
+ NsrRpcDtsHandler(dts, log, loop, self),
self._vnfd_dts_handler,
self.cfgmgr_obj,
]
for dts_handle in self._dts_handlers:
yield from dts_handle.register()
+ def deregister(self):
+ """ De-register all static DTS handlers """
+ for dts_handle in self._dts_handlers:
+ dts_handle.deregister()
+
def get_ns_by_nsr_id(self, nsr_id):
""" get NSR by nsr id """
msg : RPC input
action : Scaling Action
"""
- ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
- ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+ ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
+
+ xpath = self._project.add_project(
+ ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').
+ format(msg.nsr_id_ref))
- xpath = ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').format(
- msg.nsr_id_ref)
- instance = ScalingGroupInstance.from_dict({"id": msg.instance_id})
+ instance = ScalingGroupInstance.from_dict({
+ "id": msg.instance_id,
+ "project_name": self._project.name,})
@asyncio.coroutine
def get_nsr_scaling_group():
nsr_msg,
sdn_account_name,
key_pairs,
+ self._project,
restart_mode=restart_mode,
vlr_handler=self._ro_plugin_selector._records_publisher._vlr_pub_hdlr
)
@asyncio.coroutine
def get_nsr_config(self, nsd_id):
- xpath = "C,/nsr:ns-instance-config"
+ xpath = self._project.add_project("C,/nsr:ns-instance-config")
results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
for result in results:
def nsd_refcount_xpath(nsd_id):
""" xpath for ref count entry """
- return (NsdRefCountDtsHandler.XPATH +
+ return (self._project.add_project(NsdRefCountDtsHandler.XPATH) +
"[rw-nsr:nsd-id-ref = '{}']").format(nsd_id)
nsd_list = []
if nsd_id is None or nsd_id == "":
for nsd in self._nsds.values():
- nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+ nsd_msg = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_NsdRefCount()
nsd_msg.nsd_id_ref = nsd.id
nsd_msg.instance_ref_count = nsd.ref_count
nsd_list.append((nsd_refcount_xpath(nsd.id), nsd_msg))
elif nsd_id in self._nsds:
- nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+ nsd_msg = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_NsdRefCount()
nsd_msg.nsd_id_ref = self._nsds[nsd_id].id
nsd_msg.instance_ref_count = self._nsds[nsd_id].ref_count
nsd_list.append((nsd_refcount_xpath(nsd_id), nsd_msg))
""" This class provides a publisher interface that allows plugin objects
to publish NSR/VNFR/VLR"""
- def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr):
+ def __init__(self, dts, log, loop, project, nsr_pub_hdlr,
+ vnfr_pub_hdlr, vlr_pub_hdlr,):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_pub_hdlr = nsr_pub_hdlr
self._vlr_pub_hdlr = vlr_pub_hdlr
self._vnfr_pub_hdlr = vnfr_pub_hdlr
@asyncio.coroutine
def publish_vlr(self, xact, vlr):
""" Publish a VLR """
- path = VirtualLinkRecord.vlr_xpath(vlr)
+ path = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
return (yield from self._vlr_pub_hdlr.update(xact, path, vlr))
@asyncio.coroutine
def unpublish_vlr(self, xact, vlr):
""" Unpublish a VLR """
- path = VirtualLinkRecord.vlr_xpath(vlr)
+ path = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
return (yield from self._vlr_pub_hdlr.delete(xact, path))
ACTION = Enum('ACTION', 'SCALE_IN SCALE_OUT')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
self.callback = callback
self.last_instance_id = defaultdict(int)
+ self._regh_in = None
+ self._regh_out = None
@asyncio.coroutine
def register(self):
+ if self._regh_in:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_scale_in_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
try:
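+ # Process the scale-in RPC only if it passes this project's rpc_check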
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
if self.callback:
self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
- "instance_id": msg.instance_id})
+ "instance_id": msg.instance_id,
+ "project_name": self._project.name,})
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
assert action == rwdts.QueryAction.RPC
try:
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
scaling_group = msg.scaling_group_name_ref
if not msg.instance_id:
last_instance_id = self.last_instance_id[scale_group]
self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
- "instance_id": msg.instance_id})
+ "instance_id": msg.instance_id,
+ "project_name": self._project.name,})
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
on_prepare=on_scale_out_prepare)
with self.dts.group_create() as group:
- group.register(
- xpath=self.__class__.SCALE_IN_INPUT_XPATH,
- handler=scale_in_hdl,
- flags=rwdts.Flag.PUBLISHER)
- group.register(
- xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
- handler=scale_out_hdl,
- flags=rwdts.Flag.PUBLISHER)
-
+ self._regh_in = group.register(
+ xpath=self.__class__.SCALE_IN_INPUT_XPATH,
+ handler=scale_in_hdl,
+ flags=rwdts.Flag.PUBLISHER)
+ self._regh_out = group.register(
+ xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
+ handler=scale_out_hdl,
+ flags=rwdts.Flag.PUBLISHER)
+
+ def deregister(self):
+ self._log.debug("De-register scale RPCs for project {}".
+ format(self._project.name))
+ if self._regh_in:
+ self._regh_in.deregister()
+ self._regh_in = None
+ if self._regh_out:
+ self._regh_out.deregister()
+ self._regh_out = None
+
+
+class NsmProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(NsmProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
-class NsmTasklet(rift.tasklets.Tasklet):
- """
- The network service manager tasklet
- """
- def __init__(self, *args, **kwargs):
- super(NsmTasklet, self).__init__(*args, **kwargs)
- self.rwlog.set_category("rw-mano-log")
- self.rwlog.set_subcategory("nsm")
-
- self._dts = None
self._nsm = None
self._ro_plugin_selector = None
self._vnffgmgr = None
- self._nsr_handler = None
+ self._nsr_pub_handler = None
self._vnfr_pub_handler = None
self._vlr_pub_handler = None
self._vnfd_pub_handler = None
self._records_publisher_proxy = None
- def start(self):
- """ The task start callback """
- super(NsmTasklet, self).start()
- self.log.info("Starting NsmTasklet")
-
- self.log.debug("Registering with dts")
- self._dts = rift.tasklets.DTS(self.tasklet_info,
- RwNsmYang.get_schema(),
- self.loop,
- self.on_dts_state_change)
-
- self.log.debug("Created DTS Api GI Object: %s", self._dts)
-
- def stop(self):
- try:
- self._dts.deinit()
- except Exception:
- print("Caught Exception in NSM stop:", sys.exc_info()[0])
- raise
-
- def on_instance_started(self):
- """ Task instance started callback """
- self.log.debug("Got instance started callback")
-
@asyncio.coroutine
- def init(self):
- """ Task init callback """
- self.log.debug("Got instance started callback")
-
- self.log.debug("creating config account handler")
-
- self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop)
+ def register(self):
+ self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(
+ self._dts, self.log, self.loop, self)
yield from self._nsr_pub_handler.register()
- self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop)
+ self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(
+ self._dts, self.log, self.loop, self)
yield from self._vnfr_pub_handler.register()
- self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop)
+ self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(
+ self._dts, self.log, self.loop, self)
yield from self._vlr_pub_handler.register()
- manifest = self.tasklet_info.get_pb_manifest()
+ manifest = self._tasklet.tasklet_info.get_pb_manifest()
use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
ssl_key = manifest.bootstrap_phase.rwsecurity.key
- self._vnfd_pub_handler = publisher.VnfdPublisher(use_ssl, ssl_cert, ssl_key, self.loop)
+ self._vnfd_pub_handler = publisher.VnfdPublisher(
+ use_ssl, ssl_cert, ssl_key, self.loop, self)
self._records_publisher_proxy = NsmRecordsPublisherProxy(
self._dts,
self.log,
self.loop,
+ self,
self._nsr_pub_handler,
self._vnfr_pub_handler,
self._vlr_pub_handler,
self._dts,
self.log,
self.loop,
+ self,
self._records_publisher_proxy,
)
yield from self._ro_plugin_selector.register()
self._cloud_account_handler = cloud.CloudAccountConfigSubscriber(
self._log,
self._dts,
- self.log_hdl)
+ self.log_hdl,
+ self,
+ )
yield from self._cloud_account_handler.register()
- self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop)
+ self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop, self)
yield from self._vnffgmgr.register()
self._nsm = NsManager(
self._dts,
self.log,
self.loop,
+ self,
self._nsr_pub_handler,
self._vnfr_pub_handler,
self._vlr_pub_handler,
self._ro_plugin_selector,
self._vnffgmgr,
self._vnfd_pub_handler,
- self._cloud_account_handler
+ self._cloud_account_handler,
)
yield from self._nsm.register()
+ def deregister(self):
+ self._log.debug("Project {} de-register".format(self.name))
+ self._nsm.deregister()
+ self._vnffgmgr.deregister()
+ self._cloud_account_handler.deregister()
+ self._ro_plugin_selector.deregister()
+
+
+class NsmTasklet(rift.tasklets.Tasklet):
+ """
+ The network service manager tasklet
+ """
+ def __init__(self, *args, **kwargs):
+ super(NsmTasklet, self).__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+ self.rwlog.set_subcategory("nsm")
+
+ self._dts = None
+ self.project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def start(self):
+ """ The task start callback """
+ super(NsmTasklet, self).start()
+ self.log.info("Starting NsmTasklet")
+
+ self.log.debug("Registering with dts")
+ self._dts = rift.tasklets.DTS(self.tasklet_info,
+ RwNsmYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change)
+
+ self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+ def stop(self):
+ try:
+ self._dts.deinit()
+ except Exception:
+ print("Caught Exception in NSM stop:", sys.exc_info()[0])
+ raise
+
+ def on_instance_started(self):
+ """ Task instance started callback """
+ self.log.debug("Got instance started callback")
+
+ @asyncio.coroutine
+ def init(self):
+ """ Task init callback """
+ self.log.debug("Got instance started callback")
+
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, NsmProject)
+ self.project_handler.register()
+
@asyncio.coroutine
def run(self):
""" Task run callback """
class VnffgMgr(object):
""" Implements the interface to backend plugins to fetch topology """
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._account = {}
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._sdn = {}
- self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self)
+ self._sdn_handler = SDNAccountDtsHandler(self._dts, self._log, self)
self._vnffgr_list = {}
@asyncio.coroutine
def register(self):
yield from self._sdn_handler.register()
+ def deregister(self):
+ self._log.debug("Project {} de-register vnffgmgr".
+ format(self._project.name))
+ self._sdn_handler.deregister()
+
def set_sdn_account(self,account):
if (account.name in self._account):
self._log.error("SDN Account is already set")
del self._vnffgr_list[vnffgr_id]
class SDNAccountDtsHandler(object):
- XPATH = "C,/rw-project:project/rw-sdn:sdn/rw-sdn:account"
+ XPATH = "C,/rw-sdn:sdn/rw-sdn:account"
def __init__(self, dts, log, parent):
self._dts = dts
self._log = log
self._parent = parent
+ self._project = self._parent._project
self._sdn_account = {}
+ self._reg = None
def _set_sdn_account(self, account):
self._log.info("Setting sdn account: {}".format(account))
if msg.has_field("account_type"):
errmsg = "Cannot update SDN account's account-type."
self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
- errmsg)
+ xact_info.send_error_xpath(
+ RwTypes.RwStatus.FAILURE,
+ self._project.add_project(SDNAccountDtsHandler.XPATH),
+ errmsg
+ )
raise SdnAccountError(errmsg)
# Update the sdn account record
if not msg.has_field('account_type'):
errmsg = "New SDN account must contain account-type field."
self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
- errmsg)
+ xact_info.send_error_xpath(
+ RwTypes.RwStatus.FAILURE,
+ self._project.add_project(SDNAccountDtsHandler.XPATH),
+ errmsg
+ )
raise SdnAccountError(errmsg)
# Set the sdn account record
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Sdn Account config using xpath: %s",
- SDNAccountDtsHandler.XPATH,
- )
+ xpath = self._project.add_project(SDNAccountDtsHandler.XPATH)
+ self._log.debug("Registering for Sdn Account config using xpath: {}".
+ format(xpath))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
- acg.register(
- xpath=SDNAccountDtsHandler.XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- on_prepare=on_prepare
- )
-
-
-
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+ on_prepare=on_prepare
+ )
+
+ def deregister(self):
+ self._log.debug("De-register SDN Account handler in vnffg for project".
+ format(self._project.name))
+ self._reg.deregister()
+ self._reg = None
def create_record_msg(self):
""" Returns a NSR Scaling group record """
- msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord(
+ msg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ScalingGroupRecord(
scaling_group_name_ref=self.name,
)
return self._vnfrs.values()
def create_record_msg(self):
- msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
+ msg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
instance_id=self._instance_id,
create_time=self._create_time,
op_status=self._op_status,
class DownloadStatusPublisher(mano_dts.DtsHandler, url_downloader.DownloaderProtocol):
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project)
self.tasks = {}
def xpath(self, download_id=None):
- return ("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
- ("[download-id='{}']".format(download_id) if download_id else ""))
+ return self._project.add_project("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
+ ("[download-id='{}']".
+ format(download_id) if download_id else ""))
@asyncio.coroutine
def register(self):
assert self.reg is not None
+ def deregister(self):
+ self._log.debug("De-registering download status for project {}".
+ format(self.project.name))
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
def on_download_progress(self, download_job_msg):
"""callback that triggers update.
3. Return a tracking ID for the client to monitor the entire status
"""
- def __init__(self, log, dts, loop, proxy, publisher):
+ def __init__(self, log, dts, loop, proxy, tasklet):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
- publisher: Instance of DownloadStatusPublisher
+ publisher: Instance of tasklet to find the DownloadStatusPublisher
+ for a specific project
"""
super().__init__(log, dts, loop)
self.proxy = proxy
- self.publisher = publisher
+ self.tasklet = tasklet
@property
def xpath(self):
return "/rw-pkg-mgmt:package-file-add"
+ def get_publisher(self, msg):
+ try:
+ proj = self.tasklet.projects[msg.project_name]
+ except Exception as e:
+ err = "Project or project name not found {}: {}". \
+ format(msg.as_dict(), e)
+ self.log.error(err)
+ raise Exception(err)
+
+ return proj.job_handler
+
@asyncio.coroutine
def callback(self, ks_path, msg):
+ publisher = self.get_publisher(msg)
+
if not msg.external_url:
# For now we will only support External URL download
raise Exception ("No download URL provided")
proxy=self.proxy,
log=self.log)
- download_id = yield from self.publisher.register_downloader(url_downloader)
+ download_id = yield from publisher.register_downloader(url_downloader)
rpc_op = RPC_PACKAGE_ADD_ENDPOINT.from_dict({"task_id": download_id})
RwDts as rwdts,
RwPkgMgmtYang)
import rift.tasklets
-
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rpc
from .proxy import filesystem
from . import publisher as pkg_publisher
+class PackageManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(PackageManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
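+ # Per-project download status publisher; RPC handlers locate it via
+ # tasklet.projects[project_name] (see PackageOperationsRpcHandler.get_publisher)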
+ self.job_handler = pkg_publisher.DownloadStatusPublisher(
+ self._log, self._dts, self._loop, self)
+
+ @asyncio.coroutine
+ def register(self):
+ yield from self.job_handler.register()
+
+ def deregister (self):
+ self.job_handler.deregister()
+
+
class PackageManagerTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
try:
self.rwlog.set_category("rw-mano-log")
self.endpoint_rpc = None
self.schema_rpc = None
+
+ self._project_handler = None
+ self.projects = {}
+
except Exception as e:
self.log.exception(e)
proxy = filesystem.FileSystemProxy(self.loop, self.log)
args = [self.log, self.dts, self.loop]
- self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
-
args.append(proxy)
self.endpoint_rpc = rpc.EndpointDiscoveryRpcHandler(*args)
self.schema_rpc = rpc.SchemaRpcHandler(*args)
self.delete_rpc = rpc.PackageDeleteOperationsRpcHandler(*args)
- args.append(self.job_handler)
+ args.append(self)
self.pkg_op = rpc.PackageOperationsRpcHandler(*args)
def stop(self):
yield from self.endpoint_rpc.register()
yield from self.schema_rpc.register()
yield from self.pkg_op.register()
- yield from self.job_handler.register()
yield from self.delete_rpc.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, PackageManagerProject)
+ self.project_handler.register()
+
@asyncio.coroutine
def run(self):
pass
class DownloadStatusSubscriber(mano_dts.AbstractOpdataSubscriber):
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project, callback)
def get_xpath(self):
- return ("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
+ return self._project.add_project(
+ "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
self._parent = parent
self._cloud_sub = None
+ self._res_sub = None
+ self._project = parent._project
@asyncio.coroutine
def register(self):
yield from self.register_resource_pool_operational_data()
self.register_cloud_account_config()
+ def deregister(self):
+ self._log.debug("De-register for project {}".format(self._project.name))
+ if self._cloud_sub:
+ self._cloud_sub.deregister()
+ self._cloud_sub = None
+
+ if self._res_sub:
+ self._res_sub.delete_element(
+ self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA))
+ self._res_sub.deregister()
+ self._res_sub = None
+
def register_cloud_account_config(self):
def on_add_cloud_account_apply(account):
self._log.debug("Received on_add_cloud_account: %s", account)
)
self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._rwlog_hdl, cloud_callbacks
- )
+ self._dts, self._log, self._rwlog_hdl,
+ self._project, cloud_callbacks
+ )
self._cloud_sub.register()
@asyncio.coroutine
cloud_account_msg.records.append(pool_info)
xact_info.respond_xpath(rwdts.XactRspCode.ACK,
- ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+ self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA),
msg=msg,)
- self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s",
- ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+ xpath = self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+ self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: {}".
+ format(xpath))
handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- response = yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA,
- handler=handler,
- flags=rwdts.Flag.PUBLISHER)
-
+ self._res_sub = yield from self._dts.register(xpath=xpath,
+ handler=handler,
+ flags=rwdts.Flag.PUBLISHER)
self._dts = dts
self._loop = loop
self._parent = parent
+ self._project = parent._project
self._vdu_reg = None
self._link_reg = None
yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
timeout=timeout, loop=self._loop)
- def create_record_dts(self, regh, xact, path, msg):
+ def _add_config_flag(self, xpath, config=False):
+ if xpath[0] == '/':
+ if config:
+ return 'C,' + xpath
+ else:
+ return 'D,' + xpath
+
+ return xpath
+
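+ # _add_config_flag() only decorates bare keyspecs (those starting with '/'):
+ # 'C,' is prepended when config=True, 'D,' otherwise; already-prefixed paths
+ # are returned unchanged.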
+ def create_record_dts(self, regh, xact, xpath, msg):
"""
Create a record in DTS with path and message
"""
+ path = self._add_config_flag(self._project.add_project(xpath))
self._log.debug("Creating Resource Record xact = %s, %s:%s",
xact, path, msg)
regh.create_element(path, msg)
- def delete_record_dts(self, regh, xact, path):
+ def delete_record_dts(self, regh, xact, xpath):
"""
Delete a VNFR record in DTS with path and message
"""
+ path = self._add_config_flag(self._project.add_project(xpath))
self._log.debug("Deleting Resource Record xact = %s, %s",
xact, path)
regh.delete_element(path)
+
@asyncio.coroutine
def register(self):
@asyncio.coroutine
yield from self._parent.release_virtual_network(pathentry.key00.event_id)
self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
elif action == rwdts.QueryAction.READ:
- response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+ # TODO: Check why we are getting null event id request
+ if pathentry.key00.event_id:
+ response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+ else:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
else:
raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
- self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
- response_xpath, response_info)
+ self._log.info("Responding with VirtualLinkInfo at xpath %s: %s.",
+ response_xpath, response_info)
xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
return rwdts.MemberRspCode.ACTION_OK
def monitor_vdu_state(response_xpath, pathentry):
- self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
+ self._log.debug("Initiating VDU state monitoring for xpath: %s ", response_xpath)
time_to_wait = 300
sleep_time = 2
loop_cnt = int(time_to_wait/sleep_time)
response_info)
else:
if response_info.resource_state == 'active' or response_info.resource_state == 'failed':
- self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s",
+ self._log.info("VDU state monitoring: VDU reached terminal state. " +
+ "Publishing VDU info: %s at path: %s",
response_info, response_xpath)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
return
else:
### End of loop. This is only possible if VDU did not reach active state
- err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, time_to_wait)
+ err_msg = ("VDU state monitoring: VDU at xpath :{} did not reached active " +
+ "state in {} seconds. Aborting monitoring".
+ format(response_xpath, time_to_wait))
self._log.info(err_msg)
response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
response_info.resource_state = 'failed'
def allocate_vdu_task(ks_path, event_id, cloud_account, request_msg):
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+ response_xpath = self._add_config_flag(response_xpath)
schema = RwResourceMgrYang.VDUEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
try:
response_info)
else:
if response_info.resource_state == 'failed' or response_info.resource_state == 'active' :
- self._log.info("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
- response_info, response_xpath)
+ self._log.debug("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
+ response_info, response_xpath)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
response_info)
else:
+ self._log.debug("VDU create monitor at {}".format(response_xpath))
asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
loop = self._loop)
-
@asyncio.coroutine
def on_vdu_request_prepare(xact_info, action, ks_path, request_msg):
self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
xact_info, action, request_msg)
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+ response_xpath = self._add_config_flag(response_xpath)
schema = RwResourceMgrYang.VDUEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
yield from self._parent.release_virtual_compute(pathentry.key00.event_id)
self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
elif action == rwdts.QueryAction.READ:
- response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ # TODO: Check why we are getting null event id request
+ if pathentry.key00.event_id:
+ response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ else:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
else:
raise ValueError("Only create/delete actions available. Received action: %s" %(action))
link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,)
with self._dts.group_create(handler=link_handlers) as link_group:
- self._log.debug("Registering for Link Resource Request using xpath: %s",
- ResourceMgrEvent.VLINK_REQUEST_XPATH)
+ xpath = self._project.add_project(ResourceMgrEvent.VLINK_REQUEST_XPATH)
+ self._log.debug("Registering for Link Resource Request using xpath: {}".
+ format(xpath))
- self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH,
- handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
- on_commit=on_link_request_commit,
- on_prepare=on_link_request_prepare),
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+ self._link_reg = link_group.register(xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+ on_commit=on_link_request_commit,
+ on_prepare=on_link_request_prepare),
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, )
with self._dts.group_create(handler=vdu_handlers) as vdu_group:
- self._log.debug("Registering for VDU Resource Request using xpath: %s",
- ResourceMgrEvent.VDU_REQUEST_XPATH)
+ xpath = self._project.add_project(ResourceMgrEvent.VDU_REQUEST_XPATH)
+ self._log.debug("Registering for VDU Resource Request using xpath: {}".
+ format(xpath))
+
+ self._vdu_reg = vdu_group.register(xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+ on_commit=on_vdu_request_commit,
+ on_prepare=on_vdu_request_prepare),
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
+
+ def deregister(self):
+ self._log.debug("De-register for project {}".format(self._project.name))
- self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH,
- handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
- on_commit=on_vdu_request_commit,
- on_prepare=on_vdu_request_prepare),
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+ if self._vdu_reg:
+ self._vdu_reg.deregister()
+ self._vdu_reg = None
+ if self._link_reg:
+ self._link_reg.deregister()
+ self._link_reg = None
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rwresmgr_core as Core
from . import rwresmgr_config as Config
class ResourceManager(object):
- def __init__(self, log, log_hdl, loop, dts):
+ def __init__(self, log, log_hdl, loop, dts, project):
self._log = log
self._log_hdl = log_hdl
self._loop = loop
self._dts = dts
+ self._project = project
+
self.config_handler = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self)
self.event_handler = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self)
self.core = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self)
yield from self.config_handler.register()
yield from self.event_handler.register()
+ def deregister(self):
+ self.event_handler.deregister()
+ self.config_handler.deregister()
+
def add_cloud_account_config(self, account):
self._log.debug("Received Cloud-Account add config event for account: %s", account.name)
self.core.add_cloud_account(account)
return info
+class ResMgrProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ResMgrProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._resource_manager = None
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.debug("Initializing the Resource Manager tasklet for project {}".
+ format(self.name))
+ self._resource_manager = ResourceManager(self._log,
+ self._log_hdl,
+ self._loop,
+ self._dts,
+ self,)
+ yield from self._resource_manager.register()
+
+ def deregister(self):
+ self._log.debug("De-registering project {}".format(self.name))
+ self._resource_manager.deregister()
+
+
class ResMgrTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
super(ResMgrTasklet, self).__init__(*args, **kwargs)
self.rwlog.set_category("rw-resource-mgr-log")
self._dts = None
- self._resource_manager = None
+ self._project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
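+ # Presumably ProjectHandler/ManoProject reach the DTS handle via tasklet.dts,
+ # hence this read-only property.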
def start(self):
super(ResMgrTasklet, self).start()
- self.log.info("Starting ResMgrTasklet")
+ self.log.debug("Starting ResMgrTasklet")
self.log.debug("Registering with dts")
@asyncio.coroutine
def init(self):
- self._log.info("Initializing the Resource Manager tasklet")
- self._resource_manager = ResourceManager(self.log,
- self.log_hdl,
- self.loop,
- self._dts)
- yield from self._resource_manager.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, ResMgrProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
@asyncio.coroutine
def configure_compute_resource_pools(self, dts, resource_type, cloud_type):
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type)
self.log.info("Configuring compute-resource-pool: %s",msg)
yield from dts.query_create(pool_xpath,
@asyncio.coroutine
def configure_network_resource_pools(self, dts, resource_type, cloud_type):
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type)
self.log.info("Configuring network-resource-pool: %s",msg)
yield from dts.query_create(pool_xpath,
@asyncio.coroutine
def verify_resource_pools_config(self, dts):
- pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+ pool_records_xpath = "D,/rw-project:project/rw-resource-mgr:resource-pool-records"
self.log.debug("Verifying test_create_resource_pools results")
res_iter = yield from dts.query_read(pool_records_xpath,)
for result in res_iter:
@asyncio.coroutine
def reserve_network_resources(self, name, dts, cloud_type):
- network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+ network_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath)
self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg))
yield from dts.query_create(xpath, 0, msg)
@asyncio.coroutine
def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []):
- compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+ compute_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks)
self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg))
yield from dts.query_create(xpath, 0, msg)
class StagingStorePublisher(mano_dts.DtsHandler, StagingStoreProtocol):
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, project):
+ super().__init__(project.log, project.dts, project.loop, project)
self.delegate = None
def xpath(self, area_id=None):
- return ("D,/rw-staging-mgmt:staging-areas/rw-staging-mgmt:staging-area" +
- ("[area-id='{}']".format(area_id) if area_id else ""))
+ return self.project.add_project("D,/rw-staging-mgmt:staging-areas/rw-staging-mgmt:staging-area" +
+ ("[area-id='{}']".format(area_id) if area_id else ""))
@asyncio.coroutine
def register(self):
assert self.reg is not None
+ def deregister(self):
+ self._log.debug("Project {}: de-register staging store handler".
+ format(self._project.name))
+ if self.reg:
+ self.reg.deregister()
+
def on_staging_area_create(self, store):
self.reg.update_element(self.xpath(store.area_id), store)
RwDts as rwdts,
RwStagingMgmtYang)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rpc
from . import store
from .publisher import StagingStorePublisher
+class StagingManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(StagingManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self.publisher = StagingStorePublisher(self)
+ # For recovery
+ self.publisher.delegate = tasklet.store
+
+ @asyncio.coroutine
+ def register (self):
+ yield from self.publisher.register()
+
+ def deregister(self):
+ self.publisher.deregister()
+
+
class StagingManagerTasklet(rift.tasklets.Tasklet):
"""Tasklet to handle all staging related operations
"""
def __init__(self, *args, **kwargs):
try:
super().__init__(*args, **kwargs)
+ self._project_handler = None
+ self.projects = {}
+
except Exception as e:
- self.log.exception(e)
+ self.log.exception("Staging Manager tasklet init: {}".
+ format(e))
def start(self):
super().start()
@asyncio.coroutine
def init(self):
- self.store = store.StagingFileStore(log=self.log)
- self.publisher = StagingStorePublisher(self.log, self.dts, self.loop)
- # Fore recovery
- self.publisher.delegate = self.store
- # For create and delete events
- self.store.delegate = self.publisher
- yield from self.publisher.register()
-
+ self.store = store.StagingFileStore(self)
io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
self.app = StagingApplication(self.store)
self.dts,
self.loop,
self.store)
-
yield from self.create_stg_rpc.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, StagingManagerProject)
+ self.project_handler.register()
+
@asyncio.coroutine
def run(self):
self.server.listen(self.app.PORT)
META_YAML = "meta.yaml"
DEFAULT_EXPIRY = 60 * 60
- def __init__(self, log=None, root_dir=None):
+ def __init__(self, tasklet, root_dir=None):
default_path = os.path.join(
os.getenv('RIFT_ARTIFACTS'),
"launchpad/staging")
if not os.path.isdir(self.root_dir):
os.makedirs(self.root_dir)
- self.log = log or logging.getLogger()
+ self.log = tasklet.log
self.tmp_dir = tempfile.mkdtemp(dir=self.root_dir)
self._cache = {}
- self.delegate = None
+ self.tasklet = tasklet
def on_recovery(self, staging_areas):
for area in staging_areas:
return self._cache[area_id]
+ def get_delegate(self, msg):
+ try:
+ proj = self.tasklet.projects[msg.project_name]
+ except Exception as e:
+ err = "Project or project name not found {}: {}". \
+ format(msg.as_dict(), e)
+ self.log.error (err)
+ raise Exception (err)
+
+ return proj.publisher
+
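+ # Staging-area messages are expected to carry a project-name; it keys into
+ # tasklet.projects so events are published through that project's publisher.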
def create_staging_area(self, staging_area_config):
"""Create the staging area
Args:
Raises:
StagingAreaExists: if the staging area already exists
"""
+ delegate = self.get_delegate(staging_area_config)
+
area_id = str(uuid.uuid4())
container_path = os.path.join(self.root_dir, str(area_id))
self._cache[area_id] = staging_area
try:
- if self.delegate:
- self.delegate.on_staging_area_create(staging_area.model)
+ if delegate:
+ delegate.on_staging_area_create(staging_area.model)
except Exception as e:
- self.log.exception(str(e))
+ self.log.exception(e)
return staging_area
staging_area (str or model.StagingArea): Staging ID or the
StagingArea object
"""
+ # staging_area may be an area-id string; resolve it to its model to find the
+ # owning project's publisher
+ delegate = self.get_delegate(self.get_staging_area(staging_area).model
+ if type(staging_area) is str else staging_area.model)
+
if type(staging_area) is str:
staging_area = self.get_staging_area(staging_area)
staging_area.model.status = "EXPIRED"
try:
- if self.delegate:
- self.delegate.on_staging_area_delete(staging_area.model)
+ if delegate:
+ delegate.on_staging_area_delete(staging_area.model)
except Exception as e:
- self.log.exception(str(e))
+ self.log.exception(e)
import rift.package.cloud_init
import rift.package.script
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
class VMResourceError(Exception):
class VcsComponent(object):
""" VCS Component within the VNF descriptor """
- def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name):
+ def __init__(self, dts, log, loop, cluster_name,
+ vcs_handler, component, mangled_name):
self._dts = dts
self._log = log
self._loop = loop
@property
def path(self):
""" The path for this object """
- return("D,/rw-manifest:manifest" +
+ return ("D,/rw-manifest:manifest" +
"/rw-manifest:operational-inventory" +
"/rw-manifest:component" +
"[rw-manifest:component-name = '{}']").format(self.name)
dts,
log,
loop,
+ project,
vdud,
vnfr,
mgmt_intf,
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._vdud = vdud
self._vnfr = vnfr
self._mgmt_intf = mgmt_intf
self._rm_regh = None
self._vm_resp = None
self._vdud_cloud_init = None
- self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
+ self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(
+ dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
@asyncio.coroutine
def vdu_opdata_register(self):
placement_groups.append(group.as_dict())
vdur_dict['placement_groups_info'] = placement_groups
- return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
+ return RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
@property
def resmgr_path(self):
""" path for resource-mgr"""
- return ("D,/rw-resource-mgr:resource-mgmt" +
- "/vdu-event" +
- "/vdu-event-data[event-id='{}']".format(self._request_id))
+ xpath = self._project.add_project("D,/rw-resource-mgr:resource-mgmt" +
+ "/vdu-event" +
+ "/vdu-event-data[event-id='{}']".format(self._request_id))
+ return xpath
@property
def vm_flavor_msg(self):
if availability_zones:
if len(availability_zones) > 1:
- self._log.error("Can not launch VDU: %s in multiple availability zones. Requested Zones: %s", self.name, availability_zones)
- raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability zones. Requsted Zones".format(self.name, availability_zones))
+ self._log.error("Can not launch VDU: %s in multiple availability zones. " +
+ "Requested Zones: %s", self.name, availability_zones)
+ raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability" +
+ " zones. Requsted Zones".format(self.name, availability_zones))
else:
vm_create_msg_dict['availability_zone'] = availability_zones[0]
if server_groups:
if len(server_groups) > 1:
- self._log.error("Can not launch VDU: %s in multiple Server Group. Requested Groups: %s", self.name, server_groups)
- raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple Server Groups. Requsted Groups".format(self.name, server_groups))
+ self._log.error("Can not launch VDU: %s in multiple Server Group. " +
+ "Requested Groups: %s", self.name, server_groups)
+ raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple " +
+ "Server Groups. Requsted Groups".format(self.name, server_groups))
else:
vm_create_msg_dict['server_group'] = server_groups[0]
self._rm_regh = None
if self._vdur_console_handler is not None:
- self._log.error("Deregistering vnfr vdur registration handle")
+ self._log.debug("Deregistering vnfr vdur registration handle")
self._vdur_console_handler._regh.deregister()
self._vdur_console_handler._regh = None
class InternalVirtualLinkRecord(object):
""" Internal Virtual Link record """
- def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name, ip_profile=None):
+ def __init__(self, dts, log, loop, project,
+ ivld_msg, vnfr_name, cloud_account_name, ip_profile=None):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._ivld_msg = ivld_msg
self._vnfr_name = vnfr_name
self._cloud_account_name = cloud_account_name
def vlr_path(self):
""" VLR path for this VLR instance"""
- return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id)
+ return self._project.add_project("D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".
+ format(self.vlr_id))
def create_vlr(self):
""" Create the VLR record which will be instantiated """
vlr_dict.update(vld_copy_dict)
- vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+ vlr = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict(vlr_dict)
return vlr
@asyncio.coroutine
self._dts = dts
self._log = log
self._loop = loop
+ self._project = vnfm._project
self._cluster_name = cluster_name
self._vnfr_msg = vnfr_msg
self._vnfr_id = vnfr_msg.id
@staticmethod
def vnfd_xpath(vnfd_id):
""" VNFD xpath associated with this VNFR """
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
+ return ("C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".
+ format(vnfd_id))
@property
def vnfd_ref_count(self):
def get_nsr_config(self):
### Need access to NS instance configuration for runtime resolution.
### This shall be replaced when deployment flavors are implemented
- xpath = "C,/nsr:ns-instance-config"
+ xpath = self._project.add_project("C,/nsr:ns-instance-config")
results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
for result in results:
vnfd_fields = ["short_name", "vendor", "description", "version"]
vnfd_copy_dict = {k: v for k, v in self.vnfd.as_dict().items() if k in vnfd_fields}
- mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
+ mgmt_intf = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MgmtInterface()
ip_address, port = self.mgmt_intf_info()
if ip_address is not None:
vnfr_dict.update(vnfd_copy_dict)
- vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
- vnfr_msg.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
+ vnfr_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr_msg.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
vnfr_msg.create_time = self._create_time
vnfr_msg.uptime = int(time.time()) - self._create_time
vnfr_msg.dashboard_url = self.dashboard_url
for cpr in self._cprs:
- new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
+ new_cp = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
vnfr_msg.connection_point.append(new_cp)
if self._vnf_mon is not None:
for monp in self._vnf_mon.msg:
vnfr_msg.monitoring_param.append(
- VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
+ VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
if self._vnfr.vnf_configuration is not None:
vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict())
vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address
for group in self._vnfr_msg.placement_groups_info:
- group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+ group_info = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_PlacementGroupsInfo()
group_info.from_dict(group.as_dict())
vnfr_msg.placement_groups_info.append(group_info)
@property
def xpath(self):
""" path for this VNFR """
- return("D,/vnfr:vnfr-catalog"
+ return self._project.add_project("D,/vnfr:vnfr-catalog"
"/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id))
@asyncio.coroutine
for group_info in nsr_config.vnfd_placement_group_maps:
if group_info.placement_group_ref == input_group.name and \
group_info.vnfd_id_ref == self.vnfd_id:
- group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+ group = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
group_dict = {k:v for k,v in
group_info.as_dict().items()
if (k != 'placement_group_ref' and k !='vnfd_id_ref')}
placement_groups = []
### Step-1: Get VNF level placement groups
for group in self._vnfr_msg.placement_groups_info:
- #group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+ #group_info = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
#group_info.from_dict(group.as_dict())
placement_groups.append(group)
group_info = self.resolve_placement_group_cloud_construct(group,
nsr_config)
if group_info is None:
- self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
- ### raise VNFMPlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+ self._log.info("Could not resolve cloud-construct for " +
+ "placement group: %s", group.name)
else:
- self._log.info("Successfully resolved cloud construct for placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
+ self._log.info("Successfully resolved cloud construct for " +
+ "placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
str(group_info),
vdu.name,
self.vnf_name,
dts=self._dts,
log=self._log,
loop=self._loop,
+ project = self._project,
vdud=vdu,
vnfr=vnfr,
mgmt_intf=self.has_mgmt_interface(vdu),
def vlr_xpath(self, vlr_id):
""" vlr xpath """
- return(
- "D,/vlr:vlr-catalog/"
+ return self._project.add_project("D,/vlr:vlr-catalog/"
"vlr:vlr[vlr:id = '{}']".format(vlr_id))
def ext_vlr_by_id(self, vlr_id):
@asyncio.coroutine
def instantiate(self, xact, restart_mode=False):
""" instantiate this VNF """
+ self._log.info("Instantiate VNF {}: {}".format(self._vnfr_id, self._state))
self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)
self._rw_vnfd = yield from self._vnfm.fetch_vnfd(self._vnfd_id)
cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields}
cpr_dict = {}
cpr_dict.update(cp_copy_dict)
- return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
+ return VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s",
self._vnfr_id, self._vnfr.connection_point)
vlr_path = self.vlr_xpath(cp.vlr_ref)
self._log.debug("Fetching VLR with path = %s", vlr_path)
- res_iter = yield from self._dts.query_read(self.vlr_xpath(cp.vlr_ref),
+ res_iter = yield from self._dts.query_read(vlr_path,
rwdts.XactFlag.MERGE)
for i in res_iter:
r = yield from i
yield from self.publish_inventory(xact)
# Publish inventory
- self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id)
+ self._log.debug("Create VLs {}: {}".format(self._vnfr_id, self._state))
yield from self.create_vls()
# publish the VNFR
- self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+ self._log.debug("Publish VNFR {}: {}".format(self._vnfr_id, self._state))
yield from self.publish(xact)
# instantiate VLs
- self._log.debug("VNFR-ID %s: Instantiate VLs", self._vnfr_id)
+ self._log.debug("Instantiate VLs {}: {}".format(self._vnfr_id, self._state))
try:
yield from self.instantiate_vls(xact, restart_mode)
except Exception as e:
self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE)
# instantiate VDUs
- self._log.debug("VNFR-ID %s: Create VDUs", self._vnfr_id)
+ self._log.debug("Create VDUs {}: {}".format(self._vnfr_id, self._state))
yield from self.create_vdus(self, restart_mode)
try:
yield from self.publish(xact)
# publish the VNFR
- self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+ self._log.debug("VNFR {}: Publish VNFR with state {}".
+ format(self._vnfr_id, self._state))
yield from self.publish(xact)
# instantiate VDUs
# ToDo: Check if this should be prevented during restart
- self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id)
+ self._log.debug("Instantiate VDUs {}: {}".format(self._vnfr_id, self._state))
_ = self._loop.create_task(self.instantiate_vdus(xact, self))
# publish the VNFR
""" DTS registration handle """
return self._regh
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFD DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug(
- "Registering for VNFD config using xpath: %s",
- VnfdDtsHandler.XPATH,
- )
+ xpath = self._vnfm._project.add_project(VnfdDtsHandler.XPATH)
+ self._log.debug("Registering for VNFD config using xpath: {}".
+ format(xpath))
+
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
self._regh = acg.register(
- xpath=VnfdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
on_prepare=on_prepare)
""" DTS registration handle """
return self._regh
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VCS DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Registers VCS component dts publisher registration"""
VcsComponentDtsHandler.XPATH, xact, path, msg)
class VnfrConsoleOperdataDtsHandler(object):
- """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS"""
+ """
+ Registers 'D,/rw-project:project/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]'
+ and handles CRUD from DTS
+ """
+
@property
def vnfr_vdu_console_xpath(self):
""" path for resource-mgr"""
- return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+ return self._project.add_project("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']" +
+ "/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
def __init__(self, dts, log, loop, vnfm, vnfr_id, vdur_id, vdu_id):
self._dts = dts
self._vdur_id = vdur_id
self._vdu_id = vdu_id
+ self._project = vnfm._project
+
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFR console DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFR VDU Operational Data read from dts """
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
path_entry = schema.keyspec_to_entry(ks_path)
self._log.debug("VDU Opdata path is {}".format(path_entry))
try:
return
with self._dts.transaction() as new_xact:
resp = yield from vdur.read_resource(new_xact)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
if resp.console_url:
vdur_console.console_url = resp.console_url
self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
except Exception:
self._log.exception("Caught exception while reading VDU %s", self._vdu_id)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
class VnfrDtsHandler(object):
- """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
+ """ registers 'D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
def __init__(self, dts, log, loop, vnfm):
self._vnfm = vnfm
self._regh = None
+ self._project = vnfm._project
@property
def regh(self):
""" Return VNF manager instance """
return self._vnfm
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFR DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for vnfr create/update/delete/read requests from dts """
vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED)
yield from vnfr.publish(None)
elif action == rwdts.QueryAction.DELETE:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
self._log.error("Caught exception while deleting vnfr %s", path_entry.key00.id)
elif action == rwdts.QueryAction.UPDATE:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfr = None
try:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for VNFR using xpath: %s",
- VnfrDtsHandler.XPATH,)
+ xpath = self._project.add_project(VnfrDtsHandler.XPATH)
+ self._log.debug("Registering for VNFR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
on_prepare=on_prepare,)
handlers = rift.tasklets.Group.Handler(on_event=on_event,)
with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
rwdts.Flag.DATASTORE),)
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating VNFR xact = %s, %s:%s",
xact, path, msg)
xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg):
+ def update(self, xact, xpath, msg):
"""
Update a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating VNFR xact = %s, %s:%s",
xact, path, msg)
self.regh.update_element(path, msg)
xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Delete a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
""" Return the NS manager instance """
return self._vnfm
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFD Ref DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFD ref count read from dts """
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref)
for xpath, msg in vnfd_list:
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._vnfm._project.add_project(
+ VnfdRefCountDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
class VnfManager(object):
""" The virtual network function manager class """
- def __init__(self, dts, log, loop, cluster_name):
+ def __init__(self, dts, log, loop, project, cluster_name):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._cluster_name = cluster_name
self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
self._vnfr_ref_handler = VnfdRefCountDtsHandler(dts, log, loop, self)
- self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(log, dts, loop, callback=self.handle_nsr)
+ self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(
+ log, dts, loop, project, callback=self.handle_nsr)
self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
self._vnfr_handler,
for hdl in self._dts_handlers:
yield from hdl.register()
+ def deregister(self):
+ self._log.debug("De-register VNFM for project {}".format(self._project.name))
+ for hdl in self._dts_handlers:
+ hdl.deregister()
+
@asyncio.coroutine
def run(self):
""" Run this VNFM instance """
@asyncio.coroutine
def fetch_vnfd(self, vnfd_id):
""" Fetch VNFDs based with the vnfd id"""
- vnfd_path = VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id)
+ vnfd_path = self._project.add_project(
+ VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id))
self._log.debug("Fetch vnfd with path %s", vnfd_path)
vnfd = None
- res_iter = yield from self._dts.query_read(vnfd_path, rwdts.XactFlag.MERGE)
+ res_iter = yield from self._dts.query_read(vnfd_path,
+ rwdts.XactFlag.MERGE)
for ent in res_iter:
res = yield from ent
def vnfd_refcount_xpath(self, vnfd_id):
""" xpath for ref count entry """
- return (VnfdRefCountDtsHandler.XPATH +
- "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
+ return self._project.add_project(VnfdRefCountDtsHandler.XPATH +
+ "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
@asyncio.coroutine
def get_vnfd_refcount(self, vnfd_id):
vnfd_list = []
if vnfd_id is None or vnfd_id == "":
for vnfd in self._vnfds_to_vnfr.keys():
- vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount()
vnfd_msg.vnfd_id_ref = vnfd
vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd), vnfd_msg))
elif vnfd_id in self._vnfds_to_vnfr:
- vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount()
vnfd_msg.vnfd_id_ref = vnfd_id
vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd_id]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
return vnfd_list
+class VnfmProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(VnfmProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._vnfm = None
+
+ @asyncio.coroutine
+ def register (self):
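+ # Each project runs its own VnfManager instance, created when the project
+ # registers.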
+ try:
+ vm_parent_name = self._tasklet.tasklet_info.get_parent_vm_parent_instance_name()
+ assert vm_parent_name is not None
+ self._vnfm = VnfManager(self._dts, self.log, self.loop, self, vm_parent_name)
+ yield from self._vnfm.run()
+ except Exception:
+ print("Caught Exception in VNFM init:", sys.exc_info()[0])
+ raise
+
+ def deregister(self):
+ self._log.debug("De-register project {} for VnfmProject".
+ format(self.name))
+ self._vnfm.deregister()
+
+
class VnfmTasklet(rift.tasklets.Tasklet):
""" VNF Manager tasklet class """
def __init__(self, *args, **kwargs):
self.rwlog.set_subcategory("vnfm")
self._dts = None
- self._vnfm = None
+ self._project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
try:
@asyncio.coroutine
def init(self):
""" Task init callback """
- try:
- vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name()
- assert vm_parent_name is not None
- self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name)
- yield from self._vnfm.run()
- except Exception:
- print("Caught Exception in VNFM init:", sys.exc_info()[0])
- raise
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, VnfmProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from rift.vlmgr import (
VlrDtsHandler,
self._log = log
self._loop = loop
self._parent = parent
+ self._project = self._parent._project
+ self._regh = None
+ self._rpch = None
def _register_show_status(self):
def get_xpath(sdn_name=None):
- return "D,/rw-project:project/rw-sdn:sdn/rw-sdn:account{}" \
+ return self._project.add_project("D,/rw-sdn:sdn/rw-sdn:account{}" \
"/rw-sdn:connection-status".format(
"[name='%s']" % sdn_name if sdn_name is not None else ''
- )
+ ))
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._regh = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
def _register_validate_rpc(self):
def get_xpath():
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
if not msg.has_field("sdn_account"):
raise SdnAccountNotFound("SDN account name not provided")
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpch = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+ def deregister(self):
+ self._log.debug("De-register SDN opdata handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+ if self._rpch:
+ self._rpch.deregister()
+ self._rpch = None
+
+
class SDNAccountDtsHandler(object):
- XPATH = "C,/rw-project:project/rw-sdn:sdn/rw-sdn:account"
+ XPATH = "C,/rw-sdn:sdn/rw-sdn:account"
def __init__(self, dts, log, parent):
self._dts = dts
self._log = log
self._parent = parent
+ self._project = parent._project
self._sdn_account = {}
+ self._regh = None
+
+ @property
+ def _xpath(self):
+ return self._project.add_project(SDNAccountDtsHandler.XPATH)
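+ # XPATH is kept without a project prefix; _xpath applies this handler's
+ # project via add_project() at registration and error-reporting time.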
def _set_sdn_account(self, account):
self._log.info("Setting sdn account: {}".format(account))
errmsg = "Cannot update SDN account's account-type."
self._log.error(errmsg)
xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
+ self._xpath,
errmsg)
raise SdnAccountError(errmsg)
errmsg = "New SDN account must contain account-type field."
self._log.error(errmsg)
xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
+ self._xpath,
errmsg)
raise SdnAccountError(errmsg)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Sdn Account config using xpath: %s",
- SDNAccountDtsHandler.XPATH,
- )
+ self._log.debug("Registering for Sdn Account config using xpath: {}".
+ format(self._xpath))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
- acg.register(
- xpath=SDNAccountDtsHandler.XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- on_prepare=on_prepare
- )
+ self._regh = acg.register(
+ xpath=self._xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+ on_prepare=on_prepare
+ )
+
+ def deregister(self):
+ self._log.debug("De-register VLR handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class VnsManager(object):
""" The Virtual Network Service Manager """
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
+
self._vlr_handler = VlrDtsHandler(dts, log, loop, self)
self._vld_handler = VldDtsHandler(dts, log, loop, self)
self._sdn_handler = SDNAccountDtsHandler(dts,log,self)
self._sdn_opdata_handler = SDNAccountDtsOperdataHandler(dts,log, loop, self)
- self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop)
+ self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop, self._project)
self._nwtopdata_store = NwtopDataStore(log)
- self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store)
- self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store)
+ self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._project,
+ self._acctmgr, self._nwtopdata_store)
+ self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._project,
+ self._acctmgr, self._nwtopdata_store)
self._vlrs = {}
@asyncio.coroutine
# Not used for now
yield from self.register_nwtopdiscovery_handler()
+ def deregister(self):
+ self._nwtopdiscovery_handler.deregister()
+ self._nwtopstatic_handler.deregister()
+ self._vld_handler.deregister()
+ self._vlr_handler.deregister()
+ self._sdn_opdata_handler.deregister()
+ self._sdn_handler.deregister()
+
def create_vlr(self, msg):
""" Create VLR """
if msg.id in self._vlrs:
return False
@asyncio.coroutine
- def publish_vlr(self, xact, path, msg):
+ def publish_vlr(self, xact, xpath, msg):
""" Publish a VLR """
+ path = self._project.add_project(xpath)
self._log.debug("Publish vlr called with path %s, msg %s",
path, msg)
yield from self._vlr_handler.update(xact, path, msg)
@asyncio.coroutine
- def unpublish_vlr(self, xact, path):
+ def unpublish_vlr(self, xact, xpath):
""" Publish a VLR """
+ path = self._project.add_project(xpath)
self._log.debug("Unpublish vlr called with path %s", path)
yield from self._vlr_handler.delete(xact, path)
+class VnsProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(VnsProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._vlr_handler = None
+ self._vnsm = None
+ # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
+ self._vlrs = {}
+
+ @asyncio.coroutine
+ def register (self):
+ self._vnsm = VnsManager(dts=self._dts,
+ log=self.log,
+ log_hdl=self._log_hdl,
+ loop=self._loop,
+ project=self)
+ yield from self._vnsm.run()
+
+ # NSM needs to detect VLD deletion that has active VLR
+ # self._vld_handler = VldDescriptorConfigDtsHandler(
+ # self._dts, self.log, self.loop, self._vlrs,
+ # )
+ # yield from self._vld_handler.register()
+
+ def deregister(self):
+ self._log.debug("De-register project {}".format(self.name))
+ self._vnsm.deregister()
+
+
class VnsTasklet(rift.tasklets.Tasklet):
""" The VNS tasklet class """
def __init__(self, *args, **kwargs):
self.rwlog.set_subcategory("vns")
self._dts = None
- self._vlr_handler = None
+ self._project_handler = None
+ self.projects = {}
- self._vnsm = None
- # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
- self._vlrs = {}
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(VnsTasklet, self).start()
@asyncio.coroutine
def init(self):
""" task init callback"""
- self._vnsm = VnsManager(dts=self._dts,
- log=self.log,
- log_hdl=self.log_hdl,
- loop=self.loop)
- yield from self._vnsm.run()
-
- # NSM needs to detect VLD deletion that has active VLR
- # self._vld_handler = VldDescriptorConfigDtsHandler(
- # self._dts, self.log, self.loop, self._vlrs,
- # )
- # yield from self._vld_handler.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, VnsProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
class SdnGetPluginError(Exception):
""" Error while fetching SDN plugin """
pass
-
-
+
+
class SdnGetInterfaceError(Exception):
""" Error while fetching SDN interface"""
pass
class SdnAccountMgr(object):
""" Implements the interface to backend plugins to fetch topology """
- def __init__(self, log, log_hdl, loop):
+ def __init__(self, log, log_hdl, loop, project):
self._account = {}
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._sdn = {}
self._regh = None
- self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+ self._status = RwsdnYang.SdnConnectionStatus(
status='unknown',
details="Connection status lookup not started"
)
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwsdnYang.SDNAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwsdnYang.SdnConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+ self._status = RwsdnYang.SdnConnectionStatus(
status="failure",
details="Error when calling CAL validate sdn creds"
)
""" Handles DTS interactions for the Discovered Topology registration """
DISC_XPATH = "D,/nd:network"
- def __init__(self, dts, log, loop, acctmgr, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctmgr, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._acctmgr = acctmgr
self._nwdatastore = nwdatastore
""" The registration handle associated with this Handler"""
return self._regh
+ def deregister(self):
+ self._log.debug("De-register Topology discovery handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for the Discovered Topology path """
""" Handles DTS interactions for the Static Topology registration """
STATIC_XPATH = "C,/nd:network"
- def __init__(self, dts, log, loop, acctmgr, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctmgr, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._acctmgr = acctmgr
self._regh = None
def regh(self):
""" The registration handle associated with this Handler"""
return self._regh
-
-
+
+ def deregister(self):
+ self._log.debug("De-register Topology static handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for the Static Topology path """
on_apply=apply_nw_config)
with self._dts.appconf_group_create(handler=handler) as acg:
- acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH,
- flags = rwdts.Flag.SUBSCRIBER,
- on_prepare=prepare_nw_cfg)
-
-
+ self._regh = acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH,
+ flags = rwdts.Flag.SUBSCRIBER,
+ on_prepare=prepare_nw_cfg)
self._vnsm = vnsm
self._vlr_msg = vlr_msg
+ self._project = vnsm._project
self._network_id = None
self._network_pool = None
self._assigned_subnet = None
@property
def vld_xpath(self):
""" VLD xpath associated with this VLR record """
- return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id)
+ return self._project.add_project("C,/vld:vld-catalog/vld:vld[id='{}']".
+ format(self.vld_id))
@property
def vld_id(self):
@property
def xpath(self):
""" path for this VLR """
- return("D,/vlr:vlr-catalog"
+ return self._project.add_project("D,/vlr:vlr-catalog"
"/vlr:vlr[vlr:id='{}']".format(self.vlr_id))
@property
@property
def resmgr_path(self):
""" path for resource-mgr"""
- return ("D,/rw-resource-mgr:resource-mgmt" +
+ return self._project.add_project("D,/rw-resource-mgr:resource-mgmt" +
"/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id))
@property
@property
def msg(self):
""" VLR message for this VLR """
- msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr()
+ msg = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr()
msg.copy_from(self._vlr_msg)
if self._network_id is not None:
self._vnsm = vnsm
self._regh = None
+ self._project = vnsm._project
@property
def regh(self):
return
elif action == rwdts.QueryAction.DELETE:
# Delete an VLR record
- schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema()
+ schema = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
self._log.debug("Terminating VLR id %s", path_entry.key00.id)
yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
return
- self._log.debug("Registering for VLR using xpath: %s",
- VlrDtsHandler.XPATH)
+ xpath = self._project.add_project(VlrDtsHandler.XPATH)
+ self._log.debug("Registering for VLR using xpath: {}".
+ format(xpath))
reg_handle = rift.tasklets.DTS.RegistrationHandler(
on_commit=on_commit,
handlers = rift.tasklets.Group.Handler(on_event=on_event,)
with self._dts.group_create(handler=handlers) as group:
self._regh = group.register(
- xpath=VlrDtsHandler.XPATH,
+ xpath=xpath,
handler=reg_handle,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ| rwdts.Flag.DATASTORE,
)
+ def deregister(self):
+ self._log.debug("De-register VLR handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
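Reviewer note: the same deregister() body is repeated across the topology, VLR and VLD handlers in this patch. A hedged sketch of a mixin that could factor it out (illustrative only, not part of the change):

class DeregisterMixinSketch(object):
    """Illustrative only: shared deregister() for project-scoped handlers.

    Assumes the host class provides self._log, self._project and
    self._regh, as the handlers touched by this patch do.
    """

    def deregister(self):
        self._log.debug("De-register %s for project %s",
                        type(self).__name__, self._project.name)
        if self._regh:
            self._regh.deregister()
            self._regh = None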
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating VLR xact = %s, %s:%s",
xact, path, msg)
self.regh.create_element(path, msg)
xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg):
+ def update(self, xact, xpath, msg):
"""
Update a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating VLR xact = %s, %s:%s",
xact, path, msg)
self.regh.update_element(path, msg)
xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Delete a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting VLR xact = %s, %s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted VLR xact = %s, %s", xact, path)
"Got on prepare for VLD update (ks_path: %s) (action: %s)",
ks_path.to_xpath(VldYang.get_schema()), msg)
- schema = VldYang.YangData_Vld_VldCatalog_Vld.schema()
+ schema = VldYang.YangData_RwProject_Project_VldCatalog_Vld.schema()
path_entry = schema.keyspec_to_entry(ks_path)
+ # TODO: Check why on project delete this gets called
+ if not path_entry:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return
+
vld_id = path_entry.key00.id
disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE]
handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
yield from self._dts.register(
- VldDtsHandler.XPATH,
+ self._vnsm._project.add_project(VldDtsHandler.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
handler=handler
)
+
+ def deregister(self):
+ self._log.debug("De-register VLD handler for project {}".
+ format(self._vnsm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
description "Valid ID to track the status of the task";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
description "Trace in case of a failure";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
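Reviewer note: each of the RPC definitions above now pulls in manotypes:rpc-project-name. Assuming that grouping contributes a project-name leaf to the RPC input (the grouping itself is not shown in this patch), callers would scope their requests roughly as in the sketch below; the function and field names are hypothetical.

def build_rpc_input(transaction_id, project_name="default"):
    # Plain-dict form as accepted by the generated from_dict() helpers;
    # "project_name" is the assumed leaf added by rpc-project-name.
    return {
        "transaction_id": transaction_id,
        "project_name": project_name,
    }


assert build_rpc_input("1234", "test-1")["project_name"] == "test-1"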
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
return mgmt_session.proxy
-ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
-ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
INSTANCE_ID = 1
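Reviewer note: most of the remaining hunks are mechanical renames; because the catalogs move under /rw-project:project, the gi-generated class names gain a RwProject_Project segment. A hedged helper that reproduces the mapping used throughout this patch (illustrative only, not part of the change):

def project_scoped_class_name(old_name):
    """Illustrative only: maps the pre-project class names in this patch to
    their project-scoped equivalents, e.g.
    YangData_Nsr_NsInstanceConfig_Nsr ->
    YangData_RwProject_Project_NsInstanceConfig_Nsr.
    """
    parts = old_name.split("_")
    if parts[0] != "YangData" or len(parts) < 3:
        return old_name
    # Drop the old top-level module token and splice in the project subtree.
    return "_".join(["YangData", "RwProject", "Project"] + parts[2:])


assert (project_scoped_class_name("YangData_Vnfr_VnfrCatalog_Vnfr_Vdur")
        == "YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur")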
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+ descr_xpath = "/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
descr_value = "automation"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+ descr_xpath = "/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
descr_value = "automation"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
class ResourceMgrMock(object):
- VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
- VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+ VDU_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+ VLINK_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
def __init__(self, dts, log, loop):
self._log = log
#!/usr/bin/env python3
-#
+#
# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import rift.tasklets
import rift.test.dts
import rw_peas
+from rift.mano.utils.project import (
+ ManoProject,
+ DEFAULT_PROJECT,
+)
+PROJECT = 'default'
+
openstack_info = {
'username': 'pluto',
'password': 'mypasswd',
@staticmethod
def cm_state(k=None):
- if k is None:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr")
- else:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
- ("[rw-conman:id='{}']".format(k) if k is not None else ""))
+ return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
+ ("[rw-conman:id='{}']".format(k) if k is not None else ""))
@staticmethod
def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
("/nsr:instance") +
("[nsr:index='{}']".format(index) if index is not None else ""))
+ @staticmethod
+ def cloud_account(k=None):
+ return ("C,/rw-cloud:cloud/rw-cloud:account" +
+ ("[rw-cloud:name='{}']".format(k) if k is not None else ""))
+
+ @staticmethod
+ def project(k=None):
+ return ("C,/rw-project:project" +
+ ("[rw-project:name='{}']".format(k) if k is not None else ""))
+
class ManoQuerier(object):
- def __init__(self, log, dts):
+ def __init__(self, log, dts, project):
self.log = log
self.dts = dts
+ self.project = project
+
+ def add_project(self, xpath):
+ return self.project.add_project(xpath)
@asyncio.coroutine
- def _read_query(self, xpath, do_trace=False):
- self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
+ def _read_query(self, xpath, do_trace=False, project=True):
+ if project:
+ xp = self.add_project(xpath)
+ else:
+ xp = xpath
+ self.log.debug("Running XPATH read query: %s (trace: %s)", xp, do_trace)
flags = rwdts.XactFlag.MERGE
flags += rwdts.XactFlag.TRACE if do_trace else 0
res_iter = yield from self.dts.query_read(
- xpath, flags=flags
+ xp, flags=flags
)
results = []
return results
+ @asyncio.coroutine
+ def _delete_query(self, xpath, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH delete query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_delete(
+ xp,
+ flags
+ )
+
+ @asyncio.coroutine
+ def _update_query(self, xpath, msg, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH update query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_update(
+ xp,
+ flags,
+ msg
+ )
+
@asyncio.coroutine
def get_cm_state(self, nsr_id=None):
return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
@asyncio.coroutine
def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
- #return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
@asyncio.coroutine
def get_nsr_configs(self, nsr_id=None):
@asyncio.coroutine
def delete_nsr(self, nsr_id):
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- XPaths.nsr_config(nsr_id),
- 0
- #rwdts.XactFlag.TRACE,
- #rwdts.Flag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsr_config(nsr_id)))
@asyncio.coroutine
def delete_nsd(self, nsd_id):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsd(nsd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.vnfd(vnfd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsd(self, nsd_id, nsd_msg):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- nsd_msg,
- )
+ return (yield from self._update_query(XPaths.nsd(nsd_id), nsd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_vnfd(self, vnfd_id, vnfd_msg):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- vnfd_msg,
- )
+ return (yield from self._update_query(XPaths.vnfd(vnfd_id), vnfd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsr_config(self, nsr_id, nsr_msg):
- nsr_xpath = XPaths.nsr_config(nsr_id)
- self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsr_xpath,
- rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
- nsr_msg,
- )
+ return (yield from self._update_query(
+ XPaths.nsr_config(nsr_id),
+ nsr_msg,
+ rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE))
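Reviewer note: with _read_query/_delete_query/_update_query in place, any further ManoQuerier method reduces to a one-liner around a project-relative xpath. A hedged usage sketch in the same shape as delete_nsd()/delete_vnfd() above; XPaths.vlr is hypothetical and not part of this patch.

    @asyncio.coroutine
    def delete_vlr(self, vlr_id):
        # Hypothetical helper, shown only to illustrate the intended pattern.
        return (yield from self._delete_query(XPaths.vlr(vlr_id),
                                              rwdts.XactFlag.ADVISE))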
class ManoTestCase(rift.test.dts.AbstractDTSTest):
nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
+
class DescriptorPublisher(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
self._registrations = []
@asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
+ if 'rw-project' in path:
+ w_xp = w_path
+ xp = path
+ else:
+ w_xp = self.project.add_project(w_path)
+ xp = self.project.add_project(path)
@asyncio.coroutine
def on_ready(regh, status):
self.log.debug("Create element: %s, obj-type:%s obj:%s",
- path, type(desc), desc)
+ xp, type(desc), desc)
with self.dts.transaction() as xact:
- regh.create_element(path, desc, xact.xact)
- self.log.debug("Created element: %s, obj:%s", path, desc)
+ regh.create_element(xp, desc, xact.xact)
+ self.log.debug("Created element: %s, obj:%s", xp, desc)
ready_event.set()
handler = rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready
)
- self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+ self.log.debug("Registering path: %s, obj:%s", w_xp, desc)
reg = yield from self.dts.register(
- w_path,
+ w_xp,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
self._registrations.append(reg)
- self.log.debug("Registered path : %s", w_path)
+ self.log.debug("Registered path : %s", w_xp)
yield from ready_event.wait()
return reg
reg.deregister()
+class ProjectPublisher(object):
+ XPATH = "C,/rw-project:project"
+
+ def __init__(self, log, loop, dts, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
+
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", ProjectPublisher.XPATH)
+ self.reg = yield from self.dts.register(
+ ProjectPublisher.XPATH,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_project(self, config, xpath, xpath_wild):
+ # Publish project
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(config), config)
+ yield from self.publisher.publish(xpath_wild, xpath, config)
+
+
+class CloudAccountPublisher(object):
+ XPATH = "C,/rw-cloud:cloud"
+
+ def __init__(self, log, loop, dts, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
+
+ self.xpath = self.project.add_project(CloudAccountPublisher.XPATH)
+
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", self.xpath)
+ self.reg = yield from self.dts.register(
+ self.xpath,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_account(self, account, xpath, xpath_wild):
+ # Publish cloud account
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(account), account)
+ yield from self.publisher.publish(xpath_wild, xpath, account)
+
+
class PingPongNsrConfigPublisher(object):
XPATH = "C,/nsr:ns-instance-config"
- def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
+ def __init__(self, log, loop, dts, ping_pong, cloud_account_name, project):
self.dts = dts
self.log = log
self.loop = loop
+ self.project = project
self.ref = None
- self.querier = ManoQuerier(log, dts)
-
- self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
+ self.querier = ManoQuerier(log, dts, project)
+ self.xpath = self.project.add_project(PingPongNsrConfigPublisher.XPATH)
+ self.nsr_config = rwnsryang.YangData_RwProject_Project_NsInstanceConfig()
- nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = rwnsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "ns1.{}".format(nsr.id)
- nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ nsr.nsd = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
nsr.cloud_account = cloud_account_name
#'cloud_account':'mock_account1'
})
- inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
+ inputs = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ inputs.xpath = self.project.add_project(
+ "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id))
inputs.value = "inigo montoya"
fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
def on_ready(regh, status):
self._ready_event.set()
- self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
+ self.log.debug("Registering path: %s", self.xpath)
self.reg = yield from self.dts.register(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
flags=rwdts.Flag.PUBLISHER,
handler=rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready,
yield from self._ready_event.wait()
with self.dts.transaction() as xact:
self.reg.create_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
})
with self.dts.transaction() as xact:
self.reg.update_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
+
def create_vnfd_placement_group_map(self,
nsr,
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
-
+
+
@asyncio.coroutine
def delete_scale_group_instance(self, group_name, index):
self.log.debug("Deleting scale group %s instance %s", group_name, index)
#del self.nsr_config.nsr[0].scaling_group[0].instance[0]
- xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
+ xpath = self.project.add_project(
+ XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id,
+ group_name, index))
yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
- #with self.dts.transaction() as xact:
- # self.reg.update_element(
- # PingPongNsrConfigPublisher.XPATH,
- # self.nsr_config,
- # flags=rwdts.XactFlag.REPLACE,
- # xact=xact.xact,
- # )
def deregister(self):
if self.reg is not None:
def update_vnf_cloud_map(self,vnf_cloud_map):
self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
for vnf_index,cloud_acct in vnf_cloud_map.items():
- vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map if vnf_index == vnf_map.member_vnf_index_ref]
+ vnf_maps = [vnf_map for vnf_map in \
+ self.nsr_config.nsr[0].vnf_cloud_account_map \
+ if vnf_index == vnf_map.member_vnf_index_ref]
if vnf_maps:
vnf_maps[0].cloud_account = cloud_acct
- else:
+ else:
self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
'member_vnf_index_ref':vnf_index,
'cloud_account':cloud_acct
class PingPongDescriptorPublisher(object):
- def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+ def __init__(self, log, loop, dts, project,
+ num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
- self.querier = ManoQuerier(self.log, self.dts)
- self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.publisher = DescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
ping_pong_nsd.generate_ping_pong_descriptors(
pingcount=1,
)
-
-
class ManoTestCase(rift.test.dts.AbstractDTSTest):
"""
DTS GI interface unittests
@staticmethod
def get_cal_account(account_type, account_name):
"""
- Creates an object for class RwcalYang.Clo
+ Creates an object for class RwCloudYang.CloudAcc
"""
- account = rwcloudyang.CloudAccount()
+ account = rwcloudyang.CloudAcc()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
return account
@asyncio.coroutine
- def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+ def configure_project(self, project=None):
+ if project is None:
+ project = self.project
+
+ proj_xpath = "C,{}/project-config".format(project.prefix)
+ self.log.info("Creating project: {} with {}".
+ format(proj_xpath, project.config.as_dict()))
+ xpath_wild = "C,/rw-project:project/project-config"
+ yield from self.project_publisher.publish_project(project.config,
+ proj_xpath,
+ xpath_wild)
+
+ @asyncio.coroutine
+ def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1", project=None):
account = self.get_cal_account(cloud_type, cloud_name)
- account_xpath = "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
self.log.info("Configuring cloud-account: %s", account)
- yield from dts.query_create(account_xpath,
- rwdts.XactFlag.ADVISE,
- account)
+ if project is None:
+ project = self.project
+ xpath = project.add_project(XPaths.cloud_account(account.name))
+ xpath_wild = project.add_project(XPaths.cloud_account())
+
+ # account_xpath = project.add_project(
+ # "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name))
+ # yield from dts.query_create(account_xpath,
+ # rwdts.XactFlag.ADVISE,
+ # account)
+ yield from self.cloud_publisher.publish_account(account, xpath, xpath_wild)
@asyncio.coroutine
def wait_tasklets(self):
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
- self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
- self.querier = ManoQuerier(self.log, self.dts)
+ self.project = ManoProject(self.log,
+ name=DEFAULT_PROJECT)
+ self.project1 = ManoProject(self.log,
+ name='test-1')
+ self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.project_publisher = ProjectPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
+ self.cloud_publisher = CloudAccountPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
self.nsr_publisher = PingPongNsrConfigPublisher(
self.log,
loop,
self.dts,
self.ping_pong,
"mock_account",
+ self.project,
)
def test_create_nsr_record(self):
+ @asyncio.coroutine
+ def verify_projects(termination=False):
+ self.log.debug("Verifying projects = %s", XPaths.project())
+
+ accts = yield from self.querier._read_query(XPaths.project(),
+ project=False)
+ projs = []
+ for acc in accts:
+ self.log.debug("Project: {}".format(acc.as_dict()))
+ if acc.name not in projs:
+ projs.append(acc.name)
+ self.log.debug("Merged: {}".format(projs))
+ self.assertEqual(2, len(projs))
+
+ @asyncio.coroutine
+ def verify_cloud_accounts(termination=False):
+ self.log.debug("Verifying cloud accounts = %s", XPaths.cloud_account())
+
+ accts = yield from self.querier._read_query(XPaths.cloud_account())
+ self.assertEqual(2, len(accts))
+
+ accts = yield from self.querier._read_query(
+ self.project1.add_project(XPaths.cloud_account()), project=False)
+ self.assertEqual(1, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account",
+ project=False)
+ self.assertEqual(3, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='mock_account']",
+ project=False)
+ self.assertEqual(2, len(accts))
+
@asyncio.coroutine
def verify_cm_state(termination=False, nsrid=None):
self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
- #print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
loop_count = 10
loop_sleep = 10
nsr_config = nsr_configs[0]
self.assertEqual(
- "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+ "/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
nsr_config.input_parameter[0].xpath,
)
yield from verify_cm_state(termination, nsrid)
yield from verify_nsr_config_status(termination, nsrid)
+ yield from verify_cloud_accounts(termination)
+ yield from verify_projects(termination)
+
@asyncio.coroutine
def verify_scale_instance(index):
self.log.debug("Verifying scale record path = %s, Termination=%d",
def run_test():
yield from self.wait_tasklets()
+ yield from self.configure_project()
+ yield from self.configure_project(project=self.project1)
cloud_type = "mock"
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
+ yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account",
+ project=self.project1)
+
+ yield from verify_cloud_accounts()
+ yield from verify_projects()
yield from self.ping_pong.publish_desciptors()
+ return
# Attempt deleting VNFD not in use
yield from self.ping_pong.update_ping_vnfd()
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-only"
nsr.short_name = "UTM-only"
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-WIMS"
nsr.short_name = "UTM-WIMS"
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
def create_nsr_from_nsd_id(nsd_id):
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
nsr.short_name = "nsr_short_name"
nsr.admin_status = "ENABLED"
nsr.cloud_account = "openstack"
- param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+ param = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ param.xpath = '/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
param.value = "rift-o-matic"
nsr.input_parameter.append(param)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-Vrouter-TS-EPA-SRIOV"
nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV"
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
def test_configure_resource_orchestrator(self, so_proxy):
cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
'ro_port' : 2022,
'ro_username' : 'admin',
'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
+ so_proxy.merge_config('/rw-project:project/rw-conman:cm-config', cfg)
def test_configure_service_orchestrator(self, nsm_proxy):
cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
class NsrDtsHandler(object):
""" The network service DTS handler """
- NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
- SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+ NSR_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr"
+ SCALE_INSTANCE_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
def get_scale_group_instances(self, nsr_id, group_name):
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
""" Register for Nsr create/update/delete/read requests from dts """
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
class XPaths(object):
@staticmethod
def nsr_config(nsr_id=None):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else ""))
def scaling_group_instance(nsr_id, group_name, instance_id):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
"[nsr:id='{}']".format(nsr_id) +
"/nsr:scaling-group" +
"[nsr:scaling-group-name-ref='{}']".format(group_name) +
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr1_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
block = xact.block_create()
block.add_query_update(
XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
block = xact.block_create()
block.add_query_create(
XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr2_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
{'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
# Test rift-ro plugin CREATE
- w_xpath = "C,/rw-launchpad:resource-orchestrator"
+ w_xpath = "C,/rw-project:project/rw-launchpad:resource-orchestrator"
xpath = w_xpath
yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
yield from asyncio.sleep(5, loop=self.loop)
# Test update
mock_orch_acc.openmano.port = 9789
mock_orch_acc.openmano.host = "10.64.11.78"
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
+ yield from self.dts.query_update("C,/rw-project:project/rw-launchpad:resource-orchestrator",
rwdts.XactFlag.ADVISE, mock_orch_acc)
assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
mock_orch_acc.openmano.port = 9788
with self.assertRaises(Exception):
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
+ yield from self.dts.query_update("C,/rw-project:project/rw-launchpad:resource-orchestrator",
rwdts.XactFlag.ADVISE, mock_orch_acc)
# Test delete
- yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",
+ yield from self.dts.query_delete("C,/rw-project:project/rw-launchpad:resource-orchestrator",
flags=rwdts.XactFlag.ADVISE)
assert orch.ro_plugin == None
def make_nsr(ns_instance_config_ref=str(uuid.uuid4())):
- nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
nsr.ns_instance_config_ref = ns_instance_config_ref
return nsr
def make_vnfr(id=str(uuid.uuid4())):
- vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.id = id
return vnfr
def make_vdur(id=str(uuid.uuid4()), vim_id=str(uuid.uuid4())):
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.id = id
vdur.vim_id = vim_id
return vdur
mock = self.plugin_manager.plugin(self.account.name)
mock.set_impl(TestNfviMetricsCache.Plugin())
- self.vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ self.vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
self.vdur.id = "test-vdur-id"
self.vdur.vim_id = "test-vim-id"
self.vdur.vm_flavor.vcpu_count = 4
return True
def nfvi_metrics(self, account, vim_id):
- metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
metrics.vcpu.utilization = 0.5
return None, metrics
pass
def test_alarm_create_and_destroy(self):
- alarm = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_Alarms()
+ alarm = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_Alarms()
alarm.name = "test-alarm"
alarm.description = "test-description"
alarm.vdur_id = "test-vdur-id"
self.monitor.add_cloud_account(self.account)
# Create a VNFR associated with the cloud account
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.cloud_account = self.account.name
vnfr.id = 'test-vnfr-id'
to retrieve the NFVI metrics associated with the VDU.
"""
# Define the VDUR to be registered
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vm_flavor.vcpu_count = 4
vdur.vm_flavor.memory_mb = 100
vdur.vm_flavor.storage_gb = 2
the VDURs contained in the VNFR are unregistered.
"""
# Define the VDUR to be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-1'
vdur.id = 'test-vdur-id-1'
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.cloud_account = self.account.name
vnfr.id = 'test-vnfr-id'
# Add another VDUR to the VNFR and update the monitor. Both VDURs
# should now be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-2'
vdur.id = 'test-vdur-id-2'
Monitor.
"""
# Create the VNFR
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.cloud_account = self.account.name
vnfr.id = 'test-vnfr-id'
"""
# Create the initial NSD catalog
- nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
# Create an NSD, set its 'id', and add it to the catalog
nsd_id = str(uuid.uuid4())
nsd_catalog.nsd.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd(
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(
id=nsd_id,
)
)
# Retrieve the NSD using and xpath expression
- xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
+ xpath = '/rw-project:project/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
nsd = rwxpath.getxattr(nsd_catalog, xpath)
self.assertEqual(nsd_id, nsd.id)
"""
# Define a simple NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
# Check that the unset fields are in fact set to None
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, "/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, "/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
# Set the values of the 'name' and 'short-name' fields
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
+ rwxpath.setxattr(nsd, "/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
+ rwxpath.setxattr(nsd, "/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
# Check that the 'name' and 'short-name' fields are correctly set
- self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name"))
+ self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
class TestInputParameterSubstitution(unittest.TestCase):
config, no exception should be raised.
"""
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsd = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
self.substitute_input_parameters(None, None)
self.substitute_input_parameters(nsd, None)
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
nsd.name = "robert"
nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
)
)
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="alice",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="alice",
),
])
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
nsd.name = "robert"
nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.extend([
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
),
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
label="NSD Short Name",
),
])
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="robert",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
+ xpath="/rw-project:project/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="bob",
),
])