FROM ubuntu:16.04
RUN apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get --yes install git tox make python python-pip debhelper && \
- DEBIAN_FRONTEND=noninteractive apt-get --yes install wget python-dev python-software-properties python-stdeb&& \
- DEBIAN_FRONTEND=noninteractive pip install -U pip && \
- DEBIAN_FRONTENT=noninteractive pip install -U requests logutils jsonschema lxml kafka mock && \
- DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb jsmin && \
- DEBIAN_FRONTEND=noninteractive pip install -U six pyvcloud==19.1.1 bottle cherrypy pyopenssl && \
+ DEBIAN_FRONTEND=noninteractive apt-get --yes install git tox make python python-pip python3 python3-pip debhelper && \
+ DEBIAN_FRONTEND=noninteractive apt-get --yes install wget python-dev python-software-properties python-stdeb && \
DEBIAN_FRONTEND=noninteractive apt-get --yes install default-jre libmysqlclient-dev && \
- DEBIAN_FRONTEND=noninteractive apt-get --yes install libmysqlclient-dev libxml2 && \
- DEBIAN_FRONTEND=noninteractive pip install -U MySQL-python \
- python-openstackclient \
- python-keystoneclient \
- aodhclient \
- gnocchiclient \
- boto==2.48 \
- python-cloudwatchlogs-logging \
- py-cloudwatch \
- peewee==3.1.*
+ DEBIAN_FRONTEND=noninteractive apt-get --yes install libmysqlclient-dev libxml2 python3-all
include requirements.txt
include README.rst
-include kafkad
-recursive-include osm_mon *
+recursive-include osm_mon *.py
recursive-include devops-stages *
-recursive-include test *
+recursive-include test *.py
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#__author__ = "Prithiv Mohan"
-#__date__ = "14/Sep/2017"
-
-SHELL := /bin/bash
-all: package install
-
-clean_deb:
- rm -rf .build
-
-clean:
- rm -rf build
- rm -rf .build
- find . -name '*.pyc' -delete
-
-prepare:
- #apt-get --yes install python-stdeb python-pip libmysqlclient-dev debhelper
- #pip install --upgrade setuptools
- mkdir -p build/
- cp tox.ini build/
- cp MANIFEST.in build/
- cp requirements.txt build/
- cp test-requirements.txt build/
- cp README.rst build/
- cp setup.py build/
- cp kafkad build/
- cp -r osm_mon build/
- cp -r devops-stages build/
- cp -r scripts build/
- #pip install -r requirements.txt
- #pip install -r test-requirements.txt
-
-build: clean openstack_plugins prepare
- python -m py_compile build/osm_mon/plugins/OpenStack/*.py
-
-build: clean vrops_plugins prepare
- python -m py_compile build/osm_mon/plugins/vRealiseOps/*.py
-
-build: clean cloudwatch_plugins prepare
- python -m py_compile build/osm_mon/plugins/CloudWatch/*.py
-
-build: clean core prepare
- python -m py_compile build/osm_mon/core/message_bus/*.py
-
-pip: prepare
- cd build ./setup.py sdist
-
-package: clean clean_deb prepare
- cd build && python setup.py --command-packages=stdeb.command sdist_dsc --with-python2=True --with-python3=False bdist_deb
- mkdir -p .build
- cp build/deb_dist/python-*.deb .build/
-
-develop: prepare
- cd build && ./setup.py develop
-
-install:
- DEBIAN_FRONTEND=noninteractive apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get install --yes python-pip && \
- pip install --upgrade pip
- dpkg -i build/deb_dist/*.deb
-
-build-docker-from-source:
- docker build -t osm:MON -f docker/Dockerfile
#__date__ = "14/Sep/2017"
#!/bin/bash
-make clean all BRANCH=master
-make package
+rm -rf deb_dist
+rm -rf dist
+rm -rf osm_mon.egg-info
+tox -e build
#__date__ = "14/Sep/2017"
#!/bin/bash
-echo "UNITTEST"
+tox
import json
import logging
-import os
import sys
+import six
import yaml
-from osm_mon.core.settings import Config
-
-logging.basicConfig(stream=sys.stdout,
- format='%(asctime)s %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=logging.INFO)
-log = logging.getLogger(__name__)
-
-sys.path.append(os.path.abspath(os.path.join(os.path.realpath(__file__), '..', '..', '..', '..')))
-
from kafka import KafkaConsumer
+from osm_mon.core.settings import Config
from osm_mon.plugins.OpenStack.Aodh import alarming
from osm_mon.plugins.OpenStack.Gnocchi import metrics
from osm_mon.core.auth import AuthManager
from osm_mon.core.database import DatabaseManager
-cfg = Config.instance()
-cfg.read_environ()
+from osm_common import dbmongo
-# Initialize consumers for alarms and metrics
-common_consumer = KafkaConsumer(bootstrap_servers=cfg.BROKER_URI,
- key_deserializer=bytes.decode,
- value_deserializer=bytes.decode,
- group_id="mon-consumer")
+logging.basicConfig(stream=sys.stdout,
+ format='%(asctime)s %(message)s',
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=logging.INFO)
+log = logging.getLogger(__name__)
-auth_manager = AuthManager()
-database_manager = DatabaseManager()
-database_manager.create_tables()
-# Create OpenStack alarming and metric instances
-openstack_metrics = metrics.Metrics()
-openstack_alarms = alarming.Alarming()
+def get_vim_type(db_manager, vim_uuid):
+ """Get the vim type that is required by the message."""
+ credentials = db_manager.get_credentials(vim_uuid)
+ return credentials.type
-# Create CloudWatch alarm and metric instances
-cloudwatch_alarms = plugin_alarms()
-cloudwatch_metrics = plugin_metrics()
-aws_connection = Connection()
-aws_access_credentials = AccessCredentials()
-# Create vROps plugin_receiver class instance
-vrops_rcvr = plugin_receiver.PluginReceiver()
+def get_vdur(common_db, nsr_id, member_index, vdu_name):
+ vnfr = get_vnfr(common_db, nsr_id, member_index)
+ for vdur in vnfr['vdur']:
+ if vdur['vdu-id-ref'] == vdu_name:
+ return vdur
+    raise ValueError('vdur not found for nsr-id %s, member_index %s and vdu_name %s'
+                     % (nsr_id, member_index, vdu_name))
-def get_vim_type(vim_uuid):
- """Get the vim type that is required by the message."""
- try:
- credentials = database_manager.get_credentials(vim_uuid)
- return credentials.type
- except Exception:
- log.exception("Error getting vim_type: ")
- return None
-
-
-# Define subscribe the consumer for the plugins
-topics = ['metric_request', 'alarm_request', 'access_credentials', 'vim_account']
-# TODO: Remove access_credentials
-common_consumer.subscribe(topics)
-
-log.info("Listening for alarm_request and metric_request messages")
-for message in common_consumer:
- log.info("Message arrived: %s", message)
- try:
+def get_vnfr(common_db, nsr_id, member_index):
+ vnfr = common_db.get_one(table="vnfrs", filter={"nsr-id-ref": nsr_id, "member-vnf-index-ref": str(member_index)})
+ return vnfr
+
+
+def main():
+ cfg = Config.instance()
+ cfg.read_environ()
+
+ auth_manager = AuthManager()
+ database_manager = DatabaseManager()
+ database_manager.create_tables()
+
+ # Create OpenStack alarming and metric instances
+ openstack_metrics = metrics.Metrics()
+ openstack_alarms = alarming.Alarming()
+
+ # Create CloudWatch alarm and metric instances
+ cloudwatch_alarms = plugin_alarms()
+ cloudwatch_metrics = plugin_metrics()
+ aws_connection = Connection()
+ aws_access_credentials = AccessCredentials()
+
+ # Create vROps plugin_receiver class instance
+ vrops_rcvr = plugin_receiver.PluginReceiver()
+
+ common_db = dbmongo.DbMongo()
+ common_db_uri = cfg.MONGO_URI.split(':')
+ common_db.db_connect({'host': common_db_uri[0], 'port': int(common_db_uri[1]), 'name': 'osm'})
+
+ # Initialize consumers for alarms and metrics
+ common_consumer = KafkaConsumer(bootstrap_servers=cfg.BROKER_URI,
+ key_deserializer=bytes.decode,
+ value_deserializer=bytes.decode,
+ group_id="mon-consumer")
+
+ # Define subscribe the consumer for the plugins
+ topics = ['metric_request', 'alarm_request', 'access_credentials', 'vim_account']
+ # TODO: Remove access_credentials
+ common_consumer.subscribe(topics)
+
+ log.info("Listening for alarm_request and metric_request messages")
+ for message in common_consumer:
+ log.info("Message arrived: %s", message)
try:
- values = json.loads(message.value)
- except ValueError:
- values = yaml.safe_load(message.value)
-
- if message.topic == "vim_account":
- if message.key == "create" or message.key == "edit":
- auth_manager.store_auth_credentials(values)
- if message.key == "delete":
- auth_manager.delete_auth_credentials(values)
-
- else:
- # Check the vim desired by the message
- vim_type = get_vim_type(values['vim_uuid'])
- if vim_type == "openstack":
- log.info("This message is for the OpenStack plugin.")
- if message.topic == "metric_request":
- openstack_metrics.metric_calls(message)
- if message.topic == "alarm_request":
- openstack_alarms.alarming(message)
-
- elif vim_type == "aws":
- log.info("This message is for the CloudWatch plugin.")
- aws_conn = aws_connection.setEnvironment()
- if message.topic == "metric_request":
- cloudwatch_metrics.metric_calls(message, aws_conn)
- if message.topic == "alarm_request":
- cloudwatch_alarms.alarm_calls(message, aws_conn)
- if message.topic == "access_credentials":
- aws_access_credentials.access_credential_calls(message)
-
- elif vim_type == "vmware":
- log.info("This metric_request message is for the vROPs plugin.")
- vrops_rcvr.consume(message)
+ try:
+ values = json.loads(message.value)
+ except ValueError:
+ values = yaml.safe_load(message.value)
- else:
- log.debug("vim_type is misconfigured or unsupported; %s",
- vim_type)
+ if message.topic == "vim_account":
+ if message.key == "create" or message.key == "edit":
+ auth_manager.store_auth_credentials(values)
+ if message.key == "delete":
+ auth_manager.delete_auth_credentials(values)
- except Exception:
- log.exception("Exception processing message: ")
+ else:
+ # Get ns_id from message
+ # TODO: Standardize all message models to avoid the need of figuring out where are certain fields
+ contains_list = False
+ list_index = None
+ ns_id = None
+ for k, v in six.iteritems(values):
+ if isinstance(v, dict):
+ if 'ns_id' in v:
+ ns_id = v['ns_id']
+ contains_list = True
+ list_index = k
+ if not contains_list and 'ns_id' in values:
+ ns_id = values['ns_id']
+
+ vnf_index = values[list_index]['vnf_member_index'] if contains_list else values['vnf_member_index']
+
+ # Check the vim desired by the message
+ vnfr = get_vnfr(common_db, ns_id, vnf_index)
+ vim_uuid = vnfr['vim-account-id']
+ vim_type = get_vim_type(database_manager, vim_uuid)
+
+ if (contains_list and 'vdu_name' in values[list_index]) or 'vdu_name' in values:
+ vdu_name = values[list_index]['vdu_name'] if contains_list else values['vdu_name']
+ vdur = get_vdur(common_db, ns_id, vnf_index, vdu_name)
+ if contains_list:
+ values[list_index]['resource_uuid'] = vdur['vim-id']
+ else:
+ values['resource_uuid'] = vdur['vim-id']
+ message = message._replace(value=json.dumps(values))
+
+ if vim_type == "openstack":
+ log.info("This message is for the OpenStack plugin.")
+ if message.topic == "metric_request":
+ openstack_metrics.metric_calls(message, vim_uuid)
+ if message.topic == "alarm_request":
+ openstack_alarms.alarming(message, vim_uuid)
+
+ elif vim_type == "aws":
+ log.info("This message is for the CloudWatch plugin.")
+ aws_conn = aws_connection.setEnvironment()
+ if message.topic == "metric_request":
+ cloudwatch_metrics.metric_calls(message, aws_conn)
+ if message.topic == "alarm_request":
+ cloudwatch_alarms.alarm_calls(message, aws_conn)
+ if message.topic == "access_credentials":
+ aws_access_credentials.access_credential_calls(message)
+
+ elif vim_type == "vmware":
+ log.info("This metric_request message is for the vROPs plugin.")
+ vrops_rcvr.consume(message)
+
+ else:
+ log.debug("vim_type is misconfigured or unsupported; %s",
+ vim_type)
+
+ except Exception:
+ log.exception("Exception processing message: ")
+
+
+if __name__ == '__main__':
+ main()
+++ /dev/null
-#gitkeep file to keep the initial empty directory structure.
"ack_details":
{
"alarm_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "tenant_uuid": { "type": "string" }
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"}
},
"required": [ "schema_version",
"schema_type",
- "vim_type",
- "vim_uuid",
"alarm_uuid",
- "resource_uuid",
- "tenant_uuid" ]
+ "ns_id",
+ "vnf_member_index" ]
}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-# This is the message bus schema to create_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string "},
- "vim_uuid": { "type": "string" },
- "alarm_create_request":
- {
- "correlation_id": { "type": "integer" },
- "alarm_name": { "type": "string" },
- "metric_name": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"},
- "description": { "type": "string" },
- "severity": { "type": "string" },
- "operation": { "type": "string" },
- "threshold_value": { "type": "integer" },
- "unit": { "type": "string" },
- "statistic": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "vim_uuid",
- "correlation_id",
- "alarm_name",
- "metric_name",
- "resource_uuid",
- "severity",
- "operation",
- "threshold_value",
- "unit",
- "statistic" ]
-}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+# This is the message bus schema to create_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "alarm_create_request":
+ {
+ "correlation_id": { "type": "integer" },
+ "alarm_name": { "type": "string" },
+ "metric_name": { "type": "string" },
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"},
+ "description": { "type": "string" },
+ "severity": { "type": "string" },
+ "operation": { "type": "string" },
+ "threshold_value": { "type": "integer" },
+ "statistic": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "alarm_name",
+ "metric_name",
+ "ns_id",
+ "vnf_member_index",
+ "vdu_name",
+ "severity",
+ "operation",
+ "threshold_value",
+ "statistic" ]
+}
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
- "metric_create":
+ "metric_create_request":
{
+ "correlation_id": { "type": "integer" },
"metric_name": { "type" : "string" },
"metric_unit": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"}
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"}
},
"required": [ "schema_version",
"schema_type",
"correlation_id",
- "vim_type",
- "vim_uuid",
"metric_name",
"metric_unit",
- "resource_uuid" ]
+ "ns_id",
+ "vnf_member_index",
+ "vdu_name" ]
}
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_uuid": { "type": "string" },
"metric_create_response":
{
- "metric_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
"status": { "type": "boolean" }
},
"required": [ "schema_type",
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
"alarm_delete_request":
{
- "alarm_uuid": { "type": "string" },
"correlation_id": { "type": "integer" },
- "vdu_id": { "type": "string"}
+ "alarm_uuid": { "type": "string" },
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"}
},
"required": [ "schema_version",
"schema_type",
- "vim_type",
- "vim_uuid",
"alarm_uuid",
+ "ns_id",
+ "vnf_member_index",
"correlation_id"
]
}
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"},
"correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
+ "metric_name": { "type": "string" },
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"},
"required": [ "schema_version",
"schema_type",
"metric_name",
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
"alarm_list_request":
{
"correlation_id": { "type": "integer" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"},
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"},
"alarm_name": { "type": "string" },
"severity": { "type" : "string" }
},
"schema_type": { "type": "string" },
"vim_type": { "type": "string" },
"vim_uuid": { "type": "string" },
- "list_alarm_resp": { "$ref": "definitions.json#/notify_details" }
+ "list_alarm_response": { "$ref": "definitions.json#/notify_details" }
}
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
"metrics_list_request":
{
- "metric_name": { "type": "string" },
"correlation_id": { "type": "integer" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"}
+ "metric_name": { "type": "string" },
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"}
},
"required": [ "schema_version",
"schema_type",
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "definitions":
+ "notify_details":
{
- "notify_details":
- {
- "alarm_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"},
- "description": { "type": "string" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
- "severity": { "type" : "string" },
- "status": { "type": "string" },
- "start_date": { "type": "string" },
- "update_date": { "type": "string" },
- "cancel_date": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "alarm_uuid",
- "resource_uuid",
- "vim_type",
- "vim_uuid",
- "severity",
- "status",
- "start_date" ]
- }
+ "alarm_uuid": { "type": "string" },
+ "description": { "type": "string" },
+ "severity": { "type" : "string" },
+ "status": { "type": "string" },
+ "start_date": { "type": "string" },
+ "update_date": { "type": "string" },
+ "cancel_date": { "type": "string" }
+ },
+  "required": [ "schema_version",
+                "schema_type",
+                "alarm_uuid",
+                "severity",
+                "status",
+                "start_date" ]
}
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
"metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"},
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"},
"correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
"collection_period": { "type": "integer" },
"collection_unit": { "type": "string" },
"required": ["schema_version",
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "vim_uuid": { "type": "string" },
"alarm_update_request":
- {
+    {
"correlation_id": { "type": "integer" },
+ "vim_uuid": { "type": "string" },
"alarm_uuid": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "vdu_id": { "type": "string"},
+ "metric_name": { "type": "string" },
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"},
"description": { "type": "string" },
"severity": { "type": "string" },
"operation": { "type": "string" },
"threshold_value": { "type": "string" },
- "unit": { "type": "string" },
"statistic": { "type": "string" }
},
"required": [ "schema_version",
- "scema_type",
+ "schema_type",
"vim_type",
"vim_uuid",
"correlation_id",
{
"schema_version": { "type": "string" },
"schema_type": { "type": "string" },
- "correlation_id": { "type": "integer" },
"vim_type": { "type": "string" },
"vim_uuid": { "type": "string" },
- "metric_create":
+ "metric_update_request":
{
- "metric_name": { "type": "string" },
- "metric_unit": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "vdu_id": { "type": "string"}
+ "correlation_id": { "type": "integer" },
+ "metric_name": { "type": "string" },
+ "metric_unit": { "type": "string" },
+ "ns_id": { "type": "string"},
+ "vnf_member_index": { "type": "integer"},
+ "vdu_name": { "type": "string"}
},
"required": [ "schema_version",
"schema_type",
_configuration = [
CfgParam('BROKER_URI', "localhost:9092", six.text_type),
+ CfgParam('MONGO_URI', "mongo:27017", six.text_type),
CfgParam('DATABASE', "sqlite:///mon_sqlite.db", six.text_type),
CfgParam('OS_NOTIFIER_URI', "http://localhost:8662", six.text_type),
CfgParam('OS_DEFAULT_GRANULARITY', "300", six.text_type),
val = str(os.environ[key])
setattr(self, key, val)
except KeyError as exc:
- log.warning("Environment variable not present: %s", exc)
+ log.debug("Environment variable not present: %s", exc)
return
alarm_info['schema_version'] = str(list_info['schema_version'])
alarm_info['schema_type'] = 'list_alarm_response'
- alarm_info['list_alarm_resp'] = alarm_list
+ alarm_info['list_alarm_response'] = alarm_list
return alarm_info
except Exception as e:
ack_details = self.get_ack_details(alarm_info)
payload = json.dumps(ack_details)
file = open('../../core/models/notify_alarm.json','wb').write((payload))
- self.producer.notify_alarm(key='notify_alarm',message=payload,topic = 'alarm_response')
+ self.producer.notify_alarm(key='notify_alarm',message=payload)
log.info("Acknowledge sent: %s", ack_details)
else:
if update_resp == None:
payload = json.dumps(update_resp)
file = open('../../core/models/update_alarm_resp.json','wb').write((payload))
- self.producer.update_alarm_response(key='update_alarm_response',message=payload,topic = 'alarm_response')
+ self.producer.update_alarm_response(key='update_alarm_response',message=payload)
log.debug("Alarm Already exists")
else:
payload = json.dumps(update_resp)
file = open('../../core/models/update_alarm_resp.json','wb').write((payload))
- self.producer.update_alarm_response(key='update_alarm_response',message=payload,topic = 'alarm_response')
+ self.producer.update_alarm_response(key='update_alarm_response',message=payload)
log.info("Alarm Updated with alarm info: %s", update_resp)
else:
del_resp = self.delete_alarm(del_info)
payload = json.dumps(del_resp)
file = open('../../core/models/delete_alarm_resp.json','wb').write((payload))
- self.producer.delete_alarm_response(key='delete_alarm_response',message=payload,topic = 'alarm_response')
+ self.producer.delete_alarm_response(key='delete_alarm_response',message=payload)
log.info("Alarm Deleted with alarm info: %s", del_resp)
list_resp = self.get_alarms_list(alarm_info)#['alarm_names']
payload = json.dumps(list_resp)
file = open('../../core/models/list_alarm_resp.json','wb').write((payload))
- self.producer.list_alarm_response(key='list_alarm_response',message=payload,topic = 'alarm_response')
+ self.producer.list_alarm_response(key='list_alarm_response',message=payload)
else:
log.error("Resource ID is Incorrect")
log.info("Action required against: %s" % (message.topic))
if message.key == "create_metric_request":
- if self.check_resource(metric_info['metric_create']['resource_uuid']) == True:
- metric_resp = self.create_metric_request(metric_info['metric_create']) #alarm_info = message.value
+ if self.check_resource(metric_info['metric_create_request']['resource_uuid']) == True:
+ metric_resp = self.create_metric_request(metric_info['metric_create_request']) #alarm_info = message.value
metric_response['schema_version'] = metric_info['schema_version']
metric_response['schema_type'] = "create_metric_response"
metric_response['metric_create_response'] = metric_resp
return metric_response
elif message.key == "update_metric_request":
- if self.check_resource(metric_info['metric_create']['resource_uuid']) == True:
- update_resp = self.update_metric_request(metric_info['metric_create'])
+ if self.check_resource(metric_info['metric_create_request']['resource_uuid']) == True:
+ update_resp = self.update_metric_request(metric_info['metric_create_request'])
metric_response['schema_version'] = metric_info['schema_version']
metric_response['schema_type'] = "update_metric_response"
metric_response['metric_update_response'] = update_resp
log = logging.getLogger(__name__)
-ALARM_NAMES = {
- "average_memory_usage_above_threshold": "average_memory_utilization",
- "disk_read_ops": "disk_read_ops",
- "disk_write_ops": "disk_write_ops",
- "disk_read_bytes": "disk_read_bytes",
- "disk_write_bytes": "disk_write_bytes",
- "net_packets_dropped": "packets_dropped",
- "packets_in_above_threshold": "packets_received",
- "packets_out_above_threshold": "packets_sent",
- "cpu_utilization_above_threshold": "cpu_utilization"}
-
METRIC_MAPPINGS = {
"average_memory_utilization": "memory.percent",
"disk_read_ops": "disk.read.requests",
log.warning("Failed to create the alarm: %s", exc)
return None, False
- def alarming(self, message):
+ def alarming(self, message, vim_uuid):
"""Consume info from the message bus to manage alarms."""
try:
values = json.loads(message.value)
values = yaml.safe_load(message.value)
log.info("OpenStack alarm action required.")
- vim_uuid = values['vim_uuid']
auth_token = Common.get_auth_token(vim_uuid)
alarm_endpoint, metric_endpoint, auth_token, alarm_details, vim_config)
# Generate a valid response message, send via producer
+ if alarm_status is True:
+ log.info("Alarm successfully created")
+ self._database_manager.save_alarm(alarm_id, vim_uuid)
try:
- if alarm_status is True:
- log.info("Alarm successfully created")
- self._database_manager.save_alarm(alarm_id, vim_uuid)
-
resp_message = self._response.generate_response(
'create_alarm_response', status=alarm_status,
alarm_id=alarm_id,
# Checking what fields are specified for a list request
try:
name = list_details['alarm_name'].lower()
- if name not in ALARM_NAMES.keys():
- log.warning("This alarm is not supported, won't be used!")
- name = None
except KeyError as exc:
log.info("Alarm name isn't specified.")
name = None
resource_id = rule['resource_id']
metric_name = [key for key, value in six.iteritems(METRIC_MAPPINGS) if value == rule['metric']][0]
except Exception as exc:
- log.warning("Failed to retrieve existing alarm info: %s.\
- Can only update OSM alarms.", exc)
+ log.exception("Failed to retrieve existing alarm info. Can only update OSM alarms.")
return None, False
# Generates and check payload configuration for alarm update
return json.loads(update_alarm.text)['alarm_id'], True
except Exception as exc:
- log.warning("Alarm update could not be performed: %s", exc)
- return None, False
+ log.exception("Alarm update could not be performed: ")
return None, False
def check_payload(self, values, metric_name, resource_id,
# Initializer a producer to send responses back to SO
self._producer = KafkaProducer("metric_response")
- def metric_calls(self, message):
+ def metric_calls(self, message, vim_uuid):
"""Consume info from the message bus to manage metric requests."""
try:
values = json.loads(message.value)
values = yaml.safe_load(message.value)
log.info("OpenStack metric action required.")
- auth_token = Common.get_auth_token(values['vim_uuid'])
+ auth_token = Common.get_auth_token(vim_uuid)
- endpoint = Common.get_endpoint("metric", values['vim_uuid'])
+ endpoint = Common.get_endpoint("metric", vim_uuid)
if 'metric_name' in values and values['metric_name'] not in METRIC_MAPPINGS.keys():
raise ValueError('Metric ' + values['metric_name'] + ' is not supported.')
if message.key == "create_metric_request":
# Configure metric
- metric_details = values['metric_create']
+ metric_details = values['metric_create_request']
metric_id, resource_id, status = self.configure_metric(
endpoint, auth_token, metric_details)
try:
resp_message = self._response.generate_response(
'create_metric_response', status=status,
- cor_id=values['correlation_id'],
+ cor_id=metric_details['correlation_id'],
metric_id=metric_id, r_id=resource_id)
log.info("Response messages: %s", resp_message)
self._producer.create_metrics_resp(
# Log and send a response back to this effect
log.warning("Gnocchi doesn't support metric configuration\
updates.")
- req_details = values['metric_create']
+ req_details = values['metric_create_request']
metric_name = req_details['metric_name']
resource_id = req_details['resource_uuid']
metric_id = self.get_metric_id(
try:
resp_message = self._response.generate_response(
'update_metric_response', status=False,
- cor_id=values['correlation_id'],
+ cor_id=req_details['correlation_id'],
r_id=resource_id, m_id=metric_id)
log.info("Response message: %s", resp_message)
self._producer.update_metric_response(
'update_metric_response', resp_message)
except Exception as exc:
- log.warning("Failed to send an update response:%s", exc)
+ log.exception("Failed to send an update response:")
elif message.key == "list_metric_request":
list_details = values['metrics_list_request']
'unit': values['metric_unit']}}
result = Common.perform_request(
res_url, auth_token, req_type="post",
- payload=json.dumps(payload))
+ payload=json.dumps(payload, sort_keys=True))
# Get id of newly created metric
for row in json.loads(result.text):
if row['name'] == metric_name:
resource_payload = json.dumps({'id': resource_id,
'metrics': {
- metric_name: metric}})
+ metric_name: metric}}, sort_keys=True)
resource = Common.perform_request(
url, auth_token, req_type="post",
alarm_list_resp = {"schema_version": schema_version,
"schema_type": "list_alarm_response",
"correlation_id": kwargs['cor_id'],
- "list_alarm_resp": kwargs['alarm_list']}
+ "list_alarm_response": kwargs['alarm_list']}
return json.dumps(alarm_list_resp)
def create_alarm_response(self, **kwargs):
import requests
import logging
+import six
from pyvcloud.vcd.client import BasicLoginCredentials
from pyvcloud.vcd.client import Client
API_VERSION = '5.9'
import datetime
from socket import getfqdn
-from requests.packages.urllib3.exceptions import InsecureRequestWarning
-requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+import urllib3
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
OPERATION_MAPPING = {'GE':'GT_EQ', 'LE':'LT_EQ', 'GT':'GT', 'LT':'LT', 'EQ':'EQ'}
severity_mano2vrops = {'WARNING':'WARNING', 'MINOR':'WARNING', 'MAJOR':"IMMEDIATE",\
#1) get alarm & metrics parameters from plugin specific file
def_a_params = self.get_default_Params(config_dict['alarm_name'])
if not def_a_params:
- self.logger.warn("Alarm not supported: {}".format(config_dict['alarm_name']))
+ self.logger.warning("Alarm not supported: {}".format(config_dict['alarm_name']))
return None
metric_key_params = self.get_default_Params(config_dict['metric_name'])
if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(config_dict['metric_name']))
+ self.logger.warning("Metric not supported: {}".format(config_dict['metric_name']))
return None
#1.2) Check if alarm definition already exists
vrops_alarm_name = def_a_params['vrops_alarm']+ '-' + config_dict['resource_uuid']
alert_def_list = self.get_alarm_defination_by_name(vrops_alarm_name)
if alert_def_list:
- self.logger.warn("Alarm already exists: {}. Try updating by update_alarm_request"\
+ self.logger.warning("Alarm already exists: {}. Try updating by update_alarm_request"\
.format(vrops_alarm_name))
return None
if symptom_uuid is not None:
self.logger.info("Symptom defined: {} with ID: {}".format(symptom_params['symptom_name'],symptom_uuid))
else:
- self.logger.warn("Failed to create Symptom: {}".format(symptom_params['symptom_name']))
+ self.logger.warning("Failed to create Symptom: {}".format(symptom_params['symptom_name']))
return None
#3) create alert definition
#To Do - Get type & subtypes for all 5 alarms
alarm_params = {'name':vrops_alarm_name,
'description':config_dict['description']\
- if config_dict.has_key('description') and config_dict['description'] is not None else config_dict['alarm_name'],
+ if 'description' in config_dict and config_dict['description'] is not None else config_dict['alarm_name'],
'adapterKindKey':def_a_params['adapter_kind'],
'resourceKindKey':def_a_params['resource_kind'],
'waitCycles':1, 'cancelCycles':1,
alarm_def = self.create_alarm_definition(alarm_params)
if alarm_def is None:
- self.logger.warn("Failed to create Alert: {}".format(alarm_params['name']))
+ self.logger.warning("Failed to create Alert: {}".format(alarm_params['name']))
return None
self.logger.info("Alarm defined: {} with ID: {}".format(alarm_params['name'],alarm_def))
#4) Find vm_moref_id from vApp uuid in vCD
vm_moref_id = self.get_vm_moref_id(config_dict['resource_uuid'])
if vm_moref_id is None:
- self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(config_dict['resource_uuid']))
+ self.logger.warning("Failed to find vm morefid for vApp in vCD: {}".format(config_dict['resource_uuid']))
return None
#5) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
resource_id = self.get_vm_resource_id(vm_moref_id)
if resource_id is None:
- self.logger.warn("Failed to find resource in vROPs: {}".format(config_dict['resource_uuid']))
+ self.logger.warning("Failed to find resource in vROPs: {}".format(config_dict['resource_uuid']))
return None
#6) Configure alarm notification for a particular VM using it's resource_id
data=json.dumps(data))
if resp.status_code != 201:
- self.logger.warn("Failed to create Symptom definition: {}, response {}"\
+ self.logger.warning("Failed to create Symptom definition: {}, response {}"\
.format(symptom_params['symptom_name'], resp.content))
return None
return symptom_id
except Exception as exp:
- self.logger.warn("Error creating symptom definition : {}\n{}"\
+ self.logger.warning("Error creating symptom definition : {}\n{}"\
.format(exp, traceback.format_exc()))
data=json.dumps(data))
if resp.status_code != 201:
- self.logger.warn("Failed to create Alarm definition: {}, response {}"\
+ self.logger.warning("Failed to create Alarm definition: {}, response {}"\
.format(alarm_params['name'], resp.content))
return None
return alarm_uuid
except Exception as exp:
- self.logger.warn("Error creating alarm definition : {}\n{}".format(exp, traceback.format_exc()))
+ self.logger.warning("Error creating alarm definition : {}\n{}".format(exp, traceback.format_exc()))
def configure_rest_plugin(self):
data=json.dumps(data))
if resp.status_code is not 201:
- self.logger.warn("Failed to create REST Plugin: {} for url: {}, \nresponse code: {},"\
+ self.logger.warning("Failed to create REST Plugin: {} for url: {}, \nresponse code: {},"\
"\nresponse content: {}".format(plugin_name, webhook_url,\
resp.status_code, resp.content))
return None
plugin_id = resp_data['pluginId']
if plugin_id is None:
- self.logger.warn("Failed to get REST Plugin ID for {}, url: {}".format(plugin_name, webhook_url))
+ self.logger.warning("Failed to get REST Plugin ID for {}, url: {}".format(plugin_name, webhook_url))
return None
else:
self.logger.info("Created REST Plugin: {} with ID : {} for url: {}".format(plugin_name, plugin_id, webhook_url))
status = self.enable_rest_plugin(plugin_id, plugin_name)
if status is False:
- self.logger.warn("Failed to enable created REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
+ self.logger.warning("Failed to enable created REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
return None
else:
self.logger.info("Enabled REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Failed to REST GET Alarm plugin details \nResponse code: {}\nResponse content: {}"\
+ self.logger.warning("Failed to REST GET Alarm plugin details \nResponse code: {}\nResponse content: {}"\
.format(resp.status_code, resp.content))
return None
plugin_id = notify_plugin.get('pluginId')
if plugin_id is None:
- self.logger.warn("REST plugin {} not found".format(plugin_name))
+ self.logger.warning("REST plugin {} not found".format(plugin_name))
return None
else:
self.logger.info("Found REST Plugin: {}".format(plugin_name))
verify = False)
if resp.status_code is not 204:
- self.logger.warn("Failed to enable REST plugin {}. \nResponse code {}\nResponse Content: {}"\
+ self.logger.warning("Failed to enable REST plugin {}. \nResponse code {}\nResponse Content: {}"\
.format(plugin_name, resp.status_code, resp.content))
return False
return True
except Exception as exp:
- self.logger.warn("Error enabling REST plugin for {} plugin: Exception: {}\n{}"\
+ self.logger.warning("Error enabling REST plugin for {} plugin: Exception: {}\n{}"\
.format(plugin_name, exp, traceback.format_exc()))
def create_alarm_notification_rule(self, alarm_name, alarm_id, resource_id):
#1) Find the REST Plugin id details for - MON_module_REST_Plugin
plugin_id = self.check_if_plugin_configured(plugin_name)
if plugin_id is None:
- self.logger.warn("Failed to get REST plugin_id for : {}".format('MON_module_REST_Plugin'))
+ self.logger.warning("Failed to get REST plugin_id for : {}".format('MON_module_REST_Plugin'))
return None
#2) Create Alarm notification rule
data=json.dumps(data))
if resp.status_code is not 201:
- self.logger.warn("Failed to create Alarm notification rule {} for {} alarm."\
+ self.logger.warning("Failed to create Alarm notification rule {} for {} alarm."\
"\nResponse code: {}\nResponse content: {}"\
.format(notification_name, alarm_name, resp.status_code, resp.content))
return None
return vm_moref_id
except Exception as exp:
- self.logger.warn("Error occurred while getting VM moref ID for VM : {}\n{}"\
+ self.logger.warning("Error occurred while getting VM moref ID for VM : {}\n{}"\
.format(exp, traceback.format_exc()))
vca = self.connect_as_admin()
if not vca:
- self.logger.warn("Failed to connect to vCD")
+ self.logger.warning("Failed to connect to vCD")
return parsed_respond
url_list = [self.vcloud_site, '/api/vApp/vapp-', vapp_uuid]
verify=False)
if response.status_code != 200:
- self.logger.warn("REST API call {} failed. Return status code {}"\
+ self.logger.warning("REST API call {} failed. Return status code {}"\
.format(get_vapp_restcall, response.content))
return parsed_respond
parsed_respond["vm_vcenter_info"]= vm_vcenter_info
except Exception as exp :
- self.logger.warn("Error occurred calling rest api for getting vApp details: {}\n{}"\
+ self.logger.warning("Error occurred calling rest api for getting vApp details: {}\n{}"\
.format(exp, traceback.format_exc()))
return parsed_respond
client_as_admin.set_credentials(BasicLoginCredentials(self.admin_username, org,\
self.admin_password))
except Exception as e:
- self.logger.warn("Can't connect to a vCloud director as: {} with exception {}"\
+ self.logger.warning("Can't connect to a vCloud director as: {} with exception {}"\
.format(self.admin_username, e))
return client_as_admin
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Failed to get resource details from vROPs for {}"\
+ self.logger.warning("Failed to get resource details from vROPs for {}"\
"\nResponse code:{}\nResponse Content: {}"\
.format(vm_moref_id, resp.status_code, resp.content))
return None
.format(vm_resource_id, vm_moref_id))
except Exception as exp:
- self.logger.warn("get_vm_resource_id: Error in parsing {}\n{}"\
+ self.logger.warning("get_vm_resource_id: Error in parsing {}\n{}"\
.format(exp, traceback.format_exc()))
return vm_resource_id
return_data['tenant_uuid'] = None
return_data['unit'] = None
#return_data['tenant_id'] = self.tenant_id
- #self.logger.warn("return_data: {}".format(return_data))
+ #self.logger.warning("return_data: {}".format(return_data))
#1) Get metric details from plugin specific file & format it into vROPs metrics
metric_key_params = self.get_default_Params(metric['metric_name'])
if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(metric['metric_name']))
+ self.logger.warning("Metric not supported: {}".format(metric['metric_name']))
#To Do: Return message
return return_data
#2.a) Find vm_moref_id from vApp uuid in vCD
vm_moref_id = self.get_vm_moref_id(metric['resource_uuid'])
if vm_moref_id is None:
- self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(metric['resource_uuid']))
+ self.logger.warning("Failed to find vm morefid for vApp in vCD: {}".format(metric['resource_uuid']))
return return_data
#2.b) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
resource_id = self.get_vm_resource_id(vm_moref_id)
if resource_id is None:
- self.logger.warn("Failed to find resource in vROPs: {}".format(metric['resource_uuid']))
+ self.logger.warning("Failed to find resource in vROPs: {}".format(metric['resource_uuid']))
return return_data
#3) Calculate begin & end time for period & period unit
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Failed to retrive Metric data from vROPs for {}\nResponse code:{}\nResponse Content: {}"\
+ self.logger.warning("Failed to retrive Metric data from vROPs for {}\nResponse code:{}\nResponse Content: {}"\
.format(metric['metric_name'], resp.status_code, resp.content))
return return_data
#5) Convert to required format
metrics_data = {}
json_data = json.loads(resp.content)
- for resp_key,resp_val in json_data.iteritems():
+ for resp_key,resp_val in six.iteritems(json_data):
if resp_key == 'values':
data = json_data['values'][0]
- for data_k,data_v in data.iteritems():
+ for data_k,data_v in six.iteritems(data):
if data_k == 'stat-list':
stat_list = data_v
- for stat_list_k,stat_list_v in stat_list.iteritems():
- for stat_keys,stat_vals in stat_list_v[0].iteritems():
+ for stat_list_k,stat_list_v in six.iteritems(stat_list):
+ for stat_keys,stat_vals in six.iteritems(stat_list_v[0]):
if stat_keys == 'timestamps':
metrics_data['time_series'] = stat_list_v[0]['timestamps']
if stat_keys == 'data':
"""Update alarm configuration (i.e. Symptom & alarm) as per request
"""
if new_alarm_config.get('alarm_uuid') is None:
- self.logger.warn("alarm_uuid is required to update an Alarm")
+ self.logger.warning("alarm_uuid is required to update an Alarm")
return None
#1) Get Alarm details from it's uuid & find the symptom defination
alarm_details_json, alarm_details = self.get_alarm_defination_details(new_alarm_config['alarm_uuid'])
"""Get alarm details based on alarm UUID
"""
if alarm_uuid is None:
- self.logger.warn("get_alarm_defination_details: Alarm UUID not provided")
+ self.logger.warning("get_alarm_defination_details: Alarm UUID not provided")
return None, None
alarm_details = {}
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Alarm to be updated not found: {}\nResponse code:{}\nResponse Content: {}"\
+ self.logger.warning("Alarm to be updated not found: {}\nResponse code:{}\nResponse Content: {}"\
.format(alarm_uuid, resp.status_code, resp.content))
return None, None
alarm_details['sub_type'] = json_data['subType']
alarm_details['symptom_definition_id'] = json_data['states'][0]['base-symptom-set']['symptomDefinitionIds'][0]
except Exception as exp:
- self.logger.warn("Exception while retriving alarm defination details: {}".format(exp))
+ self.logger.warning("Exception while retriving alarm defination details: {}".format(exp))
return None, None
return json_data, alarm_details
alert_match_list = []
if alarm_name is None:
- self.logger.warn("get_alarm_defination_by_name: Alarm name not provided")
+ self.logger.warning("get_alarm_defination_by_name: Alarm name not provided")
return alert_match_list
json_data = {}
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("get_alarm_defination_by_name: Error in response: {}\nResponse code:{}"\
+ self.logger.warning("get_alarm_defination_by_name: Error in response: {}\nResponse code:{}"\
"\nResponse Content: {}".format(alarm_name, resp.status_code, resp.content))
return alert_match_list
json_data = json.loads(resp.content)
if json_data['alertDefinitions'] is not None:
alerts_list = json_data['alertDefinitions']
- alert_match_list = filter(lambda alert: alert['name'] == alarm_name, alerts_list)
+ alert_match_list = list(filter(lambda alert: alert['name'] == alarm_name, alerts_list))
status = False if not alert_match_list else True
#self.logger.debug("Found alert_match_list: {}for larm_name: {},\nstatus: {}".format(alert_match_list, alarm_name,status))
return alert_match_list
except Exception as exp:
- self.logger.warn("Exception while searching alarm defination: {}".format(exp))
+ self.logger.warning("Exception while searching alarm defination: {}".format(exp))
return alert_match_list
if symptom_details is None:
return None
- if new_alarm_config.has_key('severity') and new_alarm_config['severity'] is not None:
+ if 'severity' in new_alarm_config and new_alarm_config['severity'] is not None:
symptom_details['state']['severity'] = severity_mano2vrops[new_alarm_config['severity']]
- if new_alarm_config.has_key('operation') and new_alarm_config['operation'] is not None:
+ if 'operation' in new_alarm_config and new_alarm_config['operation'] is not None:
symptom_details['state']['condition']['operator'] = OPERATION_MAPPING[new_alarm_config['operation']]
- if new_alarm_config.has_key('threshold_value') and new_alarm_config['threshold_value'] is not None:
+ if 'threshold_value' in new_alarm_config and new_alarm_config['threshold_value'] is not None:
symptom_details['state']['condition']['value'] = new_alarm_config['threshold_value']
#Find vrops metric key from metric_name, if required
"""
- if new_alarm_config.has_key('metric_name') and new_alarm_config['metric_name'] is not None:
+ if 'metric_name' in new_alarm_config and new_alarm_config['metric_name'] is not None:
metric_key_params = self.get_default_Params(new_alarm_config['metric_name'])
if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(config_dict['metric_name']))
+ self.logger.warning("Metric not supported: {}".format(config_dict['metric_name']))
return None
symptom_details['state']['condition']['key'] = metric_key_params['metric_key']
"""
data=data)
if resp.status_code != 200:
- self.logger.warn("Failed to update Symptom definition: {}, response {}"\
+ self.logger.warning("Failed to update Symptom definition: {}, response {}"\
.format(symptom_uuid, resp.content))
return None
.format(symptom_uuid, new_alarm_config['alarm_uuid']))
return symptom_uuid
else:
- self.logger.warn("Failed to update Symptom Defination {} for : {}"\
+ self.logger.warning("Failed to update Symptom Defination {} for : {}"\
.format(symptom_uuid, new_alarm_config['alarm_uuid']))
return None
"""
symptom_details = {}
if symptom_uuid is None:
- self.logger.warn("get_symptom_defination_details: Symptom UUID not provided")
+ self.logger.warning("get_symptom_defination_details: Symptom UUID not provided")
return None
api_url = '/suite-api/api/symptomdefinitions/'
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Symptom defination not found {} \nResponse code:{}\nResponse Content: {}"\
+ self.logger.warning("Symptom defination not found {} \nResponse code:{}\nResponse Content: {}"\
.format(symptom_uuid, resp.status_code, resp.content))
return None
def reconfigure_alarm(self, alarm_details_json, new_alarm_config):
"""Reconfigure alarm defination as per input
"""
- if new_alarm_config.has_key('severity') and new_alarm_config['severity'] is not None:
+ if 'severity' in new_alarm_config and new_alarm_config['severity'] is not None:
alarm_details_json['states'][0]['severity'] = new_alarm_config['severity']
- if new_alarm_config.has_key('description') and new_alarm_config['description'] is not None:
+ if 'description' in new_alarm_config and new_alarm_config['description'] is not None:
alarm_details_json['description'] = new_alarm_config['description']
api_url = '/suite-api/api/alertdefinitions'
data=data)
if resp.status_code != 200:
- self.logger.warn("Failed to update Alarm definition: {}, response code {}, response content: {}"\
+ self.logger.warning("Failed to update Alarm definition: {}, response code {}, response content: {}"\
.format(alarm_details_json['id'], resp.status_code, resp.content))
return None
else:
auth=(self.vrops_user, self.vrops_password),
verify = False, headers = headers)
if resp.status_code is not 204:
- self.logger.warn("Failed to delete notification rules for {}".format(alarm_name))
+ self.logger.warning("Failed to delete notification rules for {}".format(alarm_name))
return None
else:
self.logger.info("Deleted notification rules for {}".format(alarm_name))
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Failed to get notification rules details for {}"\
+ self.logger.warning("Failed to get notification rules details for {}"\
.format(alarm_name))
return None
notifications = json.loads(resp.content)
- if notifications is not None and notifications.has_key('notification-rule'):
+ if notifications is not None and 'notification-rule' in notifications:
notifications_list = notifications['notification-rule']
for dict in notifications_list:
if dict['name'] is not None and dict['name'] == alarm_notify_id:
.format(notification_id, alarm_name))
return notification_id
- self.logger.warn("Notification id to be deleted not found for {}"\
+ self.logger.warning("Notification id to be deleted not found for {}"\
.format(alarm_name))
return None
auth=(self.vrops_user, self.vrops_password),
verify = False, headers = headers)
if resp.status_code is not 204:
- self.logger.warn("Failed to delete alarm definition {}".format(alarm_id))
+ self.logger.warning("Failed to delete alarm definition {}".format(alarm_id))
return None
else:
self.logger.info("Deleted alarm definition {}".format(alarm_id))
auth=(self.vrops_user, self.vrops_password),
verify = False, headers = headers)
if resp.status_code is not 204:
- self.logger.warn("Failed to delete symptom definition {}".format(symptom_id))
+ self.logger.warning("Failed to delete symptom definition {}".format(symptom_id))
return None
else:
self.logger.info("Deleted symptom definition {}".format(symptom_id))
return status
metric_key_params = self.get_default_Params(metric_info['metric_name'])
if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(metric_info['metric_name']))
+ self.logger.warning("Metric not supported: {}".format(metric_info['metric_name']))
return status
else:
#If Metric is supported, verify optional metric unit & return status
triggered_alarms_list = []
if list_alarm_input.get('resource_uuid') is None:
- self.logger.warn("Resource UUID is required to get triggered alarms list")
+ self.logger.warning("Resource UUID is required to get triggered alarms list")
return triggered_alarms_list
#1)Find vROPs resource ID using RO resource UUID
#1) Find vm_moref_id from vApp uuid in vCD
vm_moref_id = self.get_vm_moref_id(ro_resource_uuid)
if vm_moref_id is None:
- self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(ro_resource_uuid))
+ self.logger.warning("Failed to find vm morefid for vApp in vCD: {}".format(ro_resource_uuid))
return None
#2) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
vrops_resource_id = self.get_vm_resource_id(vm_moref_id)
if vrops_resource_id is None:
- self.logger.warn("Failed to find resource in vROPs: {}".format(ro_resource_uuid))
+ self.logger.warning("Failed to find resource in vROPs: {}".format(ro_resource_uuid))
return None
return vrops_resource_id
verify = False, headers = headers)
if resp.status_code is not 200:
- self.logger.warn("Failed to get triggered alarms for {}"\
+ self.logger.warning("Failed to get triggered alarms for {}"\
.format(ro_resource_uuid))
return None
all_alerts = json.loads(resp.content)
- if all_alerts.has_key('alerts'):
+ if 'alerts' in all_alerts:
if not all_alerts['alerts']:
self.logger.info("No alarms present on resource {}".format(ro_resource_uuid))
return resource_alarms
alarm_instance['vim_type'] = 'VMware'
#find severity of alarm
severity = None
- for key,value in severity_mano2vrops.iteritems():
+ for key,value in six.iteritems(severity_mano2vrops):
if value == alarm['alertLevel']:
severity = key
if severity is None:
#Core producer
+import six
+
from osm_mon.plugins.vRealiseOps.mon_plugin_vrops import MonPlugin
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
self.publish_metrics_data_status(metrics_data)
elif message.key == "create_metric_request":
metric_info = json.loads(message.value)
- metric_status = self.verify_metric(metric_info['metric_create'])
+ metric_status = self.verify_metric(metric_info['metric_create_request'])
#Publish message using producer
self.publish_create_metric_response(metric_info, metric_status)
elif message.key == "update_metric_request":
metric_info = json.loads(message.value)
- metric_status = self.verify_metric(metric_info['metric_create'])
+ metric_status = self.verify_metric(metric_info['metric_create_request'])
#Publish message using producer
self.publish_update_metric_response(metric_info, metric_status)
elif message.key == "delete_metric_request":
metric_info = json.loads(message.value)
#Deleting Metric Data is not allowed. Publish status as False
- self.logger.warn("Deleting Metric is not allowed: {}".format(metric_info['metric_name']))
+ self.logger.warning("Deleting Metric is not allowed: {}".format(metric_info['metric_name']))
#Publish message using producer
self.publish_delete_metric_response(metric_info)
elif message.topic == 'access_credentials':
"metric_create_response":
{
"metric_uuid":'0',
- "resource_uuid":metric_info['metric_create']['resource_uuid'],
+ "resource_uuid":metric_info['metric_create_request']['resource_uuid'],
"status":metric_status
}
}
"metric_update_response":
{
"metric_uuid":'0',
- "resource_uuid":metric_info['metric_create']['resource_uuid'],
+ "resource_uuid":metric_info['metric_create_request']['resource_uuid'],
"status":metric_status
}
}
"""
topic = 'metric_response'
msg_key = 'delete_metric_response'
- if metric_info.has_key('tenant_uuid') and metric_info['tenant_uuid'] is not None:
+ if 'tenant_uuid' in metric_info and metric_info['tenant_uuid'] is not None:
tenant_uuid = metric_info['tenant_uuid']
else:
tenant_uuid = None
response_msg = {"schema_version":schema_version,
"schema_type":"list_alarm_response",
"correlation_id":list_alarm_input['alarm_list_request']['correlation_id'],
- "list_alarm_resp":triggered_alarm_list
+ "list_alarm_response":triggered_alarm_list
}
self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
.format(topic, msg_key, response_msg))
for config in root:
if config.tag == 'Access_Config':
for param in config:
- for key,val in access_info.iteritems():
+ for key,val in six.iteritems(access_info):
if param.tag == key:
#print param.tag, val
param.text = val
tree.write(CONFIG_FILE_PATH)
wr_status = True
except Exception as exp:
- self.logger.warn("Failed to update Access Config Parameters: {}".format(exp))
+ self.logger.warning("Failed to update Access Config Parameters: {}".format(exp))
return wr_status
-<!--
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
--->
<alarmsDefaultConfig>
<Average_Memory_Usage_Above_Threshold>
<vrops_alarm>Avg_Mem_Usage_Above_Thr</vrops_alarm>
</PACKETS_SENT>
<Access_Config>
<vrops_site>https://192.169.241.123</vrops_site>
- <vrops_user>Admin</vrops_user>
- <vrops_password>VMware1!</vrops_password>
- <vcloud-site>https://mano-vcd-1.corp.local</vcloud-site>
- <admin_username>administrator</admin_username>
- <admin_password>VMware1!</admin_password>
+ <vrops_user>admin</vrops_user>
+ <vrops_password>vmware</vrops_password>
+ <vcloud-site>https://192.169.241.15</vcloud-site>
+ <admin_username>admin</admin_username>
+ <admin_password>vmware</admin_password>
<nsx_manager>https://192.169.241.104</nsx_manager>
<nsx_user>admin</nsx_user>
<nsx_password>VMware1!</nsx_password>
- <vcenter_ip>192.169.241.103</vcenter_ip>
+ <vcenter_ip>192.169.241.13</vcenter_ip>
<vcenter_port>443</vcenter_port>
- <vcenter_user>administrator@vsphere.local</vcenter_user>
- <vcenter_password>VMware1!</vcenter_password>
- <vim_tenant_name>Org2-VDC-PVDC1</vim_tenant_name>
+ <vcenter_user>admin</vcenter_user>
+ <vcenter_password>vmware</vcenter_password>
+ <vim_tenant_name>Org2</vim_tenant_name>
<orgname>Org2</orgname>
- <tenant_id>Org2-VDC-PVDC1</tenant_id>
+ <tenant_id>Org2</tenant_id>
</Access_Config>
-</alarmsDefaultConfig>
-
-
+</alarmsDefaultConfig>
\ No newline at end of file
def test_update_alarm_req(self, resp, update_alarm, update_resp, get_creds):
"""Test Aodh update alarm request message from KafkaProducer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "test_id",
- "alarm_update_request":
+ payload = {"alarm_update_request":
{"correlation_id": 123,
"alarm_uuid": "alarm_id",
"metric_uuid": "metric_id"}}
value=json.dumps(payload))
for message in self.req_consumer:
- # Check the vim desired by the message
if message.key == "update_alarm_request":
# Mock a valid alarm update
update_alarm.return_value = "alarm_id", True
- self.alarms.alarming(message)
+ self.alarms.alarming(message, 'test_id')
# A response message is generated and sent via MON's producer
resp.assert_called_with(
def test_create_alarm_req(self, resp, config_alarm, create_resp, get_creds):
"""Test Aodh create alarm request message from KafkaProducer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "test_id",
- "alarm_create_request":
+ payload = {"alarm_create_request":
{"correlation_id": 123,
"alarm_name": "my_alarm",
"metric_name": "my_metric",
value=json.dumps(payload))
for message in self.req_consumer:
- # Check the vim desired by the message
if message.key == "create_alarm_request":
# Mock a valid alarm creation
config_alarm.return_value = "alarm_id", True
- self.alarms.alarming(message)
+ self.alarms.alarming(message, 'test_id')
# A response message is generated and sent via MON's produce
resp.assert_called_with(
def test_list_alarm_req(self, resp, list_alarm, list_resp, get_creds):
"""Test Aodh list alarm request message from KafkaProducer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "test_id",
- "alarm_list_request":
+ payload = {"alarm_list_request":
{"correlation_id": 123,
"resource_uuid": "resource_id", }}
get_creds.return_value = mock_creds
for message in self.req_consumer:
- # Check the vim desired by the message
if message.key == "list_alarm_request":
# Mock an empty list generated by the request
list_alarm.return_value = []
- self.alarms.alarming(message)
+ self.alarms.alarming(message, 'test_id')
# Response message is generated
resp.assert_called_with(
def test_delete_alarm_req(self, resp, del_resp, del_alarm, get_creds):
"""Test Aodh delete alarm request message from KafkaProducer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "test_id",
- "alarm_delete_request":
+ payload = {"alarm_delete_request":
{"correlation_id": 123,
"alarm_uuid": "alarm_id", }}
get_creds.return_value = mock_creds
for message in self.req_consumer:
- # Check the vim desired by the message
if message.key == "delete_alarm_request":
- self.alarms.alarming(message)
+ self.alarms.alarming(message, 'test_id')
# Response message is generated and sent by MON's producer
resp.assert_called_with(
def test_ack_alarm_req(self, ack_alarm, get_creds):
"""Test Aodh acknowledge alarm request message from KafkaProducer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "test_id",
- "ack_details":
+ payload = {"ack_details":
{"alarm_uuid": "alarm_id", }}
self.producer.send('alarm_request', key="acknowledge_alarm",
get_creds.return_value = mock_creds
for message in self.req_consumer:
- # Check the vim desired by the message
if message.key == "acknowledge_alarm":
- self.alarms.alarming(message)
+ self.alarms.alarming(message, 'test_id')
return
self.fail("No message received in consumer")
def test_create_metric_req(self, resp, create_resp, config_metric):
"""Test Gnocchi create metric request message from producer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "1",
- "correlation_id": 123,
- "metric_create":
- {"metric_name": "cpu_utilization",
- "resource_uuid": "resource_id"}}
+ payload = {"metric_create_request": {"correlation_id": 123,
+ "metric_name": "cpu_utilization",
+ "resource_uuid": "resource_id"}}
self.producer.send('metric_request', key="create_metric_request",
value=json.dumps(payload))
for message in self.req_consumer:
- # Check the vim desired by the message
- vim_type = json.loads(message.value)["vim_type"].lower()
- if vim_type == "openstack":
+ if message.key == "create_metric_request":
# A valid metric is created
config_metric.return_value = "metric_id", "resource_id", True
- self.metric_req.metric_calls(message)
+ self.metric_req.metric_calls(message, 'test_id')
# A response message is generated and sent by MON's producer
resp.assert_called_with(
if message.key == "delete_metric_request":
# Metric has been deleted
del_metric.return_value = True
- self.metric_req.metric_calls(message)
+ self.metric_req.metric_calls(message, 'test_id')
# A response message is generated and sent by MON's producer
resp.assert_called_with(
if message.key == "read_metric_data_request":
# Mock empty lists generated by the request message
read_data.return_value = [], []
- self.metric_req.metric_calls(message)
+ self.metric_req.metric_calls(message, 'test_id')
# A response message is generated and sent by MON's producer
resp.assert_called_with(
if message.key == "list_metric_request":
# Mock an empty list generated by the request
list_metrics.return_value = []
- self.metric_req.metric_calls(message)
+ self.metric_req.metric_calls(message, 'test_id')
# A response message is generated and sent by MON's producer
resp.assert_called_with(
def test_update_metrics_req(self, resp, update_resp, get_id):
"""Test Gnocchi update metric request message from KafkaProducer."""
# Set-up message, producer and consumer for tests
- payload = {"vim_type": "OpenSTACK",
- "vim_uuid": "test_id",
- "correlation_id": 123,
- "metric_create":
- {"metric_name": "my_metric",
- "resource_uuid": "resource_id", }}
+ payload = {"metric_create_request": {"metric_name": "my_metric",
+ "correlation_id": 123,
+ "resource_uuid": "resource_id", }}
self.producer.send('metric_request', key="update_metric_request",
value=json.dumps(payload))
if message.key == "update_metric_request":
# Gnocchi doesn't support metric updates
get_id.return_value = "metric_id"
- self.metric_req.metric_calls(message)
+ self.metric_req.metric_calls(message, 'test_id')
# Response message is generated and sent via MON's producer
# No metric update has taken place
import mock
from osm_mon.core.auth import AuthManager
-from osm_mon.core.database import VimCredentials
+from osm_mon.core.database import VimCredentials, DatabaseManager
+from osm_mon.core.message_bus.producer import KafkaProducer
from osm_mon.plugins.OpenStack.Aodh import alarming as alarm_req
from osm_mon.plugins.OpenStack.common import Common
"""Initialize a mocked message instance."""
self.topic = 'alarm_request'
self.key = None
- self.value = json.dumps({'vim_uuid': 'test_id', 'mock_value': 'mock_details'})
+ self.value = json.dumps({'mock_value': 'mock_details'})
+@mock.patch.object(KafkaProducer, 'publish', mock.Mock())
class TestAlarmKeys(unittest.TestCase):
"""Integration test for alarm request keys."""
get_creds.return_value = mock_creds
- self.alarming.alarming(message)
+ self.alarming.alarming(message, 'test_id')
get_token.assert_called_with('test_id')
get_endpoint.assert_any_call('alarming', 'test_id')
# Mock a message value and key
message = Message()
message.key = 'delete_alarm_request'
- message.value = json.dumps({'vim_uuid': 'test_id',
- 'alarm_delete_request':
- {'alarm_uuid': 'my_alarm_id'}})
+ message.value = json.dumps({'alarm_delete_request': {
+ 'correlation_id': 1,
+ 'alarm_uuid': 'my_alarm_id'
+ }})
get_creds.return_value = mock_creds
+ del_alarm.return_value = {}
# Call the alarming functionality and check delete request
- self.alarming.alarming(message)
+ self.alarming.alarming(message, 'test_id')
del_alarm.assert_called_with(mock.ANY, mock.ANY, 'my_alarm_id')
@mock.patch.object(Common, 'get_endpoint', mock.Mock())
# Mock a message with list alarm key and value
message = Message()
message.key = 'list_alarm_request'
- message.value = json.dumps({'vim_uuid': 'test_id', 'alarm_list_request': 'my_alarm_details'})
+ message.value = json.dumps({'alarm_list_request': {'correlation_id': 1}})
get_creds.return_value = mock_creds
+ list_alarm.return_value = []
+
# Call the alarming functionality and check list functionality
- self.alarming.alarming(message)
- list_alarm.assert_called_with(mock.ANY, mock.ANY, 'my_alarm_details')
+ self.alarming.alarming(message, 'test_id')
+ list_alarm.assert_called_with(mock.ANY, mock.ANY, {'correlation_id': 1})
@mock.patch.object(Common, 'get_auth_token', mock.Mock())
@mock.patch.object(Common, 'get_endpoint', mock.Mock())
# Mock a message with acknowledge alarm key and value
message = Message()
message.key = 'acknowledge_alarm'
- message.value = json.dumps({'vim_uuid': 'test_id',
- 'ack_details':
+ message.value = json.dumps({'ack_details':
{'alarm_uuid': 'my_alarm_id'}})
get_creds.return_value = mock_creds
# Call alarming functionality and check acknowledge functionality
- self.alarming.alarming(message)
+ self.alarming.alarming(message, 'test_id')
ack_alarm.assert_called_with(mock.ANY, mock.ANY, 'my_alarm_id')
@mock.patch.object(Common, 'get_auth_token', mock.Mock())
@mock.patch.object(Common, 'get_endpoint', mock.Mock())
+ @mock.patch.object(DatabaseManager, 'save_alarm', mock.Mock())
@mock.patch.object(AuthManager, 'get_credentials')
@mock.patch.object(alarm_req.Alarming, 'configure_alarm')
def test_config_alarm_key(self, config_alarm, get_creds):
# Mock a message with config alarm key and value
message = Message()
message.key = 'create_alarm_request'
- message.value = json.dumps({'vim_uuid': 'test_id', 'alarm_create_request': 'alarm_details'})
+ message.value = json.dumps({'alarm_create_request': {'correlation_id': 1}})
get_creds.return_value = mock_creds
# Call alarming functionality and check config alarm call
config_alarm.return_value = 'my_alarm_id', True
- self.alarming.alarming(message)
- config_alarm.assert_called_with(mock.ANY, mock.ANY, mock.ANY, 'alarm_details', {})
+ self.alarming.alarming(message, 'test_id')
+ config_alarm.assert_called_with(mock.ANY, mock.ANY, mock.ANY, {'correlation_id': 1}, {})
check_metric.return_value = "my_metric_id"
check_pay.return_value = "my_payload"
+ perf_req.return_value = type('obj', (object,), {'text': '{"alarm_id":"1"}'})
+
self.alarming.configure_alarm(alarm_endpoint, metric_endpoint, auth_token, values, {})
perf_req.assert_called_with(
"alarm_endpoint/v2/alarms/", auth_token,
"""Test update alarm with invalid get response."""
values = {"alarm_uuid": "my_alarm_id"}
+ perf_req.return_value = type('obj', (object,), {'invalid_prop': 'Invalid response'})
+
self.alarming.update_alarm(alarm_endpoint, auth_token, values, {})
perf_req.assert_called_with(mock.ANY, auth_token, req_type="get")
resp = Response({"name": "my_alarm",
"state": "alarm",
"gnocchi_resources_threshold_rule":
- {"resource_id": "my_resource_id",
- "metric": "my_metric"}})
+ {"resource_id": "my_resource_id",
+ "metric": "my_metric"}})
perf_req.return_value = resp
check_pay.return_value = None
values = {"alarm_uuid": "my_alarm_id"}
@mock.patch.object(Common, "perform_request")
def test_update_alarm_valid(self, perf_req, check_pay):
"""Test valid update alarm request."""
- resp = Response({"name": "my_alarm",
+ resp = Response({"alarm_id": "1",
+ "name": "my_alarm",
"state": "alarm",
"gnocchi_resources_threshold_rule":
- {"resource_id": "my_resource_id",
- "metric": "disk.write.requests"}})
+ {"resource_id": "my_resource_id",
+ "metric": "disk.write.requests"}})
perf_req.return_value = resp
values = {"alarm_uuid": "my_alarm_id"}
self.assertDictEqual(
json.loads(payload), {"name": "alarm_name",
"gnocchi_resources_threshold_rule":
- {"resource_id": "r_id",
- "metric": "disk.write.requests",
- "comparison_operator": "gt",
- "aggregation_method": "count",
- "threshold": 12,
- "granularity": 300,
- "resource_type": "generic"},
+ {"resource_id": "r_id",
+ "metric": "disk.write.requests",
+ "comparison_operator": "gt",
+ "aggregation_method": "count",
+ "threshold": 12,
+ "granularity": 300,
+ "resource_type": "generic"},
"severity": "low",
"state": "ok",
"type": "gnocchi_resources_threshold",
self.assertEqual(
json.loads(payload), {"name": "alarm_name",
"gnocchi_resources_threshold_rule":
- {"resource_id": "r_id",
- "metric": "disk.write.requests",
- "comparison_operator": "gt",
- "aggregation_method": "count",
- "threshold": 12,
- "granularity": 300,
- "resource_type": "generic"},
+ {"resource_id": "r_id",
+ "metric": "disk.write.requests",
+ "comparison_operator": "gt",
+ "aggregation_method": "count",
+ "threshold": 12,
+ "granularity": 300,
+ "resource_type": "generic"},
"severity": "low",
"state": "alarm",
"type": "gnocchi_resources_threshold",
@mock.patch.object(Common, "perform_request")
def test_get_alarm_state(self, perf_req):
"""Test the get alarm state function."""
+ perf_req.return_value = type('obj', (object,), {'text': '{"alarm_id":"1"}'})
+
self.alarming.get_alarm_state(alarm_endpoint, auth_token, "alarm_id")
perf_req.assert_called_with(
"name": "metric_name",
"unit": "units"}}}
+ perf_req.return_value = type('obj', (object,), {'text': '{"id":"1"}'})
+
self.metrics.configure_metric(endpoint, auth_token, values)
perf_req.assert_called_with(
"<ANY>/v1/resource/generic", auth_token, req_type="post",
- payload=json.dumps(payload))
+ payload=json.dumps(payload, sort_keys=True))
@mock.patch.object(Common, "perform_request")
def test_delete_metric_req(self, perf_req):
@mock.patch.object(Common, "perform_request")
def test_delete_metric_invalid_status(self, perf_req):
"""Test invalid response for delete request."""
- perf_req.return_value = "404"
+ perf_req.return_value = type('obj', (object,), {"status_code": "404"})
status = self.metrics.delete_metric(endpoint, auth_token, "metric_id")
"collection_unit": "DAY",
"collection_period": 1}
+ perf_req.return_value = type('obj', (object,), {'text': '{"metric_data":"[]"}'})
+
get_metric.return_value = "metric_id"
self.metrics.read_metric_data(endpoint, auth_token, values)
import mock
+from osm_mon.core.message_bus.producer import KafkaProducer
from osm_mon.plugins.OpenStack.Gnocchi import metrics as metric_req
from osm_mon.plugins.OpenStack.common import Common
"""Initialize a mocked message instance."""
self.topic = "metric_request"
self.key = None
- self.value = json.dumps({"vim_uuid": "test_id", "mock_message": "message_details"})
+ self.value = json.dumps({"mock_message": "message_details"})
+@mock.patch.object(KafkaProducer, 'publish', mock.Mock())
class TestMetricReq(unittest.TestCase):
"""Integration test for metric request keys."""
super(TestMetricReq, self).setUp()
self.metrics = metric_req.Metrics()
- @mock.patch.object(Common, 'get_endpoint')
- @mock.patch.object(Common, "get_auth_token")
- def test_access_cred_metric_auth(self, get_token, get_endpoint):
- """Test authentication with access credentials."""
- message = Message()
-
- self.metrics.metric_calls(message)
-
- get_token.assert_called_with('test_id')
- get_endpoint.assert_any_call('metric', 'test_id')
-
@mock.patch.object(Common, "get_auth_token", mock.Mock())
@mock.patch.object(Common, 'get_endpoint', mock.Mock())
@mock.patch.object(metric_req.Metrics, "delete_metric")
# Mock a message value and key
message = Message()
message.key = "delete_metric_request"
- message.value = json.dumps({"vim_uuid": "test_id", "metric_name": "disk_write_ops", "resource_uuid": "my_r_id"})
+ message.value = json.dumps({"metric_name": "disk_write_ops", "resource_uuid": "my_r_id", "correlation_id": 1})
+
+ del_metric.return_value = True
# Call the metric functionality and check delete request
get_metric_id.return_value = "my_metric_id"
- self.metrics.metric_calls(message)
+ self.metrics.metric_calls(message, 'test_id')
del_metric.assert_called_with(mock.ANY, mock.ANY, "my_metric_id")
@mock.patch.object(Common, "get_auth_token", mock.Mock())
# Mock a message with list metric key and value
message = Message()
message.key = "list_metric_request"
- message.value = json.dumps({"vim_uuid": "test_id", "metrics_list_request": "metric_details"})
+ message.value = json.dumps({"metrics_list_request": {"correlation_id": 1}})
+
+ list_metrics.return_value = []
# Call the metric functionality and check list functionality
- self.metrics.metric_calls(message)
- list_metrics.assert_called_with(mock.ANY, mock.ANY, "metric_details")
+ self.metrics.metric_calls(message, 'test_id')
+ list_metrics.assert_called_with(mock.ANY, mock.ANY, {"correlation_id": 1})
@mock.patch.object(Common, "get_auth_token", mock.Mock())
@mock.patch.object(Common, 'get_endpoint', mock.Mock())
# Mock a message with update metric key and value
message = Message()
message.key = "update_metric_request"
- message.value = json.dumps({"vim_uuid": "test_id",
- "metric_create":
- {"metric_name": "my_metric",
+ message.value = json.dumps({"metric_create_request":
+ {"correlation_id": 1,
+ "metric_name": "my_metric",
"resource_uuid": "my_r_id"}})
# Call metric functionality and confirm no function is called
# Gnocchi does not support updating a metric configuration
- self.metrics.metric_calls(message)
+ self.metrics.metric_calls(message, 'test_id')
config_metric.assert_not_called()
list_metrics.assert_not_called()
delete_metric.assert_not_called()
# Mock a message with create metric key and value
message = Message()
message.key = "create_metric_request"
- message.value = json.dumps({"vim_uuid": "test_id", "metric_create": "metric_details"})
+ message.value = json.dumps({"metric_create_request": "metric_details"})
# Call metric functionality and check config metric
config_metric.return_value = "metric_id", "resource_id", True
- self.metrics.metric_calls(message)
+ self.metrics.metric_calls(message, 'test_id')
config_metric.assert_called_with(mock.ANY, mock.ANY, "metric_details")
@mock.patch.object(Common, "get_auth_token", mock.Mock())
# Mock a message with a read data key and value
message = Message()
message.key = "read_metric_data_request"
- message.value = json.dumps({"vim_uuid": "test_id", "alarm_uuid": "alarm_id"})
+ message.value = json.dumps({"alarm_uuid": "alarm_id"})
# Call metric functionality and check read data metrics
read_data.return_value = "time_stamps", "data_values"
- self.metrics.metric_calls(message)
+ self.metrics.metric_calls(message, 'test_id')
read_data.assert_called_with(
mock.ANY, mock.ANY, json.loads(message.value))
sev=values['severity'], date=a_date,
state=values['current'], vim_type="OpenStack")
self._producer.notify_alarm(
- 'notify_alarm', resp_message, 'alarm_response')
+ 'notify_alarm', resp_message)
except Exception:
pass
self.handler.notify_alarm(json.loads(post_data))
notify.assert_called_with(
- "notify_alarm", valid_notify_resp, "alarm_response")
+ "notify_alarm", valid_notify_resp)
@mock.patch.object(monPlugin.requests, 'get')
+ # @unittest.skip("NEEDS FIX")
def test_get_alarm_defination_by_name_no_valid_alarm_found(self, m_get):
"""Test get_alarm_defination_by_name: With no valid alarm found in returned list"""
msg.topic = "metric_request"
msg.key = "create_metric_request"
- msg.value = json.dumps({"metric_create":"metric_details"})
+ msg.value = json.dumps({"metric_create_request":"metric_details"})
# set the return value
m_verify_metric.return_value = True
msg.topic = "metric_request"
msg.key = "update_metric_request"
- msg.value = json.dumps({"metric_create":"metric_details"})
+ msg.value = json.dumps({"metric_create_request":"metric_details"})
# set the return value
m_verify_metric.return_value = True
# Mock metric_info
metric_info = {'vim_type' : 'VMware','correlation_id': 'e14b203c',
- 'metric_create':{
+ 'metric_create_request':{
'resource_uuid': '6486e69',
'metric_name': 'CPU_UTILIZATION',
'metric_unit': '%'
# Mock metric_info
metric_info = {'vim_type' : 'VMware','correlation_id': 'e14b203c',
- 'metric_create':{
+ 'metric_create_request':{
'resource_uuid': '6486e69',
'metric_name': 'CPU_UTILIZATION',
'metric_unit': '%'
--- /dev/null
+import unittest
+
+import mock
+
+from osm_mon.core.database import VimCredentials
+from osm_mon.core.message_bus.common_consumer import *
+
+
+class CommonConsumerTest(unittest.TestCase):
+ @mock.patch.object(DatabaseManager, "get_credentials")
+ def test_get_vim_type(self, get_creds):
+ mock_creds = VimCredentials()
+ mock_creds.id = 'test_id'
+ mock_creds.user = 'user'
+ mock_creds.url = 'url'
+ mock_creds.password = 'password'
+ mock_creds.tenant_name = 'tenant_name'
+ mock_creds.type = 'openstack'
+
+ get_creds.return_value = mock_creds
+
+ db_manager = DatabaseManager()
+ vim_type = get_vim_type(db_manager, 'test_id')
+
+ self.assertEqual(vim_type, 'openstack')
+
+ @mock.patch.object(dbmongo.DbMongo, "get_one")
+ def test_get_vdur(self, get_one):
+ get_one.return_value = {'_id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
+ '_admin': {
+ 'projects_read': ['admin'], 'created': 1526044312.102287,
+ 'modified': 1526044312.102287, 'projects_write': ['admin']
+ },
+ 'vim-account-id': 'c1740601-7287-48c8-a2c9-bce8fee459eb',
+ 'nsr-id-ref': '5ec3f571-d540-4cb0-9992-971d1b08312e',
+ 'vdur': [
+ {
+ 'internal-connection-point': [],
+ 'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
+ 'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
+ 'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7'
+ }
+ ],
+ 'vnfd-ref': 'ubuntuvnf_vnfd',
+ 'member-vnf-index-ref': '1',
+ 'created-time': 1526044312.0999322,
+ 'vnfd-id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
+ 'id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01'}
+
+ common_db = dbmongo.DbMongo()
+ vdur = get_vdur(common_db, '5ec3f571-d540-4cb0-9992-971d1b08312e', '1', 'ubuntuvnf_vnfd-VM')
+ expected_vdur = {
+ 'internal-connection-point': [],
+ 'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
+ 'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
+ 'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7'
+ }
+
+ self.assertDictEqual(vdur, expected_vdur)
+
+
+if __name__ == '__main__':
+ unittest.main()
# For those usages not covered by the Apache License, Version 2.0 please
# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-stdeb==0.8.*
kafka==1.3.*
lxml==4.2.*
requests==2.18.*
# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
__author__ = "Prithiv Mohan"
-__date__ = "14/Sep/2017"
+__date__ = "14/Sep/2017"
from setuptools import setup
-from os import system
+
+
+def parse_requirements(requirements):
+ with open(requirements) as f:
+ return [l.strip('\n') for l in f if l.strip('\n') and not l.startswith('#') and '://' not in l]
+
+
_name = 'osm_mon'
_version = '1.0'
_description = 'OSM Monitoring Module'
_license = 'Apache 2.0'
_url = 'https://osm.etsi.org/gitweb/?p=osm/MON.git;a=tree'
setup(name="osm_mon",
- version = _version,
- description = _description,
- long_description = open('README.rst').read(),
- author = _author,
- author_email = _author_email,
- maintainer = _maintainer,
- maintainer_email = _maintainer_email,
- url = _url,
- license = _license,
- packages = [_name],
- package_dir = {_name: _name},
- package_data = {_name: ['osm_mon/core/message_bus/*.py', 'osm_mon/core/models/*.json',
- 'osm_mon/plugins/OpenStack/Aodh/*.py', 'osm_mon/plugins/OpenStack/Gnocchi/*.py',
- 'osm_mon/plugins/vRealiseOps/*', 'osm_mon/plugins/CloudWatch/*']},
- data_files = [('/etc/systemd/system/', ['scripts/kafka.sh']),
- ],
+ version=_version,
+ description=_description,
+ long_description=open('README.rst').read(),
+ author=_author,
+ author_email=_author_email,
+ maintainer=_maintainer,
+ maintainer_email=_maintainer_email,
+ url=_url,
+ license=_license,
+ packages=[_name],
+ package_dir={_name: _name},
+ package_data={_name: ['osm_mon/core/message_bus/*.py', 'osm_mon/core/models/*.json',
+ 'osm_mon/plugins/OpenStack/Aodh/*.py', 'osm_mon/plugins/OpenStack/Gnocchi/*.py',
+ 'osm_mon/plugins/vRealiseOps/*', 'osm_mon/plugins/CloudWatch/*']},
scripts=['osm_mon/plugins/vRealiseOps/vROPs_Webservice/vrops_webservice',
- 'kafkad', 'osm_mon/core/message_bus/common_consumer.py'],
+ 'osm_mon/core/message_bus/common_consumer.py'],
+ install_requires=parse_requirements('requirements.txt'),
include_package_data=True,
+ dependency_links=[
+ 'git+https://osm.etsi.org/gerrit/osm/common.git@857731b#egg=osm-common'
+ ]
)
# For those usages not covered by the Apache License, Version 2.0 please
# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
##
-
-hacking>=0.10.0,<0.11
-
flake8<3.0
mock
-oslosphinx>=2.5.0 # Apache-2.0
-oslotest>=1.10.0 # Apache-2.0
-os-testr
-testrepository>=0.0.18
-pylint
-python-subunit>=0.0.18
-pytest
-testscenarios>=0.4
-testtools>=1.4.0
-kafka
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it, "pip install tox"
# and then run "tox" from this directory.
-
[tox]
-minversion = 1.6
-envlist = py27
-skipsdist = True
+envlist = py3
+toxworkdir={homedir}/.tox
[testenv]
-usedevelop = True
-install_command = pip install -r requirements.txt -U {opts} {packages}
-commands = sh tools/pretty_tox.sh '{posargs}'
+basepython = python3
+commands=python3 -m unittest discover -v
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
deps = -r{toxinidir}/test-requirements.txt
-whitelist_externals = sh
-setenv =
- VIRTUAL_ENV={envdir}
-[testenv:pep8]
-commands = flake8 plugins
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands =
+ flake8 osm_mon
-[pep8]
-max-line-length = 80
+[testenv:build]
+basepython = python3
+deps = stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
-max-line-length = 80
+max-line-length = 120
show-source = True
ignore = E123,E125,E241
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,devops_stages/*,.rst
+
+