# under the License.
# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com.
include requirements.txt
include README.rst
include kafkad
-recursive-include osm-mon *
+recursive-include osm_mon *
recursive-include devops-stages *
recursive-include test *
cp README.rst build/
cp setup.py build/
cp kafkad build/
- cp -r osm-mon build/
+ cp -r osm_mon build/
cp -r devops-stages build/
cp -r scripts build/
#pip install -r requirements.txt
#pip install -r test-requirements.txt
build: clean openstack_plugins prepare
- python -m py_compile build/osm-mon/plugins/OpenStack/*.py
+ python -m py_compile build/osm_mon/plugins/OpenStack/*.py
build: clean vrops_plugins prepare
- python -m py_compile build/osm-mon/plugins/vRealiseOps/*.py
+ python -m py_compile build/osm_mon/plugins/vRealiseOps/*.py
build: clean cloudwatch_plugins prepare
- python -m py_compile build/osm-mon/plugins/CloudWatch/*.py
+ python -m py_compile build/osm_mon/plugins/CloudWatch/*.py
build: clean core prepare
- python -m py_compile build/osm-mon/core/message_bus/*.py
+ python -m py_compile build/osm_mon/core/message_bus/*.py
pip: prepare
cd build ./setup.py sdist
#__date__ = "14/Sep/2017"
#!/bin/bash
+make clean all BRANCH=master
make package
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You may
-# obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-"""A common KafkaConsumer for all MON plugins."""
-
-import json
-import logging
-import sys
-
-sys.path.append("/root/MON")
-
-logging.basicConfig(filename='MON_plugins.log',
- format='%(asctime)s %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p', filemode='a',
- level=logging.INFO)
-log = logging.getLogger(__name__)
-
-from kafka import KafkaConsumer
-from kafka.errors import KafkaError
-
-from plugins.OpenStack.Aodh import alarming
-from plugins.OpenStack.common import Common
-from plugins.OpenStack.Gnocchi import metrics
-
-
-# Initialize servers
-server = {'server': 'localhost:9092'}
-
-# Initialize consumers for alarms and metrics
-common_consumer = KafkaConsumer(group_id='osm_mon',
- bootstrap_servers=server['server'])
-
-# Create OpenStack alarming and metric instances
-auth_token = None
-openstack_auth = Common()
-openstack_metrics = metrics.Metrics()
-openstack_alarms = alarming.Alarming()
-
-
-def get_vim_type(message):
- """Get the vim type that is required by the message."""
- try:
- return json.loads(message.value)["vim_type"].lower()
- except Exception as exc:
- log.warn("vim_type is not configured correctly; %s", exc)
- return None
-
-# Define subscribe the consumer for the plugins
-topics = ['metric_request', 'alarm_request', 'access_credentials']
-common_consumer.subscribe(topics)
-
-try:
- log.info("Listening for alarm_request and metric_request messages")
- for message in common_consumer:
- # Check the message topic
- if message.topic == "metric_request":
- # Check the vim desired by the message
- vim_type = get_vim_type(message)
- if vim_type == "openstack":
- log.info("This message is for the OpenStack plugin.")
- openstack_metrics.metric_calls(
- message, openstack_auth, auth_token)
-
- elif vim_type == "cloudwatch":
- log.info("This message is for the CloudWatch plugin.")
-
- elif vim_type == "vrops":
- log.info("This message is for the vROPs plugin.")
-
- else:
- log.debug("vim_type is misconfigured or unsupported; %s",
- vim_type)
-
- elif message.topic == "alarm_request":
- # Check the vim desired by the message
- vim_type = get_vim_type(message)
- if vim_type == "openstack":
- log.info("This message is for the OpenStack plugin.")
- openstack_alarms.alarming(message, openstack_auth, auth_token)
-
- elif vim_type == "cloudwatch":
- log.info("This message is for the CloudWatch plugin.")
-
- elif vim_type == "vrops":
- log.info("This message is for the vROPs plugin.")
-
- else:
- log.debug("vim_type is misconfigured or unsupported; %s",
- vim_type)
-
- elif message.topic == "access_credentials":
- # Check the vim desired by the message
- vim_type = get_vim_type(message)
- if vim_type == "openstack":
- log.info("This message is for the OpenStack plugin.")
- auth_token = openstack_auth._authenticate(message=message)
-
- elif vim_type == "cloudwatch":
- log.info("This message is for the CloudWatch plugin.")
-
- elif vim_type == "vrops":
- log.info("This message is for the vROPs plugin.")
-
- else:
- log.debug("vim_type is misconfigured or unsupported; %s",
- vim_type)
-
- else:
- log.info("This topic is not relevant to any of the MON plugins.")
-
-
-except KafkaError as exc:
- log.warn("Exception: %s", exc)
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-##
-
-'''
-This is a kafka consumer app that reads the messages from the message bus for
-alarms and metrics responses.
-
-'''
-
-__author__ = "Prithiv Mohan"
-__date__ = "06/Sep/2017"
-
-
-from kafka import KafkaConsumer
-from kafka.errors import KafkaError
-import json
-import logging
-import logging.config
-import os
-
-
-def logging_handler(filename, mode='a+', encoding=None):
- if not os.path.exists(filename):
- open(filename, 'a').close()
- return logging.FileHandler(filename, mode)
-
-log_config = {
- 'version': 1,
- 'formatters': {
- 'default': {
- 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
- },
- },
- 'handlers': {
- 'file': {
- '()': logging_handler,
- 'level': 'DEBUG',
- 'formatter': 'default',
- 'filename': '/var/log/osm_mon.log',
- 'mode': 'a+',
- 'encoding': 'utf-8',
- },
- },
- 'kafka': {
- 'handlers': ['file'],
- 'level': 'DEBUG',
- },
- 'root': {
- 'handlers': ['file'],
- 'level': 'DEBUG',
- },
-}
-
-
-logging.config.dictConfig(log_config)
-logger = logging.getLogger('kafka')
-
-if "BROKER_URI" in os.environ:
- broker = os.getenv("BROKER_URI")
-else:
- broker = "localhost:9092"
-
-alarm_consumer = KafkaConsumer(
- 'alarm_response', 'osm_mon', bootstrap_servers=broker)
-metric_consumer = KafkaConsumer(
- 'metric_response', 'osm_mon', bootstrap_servers=broker)
-try:
- for message in alarm_consumer:
- logger.debug(message)
- for message in metric_consumer:
- logger.debug(message)
-except KafkaError:
- log.exception()
-
-alarm_consumer.subscribe('alarm_response')
-metric_consumer.subscribe('metric_response')
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-##
-'''
-This is a kafka producer app that interacts with the SO and the plugins of the
-datacenters like OpenStack, VMWare, AWS.
-'''
-
-from kafka import KafkaProducer as kaf
-from kafka.errors import KafkaError
-import logging
-import json
-import jsmin
-import os
-from os import listdir
-from jsmin import jsmin
-
-__author__ = "Prithiv Mohan"
-__date__ = "06/Sep/2017"
-
-json_path = os.path.join(os.pardir+"/models/")
-
-
-class KafkaProducer(object):
-
- def __init__(self, topic):
-
- self._topic = topic
-
- if "BROKER_URI" in os.environ:
- broker = os.getenv("BROKER_URI")
- else:
- broker = "localhost:9092"
-
- '''
- If the broker URI is not set in the env, by default,
- localhost container is taken as the host because an instance of
- is already running.
- '''
-
- self.producer = kaf(
- key_serializer=str.encode,
- value_serializer=lambda v: json.dumps(v).encode('ascii'),
- bootstrap_servers=broker, api_version=(0, 10))
-
- def publish(self, key, value, topic=None):
- try:
- future = self.producer.send(topic=topic, key=key, value=value)
- self.producer.flush()
- except Exception:
- logging.exception("Error publishing to {} topic." .format(topic))
- raise
- try:
- record_metadata = future.get(timeout=10)
- logging.debug("TOPIC:", record_metadata.topic)
- logging.debug("PARTITION:", record_metadata.partition)
- logging.debug("OFFSET:", record_metadata.offset)
- except KafkaError:
- pass
-
- def create_alarm_request(self, key, message, topic):
-
- # External to MON
-
- payload_create_alarm = jsmin(
- open(os.path.join(json_path, 'create_alarm.json')).read())
- self.publish(key,
- value=json.dumps(payload_create_alarm),
- topic='alarm_request')
-
- def create_alarm_response(self, key, message, topic):
-
- # Internal to MON
-
- payload_create_alarm_resp = jsmin(
- open(os.path.join(json_path, 'create_alarm_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='alarm_response')
-
- def acknowledge_alarm(self, key, message, topic):
-
- # Internal to MON
-
- payload_acknowledge_alarm = jsmin(
- open(os.path.join(json_path, 'acknowledge_alarm.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_acknowledge_alarm),
- topic='alarm_request')
-
- def list_alarm_request(self, key, message, topic):
-
- # External to MON
-
- payload_alarm_list_req = jsmin(
- open(os.path.join(json_path, 'list_alarm_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_alarm_list_req),
- topic='alarm_request')
-
- def notify_alarm(self, key, message, topic):
-
- payload_notify_alarm = jsmin(
- open(os.path.join(json_path, 'notify_alarm.json')).read())
-
- self.publish(key,
- value=message,
- topic='alarm_response')
-
- def list_alarm_response(self, key, message, topic):
-
- payload_list_alarm_resp = jsmin(
- open(os.path.join(json_path, 'list_alarm_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='alarm_response')
-
- def update_alarm_request(self, key, message, topic):
-
- # External to Mon
-
- payload_update_alarm_req = jsmin(
- open(os.path.join(json_path, 'update_alarm_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_update_alarm_req),
- topic='alarm_request')
-
- def update_alarm_response(self, key, message, topic):
-
- # Internal to Mon
-
- payload_update_alarm_resp = jsmin(
- open(os.path.join(json_path, 'update_alarm_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='alarm_response')
-
- def delete_alarm_request(self, key, message, topic):
-
- # External to Mon
-
- payload_delete_alarm_req = jsmin(
- open(os.path.join(json_path, 'delete_alarm_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_delete_alarm_req),
- topic='alarm_request')
-
- def delete_alarm_response(self, key, message, topic):
-
- # Internal to Mon
-
- payload_delete_alarm_resp = jsmin(
- open(os.path.join(json_path, 'delete_alarm_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='alarm_response')
-
- def create_metrics_request(self, key, message, topic):
-
- # External to Mon
-
- payload_create_metrics_req = jsmin(
- open(os.path.join(json_path, 'create_metric_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_create_metrics_req),
- topic='metric_request')
-
- def create_metrics_resp(self, key, message, topic):
-
- # Internal to Mon
-
- payload_create_metrics_resp = jsmin(
- open(os.path.join(json_path, 'create_metric_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='metric_response')
-
- def read_metric_data_request(self, key, message, topic):
-
- # External to Mon
-
- payload_read_metric_data_request = jsmin(
- open(os.path.join(json_path, 'read_metric_data_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_read_metric_data_request),
- topic='metric_request')
-
- def read_metric_data_response(self, key, message, topic):
-
- # Internal to Mon
-
- payload_metric_data_response = jsmin(
- open(os.path.join(json_path, 'read_metric_data_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='metric_response')
-
- def list_metric_request(self, key, message, topic):
-
- # External to MON
-
- payload_metric_list_req = jsmin(
- open(os.path.join(json_path, 'list_metric_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_metric_list_req),
- topic='metric_request')
-
- def list_metric_response(self, key, message, topic):
-
- # Internal to MON
-
- payload_metric_list_resp = jsmin(
- open(os.path.join(json_path, 'list_metrics_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='metric_response')
-
- def delete_metric_request(self, key, message, topic):
-
- # External to Mon
-
- payload_delete_metric_req = jsmin(
- open(os.path.join(json_path, 'delete_metric_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_delete_metric_req),
- topic='metric_request')
-
- def delete_metric_response(self, key, message, topic):
-
- # Internal to Mon
-
- payload_delete_metric_resp = jsmin(
- open(os.path.join(json_path, 'delete_metric_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='metric_response')
-
- def update_metric_request(self, key, message, topic):
-
- # External to Mon
-
- payload_update_metric_req = jsmin(
- open(os.path.join(json_path, 'update_metric_req.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_update_metric_req),
- topic='metric_request')
-
- def update_metric_response(self, key, message, topic):
-
- # Internal to Mon
-
- payload_update_metric_resp = jsmin(
- open(os.path.join(json_path, 'update_metric_resp.json')).read())
-
- self.publish(key,
- value=message,
- topic='metric_response')
-
- def access_credentials(self, key, message, topic):
-
- payload_access_credentials = jsmin(
- open(os.path.join(json_path, 'access_credentials.json')).read())
-
- self.publish(key,
- value=json.dumps(payload_access_credentials),
- topic='access_credentials')
+++ /dev/null
-#gitkeep file to keep the initial empty directory structure.
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for CloudWatch access credentials */
-
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "access_config":
- {
- "aws_site": { "type": "string" },
- "user": { "type": "string" },
- "password": { "type": "string" },
- "vim_tenant_name": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "aws_site",
- "user",
- "password",
- "vim_tenant_name" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for OpenStack access credentials */
-
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "access_config":
- {
- "openstack_site": { "type" : "string" },
- "user": { "type": "string" },
- "password": { "type": "string" },
- "vim_tenant_name": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "openstack_site",
- "user",
- "password",
- "vim_tenant_name" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for vROPs access credentials */
-
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "access_config":
- {
- "vrops_site": { "type": "string" },
- "vrops_user": { "type": "string" },
- "vrops_password": { "type": "string" },
- "vcloud_site": { "type": "string" },
- "admin_username": { "type": "string" },
- "admin_password": { "type": "string" },
- "nsx_manager": { "type": "string" },
- "nsx_user": { "type": "string" },
- "nsx_password": { "type": "string" },
- "vcenter_ip": { "type": "string" },
- "vcenter_port": { "type": "string" },
- "vcenter_user": { "type": "string" },
- "vcenter_password": { "type": "string" },
- "vim_tenant_name": { "type": "string" },
- "orgname": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "vrops_site",
- "vrops_user",
- "vrops_password",
- "vcloud_site",
- "admin_username",
- "admin_password",
- "vcenter_ip",
- "vcenter_port",
- "vcenter_user",
- "vcenter_password",
- "vim_tenant_name",
- "orgname" ]
-}
+++ /dev/null
-
-/* Copyright© 2017 Intel Research and Development Ireland Limited
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-# This is the message bus schema for acknowledge_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "ack_details":
- {
- "alarm_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "tenant_uuid": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "alarm_uuid",
- "resource_uuid",
- "tenant_uuid" ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-# This is the message bus schema to create_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string "},
- "alarm_create_request":
- {
- "correlation_id": { "type": "integer" },
- "alarm_name": { "type": "string" },
- "metric_name": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "description": { "type": "string" },
- "severity": { "type": "string" },
- "operation": { "type": "string" },
- "threshold_value": { "type": "integer" },
- "unit": { "type": "string" },
- "statistic": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "correlation_id",
- "alarm_name",
- "metric_name",
- "resource_uuid",
- "severity",
- "operation",
- "threshold_value",
- "unit",
- "statistic" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for create_alarm response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "alarm_create_response":
- {
- "correlation_id": { "type": "integer" },
- "alarm_uuid": { "type": "string" },
- "status": { "type": "boolean" }
- },
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "alarm_uuid",
- "status" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to create_metric */
-
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "metric_create":
- {
- "metric_name": { "type" : "string" },
- "metric_unit": { "type": "string" },
- "resource_uuid": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "vim_type",
- "metric_name",
- "metric_unit",
- "resource_uuid" ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for create_metric response*/
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "metric_create_response":
- {
- "metric_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "status": { "type": "boolean" }
- },
- "required": [ "schema_type",
- "schema_version",
- "correlation_id",
- "metric_uuid",
- "resource_uuid",
- "status" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to delete_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "alarm_delete_request":
- {
- "alarm_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "alarm_uuid",
- "correlation_id"
- ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
- This is the message bus schema for delete_metric_response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "alarm_deletion_response":
- {
- "correlation_id": { "type": "integer" },
- "alarm_uuid": { "type": "string" },
- "status": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "alarm_uuid",
- "status" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to delete_metric */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "resource_id": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "required": [ "schema_verion",
- "schema_type",
- "metric_name",
- "metric_uuid",
- "resource_uuid",
- "correlation_id",
- "vim_type" ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for delete_metric_response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "status": { "type": "boolean" },
- "required": [ "schema_version",
- "schema_type",
- "metric_name",
- "metric_uuid",
- "resource_uuid",
- "status",
- "correlation_id" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-# This is the message bus schema to list_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "alarm_list_request":
- {
- "correlation_id": { "type": "integer" },
- "resource_uuid": { "type": "string" },
- "alarm_name": { "type": "string" },
- "severity": { "type" : "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "correlation_id",
- "resource_uuid"
- ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for list_alarm response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "list_alarm_resp": { "$ref": "definitions.json#/notify_details" }
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to list_metric */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "metrics_list_request":
- {
- "metric_name": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "resource_uuid": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "vim_type",
- "correlation_id"
- ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for list_metric response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "metrics_list":
- [{
- "type": "array",
- "properties":{
- "metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "metric_unit": { "type": "string" },
- "resource_uuid": { "type": "string" }
- }
- }],
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "vim_type",
- "metric_name",
- "metric_uuid",
- "metric_unit",
- "resource_uuid" ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to notify_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "definitions":
- {
- "notify_details":
- {
- "alarm_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "description": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "vim_type": { "type": "string" },
- "severity": { "type" : "string" },
- "status": { "type": "string" },
- "start_date": { "type": "string" },
- "update_date": { "type": "string" },
- "cancel_date": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "alarm_uuid",
- "resource_uuid",
- "vim_type",
- "severity",
- "status",
- "start_date" ]
- }
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to read_metric_data */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "resource_uuid": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "collection_period": { "type": "integer" },
- "collection_unit": { "type": "string" },
- "required": ["schema_version",
- "schema_type",
- "metric_name",
- "metric_uuid",
- "correlation_id",
- "vim_type",
- "collection_period",
- "collection_unit",
- "resource_uuid"]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for read_metric_data response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": {"type": "string" },
- "metric_name": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "resource_uuid": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "metrics_data":
- {
- "time_series": [{
- "type": "array",
- "properties":
- { "time_stamp":
- { "type": "integer" }}}
- ]
- },
- "metrics_series": [{
- "type": "array",
- "properties":
- { "data":
- { "type":
- ["integer",
- "string",
- "decimal"
- ]
- }
- }
- }
- ],
- "unit": { "type": "string" },
- "required": [ "schema_version",
- "schema_type",
- "metric_name",
- "metric_uuid",
- "resource_uuid",
- "correlation_id",
- "time_series",
- "metrics_series" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to update_alarm */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "vim_type": { "type": "string" },
- "alarm_update_request":
- {
- "correlation_id": { "type": "integer" },
- "alarm_uuid": { "type": "string" },
- "metric_uuid": { "type": "string" },
- "description": { "type": "string" },
- "severity": { "type": "string" },
- "operation": { "type": "string" },
- "threshold_value": { "type": "string" },
- "unit": { "type": "string" },
- "statistic": { "type": "string" }
- },
- "required": [ "schema_version",
- "scema_type",
- "vim_type",
- "correlation_id",
- "alarm_uuid",
- "metric_uuid" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for update_alarm response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "alarm_update_response":
- {
- "correlation_id": { "type": "integer" },
- "alarm_uuid": { "type": "string" },
- "status": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "alarm_uuid",
- "status" ]
-}
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema for update_metric response */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "tenant_uuid": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "vim_type": { "type": "string" },
- "metric_create":
- {
- "metric_name": { "type": "string" },
- "metric_unit": { "type": "string" },
- "resource_uuid": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "vim_type",
- "resource_uuid"
- ]
-}
\ No newline at end of file
+++ /dev/null
-/* Copyright© 2017 Intel Research and Development Ireland Limited
- # This file is part of OSM Monitoring module
- # All Rights Reserved to Intel Corporation
-
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
-
- # For those usages not covered by the Apache License, Version 2.0 please
- # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
- # This is the message bus schema to update_metric */
-
-{
- "schema_version": { "type": "string" },
- "schema_type": { "type": "string" },
- "correlation_id": { "type": "integer" },
- "metric_update_response":
- {
- "metric_uuid": { "type": "string" },
- "status": { "type": "boolean" },
- "resource_uuid": { "type": "string" }
- },
- "required": [ "schema_version",
- "schema_type",
- "correlation_id",
- "metric_uuid",
- "resource_uuid",
- "status"]
-}
\ No newline at end of file
+++ /dev/null
-#gitkeep file to keep the initial empty directory structure.
+++ /dev/null
-##
-# Copyright 2017 xFlow Research Pvt. Ltd
-# This file is part of MON module
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: wajeeha.hamid@xflowresearch.com
-##
-
-'''
-Connecting with AWS services --CloudWatch/EC2 using Required keys
-'''
-
-__author__ = "Wajeeha Hamid"
-__date__ = "18-September-2017"
-
-import sys
-import os
-
-try:
- import boto
- import boto.ec2
- import boto.vpc
- import boto.ec2.cloudwatch
- import boto.ec2.connection
- import logging as log
- from boto.ec2.cloudwatch.alarm import MetricAlarm
- from boto.ec2.cloudwatch.dimension import Dimension
- from boto.sns import connect_to_region
- from boto.utils import get_instance_metadata
-
-except:
- exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
-
-
-class Connection():
- """Connection Establishement with AWS -- VPC/EC2/CloudWatch"""
-#-----------------------------------------------------------------------------------------------------------------------------
- def setEnvironment(self):
-
- """Credentials for connecting to AWS-CloudWatch"""
- self.AWS_KEY = os.environ.get("AWS_ACCESS_KEY_ID")
- self.AWS_SECRET = os.environ.get("AWS_SECRET_ACCESS_KEY")
- self.AWS_REGION = os.environ.get("AWS_EC2_REGION","us-west-2")
- #TOPIC = 'YOUR_TOPIC'
-#-----------------------------------------------------------------------------------------------------------------------------
- def connection_instance(self):
- try:
- #VPC Connection
- self.vpc_conn = boto.vpc.connect_to_region(self.AWS_REGION,
- aws_access_key_id=self.AWS_KEY,
- aws_secret_access_key=self.AWS_SECRET)
-
-
- #EC2 Connection
- self.ec2_conn = boto.ec2.connect_to_region(self.AWS_REGION,
- aws_access_key_id=self.AWS_KEY,
- aws_secret_access_key=self.AWS_SECRET)
-
-
- """ TODO : Required to add actions against alarms when needed """
- #self.sns = connect_to_region(self.AWS_REGION)
- #self.topics = self.sns.get_all_topics()
- #self.topic = self.topics[u'ListTopicsResponse']['ListTopicsResult']['Topics'][0]['TopicArn']
-
- #Cloudwatch Connection
- self.cloudwatch_conn = boto.ec2.cloudwatch.connect_to_region(
- self.AWS_REGION,
- aws_access_key_id=self.AWS_KEY,
- aws_secret_access_key=self.AWS_SECRET)
- connection_dict = dict()
- connection_dict['ec2_connection'] = self.ec2_conn
- connection_dict['cloudwatch_connection'] = self.cloudwatch_conn
- return connection_dict
-
- except Exception as e:
- log.error("Failed to Connect with AWS %s: ",str(e))
-
+++ /dev/null
-##
-# Copyright 2017 xFlow Research Pvt. Ltd
-# This file is part of MON module
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: wajeeha.hamid@xflowresearch.com
-##
-
-''' Handling of alarms requests via BOTO 2.48 '''
-
-__author__ = "Wajeeha Hamid"
-__date__ = "18-September-2017"
-
-import sys
-import os
-import re
-import datetime
-import random
-import json
-import logging as log
-from random import randint
-from operator import itemgetter
-from connection import Connection
-
-
-try:
- import boto
- import boto.ec2
- import boto.vpc
- import boto.ec2.cloudwatch
- import boto.ec2.connection
-except:
- exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
-
-STATISTICS = {
- "AVERAGE": "Average",
- "MINIMUM": "Minimum",
- "MAXIMUM": "Maximum",
- "COUNT" : "SampleCount",
- "SUM" : "Sum"}
-
-OPERATIONS = {
- "GE" : ">=",
- "LE" : "<=",
- "GT" : ">",
- "LT" : "<",
- "EQ" : "="}
-
-class MetricAlarm():
- """Alarms Functionality Handler -- Carries out alarming requests and responses via BOTO.Cloudwatch """
- def __init__(self):
- self.alarm_resp = dict()
- self.del_resp = dict()
-
- def config_alarm(self,cloudwatch_conn,create_info):
- """Configure or Create a new alarm"""
- inner_dict = dict()
- """ Alarm Name to ID Mapping """
- alarm_info = create_info['alarm_create_request']
- alarm_id = alarm_info['alarm_name'] + "_" + alarm_info['resource_uuid']
- if self.is_present(cloudwatch_conn,alarm_id)['status'] == True:
- alarm_id = None
- log.debug ("Alarm already exists, Try updating the alarm using 'update_alarm_configuration()'")
- return alarm_id
- else:
- try:
- if alarm_info['statistic'] in STATISTICS:
- if alarm_info['operation'] in OPERATIONS:
- alarm = boto.ec2.cloudwatch.alarm.MetricAlarm(
- connection = cloudwatch_conn,
- name = alarm_info['alarm_name'] + "_" + alarm_info['resource_uuid'],
- metric = alarm_info['metric_name'],
- namespace = "AWS/EC2",
- statistic = STATISTICS[alarm_info['statistic']],
- comparison = OPERATIONS[alarm_info['operation']],
- threshold = alarm_info['threshold_value'],
- period = 60,
- evaluation_periods = 1,
- unit=alarm_info['unit'],
- description = alarm_info['severity'] + ";" + alarm_id + ";" + alarm_info['description'],
- dimensions = {'InstanceId':alarm_info['resource_uuid']},
- alarm_actions = None,
- ok_actions = None,
- insufficient_data_actions = None)
-
- """Setting Alarm Actions :
- alarm_actions = ['arn:aws:swf:us-west-2:465479087178:action/actions/AWS_EC2.InstanceId.Stop/1.0']"""
-
- status=cloudwatch_conn.put_metric_alarm(alarm)
-
- log.debug ("Alarm Configured Succesfully")
- self.alarm_resp['schema_version'] = str(create_info['schema_version'])
- self.alarm_resp['schema_type'] = 'create_alarm_response'
-
- inner_dict['correlation_id'] = str(alarm_info['correlation_id'])
- inner_dict['alarm_uuid'] = str(alarm_id)
- inner_dict['status'] = status
-
- self.alarm_resp['alarm_create_response'] = inner_dict
-
- if status == True:
- return self.alarm_resp
- else:
- return None
- else:
- log.error("Operation not supported")
- return None
- else:
- log.error("Statistic not supported")
- return None
- except Exception as e:
- log.error("Alarm Configuration Failed: " + str(e))
-
-#-----------------------------------------------------------------------------------------------------------------------------
- def update_alarm(self,cloudwatch_conn,update_info):
-
- """Update or reconfigure an alarm"""
- inner_dict = dict()
- alarm_info = update_info['alarm_update_request']
-
- """Alarm Name to ID Mapping"""
- alarm_id = alarm_info['alarm_uuid']
- status = self.is_present(cloudwatch_conn,alarm_id)
-
- """Verifying : Alarm exists already"""
- if status['status'] == False:
- alarm_id = None
- log.debug("Alarm not found, Try creating the alarm using 'configure_alarm()'")
- return alarm_id
- else:
- try:
- if alarm_info['statistic'] in STATISTICS:
- if alarm_info['operation'] in OPERATIONS:
- alarm = boto.ec2.cloudwatch.alarm.MetricAlarm(
- connection = cloudwatch_conn,
- name = status['info'].name ,
- metric = alarm_info['metric_name'],
- namespace = "AWS/EC2",
- statistic = STATISTICS[alarm_info['statistic']],
- comparison = OPERATIONS[alarm_info['operation']],
- threshold = alarm_info['threshold_value'],
- period = 60,
- evaluation_periods = 1,
- unit=alarm_info['unit'],
- description = alarm_info['severity'] + ";" + alarm_id + ";" + alarm_info['description'],
- dimensions = {'InstanceId':str(status['info'].dimensions['InstanceId']).split("'")[1]},
- alarm_actions = None,
- ok_actions = None,
- insufficient_data_actions = None)
-
- """Setting Alarm Actions :
- alarm_actions = ['arn:aws:swf:us-west-2:465479087178:action/actions/AWS_EC2.InstanceId.Stop/1.0']"""
-
- status=cloudwatch_conn.put_metric_alarm(alarm)
- log.debug("Alarm %s Updated ",alarm.name)
- self.alarm_resp['schema_version'] = str(update_info['schema_version'])
- self.alarm_resp['schema_type'] = 'update_alarm_response'
-
- inner_dict['correlation_id'] = str(alarm_info['correlation_id'])
- inner_dict['alarm_uuid'] = str(alarm_id)
- inner_dict['status'] = status
-
- self.alarm_resp['alarm_update_response'] = inner_dict
- return self.alarm_resp
- else:
- log.error("Operation not supported")
- return None
- else:
- log.error("Statistic not supported")
- return None
- except Exception as e:
- log.error ("Error in Updating Alarm " + str(e))
-
-#-----------------------------------------------------------------------------------------------------------------------------
- def delete_Alarm(self,cloudwatch_conn,del_info_all):
-
- """Deletes an Alarm with specified alarm_id"""
- inner_dict = dict()
- del_info = del_info_all['alarm_delete_request']
- status = self.is_present(cloudwatch_conn,del_info['alarm_uuid'])
- try:
- if status['status'] == True:
- del_status=cloudwatch_conn.delete_alarms(status['info'].name)
- self.del_resp['schema_version'] = str(del_info_all['schema_version'])
- self.del_resp['schema_type'] = 'delete_alarm_response'
- inner_dict['correlation_id'] = str(del_info['correlation_id'])
- inner_dict['alarm_id'] = str(del_info['alarm_uuid'])
- inner_dict['status'] = del_status
- self.del_resp['alarm_deletion_response'] = inner_dict
- return self.del_resp
- return None
- except Exception as e:
- log.error("Alarm Not Deleted: " + str(e))
-#-----------------------------------------------------------------------------------------------------------------------------
- def alarms_list(self,cloudwatch_conn,list_info):
-
- """Get a list of alarms that are present on a particular VIM type"""
- alarm_list = []
- alarm_info = dict()
- inner_dict = list_info['alarm_list_request']
- try: #id vim
- alarms = cloudwatch_conn.describe_alarms()
- itr = 0
- for alarm in alarms:
- list_info['alarm_list_request']['alarm_uuid'] = str(alarm.description).split(';')[1]
-
- #Severity = alarm_name = resource_uuid = ""
- if inner_dict['severity'] == "" and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == "":
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #alarm_name = resource_uuid = ""
- if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == "":
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #severity = resource_uuid = ""
- if inner_dict['severity'] == "" and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == "":
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #severity = alarm_name = ""
- if inner_dict['severity'] == "" and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #resource_uuid = ""
- if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == "":
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #alarm_name = ""
- if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #severity = ""
- if inner_dict['severity'] == "" and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
- #Everything provided
- if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
- alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
- itr += 1
-
- alarm_info['schema_version'] = str(list_info['schema_version'])
- alarm_info['schema_type'] = 'list_alarm_response'
- alarm_info['list_alarm_resp'] = alarm_list
-
- return alarm_info
- except Exception as e:
- log.error("Error in Getting List : %s",str(e))
-#-----------------------------------------------------------------------------------------------------------------------------
- def alarm_details(self,cloudwatch_conn,ack_info):
-
- """Get an individual alarm details specified by alarm_name"""
- try:
- alarms_details=cloudwatch_conn.describe_alarm_history()
- alarm_details_all = dict()
- alarm_details_dict = dict()
- ack_info_all = ack_info
-
-
- if 'ack_details' in ack_info:
- ack_info = ack_info['ack_details']
- elif 'alarm_list_request' in ack_info:
- ack_info = ack_info['alarm_list_request']
-
- is_present = self.is_present(cloudwatch_conn,ack_info['alarm_uuid'])
-
- for itr in range (len(alarms_details)):
- if alarms_details[itr].name == is_present['info'].name :#name, timestamp, summary
- if 'created' in alarms_details[itr].summary:
- alarm_details_dict['status'] = "New"
- elif 'updated' in alarms_details[itr].summary:
- alarm_details_dict['status'] = "Update"
- elif 'deleted' in alarms_details[itr].summary:
- alarm_details_dict['status'] = "Canceled"
-
- status = alarms_details[itr].summary.split()
- alarms = cloudwatch_conn.describe_alarms()
- for alarm in alarms:
- if str(alarm.description).split(';')[1] == ack_info['alarm_uuid']:
- alarm_details_dict['alarm_uuid'] = str(ack_info['alarm_uuid'])
- alarm_details_dict['resource_uuid'] = str(alarm.dimensions['InstanceId']).split("'")[1]
- alarm_details_dict['description'] = str(alarm.description).split(';')[1]
- alarm_details_dict['severity'] = str(alarm.description).split(';')[0]
- alarm_details_dict['start_date_time'] = str(alarms_details[itr].timestamp)
- alarm_details_dict['vim_type'] = str(ack_info_all['vim_type'])
- #TODO : tenant id
- if 'ack_details' in ack_info_all:
- alarm_details_all['schema_version'] = str(ack_info_all['schema_version'])
- alarm_details_all['schema_type'] = 'notify_alarm'
- alarm_details_all['notify_details'] = alarm_details_dict
- return alarm_details_all
-
- elif 'alarm_list_request' in ack_info_all:
- return alarm_details_dict
-
- except Exception as e:
- log.error("Error getting alarm details: %s",str(e))
-#-----------------------------------------------------------------------------------------------------------------------------
- def is_present(self,cloudwatch_conn,alarm_id):
- """Finding alarm from already configured alarms"""
- alarm_info = dict()
- try:
- alarms = cloudwatch_conn.describe_alarms()
- for alarm in alarms:
- if str(alarm.description).split(';')[1] == alarm_id:
- alarm_info['status'] = True
- alarm_info['info'] = alarm
- return alarm_info
- alarm_info['status'] = False
- return alarm_info
- except Exception as e:
- log.error("Error Finding Alarm",str(e))
-#-----------------------------------------------------------------------------------------------------------------------------
-
\ No newline at end of file
+++ /dev/null
-##
-# Copyright 2017 xFlow Research Pvt. Ltd
-# This file is part of MON module
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: wajeeha.hamid@xflowresearch.com
-##
-
-'''
-AWS-Plugin implements all the methods of MON to interact with AWS using the BOTO client
-'''
-
-__author__ = "Wajeeha Hamid"
-__date__ = "18-Sept-2017"
-
-import sys
-import datetime
-import json
-import logging as log
-
-try:
- import boto
- import boto.ec2
- import boto.vpc
- import boto.ec2.cloudwatch
- import boto.ec2.connection
-except:
- exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
-
-
-
-class Metrics():
-
- def createMetrics(self,cloudwatch_conn,metric_info):
- try:
-
- '''createMetrics will be returning the metric_uuid=0 and
- status=True when the metric is supported by AWS'''
-
- supported=self.check_metric(metric_info['metric_name'])
- metric_resp = dict()
- metric_resp['resource_uuid'] = metric_info['resource_uuid']
-
- if supported['status'] == True:
- metric_resp['status'] = True
- metric_resp['metric_uuid'] = 0
- log.debug("Metrics Configured Succesfully : %s" , metric_resp)
- else:
- metric_resp['status'] = False
- metric_resp['metric_uuid'] = None
- log.error("Metric name is not supported")
-
- return metric_resp
-
- except Exception as e:
- log.error("Metric Configuration Failed: " + str(e))
-#-----------------------------------------------------------------------------------------------------------------------------
-
- def metricsData(self,cloudwatch_conn,data_info):
-
- """Getting Metrics Stats for an Hour.The datapoints are
- received after every one minute.
- Time interval can be modified using Timedelta value"""
-
- try:
- metric_info = dict()
- metric_info_dict = dict()
- timestamp_arr = {}
- value_arr = {}
-
- supported=self.check_metric(data_info['metric_name'])
-
- if supported['status'] == True:
- if int(data_info['collection_period']) % 60 == 0:
- metric_stats=cloudwatch_conn.get_metric_statistics(60, datetime.datetime.utcnow() - datetime.timedelta(seconds=int(data_info['collection_period'])),
- datetime.datetime.utcnow(),supported['metric_name'],'AWS/EC2', 'Maximum',
- dimensions={'InstanceId':data_info['resource_uuid']}, unit='Percent')
- index = 0
- for itr in range (len(metric_stats)):
- timestamp_arr[index] = str(metric_stats[itr]['Timestamp'])
- value_arr[index] = metric_stats[itr]['Maximum']
- index +=1
- metric_info_dict['time_series'] = timestamp_arr
- metric_info_dict['metrics_series'] = value_arr
- log.debug("Metrics Data : %s", metric_info_dict)
- return metric_info_dict
- else:
- log.error("Collection Period should be a multiple of 60")
- return False
-
- else:
- log.error("Metric name is not supported")
- return False
-
- except Exception as e:
- log.error("Error returning Metrics Data" + str(e))
-
-#-----------------------------------------------------------------------------------------------------------------------------
- def updateMetrics(self,cloudwatch_conn,metric_info):
-
- '''updateMetrics will be returning the metric_uuid=0 and
- status=True when the metric is supported by AWS'''
- try:
- supported=self.check_metric(metric_info['metric_name'])
- update_resp = dict()
- update_resp['resource_uuid'] = metric_info['resource_uuid']
- if supported['status'] == True:
- update_resp['status'] = True
- update_resp['metric_uuid'] = 0
- log.debug("Metric Updated : %s", update_resp)
- else:
- update_resp['status'] = False
- update_resp['metric_uuid'] = None
- log.error("Metric name is not supported")
-
- return update_resp
-
- except Exception as e:
- log.error("Error in Update Metrics" + str(e))
-#-----------------------------------------------------------------------------------------------------------------------------
- def deleteMetrics(self,cloudwatch_conn,del_info):
-
- ''' " Not supported in AWS"
- Returning the required parameters with status = False'''
- try:
- supported=self.check_metric(del_info['metric_name'])
- metric_resp = dict()
- del_resp = dict()
- if supported['status'] == True:
- del_resp['schema_version'] = del_info['schema_version']
- del_resp['schema_type'] = "delete_metric_response"
- del_resp['metric_name'] = del_info['metric_name']
- del_resp['metric_uuid'] = del_info['metric_uuid']
- del_resp['resource_uuid'] = del_info['resource_uuid']
- # TODO : yet to finalize
- del_resp['tenant_uuid'] = del_info['tenant_uuid']
- del_resp['correlation_id'] = del_info['correlation_uuid']
- del_resp['status'] = False
- log.info("Metric Deletion Not supported in AWS : %s",del_resp)
- return del_resp
- else:
- log.error("Metric name is not supported")
- return False
-
- except Exception as e:
- log.error(" Metric Deletion Not supported in AWS : " + str(e))
-#------------------------------------------------------------------------------------------------------------------------------------
-
- def listMetrics(self,cloudwatch_conn ,list_info):
-
- '''Returns the list of available AWS/EC2 metrics on which
- alarms have been configured and the metrics are being monitored'''
- try:
- supported = self.check_metric(list_info['metric_name'])
- if supported['status'] == True:
- metrics_list = []
- metrics_data = dict()
-
- #To get the list of associated metrics with the alarms
- alarms = cloudwatch_conn.describe_alarms()
- itr = 0
- if list_info['metric_name'] == "":
- for alarm in alarms:
- metrics_info = dict()
- instance_id = str(alarm.dimensions['InstanceId']).split("'")[1]
- metrics_info['metric_name'] = str(alarm.metric)
- metrics_info['metric_uuid'] = 0
- metrics_info['metric_unit'] = str(alarm.unit)
- metrics_info['resource_uuid'] = instance_id
- metrics_list.insert(itr,metrics_info)
- itr += 1
- print metrics_list
- return metrics_list
- else:
- for alarm in alarms:
- metrics_info = dict()
- if alarm.metric == supported['metric_name']:
- instance_id = str(alarm.dimensions['InstanceId']).split("'")[1]
- metrics_info['metric_name'] = str(alarm.metric)
- metrics_info['metric_uuid'] = 0
- metrics_info['metric_unit'] = str(alarm.unit)
- metrics_info['resource_uuid'] = instance_id
- metrics_list.insert(itr,metrics_info)
- itr += 1
- return metrics_list
- log.debug("Metrics List : %s",metrics_list)
- else:
- log.error("Metric name is not supported")
- return False
-
- except Exception as e:
- log.error("Error in Getting Metric List " + str(e))
-
-#------------------------------------------------------------------------------------------------------------------------------------
-
- def check_metric(self,metric_name):
-
- ''' Checking whether the metric is supported by AWS '''
- try:
- check_resp = dict()
- # metric_name
- if metric_name == 'CPU_UTILIZATION':
- metric_name = 'CPUUtilization'
- metric_status = True
- elif metric_name == 'DISK_READ_OPS':
- metric_name = 'DiskReadOps'
- metric_status = True
- elif metric_name == 'DISK_WRITE_OPS':
- metric_name = 'DiskWriteOps'
- metric_status = True
- elif metric_name == 'DISK_READ_BYTES':
- metric_name = 'DiskReadBytes'
- metric_status = True
- elif metric_name == 'DISK_WRITE_BYTES':
- metric_name = 'DiskWriteBytes'
- metric_status = True
- elif metric_name == 'PACKETS_RECEIVED':
- metric_name = 'NetworkPacketsIn'
- metric_status = True
- elif metric_name == 'PACKETS_SENT':
- metric_name = 'NetworkPacketsOut'
- metric_status = True
- elif metric_name == "":
- metric_name = None
- metric_status = True
- log.info("Metric Not Supported by AWS plugin ")
- else:
- metric_name = None
- metric_status = False
- log.info("Metric Not Supported by AWS plugin ")
- check_resp['metric_name'] = metric_name
- #status
- if metric_status == True:
- check_resp['status'] = True
- else:
- check_resp['status'] = False
-
- return check_resp
-
- except Exception as e:
- log.error("Error in Plugin Inputs %s",str(e))
-#--------------------------------------------------------------------------------------------------------------------------------------
-
-
-
-
-
-
-
-
+++ /dev/null
-##
-# Copyright 2017 xFlow Research Pvt. Ltd
-# This file is part of MON module
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: wajeeha.hamid@xflowresearch.com
-##
-
-'''
-AWS-Plugin implements all the methods of MON to interact with AWS using the BOTO client
-'''
-
-__author__ = "Wajeeha Hamid"
-__date__ = "18-September-2017"
-
-import sys
-import json
-import logging as log
-from jsmin import jsmin
-from connection import Connection
-from metric_alarms import MetricAlarm
-from metrics import Metrics
-from kafka import KafkaConsumer
-sys.path.append("../../core/message-bus")
-from producer import KafkaProducer
-
-class Plugin():
- """Receives Alarm info from MetricAlarm and connects with the consumer/producer"""
- def __init__ (self):
- self.conn = Connection()
- self.metricAlarm = MetricAlarm()
- self.metric = Metrics()
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
- self._consumer = KafkaConsumer(server['topic'], bootstrap_servers=server['server'])
- self._consumer.subscribe(['alarm_request'])
- self.producer = KafkaProducer('')
-#---------------------------------------------------------------------------------------------------------------------------
- def connection(self):
- """Connecting instances with CloudWatch"""
- self.conn.setEnvironment()
- self.conn = self.conn.connection_instance()
- self.cloudwatch_conn = self.conn['cloudwatch_connection']
- self.ec2_conn = self.conn['ec2_connection']
-#---------------------------------------------------------------------------------------------------------------------------
- def configure_alarm(self,alarm_info):
- alarm_id = self.metricAlarm.config_alarm(self.cloudwatch_conn,alarm_info)
- return alarm_id
-#---------------------------------------------------------------------------------------------------------------------------
- def update_alarm_configuration(self,test):
- alarm_id = self.metricAlarm.update_alarm(self.cloudwatch_conn,test)
- return alarm_id
-#---------------------------------------------------------------------------------------------------------------------------
- def delete_alarm(self,alarm_id):
- return self.metricAlarm.delete_Alarm(self.cloudwatch_conn,alarm_id)
-#---------------------------------------------------------------------------------------------------------------------------
- def get_alarms_list(self,instance_id):
- return self.metricAlarm.alarms_list(self.cloudwatch_conn,instance_id)
-#---------------------------------------------------------------------------------------------------------------------------
- def get_ack_details(self,ack_info):
- return self.metricAlarm.alarm_details(self.cloudwatch_conn,ack_info)
-#---------------------------------------------------------------------------------------------------------------------------
- def get_metrics_data(self,metric_name,period,instance_id):
- return self.metric.metricsData(self.cloudwatch_conn,metric_name,period,instance_id)
-#---------------------------------------------------------------------------------------------------------------------------
-
- def consumer(self):
- """Consume info from the message bus to manage alarms."""
- try:
- for message in self._consumer:
- # Check the Functionlity that needs to be performed: topic = 'alarms'/'metrics'/'Access_Credentials'
- if message.topic == "alarm_request":
- log.info("Action required against: %s" % (message.topic))
- alarm_info = json.loads(message.value)
-
- if message.key == "create_alarm_request":
- if alarm_info['vim_type'] == 'AWS':
- alarm_inner_dict = alarm_info['alarm_create_request']
- metric_status = self.check_metric(alarm_inner_dict['metric_name'])
- if self.check_resource(alarm_inner_dict['resource_uuid']) == True and metric_status['status'] == True:
- log.debug ("Resource and Metrics exists")
-
- alarm_info['alarm_create_request']['metric_name'] = metric_status['metric_name']
- #Generate a valid response message, send via producer
- config_resp = self.configure_alarm(alarm_info) #alarm_info = message.value
- if config_resp == None:
- log.debug("Alarm Already exists")
- payload = json.dumps(config_resp)
- file = open('../../core/models/create_alarm_resp.json','wb').write((payload))
- self.producer.create_alarm_response(key='create_alarm_response',message=payload,topic = 'alarm_response')
- else:
- payload = json.dumps(config_resp)
- file = open('../../core/models/create_alarm_resp.json','wb').write((payload))
-
- self.producer.create_alarm_response(key='create_alarm_response',message=payload,topic = 'alarm_response')
- log.info("New alarm created with alarm info: %s", config_resp)
- else:
- log.error("Resource ID doesn't exists")
- else:
- log.error("Plugin inputs are incorrect")
-
-
- elif message.key == "acknowledge_alarm":
- alarm_inner_dict = alarm_info['ack_details']
- if alarm_info['vim_type'] == 'AWS':
- if self.check_resource(alarm_inner_dict['resource_uuid']) == True:
- alarm_info = json.loads(message.value)
- #Generate a valid response message, send via producer
- ack_details = self.get_ack_details(alarm_info)
- payload = json.dumps(ack_details)
- file = open('../../core/models/notify_alarm.json','wb').write((payload))
- self.producer.notify_alarm(key='notify_alarm',message=payload,topic = 'alarm_response')
- log.info("Acknowledge sent: %s", ack_details)
- else:
- log.error("Resource ID is Incorrect")
- else:
- log.error(" VIM type incorrect ")
-
-
- elif message.key == "update_alarm_request":
- if alarm_info['vim_type'] == 'AWS':
- alarm_inner_dict = alarm_info['alarm_update_request']
- metric_status = self.check_metric(alarm_inner_dict['metric_name'])
-
- if metric_status['status'] == True:
- log.debug ("Resource and Metrics exists")
- alarm_info['alarm_update_request']['metric_name'] = metric_status['metric_name']
- #Generate a valid response message, send via producer
- update_resp = self.update_alarm_configuration(alarm_info)
- if update_resp == None:
- payload = json.dumps(update_resp)
- file = open('../../core/models/update_alarm_resp.json','wb').write((payload))
- self.producer.update_alarm_response(key='update_alarm_response',message=payload,topic = 'alarm_response')
- log.debug("Alarm Already exists")
- else:
- payload = json.dumps(update_resp)
- file = open('../../core/models/update_alarm_resp.json','wb').write((payload))
- self.producer.update_alarm_response(key='update_alarm_response',message=payload,topic = 'alarm_response')
- log.info("Alarm Updated with alarm info: %s", update_resp)
- else:
- log.info ("Metric Not Supported")
- else:
- log.error(" VIM type Incorrect ")
-
- elif message.key == "delete_alarm_request":
- if alarm_info['vim_type'] == 'AWS':
- del_info = json.loads(message.value)
- #Generate a valid response message, send via producer
- del_resp = self.delete_alarm(del_info)
- payload = json.dumps(del_resp)
- file = open('../../core/models/delete_alarm_resp.json','wb').write((payload))
- self.producer.delete_alarm_response(key='delete_alarm_response',message=payload,topic = 'alarm_response')
- log.info("Alarm Deleted with alarm info: %s", del_resp)
- else:
- log.error(" VIM type Incorrect ")
-
- elif message.key == "alarm_list_request":
- alarm_inner_dict = alarm_info['alarm_list_request']
- if alarm_info['vim_type'] == 'AWS':
- if self.check_resource(alarm_inner_dict['resource_uuid']) == True or alarm_inner_dict['resource_uuid'] == "":
- #Generate a valid response message, send via producer
- list_resp = self.get_alarms_list(alarm_info)#['alarm_names']
- payload = json.dumps(list_resp)
- file = open('../../core/models/list_alarm_resp.json','wb').write((payload))
- self.producer.list_alarm_response(key='list_alarm_response',message=payload,topic = 'alarm_response')
- else:
- log.error("Resource ID is Incorrect")
- else:
- log.error(" VIM type Incorrect ")
-
- else:
- log.debug("Unknown key, no action will be performed")
-
- else:
- log.info("Message topic not relevant to this plugin: %s",
- message.topic)
- except Exception as e:
- log.error("Consumer exception: %s", str(e))
-#---------------------------------------------------------------------------------------------------------------------------
- def check_resource(self,resource_uuid):
- '''Finding Resource with the resource_uuid'''
- try:
- check_resp = dict()
- instances = self.ec2_conn.get_all_instance_status()
-
- #resource_id
- for instance_id in instances:
- instance_id = str(instance_id).split(':')[1]
- if instance_id == resource_uuid:
- check_resp['resource_uuid'] = resource_uuid
- return True
- return False
-
- except Exception as e:
- log.error("Error in Plugin Inputs %s",str(e))
-#---------------------------------------------------------------------------------------------------------------------------
- def check_metric(self,metric_name):
- ''' Checking whether the metric is supported by AWS '''
- try:
- check_resp = dict()
-
- #metric_name
- if metric_name == 'CPU_UTILIZATION':
- metric_name = 'CPUUtilization'
- metric_status = True
- elif metric_name == 'DISK_READ_OPS':
- metric_name = 'DiskReadOps'
- metric_status = True
- elif metric_name == 'DISK_WRITE_OPS':
- metric_name = 'DiskWriteOps'
- metric_status = True
- elif metric_name == 'DISK_READ_BYTES':
- metric_name = 'DiskReadBytes'
- metric_status = True
- elif metric_name == 'DISK_WRITE_BYTES':
- metric_name = 'DiskWriteBytes'
- metric_status = True
- elif metric_name == 'PACKETS_RECEIVED':
- metric_name = 'NetworkPacketsIn'
- metric_status = True
- elif metric_name == 'PACKETS_SENT':
- metric_name = 'NetworkPacketsOut'
- metric_status = True
- else:
- metric_name = None
- metric_status = False
- check_resp['metric_name'] = metric_name
- #status
- if metric_status == True:
- check_resp['status'] = True
- return check_resp
- except Exception as e:
- log.error("Error in Plugin Inputs %s",str(e))
-#---------------------------------------------------------------------------------------------------------------------------
-
-obj = Plugin()
-obj.connection()
-obj.consumer()
+++ /dev/null
-##
-# Copyright 2017 xFlow Research Pvt. Ltd
-# This file is part of MON module
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact with: wajeeha.hamid@xflowresearch.com
-##
-
-'''
-AWS-Plugin implements all the methods of MON to interact with AWS using the BOTO client
-'''
-
-__author__ = "Wajeeha Hamid"
-__date__ = "18-September-2017"
-
-import sys
-import json
-from connection import Connection
-from metric_alarms import MetricAlarm
-from metrics import Metrics
-sys.path.append("../../core/message_bus")
-from producer import KafkaProducer
-from kafka import KafkaConsumer
-import logging as log
-
-class plugin_metrics():
- """Receives Alarm info from MetricAlarm and connects with the consumer/producer """
- def __init__ (self):
- self.conn = Connection()
- self.metric = Metrics()
-
- #server = {'server': 'localhost:9092', 'topic': 'metrics_request'}
- #Initialize a Consumer object to consume message from the SO
- self._consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
- self._consumer.subscribe(['metric_request'])
-
- #producer = KafkaProducer('create_metric_request')
-
- self.producer = KafkaProducer('')
-#---------------------------------------------------------------------------------------------------------------------------
- def connection(self):
- try:
- """Connecting instances with CloudWatch"""
- self.conn.setEnvironment()
- self.conn = self.conn.connection_instance()
- self.cloudwatch_conn = self.conn['cloudwatch_connection']
- self.ec2_conn = self.conn['ec2_connection']
-
- except Exception as e:
- log.error("Failed to Connect with AWS %s: " + str(e))
-#---------------------------------------------------------------------------------------------------------------------------
- def create_metric_request(self,metric_info):
- '''Comaptible API using normalized parameters'''
- metric_resp = self.metric.createMetrics(self.cloudwatch_conn,metric_info)
- return metric_resp
-#---------------------------------------------------------------------------------------------------------------------------
- def update_metric_request(self,updated_info):
- '''Comaptible API using normalized parameters'''
- update_resp = self.metric.updateMetrics(self.cloudwatch_conn,updated_info)
- return update_resp
-#---------------------------------------------------------------------------------------------------------------------------
- def delete_metric_request(self,delete_info):
- '''Comaptible API using normalized parameters'''
- del_resp = self.metric.deleteMetrics(self.cloudwatch_conn,delete_info)
- return del_resp
-#---------------------------------------------------------------------------------------------------------------------------
- def list_metrics_request(self,list_info):
- '''Comaptible API using normalized parameters'''
- list_resp = self.metric.listMetrics(self.cloudwatch_conn,list_info)
- return list_resp
-#---------------------------------------------------------------------------------------------------------------------------
- def read_metrics_data(self,list_info):
- '''Comaptible API using normalized parameters
- Read all metric data related to a specified metric'''
- data_resp=self.metric.metricsData(self.cloudwatch_conn,list_info)
- return data_resp
-#---------------------------------------------------------------------------------------------------------------------------
-
- def consumer(self):
- '''Consumer will consume the message from SO,
- 1) parse the message and trigger the methods ac
- cording to keys and topics provided in request.
-
- 2) The response from plugin is saved in json format.
-
- 3) The producer object then calls the producer response
- methods to send the response back to message bus
- '''
-
- try:
- for message in self._consumer:
- metric_info = json.loads(message.value)
- print metric_info
- metric_response = dict()
-
- if metric_info['vim_type'] == 'AWS':
- log.debug ("VIM support : AWS")
-
- # Check the Functionlity that needs to be performed: topic = 'alarms'/'metrics'/'Access_Credentials'
- if message.topic == "metric_request":
- log.info("Action required against: %s" % (message.topic))
-
- if message.key == "create_metric_request":
- if self.check_resource(metric_info['metric_create']['resource_uuid']) == True:
- metric_resp = self.create_metric_request(metric_info['metric_create']) #alarm_info = message.value
- metric_response['schema_version'] = metric_info['schema_version']
- metric_response['schema_type'] = "create_metric_response"
- metric_response['metric_create_response'] = metric_resp
- payload = json.dumps(metric_response)
- file = open('../../core/models/create_metric_resp.json','wb').write((payload))
- self.producer.create_metrics_resp(key='create_metric_response',message=payload,topic = 'metric_response')
-
- log.info("Metric configured: %s", metric_resp)
- return metric_response
-
- elif message.key == "update_metric_request":
- if self.check_resource(metric_info['metric_create']['resource_uuid']) == True:
- update_resp = self.update_metric_request(metric_info['metric_create'])
- metric_response['schema_version'] = metric_info['schema_version']
- metric_response['schema_type'] = "update_metric_response"
- metric_response['metric_update_response'] = update_resp
- payload = json.dumps(metric_response)
- print payload
- file = open('../../core/models/update_metric_resp.json','wb').write((payload))
- self.producer.update_metric_response(key='update_metric_response',message=payload,topic = 'metric_response')
-
- log.info("Metric Updates: %s",metric_response)
- return metric_response
-
- elif message.key == "delete_metric_request":
- if self.check_resource(metric_info['resource_uuid']) == True:
- del_resp=self.delete_metric_request(metric_info)
- payload = json.dumps(del_resp)
- file = open('../../core/models/delete_metric_resp.json','wb').write((payload))
- self.producer.delete_metric_response(key='delete_metric_response',message=payload,topic = 'metric_response')
-
- log.info("Metric Deletion Not supported in AWS : %s",del_resp)
- return del_resp
-
- elif message.key == "list_metric_request":
- if self.check_resource(metric_info['metrics_list_request']['resource_uuid']) == True:
- list_resp = self.list_metrics_request(metric_info['metrics_list_request'])
- metric_response['schema_version'] = metric_info['schema_version']
- metric_response['schema_type'] = "list_metric_response"
- metric_response['correlation_id'] = metric_info['metrics_list_request']['correlation_id']
- metric_response['vim_type'] = metric_info['vim_type']
- metric_response['metrics_list'] = list_resp
- payload = json.dumps(metric_response)
- file = open('../../core/models/list_metric_resp.json','wb').write((payload))
- self.producer.list_metric_response(key='list_metrics_response',message=payload,topic = 'metric_response')
-
- log.info("Metric List: %s",metric_response)
- return metric_response
-
- elif message.key == "read_metric_data_request":
- if self.check_resource(metric_info['resource_uuid']) == True:
- data_resp = self.read_metrics_data(metric_info)
- metric_response['schema_version'] = metric_info['schema_version']
- metric_response['schema_type'] = "read_metric_data_response"
- metric_response['metric_name'] = metric_info['metric_name']
- metric_response['metric_uuid'] = metric_info['metric_uuid']
- metric_response['correlation_id'] = metric_info['correlation_uuid']
- metric_response['resource_uuid'] = metric_info['resource_uuid']
- metric_response['tenant_uuid'] = metric_info['tenant_uuid']
- metric_response['metrics_data'] = data_resp
- payload = json.dumps(metric_response)
- file = open('../../core/models/read_metric_data_resp.json','wb').write((payload))
- self.producer.read_metric_data_response(key='read_metric_data_response',message=payload,topic = 'metric_response')
-
- log.info("Metric Data Response: %s",metric_response)
- return metric_response
-
- else:
- log.debug("Unknown key, no action will be performed")
- else:
- log.info("Message topic not relevant to this plugin: %s",
- message.topic)
- else:
- print "Bad VIM Request"
- except Exception as e:
- log.error("Consumer exception: %s", str(e))
-
-#---------------------------------------------------------------------------------------------------------------------------
- def check_resource(self,resource_uuid):
-
- '''Checking the resource_uuid is present in EC2 instances'''
- try:
- check_resp = dict()
- instances = self.ec2_conn.get_all_instance_status()
- status_resource = False
-
- #resource_id
- for instance_id in instances:
- instance_id = str(instance_id).split(':')[1]
- if instance_id == resource_uuid:
- check_resp['resource_uuid'] = resource_uuid
- status_resource = True
- else:
- status_resource = False
-
- #status
- return status_resource
-
- except Exception as e:
- log.error("Error in Plugin Inputs %s",str(e))
-#---------------------------------------------------------------------------------------------------------------------------
-
-obj = plugin_metrics()
-obj.connection()
-obj.consumer()
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Aodh plugin for OSM MON."""
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Carry out alarming requests via Aodh API."""
-
-import json
-
-import logging
-
-from core.message_bus.producer import KafkaProducer
-
-from plugins.OpenStack.response import OpenStack_Response
-from plugins.OpenStack.settings import Config
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-ALARM_NAMES = {
- "average_memory_usage_above_threshold": "average_memory_utilization",
- "disk_read_ops": "disk_read_ops",
- "disk_write_ops": "disk_write_ops",
- "disk_read_bytes": "disk_read_bytes",
- "disk_write_bytes": "disk_write_bytes",
- "net_packets_dropped": "packets_dropped",
- "packets_in_above_threshold": "packets_received",
- "packets_out_above_threshold": "packets_sent",
- "cpu_utilization_above_threshold": "cpu_utilization"}
-
-SEVERITIES = {
- "warning": "low",
- "minor": "low",
- "major": "moderate",
- "critical": "critical",
- "indeterminate": "critical"}
-
-STATISTICS = {
- "average": "avg",
- "minimum": "min",
- "maximum": "max",
- "count": "count",
- "sum": "sum"}
-
-
-class Alarming(object):
- """Carries out alarming requests and responses via Aodh API."""
-
- def __init__(self):
- """Create the OpenStack alarming instance."""
- # Initialize configuration and notifications
- config = Config.instance()
- config.read_environ("aodh")
-
- # Initialise authentication for API requests
- self.auth_token = None
- self.endpoint = None
- self.common = None
-
- # Use the Response class to generate valid json response messages
- self._response = OpenStack_Response()
-
- # Initializer a producer to send responses back to SO
- self._producer = KafkaProducer("alarm_response")
-
- def alarming(self, message, common, auth_token):
- """Consume info from the message bus to manage alarms."""
- values = json.loads(message.value)
- self.common = common
-
- log.info("OpenStack alarm action required.")
-
- # Generate and auth_token and endpoint for request
- if auth_token is not None:
- if self.auth_token != auth_token:
- log.info("Auth_token for alarming set by access_credentials.")
- self.auth_token = auth_token
- else:
- log.info("Auth_token has not been updated.")
- else:
- log.info("Using environment variables to set auth_token for Aodh.")
- self.auth_token = self.common._authenticate()
-
- if self.endpoint is None:
- log.info("Generating a new endpoint for Aodh.")
- self.endpoint = self.common.get_endpoint("alarming")
-
- if message.key == "create_alarm_request":
- # Configure/Update an alarm
- alarm_details = values['alarm_create_request']
-
- alarm_id, alarm_status = self.configure_alarm(
- self.endpoint, self.auth_token, alarm_details)
-
- # Generate a valid response message, send via producer
- try:
- if alarm_status is True:
- log.info("Alarm successfully created")
-
- resp_message = self._response.generate_response(
- 'create_alarm_response', status=alarm_status,
- alarm_id=alarm_id,
- cor_id=alarm_details['correlation_id'])
- log.info("Response Message: %s", resp_message)
- self._producer.create_alarm_response(
- 'create_alarm_resonse', resp_message,
- 'alarm_response')
- except Exception as exc:
- log.warn("Response creation failed: %s", exc)
-
- elif message.key == "list_alarm_request":
- # Check for a specifed: alarm_name, resource_uuid, severity
- # and generate the appropriate list
- list_details = values['alarm_list_request']
-
- alarm_list = self.list_alarms(
- self.endpoint, self.auth_token, list_details)
-
- try:
- # Generate and send a list response back
- resp_message = self._response.generate_response(
- 'list_alarm_response', alarm_list=alarm_list,
- cor_id=list_details['correlation_id'])
- log.info("Response Message: %s", resp_message)
- self._producer.list_alarm_response(
- 'list_alarm_response', resp_message,
- 'alarm_response')
- except Exception as exc:
- log.warn("Failed to send a valid response back.")
-
- elif message.key == "delete_alarm_request":
- request_details = values['alarm_delete_request']
- alarm_id = request_details['alarm_uuid']
-
- resp_status = self.delete_alarm(
- self.endpoint, self.auth_token, alarm_id)
-
- # Generate and send a response message
- try:
- resp_message = self._response.generate_response(
- 'delete_alarm_response', alarm_id=alarm_id,
- status=resp_status,
- cor_id=request_details['correlation_id'])
- log.info("Response message: %s", resp_message)
- self._producer.delete_alarm_response(
- 'delete_alarm_response', resp_message,
- 'alarm_response')
- except Exception as exc:
- log.warn("Failed to create delete reponse:%s", exc)
-
- elif message.key == "acknowledge_alarm":
- # Acknowledge that an alarm has been dealt with by the SO
- alarm_id = values['ack_details']['alarm_uuid']
-
- response = self.update_alarm_state(
- self.endpoint, self.auth_token, alarm_id)
-
- # Log if an alarm was reset
- if response is True:
- log.info("Acknowledged the alarm and cleared it.")
- else:
- log.warn("Failed to acknowledge/clear the alarm.")
-
- elif message.key == "update_alarm_request":
- # Update alarm configurations
- alarm_details = values['alarm_update_request']
-
- alarm_id, status = self.update_alarm(
- self.endpoint, self.auth_token, alarm_details)
-
- # Generate a response for an update request
- try:
- resp_message = self._response.generate_response(
- 'update_alarm_response', alarm_id=alarm_id,
- cor_id=alarm_details['correlation_id'],
- status=status)
- log.info("Response message: %s", resp_message)
- self._producer.update_alarm_response(
- 'update_alarm_response', resp_message,
- 'alarm_response')
- except Exception as exc:
- log.warn("Failed to send an update response:%s", exc)
-
- else:
- log.debug("Unknown key, no action will be performed")
-
- return
-
- def configure_alarm(self, endpoint, auth_token, values):
- """Create requested alarm in Aodh."""
- url = "{}/v2/alarms/".format(endpoint)
-
- # Check if the desired alarm is supported
- alarm_name = values['alarm_name'].lower()
- metric_name = values['metric_name'].lower()
- resource_id = values['resource_uuid']
-
- if alarm_name not in ALARM_NAMES.keys():
- log.warn("This alarm is not supported, by a valid metric.")
- return None, False
- if ALARM_NAMES[alarm_name] != metric_name:
- log.warn("This is not the correct metric for this alarm.")
- return None, False
-
- # Check for the required metric
- metric_id = self.check_for_metric(auth_token, metric_name, resource_id)
-
- try:
- if metric_id is not None:
- # Create the alarm if metric is available
- payload = self.check_payload(values, metric_name, resource_id,
- alarm_name)
- new_alarm = self.common._perform_request(
- url, auth_token, req_type="post", payload=payload)
- return json.loads(new_alarm.text)['alarm_id'], True
- else:
- log.warn("The required Gnocchi metric does not exist.")
- return None, False
-
- except Exception as exc:
- log.warn("Failed to create the alarm: %s", exc)
- return None, False
-
- def delete_alarm(self, endpoint, auth_token, alarm_id):
- """Delete alarm function."""
- url = "{}/v2/alarms/%s".format(endpoint) % (alarm_id)
-
- try:
- result = self.common._perform_request(
- url, auth_token, req_type="delete")
- if str(result.status_code) == "404":
- log.info("Alarm doesn't exist: %s", result.status_code)
- # If status code is 404 alarm did not exist
- return False
- else:
- return True
-
- except Exception as exc:
- log.warn("Failed to delete alarm: %s because %s.", alarm_id, exc)
- return False
-
- def list_alarms(self, endpoint, auth_token, list_details):
- """Generate the requested list of alarms."""
- url = "{}/v2/alarms/".format(endpoint)
- a_list, name_list, sev_list, res_list = [], [], [], []
-
- # TODO(mcgoughh): for now resource_id is a mandatory field
- # Check for a reqource is
- try:
- resource = list_details['resource_uuid']
- except KeyError as exc:
- log.warn("Resource id not specified for list request: %s", exc)
- return None
-
- # Checking what fields are specified for a list request
- try:
- name = list_details['alarm_name'].lower()
- if name not in ALARM_NAMES.keys():
- log.warn("This alarm is not supported, won't be used!")
- name = None
- except KeyError as exc:
- log.info("Alarm name isn't specified.")
- name = None
-
- try:
- severity = list_details['severity'].lower()
- sev = SEVERITIES[severity]
- except KeyError as exc:
- log.info("Severity is unspecified/incorrectly configured")
- sev = None
-
- # Perform the request to get the desired list
- try:
- result = self.common._perform_request(
- url, auth_token, req_type="get")
-
- if result is not None:
- # Get list based on resource id
- for alarm in json.loads(result.text):
- rule = alarm['gnocchi_resources_threshold_rule']
- if resource == rule['resource_id']:
- res_list.append(str(alarm))
- if not res_list:
- log.info("No alarms for this resource")
- return a_list
-
- # Generate specified listed if requested
- if name is not None and sev is not None:
- log.info("Return a list of %s alarms with %s severity.",
- name, sev)
- for alarm in json.loads(result.text):
- if name == alarm['name']:
- name_list.append(str(alarm))
- for alarm in json.loads(result.text):
- if sev == alarm['severity']:
- sev_list.append(str(alarm))
- name_sev_list = list(set(name_list).intersection(sev_list))
- a_list = list(set(name_sev_list).intersection(res_list))
- elif name is not None:
- log.info("Returning a %s list of alarms.", name)
- for alarm in json.loads(result.text):
- if name == alarm['name']:
- name_list.append(str(alarm))
- a_list = list(set(name_list).intersection(res_list))
- elif sev is not None:
- log.info("Returning %s severity alarm list.", sev)
- for alarm in json.loads(result.text):
- if sev == alarm['severity']:
- sev_list.append(str(alarm))
- a_list = list(set(sev_list).intersection(res_list))
- else:
- log.info("Returning an entire list of alarms.")
- a_list = res_list
- else:
- log.info("There are no alarms!")
-
- except Exception as exc:
- log.info("Failed to generate required list: %s", exc)
- return None
-
- return a_list
-
- def update_alarm_state(self, endpoint, auth_token, alarm_id):
- """Set the state of an alarm to ok when ack message is received."""
- url = "{}/v2/alarms/%s/state".format(endpoint) % alarm_id
- payload = json.dumps("ok")
-
- try:
- self.common._perform_request(
- url, auth_token, req_type="put", payload=payload)
- return True
- except Exception as exc:
- log.warn("Unable to update alarm state: %s", exc)
- return False
-
- def update_alarm(self, endpoint, auth_token, values):
- """Get alarm name for an alarm configuration update."""
- # Get already existing alarm details
- url = "{}/v2/alarms/%s".format(endpoint) % values['alarm_uuid']
-
- # Gets current configurations about the alarm
- try:
- result = self.common._perform_request(
- url, auth_token, req_type="get")
- alarm_name = json.loads(result.text)['name']
- rule = json.loads(result.text)['gnocchi_resources_threshold_rule']
- alarm_state = json.loads(result.text)['state']
- resource_id = rule['resource_id']
- metric_name = rule['metric']
- except Exception as exc:
- log.warn("Failed to retreive existing alarm info: %s.\
- Can only update OSM alarms.", exc)
- return None, False
-
- # Generates and check payload configuration for alarm update
- payload = self.check_payload(values, metric_name, resource_id,
- alarm_name, alarm_state=alarm_state)
-
- # Updates the alarm configurations with the valid payload
- if payload is not None:
- try:
- update_alarm = self.common._perform_request(
- url, auth_token, req_type="put", payload=payload)
-
- return json.loads(update_alarm.text)['alarm_id'], True
- except Exception as exc:
- log.warn("Alarm update could not be performed: %s", exc)
- return None, False
- return None, False
-
- def check_payload(self, values, metric_name, resource_id,
- alarm_name, alarm_state=None):
- """Check that the payload is configuration for update/create alarm."""
- try:
- # Check state and severity
- severity = values['severity'].lower()
- if severity == "indeterminate":
- alarm_state = "insufficient data"
- if alarm_state is None:
- alarm_state = "ok"
-
- statistic = values['statistic'].lower()
- # Try to configure the payload for the update/create request
- # Can only update: threshold, operation, statistic and
- # the severity of the alarm
- rule = {'threshold': values['threshold_value'],
- 'comparison_operator': values['operation'].lower(),
- 'metric': metric_name,
- 'resource_id': resource_id,
- 'resource_type': 'generic',
- 'aggregation_method': STATISTICS[statistic]}
- payload = json.dumps({'state': alarm_state,
- 'name': alarm_name,
- 'severity': SEVERITIES[severity],
- 'type': 'gnocchi_resources_threshold',
- 'gnocchi_resources_threshold_rule': rule, })
- return payload
- except KeyError as exc:
- log.warn("Alarm is not configured correctly: %s", exc)
- return None
-
- def get_alarm_state(self, endpoint, auth_token, alarm_id):
- """Get the state of the alarm."""
- url = "{}/v2/alarms/%s/state".format(endpoint) % alarm_id
-
- try:
- alarm_state = self.common._perform_request(
- url, auth_token, req_type="get")
- return json.loads(alarm_state.text)
- except Exception as exc:
- log.warn("Failed to get the state of the alarm:%s", exc)
- return None
-
- def check_for_metric(self, auth_token, m_name, r_id):
- """Check for the alarm metric."""
- try:
- endpoint = self.common.get_endpoint("metric")
-
- url = "{}/v1/metric/".format(endpoint)
- metric_list = self.common._perform_request(
- url, auth_token, req_type="get")
-
- for metric in json.loads(metric_list.text):
- name = metric['name']
- resource = metric['resource_id']
- if (name == m_name and resource == r_id):
- metric_id = metric['id']
- log.info("The required metric exists, an alarm will be created.")
- return metric_id
- except Exception as exc:
- log.info("Desired Gnocchi metric not found:%s", exc)
- return None
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Notifier class for alarm notification response."""
-
-import json
-import logging as log
-
-try:
- import aodhclient
-except ImportError:
- log.warn("Failed to import the aodhclient")
-
-
-from core.message_bus.producer import KafkaProducer
-
-from plugins.OpenStack.Aodh.alarming import Alarming
-from plugins.OpenStack.response import OpenStack_Response
-from plugins.OpenStack.settings import Config
-
-__author__ = "Helena McGough"
-
-ALARM_NAMES = [
- "average_memory_usage_above_threshold",
- "disk_read_ops",
- "disk_write_ops",
- "disk_read_bytes",
- "disk_write_bytes",
- "net_packets_dropped",
- "packets_in_above_threshold",
- "packets_out_above_threshold",
- "cpu_utilization_above_threshold"]
-
-
-def register_notifier():
- """Run the notifier instance."""
- config = Config.instance()
- instance = Notifier(config=config)
- instance.config()
- instance.notify()
-
-
-class Notifier(object):
- """Alarm Notification class."""
-
- def __init__(self, config):
- """Initialize alarm notifier."""
- log.info("Initialize the notifier for the SO.")
- self._config = config
- self._response = OpenStack_Response()
- self._producer = KafkaProducer("alarm_response")
- self._alarming = Alarming()
-
- def config(self):
- """Configure the alarm notifier."""
- log.info("Configure the notifier instance.")
- self._config.read_environ("aodh")
-
- def notify(self):
- """Send alarm notifications responses to the SO."""
- log.info("Checking for alarm notifications")
- auth_token, endpoint = self._alarming.authenticate()
-
- while(1):
- alarm_list = self._alarming.list_alarms(endpoint, auth_token)
- for alarm in json.loads(alarm_list):
- alarm_id = alarm['alarm_id']
- alarm_name = alarm['name']
- # Send a notification response to the SO on alarm trigger
- if alarm_name in ALARM_NAMES:
- alarm_state = self._alarming.get_alarm_state(
- endpoint, auth_token, alarm_id)
- if alarm_state == "alarm":
- # Generate and send an alarm notification response
- try:
- a_date = alarm['state_timestamp'].replace("T", " ")
- rule = alarm['gnocchi_resources_threshold_rule']
- resp_message = self._response.generate_response(
- 'notify_alarm', a_id=alarm_id,
- r_id=rule['resource_id'],
- sev=alarm['severity'], date=a_date,
- state=alarm_state, vim_type="OpenStack")
- self._producer.notify_alarm(
- 'notify_alarm', resp_message, 'alarm_response')
- except Exception as exc:
- log.warn("Failed to send notify response:%s", exc)
-
-if aodhclient:
- register_notifier()
+++ /dev/null
-#gitkeep file to keep the initial empty directory structure.
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Gnocchi plugin for OSM MON."""
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Carry out OpenStack metric requests via Gnocchi API."""
-
-import datetime
-import json
-import logging
-
-import time
-
-from core.message_bus.producer import KafkaProducer
-
-from plugins.OpenStack.response import OpenStack_Response
-from plugins.OpenStack.settings import Config
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-METRIC_MAPPINGS = {
- "average_memory_utilization": "memory.percent",
- "disk_read_ops": "disk.disk_ops",
- "disk_write_ops": "disk.disk_ops",
- "disk_read_bytes": "disk.disk_octets",
- "disk_write_bytes": "disk.disk_octets",
- "packets_dropped": "interface.if_dropped",
- "packets_received": "interface.if_packets",
- "packets_sent": "interface.if_packets",
- "cpu_utilization": "cpu.percent",
-}
-
-PERIOD_MS = {
- "HR": 3600000,
- "DAY": 86400000,
- "WEEK": 604800000,
- "MONTH": 2629746000,
- "YEAR": 31556952000
-}
-
-
-class Metrics(object):
- """OpenStack metric requests performed via the Gnocchi API."""
-
- def __init__(self):
- """Initialize the metric actions."""
- # Configure an instance of the OpenStack metric plugin
- config = Config.instance()
- config.read_environ("gnocchi")
-
- # Initialise authentication for API requests
- self.auth_token = None
- self.endpoint = None
- self._common = None
-
- # Use the Response class to generate valid json response messages
- self._response = OpenStack_Response()
-
- # Initializer a producer to send responses back to SO
- self._producer = KafkaProducer("metric_response")
-
- def metric_calls(self, message, common, auth_token):
- """Consume info from the message bus to manage metric requests."""
- values = json.loads(message.value)
- self._common = common
- log.info("OpenStack metric action required.")
-
- # Generate and auth_token and endpoint for request
- if auth_token is not None:
- if self.auth_token != auth_token:
- log.info("Auth_token for metrics set by access_credentials.")
- self.auth_token = auth_token
- else:
- log.info("Auth_token has not been updated.")
- else:
- log.info("Using environment variables to set Gnocchi auth_token.")
- self.auth_token = self._common._authenticate()
-
- if self.endpoint is None:
- log.info("Generating a new endpoint for Gnocchi.")
- self.endpoint = self._common.get_endpoint("metric")
-
- if message.key == "create_metric_request":
- # Configure metric
- metric_details = values['metric_create']
- metric_id, resource_id, status = self.configure_metric(
- self.endpoint, self.auth_token, metric_details)
-
- # Generate and send a create metric response
- try:
- resp_message = self._response.generate_response(
- 'create_metric_response', status=status,
- cor_id=values['correlation_id'],
- metric_id=metric_id, r_id=resource_id)
- log.info("Response messages: %s", resp_message)
- self._producer.create_metrics_resp(
- 'create_metric_response', resp_message,
- 'metric_response')
- except Exception as exc:
- log.warn("Failed to create response: %s", exc)
-
- elif message.key == "read_metric_data_request":
- # Read all metric data related to a specified metric
- timestamps, metric_data = self.read_metric_data(
- self.endpoint, self.auth_token, values)
-
- # Generate and send a response message
- try:
- resp_message = self._response.generate_response(
- 'read_metric_data_response',
- m_id=values['metric_uuid'],
- m_name=values['metric_name'],
- r_id=values['resource_uuid'],
- cor_id=values['correlation_id'],
- times=timestamps, metrics=metric_data)
- log.info("Response message: %s", resp_message)
- self._producer.read_metric_data_response(
- 'read_metric_data_response', resp_message,
- 'metric_response')
- except Exception as exc:
- log.warn("Failed to send read metric response:%s", exc)
-
- elif message.key == "delete_metric_request":
- # delete the specified metric in the request
- metric_id = values['metric_uuid']
- status = self.delete_metric(
- self.endpoint, self.auth_token, metric_id)
-
- # Generate and send a response message
- try:
- resp_message = self._response.generate_response(
- 'delete_metric_response', m_id=metric_id,
- m_name=values['metric_name'],
- status=status, r_id=values['resource_uuid'],
- cor_id=values['correlation_id'])
- log.info("Response message: %s", resp_message)
- self._producer.delete_metric_response(
- 'delete_metric_response', resp_message,
- 'metric_response')
- except Exception as exc:
- log.warn("Failed to send delete response:%s", exc)
-
- elif message.key == "update_metric_request":
- # Gnocchi doesn't support configuration updates
- # Log and send a response back to this effect
- log.warn("Gnocchi doesn't support metric configuration\
- updates.")
- req_details = values['metric_create']
- metric_name = req_details['metric_name']
- resource_id = req_details['resource_uuid']
- metric_id = self.get_metric_id(
- self.endpoint, self.auth_token, metric_name, resource_id)
-
- # Generate and send a response message
- try:
- resp_message = self._response.generate_response(
- 'update_metric_response', status=False,
- cor_id=values['correlation_id'],
- r_id=resource_id, m_id=metric_id)
- log.info("Response message: %s", resp_message)
- self._producer.update_metric_response(
- 'update_metric_response', resp_message,
- 'metric_response')
- except Exception as exc:
- log.warn("Failed to send an update response:%s", exc)
-
- elif message.key == "list_metric_request":
- list_details = values['metrics_list_request']
-
- metric_list = self.list_metrics(
- self.endpoint, self.auth_token, list_details)
-
- # Generate and send a response message
- try:
- resp_message = self._response.generate_response(
- 'list_metric_response', m_list=metric_list,
- cor_id=list_details['correlation_id'])
- log.info("Response message: %s", resp_message)
- self._producer.list_metric_response(
- 'list_metric_response', resp_message,
- 'metric_response')
- except Exception as exc:
- log.warn("Failed to send a list response:%s", exc)
-
- else:
- log.warn("Unknown key, no action will be performed.")
-
- return
-
- def configure_metric(self, endpoint, auth_token, values):
- """Create the new metric in Gnocchi."""
- try:
- resource_id = values['resource_uuid']
- except KeyError:
- log.warn("Resource is not defined correctly.")
- return None, None, False
-
- # Check/Normalize metric name
- metric_name, norm_name = self.get_metric_name(values)
- if norm_name is None:
- log.warn("This metric is not supported by this plugin.")
- return None, resource_id, False
-
- # Check for an existing metric for this resource
- metric_id = self.get_metric_id(
- endpoint, auth_token, metric_name, resource_id)
-
- if metric_id is None:
- # Try appending metric to existing resource
- try:
- base_url = "{}/v1/resource/generic/%s/metric"
- res_url = base_url.format(endpoint) % resource_id
- payload = {metric_name: {'archive_policy_name': 'high',
- 'unit': values['metric_unit']}}
- result = self._common._perform_request(
- res_url, auth_token, req_type="post",
- payload=json.dumps(payload))
- # Get id of newly created metric
- for row in json.loads(result.text):
- if row['name'] == metric_name:
- metric_id = row['id']
- log.info("Appended metric to existing resource.")
-
- return metric_id, resource_id, True
- except Exception as exc:
- # Gnocchi version of resource does not exist creating a new one
- log.info("Failed to append metric to existing resource:%s",
- exc)
- try:
- url = "{}/v1/resource/generic".format(endpoint)
- metric = {'name': metric_name,
- 'archive_policy_name': 'high',
- 'unit': values['metric_unit'], }
-
- resource_payload = json.dumps({'id': resource_id,
- 'metrics': {
- metric_name: metric}})
-
- resource = self._common._perform_request(
- url, auth_token, req_type="post",
- payload=resource_payload)
-
- # Return the newly created resource_id for creating alarms
- new_resource_id = json.loads(resource.text)['id']
- log.info("Created new resource for metric: %s",
- new_resource_id)
-
- metric_id = self.get_metric_id(
- endpoint, auth_token, metric_name, new_resource_id)
-
- return metric_id, new_resource_id, True
- except Exception as exc:
- log.warn("Failed to create a new resource:%s", exc)
- return None, None, False
-
- else:
- log.info("This metric already exists for this resource.")
-
- return metric_id, resource_id, False
-
- def delete_metric(self, endpoint, auth_token, metric_id):
- """Delete metric."""
- url = "{}/v1/metric/%s".format(endpoint) % (metric_id)
-
- try:
- result = self._common._perform_request(
- url, auth_token, req_type="delete")
- if str(result.status_code) == "404":
- log.warn("Failed to delete the metric.")
- return False
- else:
- return True
- except Exception as exc:
- log.warn("Failed to carry out delete metric request:%s", exc)
- return False
-
- def list_metrics(self, endpoint, auth_token, values):
- """List all metrics."""
- url = "{}/v1/metric/".format(endpoint)
-
- # Check for a specified list
- try:
- # Check if the metric_name was specified for the list
- metric_name = values['metric_name'].lower()
- if metric_name not in METRIC_MAPPINGS.keys():
- log.warn("This metric is not supported, won't be listed.")
- metric_name = None
- except KeyError as exc:
- log.info("Metric name is not specified: %s", exc)
- metric_name = None
-
- try:
- resource = values['resource_uuid']
- except KeyError as exc:
- log.info("Resource is not specified:%s", exc)
- resource = None
-
- try:
- result = self._common._perform_request(
- url, auth_token, req_type="get")
- metrics = json.loads(result.text)
-
- if metrics is not None:
- # Format the list response
- if metric_name is not None and resource is not None:
- metric_list = self.response_list(
- metrics, metric_name=metric_name, resource=resource)
- log.info("Returning an %s resource list for %s metrics",
- metric_name, resource)
- elif metric_name is not None:
- metric_list = self.response_list(
- metrics, metric_name=metric_name)
- log.info("Returning a list of %s metrics", metric_name)
- elif resource is not None:
- metric_list = self.response_list(
- metrics, resource=resource)
- log.info("Return a list of %s resource metrics", resource)
- else:
- metric_list = self.response_list(metrics)
- log.info("Returning a complete list of metrics")
-
- return metric_list
- else:
- log.info("There are no metrics available")
- return []
- except Exception as exc:
- log.warn("Failed to generate any metric list. %s", exc)
- return None
-
- def get_metric_id(self, endpoint, auth_token, metric_name, resource_id):
- """Check if the desired metric already exists for the resource."""
- url = "{}/v1/resource/generic/%s".format(endpoint) % resource_id
-
- try:
- # Try return the metric id if it exists
- result = self._common._perform_request(
- url, auth_token, req_type="get")
- return json.loads(result.text)['metrics'][metric_name]
- except Exception:
- log.info("Metric doesn't exist. No metric_id available")
- return None
-
- def get_metric_name(self, values):
- """Check metric name configuration and normalize."""
- try:
- # Normalize metric name
- metric_name = values['metric_name'].lower()
- return metric_name, METRIC_MAPPINGS[metric_name]
- except KeyError:
- log.info("Metric name %s is invalid.", metric_name)
- return metric_name, None
-
    def read_metric_data(self, endpoint, auth_token, values):
        """Collect metric measures over a specified time period.

        Queries the Gnocchi measures endpoint for values['metric_uuid']
        and returns two parallel lists (timestamps, data).  On failure
        the lists gathered so far (possibly empty) are returned.
        """
        timestamps = []
        data = []
        try:
            # Try and collect measures
            metric_id = values['metric_uuid']
            collection_unit = values['collection_unit'].upper()
            collection_period = values['collection_period']

            # Define the start and end time based on configurations
            # NOTE(review): stop_time is a local-time string while the
            # start is derived from the epoch; assumes the local clock
            # matches what Gnocchi expects -- TODO confirm.
            stop_time = time.strftime("%Y-%m-%d") + "T" + time.strftime("%X")
            end_time = int(round(time.time() * 1000))
            if collection_unit == 'YEAR':
                # A year-long window ignores collection_period: the
                # window is capped at exactly one year back.
                diff = PERIOD_MS[collection_unit]
            else:
                diff = collection_period * PERIOD_MS[collection_unit]
            s_time = (end_time - diff) / 1000.0
            start_time = datetime.datetime.fromtimestamp(s_time).strftime(
                '%Y-%m-%dT%H:%M:%S.%f')
            base_url = "{}/v1/metric/%(0)s/measures?start=%(1)s&stop=%(2)s"
            url = base_url.format(endpoint) % {
                "0": metric_id, "1": start_time, "2": stop_time}

            # Perform metric data request
            metric_data = self._common._perform_request(
                url, auth_token, req_type="get")

            # Generate a list of the requested timestamps and data
            # Each measure row looks like [timestamp, granularity, value]
            # -- presumably; verify against the Gnocchi measures API.
            for r in json.loads(metric_data.text):
                timestamp = r[0].replace("T", " ")
                timestamps.append(timestamp)
                data.append(r[2])

            return timestamps, data
        except Exception as exc:
            # Best-effort: return whatever was gathered before the error
            log.warn("Failed to gather specified measures: %s", exc)
            return timestamps, data
-
- def response_list(self, metric_list, metric_name=None, resource=None):
- """Create the appropriate lists for a list response."""
- resp_list, name_list, res_list = [], [], []
-
- # Create required lists
- for row in metric_list:
- # Only list OSM metrics
- if row['name'] in METRIC_MAPPINGS.keys():
- metric = {"metric_name": row['name'],
- "metric_uuid": row['id'],
- "metric_unit": row['unit'],
- "resource_uuid": row['resource_id']}
- resp_list.append(str(metric))
- # Generate metric_name specific list
- if metric_name is not None:
- if row['name'] == metric_name:
- metric = {"metric_name": row['name'],
- "metric_uuid": row['id'],
- "metric_unit": row['unit'],
- "resource_uuid": row['resource_id']}
- name_list.append(str(metric))
- # Generate resource specific list
- if resource is not None:
- if row['resource_id'] == resource:
- metric = {"metric_name": row['name'],
- "metric_uuid": row['id'],
- "metric_unit": row['unit'],
- "resource_uuid": row['resource_id']}
- res_list.append(str(metric))
-
- # Join required lists
- if metric_name is not None and resource is not None:
- return list(set(res_list).intersection(name_list))
- elif metric_name is not None:
- return name_list
- elif resource is not None:
- return list(set(res_list).intersection(resp_list))
- else:
- return resp_list
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""OpenStack plugin for OSM MON."""
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Common methods for the OpenStack plugins."""
-import json
-
-import logging
-
-from keystoneclient.v3 import client
-
-from plugins.OpenStack.settings import Config
-
-import requests
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
class Common(object):
    """Common calls for Gnocchi/Aodh plugins."""

    def __init__(self):
        """Create the common instance."""
        # Cached keystone token and client; reset when auth fails
        self._auth_token = None
        self._ks = None
        # VIM access credentials, filled in from the SO message if any
        self.openstack_url = None
        self.user = None
        self.password = None
        self.tenant = None

    def _authenticate(self, message=None):
        """Authenticate and/or renew the authentication token.

        Tries the access credentials carried in *message* first, then
        falls back to environment-based configuration.  Returns the
        token, or None when every attempt failed.
        """
        if self._auth_token is not None:
            return self._auth_token

        if message is not None:
            values = json.loads(message.value)['access_config']
            self.openstack_url = values['openstack_site']
            self.user = values['user']
            self.password = values['password']
            self.tenant = values['vim_tenant_name']

            try:
                # try to authenticate with supplied access_credentials
                self._ks = client.Client(auth_url=self.openstack_url,
                                         username=self.user,
                                         password=self.password,
                                         tenant_name=self.tenant)
                self._auth_token = self._ks.auth_token
                log.info("Authenticating with access_credentials from SO.")
                return self._auth_token
            except Exception as exc:
                log.warn("Authentication failed with access_credentials: %s",
                         exc)
        else:
            # BUGFIX: this used to be the `else` of the try/except above,
            # where it was unreachable (the try always returned); it
            # belongs to `if message is not None`.
            log.info("Access_credentials were not sent from SO.")

        # If there are no access_credentials or they fail use env variables
        try:
            cfg = Config.instance()
            self._ks = client.Client(auth_url=cfg.OS_AUTH_URL,
                                     username=cfg.OS_USERNAME,
                                     password=cfg.OS_PASSWORD,
                                     tenant_name=cfg.OS_TENANT_NAME)
            log.info("Authenticating with environment variables.")
            self._auth_token = self._ks.auth_token
        except Exception as exc:
            log.warn("Authentication failed: %s", exc)
            self._auth_token = None

        return self._auth_token

    def get_endpoint(self, service_type):
        """Get the endpoint for Gnocchi/Aodh, or None on failure."""
        try:
            return self._ks.service_catalog.url_for(
                service_type=service_type,
                endpoint_type='internalURL',
                region_name='RegionOne')
        except Exception as exc:
            log.warning("Failed to retrieve endpoint for service due to: %s",
                        exc)
            return None

    @classmethod
    def _perform_request(cls, url, auth_token,
                         req_type=None, payload=None, params=None):
        """Perform the POST/PUT/GET/DELETE request.

        Returns the requests.Response; HTTP errors are logged but the
        response is returned unchanged so callers can inspect it.
        """
        # request headers
        headers = {'X-Auth-Token': auth_token,
                   'Content-type': 'application/json'}
        # perform request and return its result
        if req_type == "put":
            response = requests.put(
                url, data=payload, headers=headers,
                timeout=1)
        elif req_type == "get":
            response = requests.get(
                url, params=params, headers=headers, timeout=1)
        elif req_type == "delete":
            response = requests.delete(
                url, headers=headers, timeout=1)
        else:
            # Default (including req_type=None) is POST
            response = requests.post(
                url, data=payload, headers=headers,
                timeout=1)

        # Raises exception if there was an error
        try:
            response.raise_for_status()
        # pylint: disable=broad-except
        except Exception:
            # BUGFIX: the original format string was '%s, %d', which
            # applied %d to the response *text* and made the debug call
            # itself fail instead of logging the result.
            log.debug(
                'Result: %d, %s',
                response.status_code, response.text)
        return response
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Generate valid responses to send back to the SO."""
-
-import json
-import logging
-log = logging.getLogger(__name__)
-
-__author__ = "Helena McGough"
-
-schema_version = "1.0"
-
-
class OpenStack_Response(object):
    """Generates responses for SO from OpenStack plugins."""

    def __init__(self):
        """Initialize OpenStack Response instance."""

    def generate_response(self, key, **kwargs):
        """Dispatch *key* to the matching response builder."""
        builders = {
            "list_alarm_response": self.alarm_list_response,
            "create_alarm_response": self.create_alarm_response,
            "delete_alarm_response": self.delete_alarm_response,
            "update_alarm_response": self.update_alarm_response,
            "create_metric_response": self.metric_create_response,
            "read_metric_data_response": self.read_metric_data_response,
            "delete_metric_response": self.delete_metric_response,
            "update_metric_response": self.update_metric_response,
            "list_metric_response": self.list_metric_response,
            "notify_alarm": self.notify_alarm,
        }
        builder = builders.get(key)
        if builder is None:
            log.warn("Failed to generate a valid response message.")
            return None
        return builder(**kwargs)

    def alarm_list_response(self, **kwargs):
        """Generate the response for an alarm list request."""
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "list_alarm_response",
            "correlation_id": kwargs['cor_id'],
            "list_alarm_resp": kwargs['alarm_list']})

    def create_alarm_response(self, **kwargs):
        """Generate a response for a create alarm request."""
        details = {"correlation_id": kwargs['cor_id'],
                   "alarm_uuid": kwargs['alarm_id'],
                   "status": kwargs['status']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "create_alarm_response",
            "alarm_create_response": details})

    def delete_alarm_response(self, **kwargs):
        """Generate a response for a delete alarm request."""
        details = {"correlation_id": kwargs['cor_id'],
                   "alarm_uuid": kwargs['alarm_id'],
                   "status": kwargs['status']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "alarm_deletion_response",
            "alarm_deletion_response": details})

    def update_alarm_response(self, **kwargs):
        """Generate a response for an update alarm request."""
        details = {"correlation_id": kwargs['cor_id'],
                   "alarm_uuid": kwargs['alarm_id'],
                   "status": kwargs['status']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "update_alarm_response",
            "alarm_update_response": details})

    def metric_create_response(self, **kwargs):
        """Generate a response for a create metric request."""
        details = {"metric_uuid": kwargs['metric_id'],
                   "resource_uuid": kwargs['r_id'],
                   "status": kwargs['status']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "create_metric_response",
            "correlation_id": kwargs['cor_id'],
            "metric_create_response": details})

    def read_metric_data_response(self, **kwargs):
        """Generate a response for a read metric data request."""
        series = {"time_series": kwargs['times'],
                  "metrics_series": kwargs['metrics']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "read_metric_data_response",
            "metric_name": kwargs['m_name'],
            "metric_uuid": kwargs['m_id'],
            "resource_uuid": kwargs['r_id'],
            "correlation_id": kwargs['cor_id'],
            "metrics_data": series})

    def delete_metric_response(self, **kwargs):
        """Generate a response for a delete metric request."""
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "delete_metric_response",
            "metric_name": kwargs['m_name'],
            "metric_uuid": kwargs['m_id'],
            "resource_uuid": kwargs['r_id'],
            "correlation_id": kwargs['cor_id'],
            "status": kwargs['status']})

    def update_metric_response(self, **kwargs):
        """Generate a response for an update metric request."""
        details = {"metric_uuid": kwargs['m_id'],
                   "status": kwargs['status'],
                   "resource_uuid": kwargs['r_id']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "update_metric_response",
            "correlation_id": kwargs['cor_id'],
            "metric_update_response": details})

    def list_metric_response(self, **kwargs):
        """Generate a response for a list metric request."""
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "list_metric_response",
            "correlation_id": kwargs['cor_id'],
            "metrics_list": kwargs['m_list']})

    def notify_alarm(self, **kwargs):
        """Generate a response to send alarm notifications."""
        details = {"alarm_uuid": kwargs['a_id'],
                   "resource_uuid": kwargs['r_id'],
                   "vim_type": kwargs['vim_type'],
                   "severity": kwargs['sev'],
                   "status": kwargs['state'],
                   "start_date": kwargs['date']}
        return json.dumps({
            "schema_version": schema_version,
            "schema_type": "notify_alarm",
            "notify_details": details})
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Configurations for the OpenStack plugins."""
-
-import logging
-import os
-
-from collections import namedtuple
-
-from plugins.OpenStack.singleton import Singleton
-
-import six
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
class BadConfigError(Exception):
    """Raised when a configuration parameter cannot be parsed."""
-
-
class CfgParam(namedtuple('CfgParam', ['key', 'default', 'data_type'])):
    """One configuration parameter: name, default and value type."""

    def value(self, data):
        """Convert a string to the parameter type."""
        try:
            return self.data_type(data)
        except (ValueError, TypeError):
            msg = 'Invalid value "%s" for configuration parameter "%s"' % (
                data, self.key)
            raise BadConfigError(msg)
-
-
@Singleton
class Config(object):
    """Plugin configuration (OpenStack auth settings)."""

    _configuration = [
        CfgParam('OS_AUTH_URL', None, six.text_type),
        CfgParam('OS_IDENTITY_API_VERSION', "3", six.text_type),
        CfgParam('OS_USERNAME', None, six.text_type),
        CfgParam('OS_PASSWORD', "password", six.text_type),
        CfgParam('OS_TENANT_NAME', "service", six.text_type),
    ]

    _config_dict = {cfg.key: cfg for cfg in _configuration}
    _config_keys = _config_dict.keys()

    def __init__(self):
        """Set the default values."""
        for cfg in self._configuration:
            setattr(self, cfg.key, cfg.default)

    def read_environ(self, service):
        """Check the appropriate environment variables and update defaults.

        BUGFIX: the original returned from *inside* the loop as soon as
        it hit any key other than OS_IDENTITY_API_VERSION, OS_PASSWORD
        or OS_AUTH_URL, so keys after that point were never processed.
        """
        for key in self._config_keys:
            try:
                if key in ("OS_IDENTITY_API_VERSION", "OS_PASSWORD"):
                    setattr(self, key, str(os.environ[key]))
                elif key == "OS_AUTH_URL":
                    # Keystone v3 endpoint derived from the base URL
                    setattr(self, key, str(os.environ[key]) + "/v3")
                elif key == "OS_USERNAME":
                    # Default username for a service is its name
                    setattr(self, key, service)
                # Any other key (e.g. OS_TENANT_NAME) keeps its default,
                # matching the original behaviour of never reading it
                # from the environment.
            except KeyError as exc:
                log.warn("Failed to configure plugin: %s", exc)
                log.warn("Try re-authenticating your OpenStack deployment.")
                return
        log.info("Configuration complete!")
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Simple singleton class."""
-
-from __future__ import unicode_literals
-
-__author__ = "Helena McGough"
-
-
class Singleton(object):
    """Decorator that turns a class into a lazily-created singleton."""

    def __init__(self, decorated):
        """Remember the class being wrapped."""
        self._decorated = decorated

    def instance(self):
        """Return the shared instance, creating it on first use."""
        if not hasattr(self, '_instance'):
            self._instance = self._decorated()
        return self._instance
+++ /dev/null
-# -*- coding: utf-8 -*-
-
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Plugins for OSM MON."""
+++ /dev/null
-#gitkeep file to keep the initial empty directory structure.
+++ /dev/null
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
-
-"""
-vROPs Kafka Consumer that consumes the request messages
-"""
-
-
-from kafka import KafkaConsumer
-from kafka.errors import KafkaError
-import logging as log
-
class vROP_KafkaConsumer(object):
    """
    Kafka Consumer for vROPs
    """

    def __init__(self, topics=None, broker_uri=None):
        """
        Method to initialize KafkaConsumer
        Args:
            broker_uri - hostname:port uri of Kafka broker
            topics - list of topics to subscribe
        Returns:
            None
        """
        if broker_uri is None:
            self.broker = '0.0.0.0:9092'
        else:
            self.broker = broker_uri

        # BUGFIX: the original used a mutable default argument
        # (topics=[]), shared between all calls; default to None and
        # fall back to a fresh empty list instead.
        self.topic = topics if topics is not None else []
        print ("vROPs Consumer started, Broker URI: {}".format(self.broker))
        print ("Subscribed Topics {}".format(self.topic))
        try:
            self.vrops_consumer = KafkaConsumer(bootstrap_servers=self.broker)
            self.vrops_consumer.subscribe(self.topic)
        except Exception as exp:
            msg = "fail to create consumer for topic {} with broker {} Error : {}"\
                .format(self.topic, self.broker, exp)
            log.error(msg)
            raise Exception(msg)
-
+++ /dev/null
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
-
-"""
-Monitoring metrics & creating Alarm definitions in vROPs
-"""
-
-import requests
-import logging
-from pyvcloud.vcloudair import VCA
-from xml.etree import ElementTree as XmlElementTree
-import traceback
-import time
-import json
-from OpenSSL.crypto import load_certificate, FILETYPE_PEM
-import os
-import datetime
-from socket import getfqdn
-
-from requests.packages.urllib3.exceptions import InsecureRequestWarning
-requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-
# Map MANO comparison operators to their vROPs equivalents
OPERATION_MAPPING = {'GE':'GT_EQ', 'LE':'LT_EQ', 'GT':'GT', 'LT':'LT', 'EQ':'EQ'}
# Map MANO severities onto the vROPs severity scale (MINOR has no
# direct equivalent and is downgraded to WARNING)
severity_mano2vrops = {'WARNING':'WARNING', 'MINOR':'WARNING', 'MAJOR':"IMMEDIATE",\
                    'CRITICAL':'CRITICAL', 'INDETERMINATE':'UNKNOWN'}
# Collection-period units expressed in milliseconds
PERIOD_MSEC = {'HR':3600000,'DAY':86400000,'WEEK':604800000,'MONTH':2678400000,'YEAR':31536000000}

#To Do - Add actual webhook url & certificate
#SSL_CERTIFICATE_FILE_NAME = 'vROPs_Webservice/SSL_certificate/www.vrops_webservice.com.cert'
#webhook_url = "https://mano-dev-1:8080/notify/" #for testing
# Webhook endpoint and certificate are derived from this host's FQDN
webhook_url = "https://" + getfqdn() + ":8080/notify/"
SSL_CERTIFICATE_FILE_NAME = ('vROPs_Webservice/SSL_certificate/' + getfqdn() + ".cert")
#SSL_CERTIFICATE_FILE_NAME = 'vROPs_Webservice/SSL_certificate/10.172.137.214.cert' #for testing

# Plugin config file and certificate live alongside this module
MODULE_DIR = os.path.dirname(__file__)
CONFIG_FILE_NAME = 'vrops_config.xml'
CONFIG_FILE_PATH = os.path.join(MODULE_DIR, CONFIG_FILE_NAME)
SSL_CERTIFICATE_FILE_PATH = os.path.join(MODULE_DIR, SSL_CERTIFICATE_FILE_NAME)
-
-class MonPlugin():
- """MON Plugin class for vROPs telemetry plugin
- """
    def __init__(self):
        """Constructor of MON plugin
        Params:
            'access_config': dictionary with VIM access information based on VIM type.
            This contains a consolidate version of VIM & monitoring tool config at creation and
            particular VIM config at their attachment.
            For VIM type: 'vmware',
            access_config - {'vrops_site':<>, 'vrops_user':<>, 'vrops_password':<>,
                            'vcloud-site':<>,'admin_username':<>,'admin_password':<>,
                            'nsx_manager':<>,'nsx_user':<>,'nsx_password':<>,
                            'vcenter_ip':<>,'vcenter_port':<>,'vcenter_user':<>,'vcenter_password':<>,
                            'vim_tenant_name':<>,'orgname':<>}

        #To Do
        Returns: Raise an exception if some needed parameter is missing, but it must not do any connectivity
            check against the VIM
        """
        self.logger = logging.getLogger('PluginReceiver.MonPlugin')
        self.logger.setLevel(logging.DEBUG)

        # Access credentials are read from the plugin's XML config file,
        # not passed in by the caller (see get_default_Params).
        access_config = self.get_default_Params('Access_Config')
        self.access_config = access_config
        if not bool(access_config):
            # Without access configuration the plugin cannot reach vROPs
            self.logger.error("Access configuration not provided in vROPs Config file")
            raise KeyError("Access configuration not provided in vROPs Config file")

        try:
            # All of these keys are mandatory; a missing one is a
            # configuration error, re-raised as KeyError for the caller.
            self.vrops_site = access_config['vrops_site']
            self.vrops_user = access_config['vrops_user']
            self.vrops_password = access_config['vrops_password']
            self.vcloud_site = access_config['vcloud-site']
            self.admin_username = access_config['admin_username']
            self.admin_password = access_config['admin_password']
            self.tenant_id = access_config['tenant_id']
        except KeyError as exp:
            self.logger.error("Check Access configuration in vROPs Config file: {}".format(exp))
            raise KeyError("Check Access configuration in vROPs Config file: {}".format(exp))
-
-
    def configure_alarm(self, config_dict = {}):
        """Configures or creates a new alarm using the input parameters in config_dict
        Params:
        "alarm_name": Alarm name in string format
        "description": Description of alarm in string format
        "resource_uuid": Resource UUID for which alarm needs to be configured. in string format
        "Resource type": String resource type: 'VDU' or 'host'
        "Severity": 'WARNING', 'MINOR', 'MAJOR', 'CRITICAL'
        "metric_name": Metric key in string format
        "operation": One of ('GE', 'LE', 'GT', 'LT', 'EQ')
        "threshold_value": Defines the threshold (up to 2 fraction digits) that,
                    if crossed, will trigger the alarm.
        "unit": Unit of measurement in string format
        "statistic": AVERAGE, MINIMUM, MAXIMUM, COUNT, SUM

        Default parameters for each alarm are read from the plugin specific config file.
        Dict of default parameters is as follows:
        default_params keys = {'cancel_cycles','wait_cycles','resource_kind','adapter_kind',
                            'alarm_type','alarm_subType',impact}

        Returns the UUID of created alarm or None
        """
        # NOTE(review): mutable default argument config_dict={} -- not
        # mutated here, but fragile; callers always pass a dict.
        alarm_def = None
        #1) get alarm & metrics parameters from plugin specific file
        def_a_params = self.get_default_Params(config_dict['alarm_name'])
        if not def_a_params:
            self.logger.warn("Alarm not supported: {}".format(config_dict['alarm_name']))
            return None
        metric_key_params = self.get_default_Params(config_dict['metric_name'])
        if not metric_key_params:
            self.logger.warn("Metric not supported: {}".format(config_dict['metric_name']))
            return None
        #2) create symptom definition
        # Cycle counts are derived from periods expressed in seconds,
        # with vROPs evaluating every 300s (5 min) -- TODO confirm.
        vrops_alarm_name = def_a_params['vrops_alarm']+ '-' + config_dict['resource_uuid']
        symptom_params ={'cancel_cycles': (def_a_params['cancel_period']/300)*def_a_params['cancel_cycles'],
                        'wait_cycles': (def_a_params['period']/300)*def_a_params['evaluation'],
                        'resource_kind_key': def_a_params['resource_kind'],
                        'adapter_kind_key': def_a_params['adapter_kind'],
                        'symptom_name':vrops_alarm_name,
                        'severity': severity_mano2vrops[config_dict['severity']],
                        'metric_key':metric_key_params['metric_key'],
                        'operation':OPERATION_MAPPING[config_dict['operation']],
                        'threshold_value':config_dict['threshold_value']}
        symptom_uuid = self.create_symptom(symptom_params)
        if symptom_uuid is not None:
            self.logger.info("Symptom defined: {} with ID: {}".format(symptom_params['symptom_name'],symptom_uuid))
        else:
            self.logger.warn("Failed to create Symptom: {}".format(symptom_params['symptom_name']))
            return None
        #3) create alert definition
        #To Do - Get type & subtypes for all 5 alarms
        alarm_params = {'name':vrops_alarm_name,
                        'description':config_dict['description']\
                        if config_dict['description'] is not None else config_dict['alarm_name'],
                        'adapterKindKey':def_a_params['adapter_kind'],
                        'resourceKindKey':def_a_params['resource_kind'],
                        'waitCycles':1, 'cancelCycles':1,
                        'type':def_a_params['alarm_type'], 'subType':def_a_params['alarm_subType'],
                        'severity':severity_mano2vrops[config_dict['severity']],
                        'symptomDefinitionId':symptom_uuid,
                        'impact':def_a_params['impact']}

        alarm_def = self.create_alarm_definition(alarm_params)
        if alarm_def is None:
            self.logger.warn("Failed to create Alert: {}".format(alarm_params['name']))
            return None

        self.logger.info("Alarm defined: {} with ID: {}".format(alarm_params['name'],alarm_def))

        #4) Find vm_moref_id from vApp uuid in vCD
        vm_moref_id = self.get_vm_moref_id(config_dict['resource_uuid'])
        if vm_moref_id is None:
            self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(config_dict['resource_uuid']))
            return None

        #5) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
        resource_id = self.get_vm_resource_id(vm_moref_id)
        if resource_id is None:
            self.logger.warn("Failed to find resource in vROPs: {}".format(config_dict['resource_uuid']))
            return None

        #6) Configure alarm notification for a particular VM using it's resource_id
        notification_id = self.create_alarm_notification_rule(vrops_alarm_name, alarm_def, resource_id)
        if notification_id is None:
            return None
        else:
            # alarm_def looks like 'AlertDefinition-<uuid>'; only the
            # bare uuid is returned to the caller
            alarm_def_uuid = alarm_def.split('-', 1)[1]
            self.logger.info("Alarm defination created with notification: {} with ID: {}"\
                    .format(alarm_params['name'],alarm_def_uuid))
            #Return alarm definition UUID by removing 'AlertDefinition' from UUID
            return (alarm_def_uuid)
-
- def get_default_Params(self, metric_alarm_name):
- """
- Read the default config parameters from plugin specific file stored with plugin file.
- Params:
- metric_alarm_name: Name of the alarm, whose congif params to be read from the config file.
- """
- a_params = {}
- try:
- source = open(CONFIG_FILE_PATH, 'r')
- except IOError as exp:
- msg = ("Could not read Config file: {}, \nException: {}"\
- .format(CONFIG_FILE_PATH, exp))
- self.logger.error(msg)
- raise IOError(msg)
-
- tree = XmlElementTree.parse(source)
- alarms = tree.getroot()
- for alarm in alarms:
- if alarm.tag == metric_alarm_name:
- for param in alarm:
- if param.tag in ("period", "evaluation", "cancel_period", "alarm_type",\
- "cancel_cycles", "alarm_subType"):
- a_params[param.tag] = int(param.text)
- elif param.tag in ("enabled", "repeat"):
- if(param.text.lower() == "true"):
- a_params[param.tag] = True
- else:
- a_params[param.tag] = False
- else:
- a_params[param.tag] = param.text
- source.close()
- return a_params
-
-
    def create_symptom(self, symptom_params):
        """Create Symptom definition for an alarm
        Params:
        symptom_params: Dict of parameters required for defining a symptom as follows
            cancel_cycles
            wait_cycles
            resource_kind_key = "VirtualMachine"
            adapter_kind_key = "VMWARE"
            symptom_name = Test_Memory_Usage_TooHigh
            severity
            metric_key
            operation = GT_EQ
            threshold_value = 85
        Returns the uuid of Symptom definition, or None on any failure
        """
        symptom_id = None

        try:
            # POST an XML symptom definition to the vROPs suite API
            api_url = '/suite-api/api/symptomdefinitions'
            headers = {'Content-Type': 'application/xml'}
            data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                        <ops:symptom-definition cancelCycles="{0:s}" waitCycles="{1:s}"
                            resourceKindKey="{2:s}" adapterKindKey="{3:s}"
                            xmlns:xs="http://www.w3.org/2001/XMLSchema"
                            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                            xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
                            <ops:name>{4:s}</ops:name>
                            <ops:state severity="{5:s}">
                                <ops:condition xsi:type="ops:htCondition">
                                    <ops:key>{6:s}</ops:key>
                                    <ops:operator>{7:s}</ops:operator>
                                    <ops:value>{8:s}</ops:value>
                                    <ops:valueType>NUMERIC</ops:valueType>
                                    <ops:instanced>false</ops:instanced>
                                    <ops:thresholdType>STATIC</ops:thresholdType>
                                </ops:condition>
                            </ops:state>
                        </ops:symptom-definition>"""\
                        .format(str(symptom_params['cancel_cycles']),str(symptom_params['wait_cycles']),
                                symptom_params['resource_kind_key'], symptom_params['adapter_kind_key'],
                                symptom_params['symptom_name'],symptom_params['severity'],
                                symptom_params['metric_key'],symptom_params['operation'],
                                str(symptom_params['threshold_value']))

            # NOTE(review): TLS verification is disabled (verify=False);
            # consider pinning the vROPs certificate instead.
            resp = requests.post(self.vrops_site + api_url,
                                 auth=(self.vrops_user, self.vrops_password),
                                 headers=headers,
                                 verify = False,
                                 data=data)

            # vROPs returns 201 Created on success
            if resp.status_code != 201:
                self.logger.warn("Failed to create Symptom definition: {}, response {}"\
                        .format(symptom_params['symptom_name'], resp.content))
                return None

            # Extract the generated symptom id from the response XML
            symptom_xmlroot = XmlElementTree.fromstring(resp.content)
            if symptom_xmlroot is not None and 'id' in symptom_xmlroot.attrib:
                symptom_id = symptom_xmlroot.attrib['id']

            return symptom_id

        except Exception as exp:
            # Falls through to an implicit None return
            self.logger.warn("Error creating symptom definition : {}\n{}"\
                .format(exp, traceback.format_exc()))
-
-
- def create_alarm_definition(self, alarm_params):
- """
- Create an alarm definition in vROPs
- Params:
- 'name': Alarm Name,
- 'description':Alarm description,
- 'adapterKindKey': Adapter type in vROPs "VMWARE",
- 'resourceKindKey':Resource type in vROPs "VirtualMachine",
- 'waitCycles': No of wait cycles,
- 'cancelCycles': No of cancel cycles,
- 'type': Alarm type,
- 'subType': Alarm subtype,
- 'severity': Severity in vROPs "CRITICAL",
- 'symptomDefinitionId':symptom Definition uuid,
- 'impact': impact 'risk'
- Returns:
- 'alarm_uuid': returns alarm uuid
- """
-
- alarm_uuid = None
-
- try:
- api_url = '/suite-api/api/alertdefinitions'
- headers = {'Content-Type': 'application/xml'}
- data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
- <ops:alert-definition xmlns:xs="http://www.w3.org/2001/XMLSchema"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
- <ops:name>{0:s}</ops:name>
- <ops:description>{1:s}</ops:description>
- <ops:adapterKindKey>{2:s}</ops:adapterKindKey>
- <ops:resourceKindKey>{3:s}</ops:resourceKindKey>
- <ops:waitCycles>1</ops:waitCycles>
- <ops:cancelCycles>1</ops:cancelCycles>
- <ops:type>{4:s}</ops:type>
- <ops:subType>{5:s}</ops:subType>
- <ops:states>
- <ops:state severity="{6:s}">
- <ops:symptom-set>
- <ops:symptomDefinitionIds>
- <ops:symptomDefinitionId>{7:s}</ops:symptomDefinitionId>
- </ops:symptomDefinitionIds>
- <ops:relation>SELF</ops:relation>
- <ops:aggregation>ALL</ops:aggregation>
- <ops:symptomSetOperator>AND</ops:symptomSetOperator>
- </ops:symptom-set>
- <ops:impact>
- <ops:impactType>BADGE</ops:impactType>
- <ops:detail>{8:s}</ops:detail>
- </ops:impact>
- </ops:state>
- </ops:states>
- </ops:alert-definition>"""\
- .format(alarm_params['name'],alarm_params['description'],
- alarm_params['adapterKindKey'],alarm_params['resourceKindKey'],
- str(alarm_params['type']),str(alarm_params['subType']),
- alarm_params['severity'],alarm_params['symptomDefinitionId'],
- alarm_params['impact'])
-
- resp = requests.post(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- headers=headers,
- verify = False,
- data=data)
-
- if resp.status_code != 201:
- self.logger.warn("Failed to create Alarm definition: {}, response {}"\
- .format(alarm_params['name'], resp.content))
- return None
-
- alarm_xmlroot = XmlElementTree.fromstring(resp.content)
- for child in alarm_xmlroot:
- if child.tag.split("}")[1] == 'id':
- alarm_uuid = child.text
-
- return alarm_uuid
-
- except Exception as exp:
- self.logger.warn("Error creating alarm definition : {}\n{}".format(exp, traceback.format_exc()))
-
-
- def configure_rest_plugin(self):
- """
- Creates REST Plug-in for vROPs outbound alerts
-
- Returns Plugin ID
- """
- plugin_id = None
- plugin_name = 'MON_module_REST_Plugin'
- plugin_id = self.check_if_plugin_configured(plugin_name)
-
- #If REST plugin not configured, configure it
- if plugin_id is not None:
- return plugin_id
- else:
- try:
- cert_file_string = open(SSL_CERTIFICATE_FILE_PATH, "rb").read()
- except IOError as exp:
- msg = ("Could not read SSL certificate file: {}".format(SSL_CERTIFICATE_FILE_PATH))
- self.logger.error(msg)
- raise IOError(msg)
- cert = load_certificate(FILETYPE_PEM, cert_file_string)
- certificate = cert.digest("sha1")
- api_url = '/suite-api/api/alertplugins'
- headers = {'Content-Type': 'application/xml'}
- data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
- <ops:notification-plugin version="0" xmlns:xs="http://www.w3.org/2001/XMLSchema"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
- <ops:pluginTypeId>RestPlugin</ops:pluginTypeId>
- <ops:name>{0:s}</ops:name>
- <ops:configValues>
- <ops:configValue name="Url">{1:s}</ops:configValue>
- <ops:configValue name="Content-type">application/json</ops:configValue>
- <ops:configValue name="Certificate">{2:s}</ops:configValue>
- <ops:configValue name="ConnectionCount">20</ops:configValue>
- </ops:configValues>
- </ops:notification-plugin>""".format(plugin_name, webhook_url, certificate)
-
- resp = requests.post(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- headers=headers,
- verify = False,
- data=data)
-
- if resp.status_code is not 201:
- self.logger.warn("Failed to create REST Plugin: {} for url: {}, \nresponse code: {},"\
- "\nresponse content: {}".format(plugin_name, webhook_url,\
- resp.status_code, resp.content))
- return None
-
- plugin_xmlroot = XmlElementTree.fromstring(resp.content)
- if plugin_xmlroot is not None:
- for child in plugin_xmlroot:
- if child.tag.split("}")[1] == 'pluginId':
- plugin_id = plugin_xmlroot.find('{http://webservice.vmware.com/vRealizeOpsMgr/1.0/}pluginId').text
-
- if plugin_id is None:
- self.logger.warn("Failed to get REST Plugin ID for {}, url: {}".format(plugin_name, webhook_url))
- return None
- else:
- self.logger.info("Created REST Plugin: {} with ID : {} for url: {}".format(plugin_name, plugin_id, webhook_url))
- status = self.enable_rest_plugin(plugin_id, plugin_name)
- if status is False:
- self.logger.warn("Failed to enable created REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
- return None
- else:
- self.logger.info("Enabled REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
- return plugin_id
-
- def check_if_plugin_configured(self, plugin_name):
- """Check if the REST plugin is already created
- Returns: plugin_id: if already created, None: if needs to be created
- """
- plugin_id = None
- #Find the REST Plugin id details for - MON_module_REST_Plugin
- api_url = '/suite-api/api/alertplugins'
- headers = {'Accept': 'application/xml'}
- namespace = {'params':"http://webservice.vmware.com/vRealizeOpsMgr/1.0/"}
-
- resp = requests.get(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Failed to REST GET Alarm plugin details \nResponse code: {}\nResponse content: {}"\
- .format(resp.status_code, resp.content))
- return None
-
- # Look for specific plugin & parse pluginId for 'MON_module_REST_Plugin'
- xmlroot_resp = XmlElementTree.fromstring(resp.content)
- for notify_plugin in xmlroot_resp.findall('params:notification-plugin',namespace):
- if notify_plugin.find('params:name',namespace) is not None and\
- notify_plugin.find('params:pluginId',namespace) is not None:
- if notify_plugin.find('params:name',namespace).text == plugin_name:
- plugin_id = notify_plugin.find('params:pluginId',namespace).text
-
- if plugin_id is None:
- self.logger.warn("REST plugin {} not found".format('MON_module_REST_Plugin'))
- return None
- else:
- self.logger.info("Found REST Plugin: {}".format(plugin_name))
- return plugin_id
-
-
- def enable_rest_plugin(self, plugin_id, plugin_name):
- """
- Enable the REST plugin using plugin_id
- Params: plugin_id: plugin ID string that is to be enabled
- Returns: status (Boolean) - True for success, False for failure
- """
-
- if plugin_id is None or plugin_name is None:
- self.logger.debug("enable_rest_plugin() : Plugin ID or plugin_name not provided for {} plugin"\
- .format(plugin_name))
- return False
-
- try:
- api_url = "/suite-api/api/alertplugins/{}/enable/True".format(plugin_id)
-
- resp = requests.put(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- verify = False)
-
- if resp.status_code is not 204:
- self.logger.warn("Failed to enable REST plugin {}. \nResponse code {}\nResponse Content: {}"\
- .format(plugin_name, resp.status_code, resp.content))
- return False
-
- self.logger.info("Enabled REST plugin {}.".format(plugin_name))
- return True
-
- except Exception as exp:
- self.logger.warn("Error enabling REST plugin for {} plugin: Exception: {}\n{}"\
- .format(plugin_name, exp, traceback.format_exc()))
-
- def create_alarm_notification_rule(self, alarm_name, alarm_id, resource_id):
- """
- Create notification rule for each alarm
- Params:
- alarm_name
- alarm_id
- resource_id
-
- Returns:
- notification_id: notification_id or None
- """
- notification_name = 'notify_' + alarm_name
- notification_id = None
- plugin_name = 'MON_module_REST_Plugin'
-
- #1) Find the REST Plugin id details for - MON_module_REST_Plugin
- plugin_id = self.check_if_plugin_configured(plugin_name)
- if plugin_id is None:
- self.logger.warn("Failed to get REST plugin_id for : {}".format('MON_module_REST_Plugin'))
- return None
-
- #2) Create Alarm notification rule
- api_url = '/suite-api/api/notifications/rules'
- headers = {'Content-Type': 'application/xml'}
- data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
- <ops:notification-rule xmlns:xs="http://www.w3.org/2001/XMLSchema"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
- <ops:name>{0:s}</ops:name>
- <ops:pluginId>{1:s}</ops:pluginId>
- <ops:resourceFilter resourceId="{2:s}">
- <ops:matchResourceIdOnly>true</ops:matchResourceIdOnly>
- </ops:resourceFilter>
- <ops:alertDefinitionIdFilters>
- <ops:values>{3:s}</ops:values>
- </ops:alertDefinitionIdFilters>
- </ops:notification-rule>"""\
- .format(notification_name, plugin_id, resource_id, alarm_id)
-
- resp = requests.post(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- headers=headers,
- verify = False,
- data=data)
-
- if resp.status_code is not 201:
- self.logger.warn("Failed to create Alarm notification rule {} for {} alarm."\
- "\nResponse code: {}\nResponse content: {}"\
- .format(notification_name, alarm_name, resp.status_code, resp.content))
- return None
-
- #parse notification id from response
- xmlroot_resp = XmlElementTree.fromstring(resp.content)
- if xmlroot_resp is not None and 'id' in xmlroot_resp.attrib:
- notification_id = xmlroot_resp.attrib.get('id')
-
- self.logger.info("Created Alarm notification rule {} for {} alarm.".format(notification_name, alarm_name))
- return notification_id
-
- def get_vm_moref_id(self, vapp_uuid):
- """
- Get the moref_id of given VM
- """
- try:
- if vapp_uuid:
- vm_details = self.get_vapp_details_rest(vapp_uuid)
- if vm_details and "vm_vcenter_info" in vm_details:
- vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
-
- self.logger.info("Found vm_moref_id: {} for vApp UUID: {}".format(vm_moref_id, vapp_uuid))
- return vm_moref_id
-
- except Exception as exp:
- self.logger.warn("Error occurred while getting VM moref ID for VM : {}\n{}"\
- .format(exp, traceback.format_exc()))
-
-
- def get_vapp_details_rest(self, vapp_uuid=None):
- """
- Method retrieve vapp detail from vCloud director
-
- Args:
- vapp_uuid - is vapp identifier.
-
- Returns:
- Returns VM MOref ID or return None
- """
-
- parsed_respond = {}
- vca = None
-
- vca = self.connect_as_admin()
-
- if not vca:
- self.logger.warn("connect() to vCD is failed")
- if vapp_uuid is None:
- return None
-
- url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
- get_vapp_restcall = ''.join(url_list)
-
- if vca.vcloud_session and vca.vcloud_session.organization:
- response = requests.get(get_vapp_restcall,
- headers=vca.vcloud_session.get_vcloud_headers(),
- verify=vca.verify)
-
- if response.status_code != 200:
- self.logger.warn("REST API call {} failed. Return status code {}"\
- .format(get_vapp_restcall, response.content))
- return parsed_respond
-
- try:
- xmlroot_respond = XmlElementTree.fromstring(response.content)
-
- namespaces = {'vm': 'http://www.vmware.com/vcloud/v1.5',
- "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
- "xmlns":"http://www.vmware.com/vcloud/v1.5"
- }
-
- # parse children section for other attrib
- children_section = xmlroot_respond.find('vm:Children/', namespaces)
- if children_section is not None:
- vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
- if vCloud_extension_section is not None:
- vm_vcenter_info = {}
- vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
- vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
- if vmext is not None:
- vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
- parsed_respond["vm_vcenter_info"]= vm_vcenter_info
-
- except Exception as exp :
- self.logger.warn("Error occurred calling rest api for getting vApp details: {}\n{}"\
- .format(exp, traceback.format_exc()))
-
- return parsed_respond
-
-
- def connect_as_admin(self):
- """ Method connect as pvdc admin user to vCloud director.
- There are certain action that can be done only by provider vdc admin user.
- Organization creation / provider network creation etc.
-
- Returns:
- The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
- """
-
- self.logger.info("Logging in to a VCD org as admin.")
-
- vca_admin = VCA(host=self.vcloud_site,
- username=self.admin_username,
- service_type='standalone',
- version='5.9',
- verify=False,
- log=False)
- result = vca_admin.login(password=self.admin_password, org='System')
- if not result:
- self.logger.warn("Can't connect to a vCloud director as: {}".format(self.admin_username))
- result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
- if result is True:
- self.logger.info("Successfully logged to a vcloud direct org: {} as user: {}"\
- .format('System', self.admin_username))
-
- return vca_admin
-
-
- def get_vm_resource_id(self, vm_moref_id):
- """ Find resource ID in vROPs using vm_moref_id
- """
- if vm_moref_id is None:
- return None
-
- api_url = '/suite-api/api/resources'
- headers = {'Accept': 'application/xml'}
- namespace = {'params':"http://webservice.vmware.com/vRealizeOpsMgr/1.0/"}
-
- resp = requests.get(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Failed to get resource details from vROPs for {}\nResponse code:{}\nResponse Content: {}"\
- .format(vm_moref_id, resp.status_code, resp.content))
- return None
-
- try:
- xmlroot_respond = XmlElementTree.fromstring(resp.content)
- for resource in xmlroot_respond.findall('params:resource',namespace):
- if resource is not None:
- resource_key = resource.find('params:resourceKey',namespace)
- if resource_key is not None:
- if resource_key.find('params:adapterKindKey',namespace).text == 'VMWARE' and \
- resource_key.find('params:resourceKindKey',namespace).text == 'VirtualMachine':
- for child in resource_key:
- if child.tag.split('}')[1]=='resourceIdentifiers':
- resourceIdentifiers = child
- for r_id in resourceIdentifiers:
- if r_id.find('params:value',namespace).text == vm_moref_id:
- self.logger.info("Found Resource ID : {} in vROPs for {}"\
- .format(resource.attrib['identifier'], vm_moref_id))
- return resource.attrib['identifier']
- except Exception as exp:
- self.logger.warn("Error in parsing {}\n{}".format(exp, traceback.format_exc()))
-
-
- def get_metrics_data(self, metric={}):
- """Get an individual metric's data of a resource.
- Params:
- 'metric_name': Normalized name of metric (string)
- 'resource_uuid': Resource UUID (string)
- 'period': Time period in Period Unit for which metrics data to be collected from
- Monitoring tool from now.
- 'period_unit': Period measurement unit can be one of 'HR', 'DAY', 'MONTH', 'YEAR'
-
- Return a dict that contains:
- 'metric_name': Normalized name of metric (string)
- 'resource_uuid': Resource UUID (string)
- 'tenant_id': tenent id name in which the resource is present in string format
- 'metrics_data': Dictionary containing time_series & metric_series data.
- 'time_series': List of individual time stamp values in msec
- 'metric_series': List of individual metrics data values
- Raises an exception upon error or when network is not found
- """
- return_data = {}
- return_data['schema_version'] = 1.0
- return_data['schema_type'] = 'read_metric_data_response'
- return_data['metric_name'] = metric['metric_name']
- #To do - No metric_uuid in vROPs, thus returning '0'
- return_data['metric_uuid'] = '0'
- return_data['correlation_id'] = metric['correlation_id']
- return_data['resource_uuid'] = metric['resource_uuid']
- return_data['metrics_data'] = {'time_series':[], 'metric_series':[]}
- #To do - Need confirmation about uuid & id
- if 'tenant_uuid' in metric and metric['tenant_uuid'] is not None:
- return_data['tenant_uuid'] = metric['tenant_uuid']
- else:
- return_data['tenant_uuid'] = None
- return_data['unit'] = None
- #return_data['tenant_id'] = self.tenant_id
- #self.logger.warn("return_data: {}".format(return_data))
-
- #1) Get metric details from plugin specific file & format it into vROPs metrics
- metric_key_params = self.get_default_Params(metric['metric_name'])
-
- if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(metric['metric_name']))
- #To Do: Return message
- return return_data
-
- return_data['unit'] = metric_key_params['unit']
-
- #2) Find the resource id in vROPs based on OSM resource_uuid
- #2.a) Find vm_moref_id from vApp uuid in vCD
- vm_moref_id = self.get_vm_moref_id(metric['resource_uuid'])
- if vm_moref_id is None:
- self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(config_dict['resource_uuid']))
- return return_data
- #2.b) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
- resource_id = self.get_vm_resource_id(vm_moref_id)
- if resource_id is None:
- self.logger.warn("Failed to find resource in vROPs: {}".format(config_dict['resource_uuid']))
- return return_data
-
- #3) Calculate begin & end time for period & period unit
- end_time = int(round(time.time() * 1000))
- if metric['collection_unit'] == 'YR':
- time_diff = PERIOD_MSEC[metric['collection_unit']]
- else:
- time_diff = metric['collection_period']* PERIOD_MSEC[metric['collection_unit']]
- begin_time = end_time - time_diff
-
- #4) Get the metrics data
- self.logger.info("metric_key_params['metric_key'] = {}".format(metric_key_params['metric_key']))
- self.logger.info("end_time: {}, begin_time: {}".format(end_time, begin_time))
-
- url_list = ['/suite-api/api/resources/', resource_id, '/stats?statKey=',\
- metric_key_params['metric_key'], '&begin=', str(begin_time),'&end=',str(end_time)]
- api_url = ''.join(url_list)
- headers = {'Accept': 'application/json'}
-
- resp = requests.get(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Failed to retrive Metric data from vROPs for {}\nResponse code:{}\nResponse Content: {}"\
- .format(metric['metric_name'], resp.status_code, resp.content))
- return return_data
-
- #5) Convert to required format
- metrics_data = {}
- json_data = json.loads(resp.content)
- for resp_key,resp_val in json_data.iteritems():
- if resp_key == 'values':
- data = json_data['values'][0]
- for data_k,data_v in data.iteritems():
- if data_k == 'stat-list':
- stat_list = data_v
- for stat_list_k,stat_list_v in stat_list.iteritems():
- for stat_keys,stat_vals in stat_list_v[0].iteritems():
- if stat_keys == 'timestamps':
- metrics_data['time_series'] = stat_list_v[0]['timestamps']
- if stat_keys == 'data':
- metrics_data['metric_series'] = stat_list_v[0]['data']
-
- return_data['metrics_data'] = metrics_data
-
- return return_data
-
- def update_alarm_configuration(self, new_alarm_config):
- """Update alarm configuration (i.e. Symptom & alarm) as per request
- """
- #1) Get Alarm details from it's uuid & find the symptom defination
- alarm_details_json, alarm_details = self.get_alarm_defination_details(new_alarm_config['alarm_uuid'])
- if alarm_details_json is None:
- return None
-
- try:
- #2) Update the symptom defination
- if alarm_details['alarm_id'] is not None and alarm_details['symptom_definition_id'] is not None:
- symptom_defination_id = alarm_details['symptom_definition_id']
- else:
- self.logger.info("Symptom Defination ID not found for {}".format(new_alarm_config['alarm_uuid']))
- return None
-
- symptom_uuid = self.update_symptom_defination(symptom_defination_id, new_alarm_config)
-
- #3) Update the alarm defination & Return UUID if successful update
- if symptom_uuid is None:
- self.logger.info("Symptom Defination details not found for {}"\
- .format(new_alarm_config['alarm_uuid']))
- return None
- else:
- alarm_uuid = self.reconfigure_alarm(alarm_details_json, new_alarm_config)
- if alarm_uuid is None:
- return None
- else:
- return alarm_uuid
- except:
- self.logger.error("Exception while updating alarm: {}".format(traceback.format_exc()))
-
- def get_alarm_defination_details(self, alarm_uuid):
- """Get alarm details based on alarm UUID
- """
- if alarm_uuid is None:
- self.logger.warn("get_alarm_defination_details: Alarm UUID not provided")
- return None, None
-
- alarm_details = {}
- json_data = {}
- api_url = '/suite-api/api/alertdefinitions/AlertDefinition-'
- headers = {'Accept': 'application/json'}
-
- resp = requests.get(self.vrops_site + api_url + alarm_uuid,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Alarm to be updated not found: {}\nResponse code:{}\nResponse Content: {}"\
- .format(alarm_uuid, resp.status_code, resp.content))
- return None, None
-
- try:
- json_data = json.loads(resp.content)
- if json_data['id'] is not None:
- alarm_details['alarm_id'] = json_data['id']
- alarm_details['alarm_name'] = json_data['name']
- alarm_details['adapter_kind'] = json_data['adapterKindKey']
- alarm_details['resource_kind'] = json_data['resourceKindKey']
- alarm_details['type'] = json_data['type']
- alarm_details['sub_type'] = json_data['subType']
- alarm_details['symptom_definition_id'] = json_data['states'][0]['base-symptom-set']['symptomDefinitionIds'][0]
- except exception as exp:
- self.logger.warn("Exception while retriving alarm defination details: {}".format(exp))
- return None, None
-
- return json_data, alarm_details
-
-
- def update_symptom_defination(self, symptom_uuid, new_alarm_config):
- """Update symptom defination based on new alarm input configuration
- """
- #1) Get symptom defination details
- symptom_details = self.get_symptom_defination_details(symptom_uuid)
- #print "\n\nsymptom_details: {}".format(symptom_details)
- if symptom_details is None:
- return None
-
- if new_alarm_config.has_key('severity') and new_alarm_config['severity'] is not None:
- symptom_details['state']['severity'] = severity_mano2vrops[new_alarm_config['severity']]
- if new_alarm_config.has_key('operation') and new_alarm_config['operation'] is not None:
- symptom_details['state']['condition']['operator'] = OPERATION_MAPPING[new_alarm_config['operation']]
- if new_alarm_config.has_key('threshold_value') and new_alarm_config['threshold_value'] is not None:
- symptom_details['state']['condition']['value'] = new_alarm_config['threshold_value']
- #Find vrops metric key from metric_name, if required
- """
- if new_alarm_config.has_key('metric_name') and new_alarm_config['metric_name'] is not None:
- metric_key_params = self.get_default_Params(new_alarm_config['metric_name'])
- if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(config_dict['metric_name']))
- return None
- symptom_details['state']['condition']['key'] = metric_key_params['metric_key']
- """
- self.logger.info("Fetched Symptom details : {}".format(symptom_details))
-
- api_url = '/suite-api/api/symptomdefinitions'
- headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
- data = json.dumps(symptom_details)
- resp = requests.put(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- headers=headers,
- verify = False,
- data=data)
-
- if resp.status_code != 200:
- self.logger.warn("Failed to update Symptom definition: {}, response {}"\
- .format(symptom_uuid, resp.content))
- return None
-
-
- if symptom_uuid is not None:
- self.logger.info("Symptom defination updated {} for alarm: {}"\
- .format(symptom_uuid, new_alarm_config['alarm_uuid']))
- return symptom_uuid
- else:
- self.logger.warn("Failed to update Symptom Defination {} for : {}"\
- .format(symptom_uuid, new_alarm_config['alarm_uuid']))
- return None
-
-
- def get_symptom_defination_details(self, symptom_uuid):
- """Get symptom defination details
- """
- symptom_details = {}
- if symptom_uuid is None:
- self.logger.warn("get_symptom_defination_details: Symptom UUID not provided")
- return None
-
- api_url = '/suite-api/api/symptomdefinitions/'
- headers = {'Accept': 'application/json'}
-
- resp = requests.get(self.vrops_site + api_url + symptom_uuid,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Symptom defination not found {} \nResponse code:{}\nResponse Content: {}"\
- .format(symptom_uuid, resp.status_code, resp.content))
- return None
-
- symptom_details = json.loads(resp.content)
- #print "New symptom Details: {}".format(symptom_details)
- return symptom_details
-
-
- def reconfigure_alarm(self, alarm_details_json, new_alarm_config):
- """Reconfigure alarm defination as per input
- """
- if new_alarm_config.has_key('severity') and new_alarm_config['severity'] is not None:
- alarm_details_json['states'][0]['severity'] = new_alarm_config['severity']
- if new_alarm_config.has_key('description') and new_alarm_config['description'] is not None:
- alarm_details_json['description'] = new_alarm_config['description']
-
- api_url = '/suite-api/api/alertdefinitions'
- headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
- data = json.dumps(alarm_details_json)
- resp = requests.put(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- headers=headers,
- verify = False,
- data=data)
-
- if resp.status_code != 200:
- self.logger.warn("Failed to create Symptom definition: {}, response code {}, response content: {}"\
- .format(symptom_uuid, resp.status_code, resp.content))
- return None
- else:
- parsed_alarm_details = json.loads(resp.content)
- alarm_def_uuid = parsed_alarm_details['id'].split('-', 1)[1]
- self.logger.info("Successfully updated Alarm defination: {}".format(alarm_def_uuid))
- return alarm_def_uuid
-
- def delete_alarm_configuration(self, delete_alarm_req_dict):
- """Delete complete alarm configuration
- """
- if delete_alarm_req_dict['alarm_uuid'] is None:
- self.logger.info("delete_alarm_configuration: Alarm UUID not provided")
- return None
- #1)Get alarm & symptom defination details
- alarm_details_json, alarm_details = self.get_alarm_defination_details(delete_alarm_req_dict['alarm_uuid'])
- if alarm_details is None or alarm_details_json is None:
- return None
-
- #2) Delete alarm notfication
- rule_id = self.delete_notification_rule(alarm_details['alarm_name'])
- if rule_id is None:
- return None
-
- #3) Delete alarm configuraion
- alarm_id = self.delete_alarm_defination(alarm_details['alarm_id'])
- if alarm_id is None:
- return None
-
- #4) Delete alarm symptom
- symptom_id = self.delete_symptom_definition(alarm_details['symptom_definition_id'])
- if symptom_id is None:
- return None
- else:
- self.logger.info("Completed deleting alarm configuration: {}"\
- .format(delete_alarm_req_dict['alarm_uuid']))
- return delete_alarm_req_dict['alarm_uuid']
-
- def delete_notification_rule(self, alarm_name):
- """Deleted notification rule defined for a particular alarm
- """
- rule_id = self.get_notification_rule_id_by_alarm_name(alarm_name)
- if rule_id is None:
- return None
- else:
- api_url = '/suite-api/api/notifications/rules/'
- headers = {'Accept':'application/json'}
- resp = requests.delete(self.vrops_site + api_url + rule_id,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
- if resp.status_code is not 204:
- self.logger.warn("Failed to delete notification rules for {}".format(alarm_name))
- return None
- else:
- self.logger.info("Deleted notification rules for {}".format(alarm_name))
- return rule_id
-
- def get_notification_rule_id_by_alarm_name(self, alarm_name):
- """Find created Alarm notification rule id by alarm name
- """
- alarm_notify_id = 'notify_' + alarm_name
- api_url = '/suite-api/api/notifications/rules'
- headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
- resp = requests.get(self.vrops_site + api_url,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Failed to get notification rules details for {}"\
- .format(delete_alarm_req_dict['alarm_name']))
- return None
-
- notifications = json.loads(resp.content)
- if notifications is not None and notifications.has_key('notification-rule'):
- notifications_list = notifications['notification-rule']
- for dict in notifications_list:
- if dict['name'] is not None and dict['name'] == alarm_notify_id:
- notification_id = dict['id']
- self.logger.info("Found Notification id to be deleted: {} for {}"\
- .format(notification_id, alarm_name))
- return notification_id
-
- self.logger.warn("Notification id to be deleted not found for {}"\
- .format(notification_id, alarm_name))
- return None
-
- def delete_alarm_defination(self, alarm_id):
- """Delete created Alarm defination
- """
- api_url = '/suite-api/api/alertdefinitions/'
- headers = {'Accept':'application/json'}
- resp = requests.delete(self.vrops_site + api_url + alarm_id,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
- if resp.status_code is not 204:
- self.logger.warn("Failed to delete alarm definition {}".format(alarm_id))
- return None
- else:
- self.logger.info("Deleted alarm definition {}".format(alarm_id))
- return alarm_id
-
- def delete_symptom_definition(self, symptom_id):
- """Delete symptom defination
- """
- api_url = '/suite-api/api/symptomdefinitions/'
- headers = {'Accept':'application/json'}
- resp = requests.delete(self.vrops_site + api_url + symptom_id,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
- if resp.status_code is not 204:
- self.logger.warn("Failed to delete symptom definition {}".format(symptom_id))
- return None
- else:
- self.logger.info("Deleted symptom definition {}".format(symptom_id))
- return symptom_id
-
-
- def verify_metric_support(self, metric_info):
- """Verify, if Metric is supported by vROPs plugin, verify metric unit & return status
- Returns:
- status: True if supported, False if not supported
- """
- status = False
- if 'metric_name' not in metric_info:
- self.logger.debug("Metric name not provided: {}".format(metric_info))
- return status
- metric_key_params = self.get_default_Params(metric_info['metric_name'])
- if not metric_key_params:
- self.logger.warn("Metric not supported: {}".format(metric_info['metric_name']))
- return status
- else:
- #If Metric is supported, verify optional metric unit & return status
- if 'metric_unit' in metric_info:
- if metric_key_params.get('unit') == metric_info['metric_unit']:
- self.logger.info("Metric is supported with unit: {}".format(metric_info['metric_name']))
- status = True
- else:
- self.logger.debug("Metric supported but there is unit mismatch for: {}."\
- "Supported unit: {}"\
- .format(metric_info['metric_name'],metric_key_params['unit']))
- status = True
- return status
-
- def get_triggered_alarms_list(self, list_alarm_input):
- """Get list of triggered alarms on a resource based on alarm input request.
- """
- #TO Do - Need to add filtering of alarms based on Severity & alarm name
-
- triggered_alarms_list = []
- if list_alarm_input['resource_uuid'] is None:
- return triggered_alarms_list
-
- #1)Find vROPs resource ID using RO resource UUID
- vrops_resource_id = self.get_vrops_resourceid_from_ro_uuid(list_alarm_input['resource_uuid'])
- if vrops_resource_id is None:
- return triggered_alarms_list
-
- #2)Get triggered alarms on particular resource
- triggered_alarms_list = self.get_triggered_alarms_on_resource(list_alarm_input['resource_uuid'], vrops_resource_id)
- return triggered_alarms_list
-
- def get_vrops_resourceid_from_ro_uuid(self, ro_resource_uuid):
- """Fetch vROPs resource ID using resource UUID from RO/SO
- """
- #1) Find vm_moref_id from vApp uuid in vCD
- vm_moref_id = self.get_vm_moref_id(ro_resource_uuid)
- if vm_moref_id is None:
- self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(ro_resource_uuid))
- return None
-
- #2) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
- vrops_resource_id = self.get_vm_resource_id(vm_moref_id)
- if vrops_resource_id is None:
- self.logger.warn("Failed to find resource in vROPs: {}".format(ro_resource_uuid))
- return None
- return vrops_resource_id
-
-
- def get_triggered_alarms_on_resource(self, ro_resource_uuid, vrops_resource_id):
- """Get triggered alarms on particular resource & return list of dictionary of alarms
- """
- resource_alarms = []
- api_url = '/suite-api/api/alerts?resourceId='
- headers = {'Accept':'application/json'}
- resp = requests.get(self.vrops_site + api_url + vrops_resource_id,
- auth=(self.vrops_user, self.vrops_password),
- verify = False, headers = headers)
-
- if resp.status_code is not 200:
- self.logger.warn("Failed to get notification rules details for {}"\
- .format(delete_alarm_req_dict['alarm_name']))
- return None
-
- all_alerts = json.loads(resp.content)
- if all_alerts.has_key('alerts'):
- if not all_alerts['alerts']:
- self.logger.info("No alarms present on resource {}".format(ro_resource_uuid))
- return resource_alarms
- all_alerts_list = all_alerts['alerts']
- for alarm in all_alerts_list:
- #self.logger.info("Triggered Alarm {}".format(alarm))
- if alarm['alertDefinitionName'] is not None and\
- len(alarm['alertDefinitionName'].split('-', 1)) == 2:
- if alarm['alertDefinitionName'].split('-', 1)[1] == ro_resource_uuid:
- alarm_instance = {}
- alarm_instance['alarm_uuid'] = alarm['alertDefinitionId'].split('-', 1)[1]
- alarm_instance['resource_uuid'] = ro_resource_uuid
- alarm_instance['alarm_instance_uuid'] = alarm['alertId']
- alarm_instance['vim_type'] = 'VMware'
- #find severity of alarm
- severity = None
- for key,value in severity_mano2vrops.iteritems():
- if value == alarm['alertLevel']:
- severity = key
- if severity is None:
- severity = 'INDETERMINATE'
- alarm_instance['severity'] = severity
- alarm_instance['status'] = alarm['status']
- alarm_instance['start_date'] = self.convert_date_time(alarm['startTimeUTC'])
- alarm_instance['update_date'] = self.convert_date_time(alarm['updateTimeUTC'])
- alarm_instance['cancel_date'] = self.convert_date_time(alarm['cancelTimeUTC'])
- self.logger.info("Triggered Alarm on resource {}".format(alarm_instance))
- resource_alarms.append(alarm_instance)
- if not resource_alarms:
- self.logger.info("No alarms present on resource {}".format(ro_resource_uuid))
- return resource_alarms
-
- def convert_date_time(self, date_time):
- """Convert the input UTC time in msec to OSM date time format
- """
- date_time_formatted = '0000-00-00T00:00:00'
- if date_time != 0:
- complete_datetime = datetime.datetime.fromtimestamp(date_time/1000.0).isoformat('T')
- date_time_formatted = complete_datetime.split('.',1)[0]
- return date_time_formatted
-
-
+++ /dev/null
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
-
-"""
-Montoring plugin receiver that consumes the request messages &
-responds using producer for vROPs
-"""
-
-import sys
-from mon_plugin_vrops import MonPlugin
-from kafka_consumer_vrops import vROP_KafkaConsumer
-#Core producer
-sys.path.append("../../core/message_bus")
-from producer import KafkaProducer
-#from core.message_bus.producer import KafkaProducer
-import json
-import logging
-import traceback
-import os
-from xml.etree import ElementTree as XmlElementTree
-
-req_config_params = ('vrops_site', 'vrops_user', 'vrops_password',
- 'vcloud-site','admin_username','admin_password',
- 'vcenter_ip','vcenter_port','vcenter_user','vcenter_password',
- 'vim_tenant_name','orgname','tenant_id')
-MODULE_DIR = os.path.dirname(__file__)
-CONFIG_FILE_NAME = 'vrops_config.xml'
-CONFIG_FILE_PATH = os.path.join(MODULE_DIR, CONFIG_FILE_NAME)
-
-def set_logger():
- """Set Logger
- """
- BASE_DIR = os.path.dirname(os.path.dirname(__file__))
- logger = logging.getLogger()
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- handler = logging.FileHandler(os.path.join(BASE_DIR,"mon_vrops_log.log"))
- handler.setFormatter(formatter)
- logger.addHandler(handler)
-
-
-class PluginReceiver():
- """MON Plugin receiver receiving request messages & responding using producer for vROPs
- telemetry plugin
- """
- def __init__(self):
- """Constructor of PluginReceiver
- """
-
- topics = ['alarm_request', 'metric_request', 'access_credentials']
-
- self.logger = logging.getLogger('PluginReceiver')
- self.logger.setLevel(logging.DEBUG)
-
- #To Do - Add broker uri
- broker_uri = None
- #self.mon_plugin = MonPlugin()
- self.consumer = vROP_KafkaConsumer(topics, broker_uri)
- #Core producer
- self.producer_alarms = KafkaProducer('alarm_response')
- self.producer_metrics = KafkaProducer('metric_response')
- self.producer_access_credentials = KafkaProducer('vim_access_credentials_response')
-
-
- def consume(self):
- """Consume the message, act on it & respond
- """
- try:
- for message in self.consumer.vrops_consumer:
- vim_type = None
- self.logger.info("Message received:\nTopic={}:{}:{}:\nKey={}\nValue={}"\
- .format(message.topic, message.partition, message.offset, message.key, message.value))
- message_values = json.loads(message.value)
- if message_values.has_key('vim_type') and message_values['vim_type'] is not None:
- vim_type = message_values['vim_type'].lower()
- if vim_type == 'vmware':
- self.logger.info("Action required for: {}".format(message.topic))
- if message.topic == 'alarm_request':
- if message.key == "create_alarm_request":
- config_alarm_info = json.loads(message.value)
- alarm_uuid = self.create_alarm(config_alarm_info['alarm_create_request'])
- self.logger.info("Alarm created with alarm uuid: {}".format(alarm_uuid))
- #Publish message using producer
- self.publish_create_alarm_status(alarm_uuid, config_alarm_info)
- elif message.key == "update_alarm_request":
- update_alarm_info = json.loads(message.value)
- alarm_uuid = self.update_alarm(update_alarm_info['alarm_update_request'])
- self.logger.info("Alarm defination updated : alarm uuid: {}".format(alarm_uuid))
- #Publish message using producer
- self.publish_update_alarm_status(alarm_uuid, update_alarm_info)
- elif message.key == "delete_alarm_request":
- delete_alarm_info = json.loads(message.value)
- alarm_uuid = self.delete_alarm(delete_alarm_info['alarm_delete_request'])
- self.logger.info("Alarm defination deleted : alarm uuid: {}".format(alarm_uuid))
- #Publish message using producer
- self.publish_delete_alarm_status(alarm_uuid, delete_alarm_info)
- elif message.key == "list_alarm_request":
- request_input = json.loads(message.value)
- triggered_alarm_list = self.list_alarms(request_input['alarm_list_request'])
- #Publish message using producer
- self.publish_list_alarm_response(triggered_alarm_list, request_input)
- elif message.topic == 'metric_request':
- if message.key == "read_metric_data_request":
- metric_request_info = json.loads(message.value)
- mon_plugin_obj = MonPlugin()
- metrics_data = mon_plugin_obj.get_metrics_data(metric_request_info)
- self.logger.info("Collected Metrics Data: {}".format(metrics_data))
- #Publish message using producer
- self.publish_metrics_data_status(metrics_data)
- elif message.key == "create_metric_request":
- metric_info = json.loads(message.value)
- metric_status = self.verify_metric(metric_info['metric_create'])
- #Publish message using producer
- self.publish_create_metric_response(metric_info, metric_status)
- elif message.key == "update_metric_request":
- metric_info = json.loads(message.value)
- metric_status = self.verify_metric(metric_info['metric_create'])
- #Publish message using producer
- self.publish_update_metric_response(metric_info, metric_status)
- elif message.key == "delete_metric_request":
- metric_info = json.loads(message.value)
- #Deleting Metric Data is not allowed. Publish status as False
- self.logger.warn("Deleting Metric is not allowed: {}".format(metric_info['metric_name']))
- #Publish message using producer
- self.publish_delete_metric_response(metric_info)
- elif message.topic == 'access_credentials':
- if message.key == "vim_access_credentials":
- access_info = json.loads(message.value)
- access_update_status = self.update_access_credentials(access_info['access_config'])
- self.publish_access_update_response(access_update_status, access_info)
-
- except:
- self.logger.error("Exception in receiver: {}".format(traceback.format_exc()))
-
-
- def create_alarm(self, config_alarm_info):
- """Create alarm using vROPs plugin
- """
- mon_plugin = MonPlugin()
- plugin_uuid = mon_plugin.configure_rest_plugin()
- alarm_uuid = mon_plugin.configure_alarm(config_alarm_info)
- return alarm_uuid
-
- def publish_create_alarm_status(self, alarm_uuid, config_alarm_info):
- """Publish create alarm status using producer
- """
- topic = 'alarm_response'
- msg_key = 'create_alarm_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"create_alarm_response",
- "alarm_create_response":
- {"correlation_id":config_alarm_info["alarm_create_request"]["correlation_id"],
- "alarm_uuid":alarm_uuid,
- "status": True if alarm_uuid else False
- }
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
- def update_alarm(self, update_alarm_info):
- """Updare already created alarm
- """
- mon_plugin = MonPlugin()
- alarm_uuid = mon_plugin.update_alarm_configuration(update_alarm_info)
- return alarm_uuid
-
- def publish_update_alarm_status(self, alarm_uuid, update_alarm_info):
- """Publish update alarm status requests using producer
- """
- topic = 'alarm_response'
- msg_key = 'update_alarm_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"update_alarm_response",
- "alarm_update_response":
- {"correlation_id":update_alarm_info["alarm_update_request"]["correlation_id"],
- "alarm_uuid":alarm_uuid,
- "status": True if alarm_uuid else False
- }
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
- def delete_alarm(self, delete_alarm_info):
- """Delete alarm configuration
- """
- mon_plugin = MonPlugin()
- alarm_uuid = mon_plugin.delete_alarm_configuration(delete_alarm_info)
- return alarm_uuid
-
- def publish_delete_alarm_status(self, alarm_uuid, delete_alarm_info):
- """Publish update alarm status requests using producer
- """
- topic = 'alarm_response'
- msg_key = 'delete_alarm_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"delete_alarm_response",
- "alarm_deletion_response":
- {"correlation_id":delete_alarm_info["alarm_delete_request"]["correlation_id"],
- "alarm_uuid":alarm_uuid,
- "status": True if alarm_uuid else False
- }
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
-
- def publish_metrics_data_status(self, metrics_data):
- """Publish the requested metric data using producer
- """
- topic = 'metric_response'
- msg_key = 'read_metric_data_response'
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, metrics_data))
- #Core producer
- self.producer_metrics.publish(key=msg_key, value=json.dumps(metrics_data), topic=topic)
-
-
- def verify_metric(self, metric_info):
- """Verify if metric is supported or not
- """
- mon_plugin = MonPlugin()
- metric_key_status = mon_plugin.verify_metric_support(metric_info)
- return metric_key_status
-
- def publish_create_metric_response(self, metric_info, metric_status):
- """Publish create metric response
- """
- topic = 'metric_response'
- msg_key = 'create_metric_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"create_metric_response",
- "correlation_id":metric_info['correlation_id'],
- "metric_create_response":
- {
- "metric_uuid":0,
- "resource_uuid":metric_info['metric_create']['resource_uuid'],
- "status":metric_status
- }
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
- def publish_update_metric_response(self, metric_info, metric_status):
- """Publish update metric response
- """
- topic = 'metric_response'
- msg_key = 'update_metric_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"metric_update_response",
- "correlation_id":metric_info['correlation_id'],
- "metric_update_response":
- {
- "metric_uuid":0,
- "resource_uuid":metric_info['metric_create']['resource_uuid'],
- "status":metric_status
- }
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
- def publish_delete_metric_response(self, metric_info):
- """
- """
- topic = 'metric_response'
- msg_key = 'delete_metric_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"delete_metric_response",
- "correlation_id":metric_info['correlation_id'],
- "metric_name":metric_info['metric_name'],
- "metric_uuid":0,
- "resource_uuid":metric_info['resource_uuid'],
- "tenant_uuid":metric_info['tenant_uuid'],
- "status":False
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
- def list_alarms(self, list_alarm_input):
- """Collect list of triggered alarms based on input
- """
- mon_plugin = MonPlugin()
- triggered_alarms = mon_plugin.get_triggered_alarms_list(list_alarm_input)
- return triggered_alarms
-
-
- def publish_list_alarm_response(self, triggered_alarm_list, list_alarm_input):
- """Publish list of triggered alarms
- """
- topic = 'alarm_response'
- msg_key = 'list_alarm_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"list_alarm_response",
- "correlation_id":list_alarm_input['alarm_list_request']['correlation_id'],
- #"resource_uuid":list_alarm_input['alarm_list_request']['resource_uuid'],
- "list_alarm_resp":triggered_alarm_list
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core producer
- self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
-
- def update_access_credentials(self, access_info):
- """Verify if all the required access config params are provided and
- updates access config in default vrops config file
- """
- update_status = False
- wr_status = False
- #Check if all the required config params are passed in request
- if not all (keys in access_info for keys in req_config_params):
- self.logger.debug("All required Access Config Parameters not provided")
- self.logger.debug("List of required Access Config Parameters: {}".format(req_config_params))
- self.logger.debug("List of given Access Config Parameters: {}".format(access_info))
- return update_status
-
- wr_status = self.write_access_config(access_info)
- return wr_status #True/False
-
- def write_access_config(self, access_info):
- """Write access configuration to vROPs config file.
- """
- wr_status = False
- try:
- tree = XmlElementTree.parse(CONFIG_FILE_PATH)
- root = tree.getroot()
- alarmParams = {}
- for config in root:
- if config.tag == 'Access_Config':
- for param in config:
- for key,val in access_info.iteritems():
- if param.tag == key:
- #print param.tag, val
- param.text = val
-
- tree.write(CONFIG_FILE_PATH)
- wr_status = True
- except Exception as exp:
- self.logger.warn("Failed to update Access Config Parameters: {}".format(exp))
-
- return wr_status
-
-
- def publish_access_update_response(self, access_update_status, access_info_req):
- """Publish access update response
- """
- topic = 'access_credentials'
- msg_key = 'vim_access_credentials_response'
- response_msg = {"schema_version":1.0,
- "schema_type":"vim_access_credentials_response",
- "correlation_id":access_info_req['access_config']['correlation_id'],
- "status":access_update_status
- }
- self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
- .format(topic, msg_key, response_msg))
- #Core Add producer
- self.producer_access_credentials.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
-
-def main():
- #log.basicConfig(filename='mon_vrops_log.log',level=log.DEBUG)
- set_logger()
- plugin_rcvr = PluginReceiver()
- plugin_rcvr.consume()
-
-if __name__ == "__main__":
- main()
-
+++ /dev/null
-#!/usr/bin/env bash
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
-
-BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-SSL_Cert_Dir="${BASEDIR}/SSL_certificate"
-THISHOST=$(hostname -f)
-Domain_Name="${THISHOST}"
-#Domain_Name="www.vrops_webservice.com"
-WebServiceFile="${BASEDIR}/vrops_webservice"
-
-echo '
- #################################################################
- ##### Installing Require Packages #####
- #################################################################'
-
-#Function to install packages using apt-get
-function install_packages(){
- [ -x /usr/bin/apt-get ] && apt-get install -y $*
-
- #check properly installed
- for PACKAGE in $*
- do
- PACKAGE_INSTALLED="no"
- [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes"
- if [ "$PACKAGE_INSTALLED" = "no" ]
- then
- echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
- exit 1
- fi
- done
- }
-
-apt-get update # To get the latest package lists
-
-[ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-jsonschema python-requests libxml2-dev libxslt-dev python-dev python-pip openssl"
-[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-jsonschema python-requests libxslt-devel libxml2-devel python-devel python-pip openssl"
-#The only way to install python-bottle on Centos7 is with easy_install or pip
-[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
-
-#required for vmware connector TODO move that to separete opt in install script
-sudo pip install --upgrade pip
-sudo pip install cherrypy
-
-echo '
- #################################################################
- ##### Genrate SSL Certificate #####
- #################################################################'
-#Create SSL Certifcate folder and file
-mkdir "${SSL_Cert_Dir}"
-
-openssl genrsa -out "${SSL_Cert_Dir}/${Domain_Name}".key 2048
-openssl req -new -x509 -key "${SSL_Cert_Dir}/${Domain_Name}".key -out "${SSL_Cert_Dir}/${Domain_Name}".cert -days 3650 -subj /CN="${Domain_Name}"
-
-echo '
- #################################################################
- ##### Start Web Service #####
- #################################################################'
-
-nohup python "${WebServiceFile}" &
-
-echo '
- #################################################################
- ##### Done #####
- #################################################################'
-
-
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
-
-"""
- Webservice for vRealize Operations (vROPs) to post/notify alarms details.
-
-"""
-__author__ = "Arpita Kate"
-__date__ = "$15-Sept-2017 16:09:29$"
-__version__ = '0.1'
-
-
-from bottle import (ServerAdapter, route, run, server_names, redirect, default_app,
- request, response, template, debug, TEMPLATE_PATH , static_file)
-from socket import getfqdn
-from datetime import datetime
-from xml.etree import ElementTree as ET
-import logging
-import os
-import json
-import sys
-import requests
-sys.path.append("../../../core/message_bus")
-from producer import KafkaProducer
-#from core.message_bus.producer import KafkaProducer
-
-try:
- from cheroot.wsgi import Server as WSGIServer
- from cheroot.ssl.pyopenssl import pyOpenSSLAdapter
-except ImportError:
- from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer
- from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
-
-#Set Constants
-BASE_DIR = os.path.dirname(os.path.dirname(__file__))
-CERT_DIR = os.path.join(BASE_DIR, "SSL_certificate")
-certificate_name = getfqdn() + ".cert"
-key_name = getfqdn() + ".key"
-CERTIFICATE = os.path.join(CERT_DIR, certificate_name)
-KEY = os.path.join(CERT_DIR, key_name)
-#CERTIFICATE = os.path.join(CERT_DIR, "www.vrops_webservice.com.cert")
-#KEY = os.path.join(CERT_DIR, "www.vrops_webservice.com.key")
-CONFIG_FILE = os.path.join(BASE_DIR, '../vrops_config.xml')
-#Severity Mapping from vROPs to OSM
-VROPS_SEVERITY_TO_OSM_MAPPING = {
- "ALERT_CRITICALITY_LEVEL_CRITICAL":"CRITICAL",
- "ALERT_CRITICALITY_LEVEL_WARNING":"WARNING",
- "ALERT_CRITICALITY_LEVEL_IMMEDIATE":"MAJOR",
- "ALERT_CRITICALITY_LEVEL_INFO":"INDETERMINATE",
- "ALERT_CRITICALITY_LEVEL_AUTO":"INDETERMINATE",
- "ALERT_CRITICALITY_LEVEL_UNKNOWN":"INDETERMINATE",
- "ALERT_CRITICALITY_LEVEL_NONE":"INDETERMINATE"
- }
-
-#Set logger
-logger = logging.getLogger('vROPs_Webservice')
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-hdlr = logging.FileHandler(os.path.join(BASE_DIR,"vrops_webservice.log"))
-hdlr.setFormatter(formatter)
-logger.addHandler(hdlr)
-logger.setLevel(logging.DEBUG)
-
-
-def format_datetime(str_date):
- """
- Method to format datetime
- Args:
- str_date - datetime string
- Returns:
- formated datetime
- """
- date_fromat = "%Y-%m-%dT%H:%M:%S"
- formated_datetime = None
- try:
- datetime_obj = datetime.fromtimestamp(float(str_date)/1000.)
- formated_datetime = datetime_obj.strftime(date_fromat)
- except Exception as exp:
- logger.error('Exception: {} occured while converting date {} into format {}'.format(
- exp,str_date, date_fromat))
-
- return formated_datetime
-
-def get_alarm_config():
- """
- Method to get configuration parameters
- Args:
- None
- Returns:
- dictionary of config parameters
- """
- alarm_config = {}
- try:
- xml_content = ET.parse(CONFIG_FILE)
- alarms = xml_content.getroot()
- for alarm in alarms:
- if alarm.tag == 'Access_Config':
- for param in alarm:
- alarm_config[param.tag] = param.text
- except Exception as exp:
- logger.error('Exception: {} occured while parsing config file.'.format(exp))
-
- return alarm_config
-
-def get_alarm_definationID(alarm_uuid):
- """
- Method to get alarm/alert defination ID
- Args:
- alarm_uuid : UUID of alarm
- Returns:
- alarm defination ID
- """
- alarm_definationID = None
- if alarm_uuid :
- try:
- access_config = get_alarm_config()
- headers = {'Accept': 'application/json'}
- api_url = '{}/suite-api/api/alerts/{}'.format(access_config.get('vrops_site'), alarm_uuid)
- api_response = requests.get(
- api_url,
- auth=(access_config.get('vrops_user'), access_config.get('vrops_password')),
- verify = False, headers = headers
- )
-
- if api_response.status_code == 200:
- data = api_response.json()
- if data.get("alertDefinitionId") is not None:
- alarm_definationID = '-'.join(data.get("alertDefinitionId").split('-')[1:])
- else:
- logger.error("Failed to get alert definition ID for alarm {}".format(alarm_uuid))
- except Exception as exp:
- logger.error( "Exception occured while getting alert definition ID for alarm : {}".format(exp, alarm_uuid))
-
- return alarm_definationID
-
-
-@route('/notify/<alarmID>', method='POST')
-def notify_alarm(alarmID):
- """
- Method notify alarm details by publishing message at Kafka message bus
- Args:
- alarmID - Name of alarm
- Returns:
- response code
- """
- logger.info("Request:{} from:{} {} {} ".format(request, request.remote_addr, request.method, request.url))
- response.headers['Content-Type'] = 'application/json'
- try:
- postdata = json.loads(request.body.read())
- notify_details = {}
- alaram_config = get_alarm_config()
- #Parse noditfy data
- notify_details['alarm_uuid'] = get_alarm_definationID(postdata.get('alertId'))
- notify_details['description'] = postdata.get('info')
- notify_details['alarm_instance_uuid'] = alarmID
- notify_details['resource_uuid'] = '-'.join(postdata.get('alertName').split('-')[1:])
- notify_details['tenant_uuid'] = alaram_config.get('tenant_id')
- notify_details['vim_type'] = "VMware"
- notify_details['severity'] = VROPS_SEVERITY_TO_OSM_MAPPING.get(postdata.get('criticality'), 'INDETERMINATE')
- notify_details['status'] = postdata.get('status')
- if postdata.get('startDate'):
- notify_details['start_date_time'] = format_datetime(postdata.get('startDate'))
- if postdata.get('updateDate'):
- notify_details['update_date_time'] = format_datetime(postdata.get('updateDate'))
- if postdata.get('cancelDate'):
- notify_details['cancel_date_time'] = format_datetime(postdata.get('cancelDate'))
-
- alarm_details = {'schema_version': 1.0,
- 'schema_type': "notify_alarm",
- 'notify_details': notify_details
- }
- alarm_data = json.dumps(alarm_details)
- logger.info("Alarm details: {}".format(alarm_data))
-
- #Publish Alarm details
- kafkaMsgProducer = KafkaProducer()
- kafkaMsgProducer.publish(topic='alarm_response', key='notify_alarm', value=alarm_data)
-
- #return 201 on Success
- response.status = 201
-
- except Exception as exp:
- logger.error('Exception: {} occured while notifying alarm {}.'.format(exp, alarmID))
- #return 500 on Error
- response.status = 500
-
- return response
-
-
-class SSLWebServer(ServerAdapter):
- """
- CherryPy web server with SSL support.
- """
-
- def run(self, handler):
- """
- Runs a CherryPy Server using the SSL certificate.
- """
- server = WSGIServer((self.host, self.port), handler)
- server.ssl_adapter = pyOpenSSLAdapter(
- certificate=CERTIFICATE,
- private_key=KEY,
- # certificate_chain="intermediate_cert.crt"
- )
-
- try:
- server.start()
- logger.info("Started vROPs Web Service")
- except Exception as exp:
- server.stop()
- logger.error("Exception: {} Stopped vROPs Web Service".format(exp))
-
-
-if __name__ == "__main__":
- #Start SSL Web Service
- logger.info("Start vROPs Web Service")
- app = default_app()
- server_names['sslwebserver'] = SSLWebServer
- run(app=app,host=getfqdn(), port=8080, server='sslwebserver')
-
-
-
+++ /dev/null
-<!--
-##
-# Copyright 2016-2017 VMware Inc.
-# This file is part of ETSI OSM
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: osslegalrouting@vmware.com
-##
--->
-<alarmsDefaultConfig>
- <Average_Memory_Usage_Above_Threshold>
- <vrops_alarm>Avg_Mem_Usage_Above_Thr</vrops_alarm>
- <period>300</period>
- <evaluation>2</evaluation>
- <cancel_period>300</cancel_period>
- <cancel_cycles>2</cancel_cycles>
- <enabled>true</enabled>
- <repeat>false</repeat>
- <action>acknowledge</action>
- <resource_kind>VirtualMachine</resource_kind>
- <adapter_kind>VMWARE</adapter_kind>
- <alarm_type>16</alarm_type>
- <alarm_subType>19</alarm_subType>
- <impact>risk</impact>
- <unit>%</unit>
- </Average_Memory_Usage_Above_Threshold>
- <Read_Latency_Above_Threshold>
- <vrops_alarm>RD_Latency_Above_Thr</vrops_alarm>
- <period>300</period>
- <evaluation>3</evaluation>
- <cancel_period>300</cancel_period>
- <cancel_cycles>3</cancel_cycles>
- <enabled>true</enabled>
- <repeat>false</repeat>
- <action>acknowledge</action>
- <resource_kind>VirtualMachine</resource_kind>
- <adapter_kind>VMWARE</adapter_kind>
- <alarm_type>18</alarm_type>
- <alarm_subType>19</alarm_subType>
- <impact>risk</impact>
- <unit>msec</unit>
- </Read_Latency_Above_Threshold>
- <Write_Latency_Above_Threshold>
- <vrops_alarm>WR_Latency_Above_Thr</vrops_alarm>
- <period>300</period>
- <evaluation>3</evaluation>
- <cancel_period>300</cancel_period>
- <cancel_cycles>3</cancel_cycles>
- <enabled>true</enabled>
- <repeat>false</repeat>
- <action>acknowledge</action>
- <resource_kind>VirtualMachine</resource_kind>
- <adapter_kind>VMWARE</adapter_kind>
- <alarm_type>18</alarm_type>
- <alarm_subType>19</alarm_subType>
- <impact>risk</impact>
- <unit>msec</unit>
- </Write_Latency_Above_Threshold>
- <Net_Packets_Dropped>
- <vrops_alarm>Net_Packets_Dropped</vrops_alarm>
- <period>300</period>
- <evaluation>1</evaluation>
- <cancel_period>300</cancel_period>
- <cancel_cycles>1</cancel_cycles>
- <enabled>true</enabled>
- <repeat>false</repeat>
- <action>acknowledge</action>
- <resource_kind>VirtualMachine</resource_kind>
- <adapter_kind>VMWARE</adapter_kind>
- <alarm_type>19</alarm_type>
- <alarm_subType>19</alarm_subType>
- <impact>risk</impact>
- <unit>nos</unit>
- </Net_Packets_Dropped>
- <CPU_Utilization_Above_Threshold>
- <vrops_alarm>CPU_Utilization_Above_Thr</vrops_alarm>
- <period>300</period>
- <evaluation>1</evaluation>
- <cancel_period>300</cancel_period>
- <cancel_cycles>1</cancel_cycles>
- <enabled>true</enabled>
- <repeat>false</repeat>
- <action>acknowledge</action>
- <resource_kind>VirtualMachine</resource_kind>
- <adapter_kind>VMWARE</adapter_kind>
- <alarm_type>16</alarm_type>
- <alarm_subType>19</alarm_subType>
- <impact>risk</impact>
- <unit>msec</unit>
- </CPU_Utilization_Above_Threshold>
- <AVERAGE_MEMORY_UTILIZATION>
- <metric_key>mem|usage_average</metric_key>
- <unit>%</unit>
- </AVERAGE_MEMORY_UTILIZATION>
- <CPU_UTILIZATION>
- <metric_key>cpu|usage_average</metric_key>
- <unit>%</unit>
- </CPU_UTILIZATION>
- <READ_LATENCY_0>
- <metric_key>virtualDisk:scsi0:0|totalReadLatency_average</metric_key>
- <unit>msec</unit>
- </READ_LATENCY_0>
- <WRITE_LATENCY_0>
- <metric_key>virtualDisk:scsi0:0|totalWriteLatency_average</metric_key>
- <unit>msec</unit>
- </WRITE_LATENCY_0>
- <READ_LATENCY_1>
- <metric_key>virtualDisk:scsi0:1|totalReadLatency_average</metric_key>
- <unit>msec</unit>
- </READ_LATENCY_1>
- <WRITE_LATENCY_1>
- <metric_key>virtualDisk:scsi0:1|totalWriteLatency_average</metric_key>
- <unit>msec</unit>
- </WRITE_LATENCY_1>
- <PACKETS_DROPPED_0>
- <metric_key>net:4000|dropped</metric_key>
- <unit>nos</unit>
- </PACKETS_DROPPED_0>
- <PACKETS_DROPPED_1>
- <metric_key>net:4001|dropped</metric_key>
- <unit>nos</unit>
- </PACKETS_DROPPED_1>
- <PACKETS_DROPPED_2>
- <metric_key>net:4002|dropped</metric_key>
- <unit>nos</unit>
- </PACKETS_DROPPED_2>
- <PACKETS_RECEIVED>
- <metric_key>net:Aggregate of all instances|packetsRxPerSec</metric_key>
- <unit>nos</unit>
- </PACKETS_RECEIVED>
- <PACKETS_SENT>
- <metric_key>net:Aggregate of all instances|packetsTxPerSec</metric_key>
- <unit>nos</unit>
- </PACKETS_SENT>
- <Access_Config>
- <vrops_site>https://192.169.241.123</vrops_site>
- <vrops_user>Admin</vrops_user>
- <vrops_password>VMware1!</vrops_password>
- <vcloud-site>https://mano-vcd-1.corp.local</vcloud-site>
- <admin_username>administrator</admin_username>
- <admin_password>VMware1!</admin_password>
- <nsx_manager>https://192.169.241.104</nsx_manager>
- <nsx_user>admin</nsx_user>
- <nsx_password>VMware1!</nsx_password>
- <vcenter_ip>192.169.241.103</vcenter_ip>
- <vcenter_port>443</vcenter_port>
- <vcenter_user>administrator@vsphere.local</vcenter_user>
- <vcenter_password>VMware1!</vcenter_password>
- <vim_tenant_name>Org2-VDC-PVDC1</vim_tenant_name>
- <orgname>Org2</orgname>
- <tenant_id>Org2-VDC-PVDC1</tenant_id>
- </Access_Config>
-</alarmsDefaultConfig>
-
-
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#gitkeep file to keep the initial empty directory structure.
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "alarm_ack",
-"vim_type": "AWS",
-"ack_details":
-{
-"alarm_uuid": "CPU_Utilization_i-098da78cbd8304e17",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": ""
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold1",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold",
-"resource_uuid": "i-09462760703837b26",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold2",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "Greaterthan",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold2",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold2",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAX"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold2",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_metrics_request",
-"tenant_uuid": "",
-"correlation_id": "SO123",
-"vim_type": "AWS",
-"metric_create":
-{
-"metric_name": "CPU_UTILIZ",
-"metric_unit": "",
-"resource_uuid": "i-098da78cbd8304e17"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_metrics_request",
-"tenant_uuid": "",
-"correlation_id": "SO123",
-"vim_type": "AWS",
-"metric_create":
-{
-"metric_name": "CPU_UTILIZATION",
-"metric_unit": "",
-"resource_uuid": "i-098da78cbd8304e17"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_alarm_request",
-"vim_type": "AWS",
-"alarm_delete_request":
-{
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e16",
-"correlation_id": "SO123"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_alarm_request",
-"vim_type": "AWS",
-"alarm_delete_request":
-{
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
-"correlation_id": "SO123"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_alarm_request",
-"vim_type": "AWS",
-"alarm_delete_request":
-{
-"alarm_uuid": "CPU_Utilization_Above_Threshold1_i-098da78cbd8304e17",
-"correlation_id": "SO123"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_alarm_request",
-"vim_type": "AWS",
-"alarm_delete_request":
-{
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-09462760703837b26",
-"correlation_id": "SO123"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_alarm_request",
-"vim_type": "AWS",
-"alarm_delete_request":
-{
-"alarm_uuid": "CPU_Utilization_Above_Threshold2_i-098da78cbd8304e17",
-"correlation_id": "SO123"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_alarm_request",
-"vim_type": "AWS",
-"alarm_delete_request":
-{
-"alarm_uuid": "CPU_Utilization_Above_Threshold4_i-098da78cbd8304e17",
-"correlation_id": "SO123"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_metric_data_request",
-"metric_name": "CPU_UTILIATION",
-"metric_uuid": "",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": "",
-"correlation_uuid": "S0123",
-"vim_type": "AWS"
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "delete_metric_data_request",
-"metric_name": "CPU_UTILIZATION",
-"metric_uuid": "",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": "",
-"correlation_uuid": "S0123",
-"vim_type": "AWS"
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "list_alarm_request",
-"vim_type": "AWS",
-"alarm_list_request":
-{
-"correlation_id": "SO123",
-"resource_uuid": "",
-"alarm_name": "",
-"severity": ""
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "list_alarm_request",
-"vim_type": "AWS",
-"alarm_list_request":
-{
-"correlation_id": "SO123",
-"resource_uuid": "i-098da78cbd8304e17",
-"alarm_name": "",
-"severity": ""
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "list_alarm_request",
-"vim_type": "AWS",
-"alarm_list_request":
-{
-"correlation_id": "SO123",
-"resource_uuid": "i-098da78cbd8304e17",
-"alarm_name": "",
-"severity": "Critical"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "list_metrics_request",
-"vim_type": "AWS",
-"metrics_list_request":
-{
-"metric_name": "CPU_UTILZATION",
-"correlation_id": "SO123",
-"resource_uuid": "i-098da78cbd8304e17"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "list_metrics_request",
-"vim_type": "AWS",
-"metrics_list_request":
-{
-"metric_name": "CPU_UTILIZATION",
-"correlation_id": "SO123",
-"resource_uuid": "i-098da78cbd8304e17"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "read_metric_data_request",
-"metric_name": "CPU_UTILIZATION",
-"metric_uuid": "0",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": "",
-"correlation_uuid": "SO123",
-"vim_type":"AWS",
-"collection_period":"3500" ,
-"collection_unit": ""
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "read_metric_data_request",
-"metric_name": "CPU_UTILIZATION",
-"metric_uuid": "0",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": "",
-"correlation_uuid": "SO123",
-"vim_type":"AWS",
-"collection_period":"3600" ,
-"collection_unit": ""
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "read_metric_data_request",
-"metric_name": "CPU_UTLIZATION",
-"metric_uuid": "0",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": "",
-"correlation_uuid": "SO123",
-"vim_type":"AWS",
-"collection_period":"3600" ,
-"collection_unit": ""
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "read_metric_data_request",
-"metric_name": "CPU_UTILIZATION",
-"metric_uuid": "0",
-"resource_uuid": "i-098da78cbd8304e17",
-"tenant_uuid": "",
-"correlation_uuid": "SO123",
-"vim_type":"AWS",
-"collection_period":"3600" ,
-"collection_unit": ""
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "update_alarm_request",
-"vim_type": "AWS",
-"alarm_update_request":
-{
-"correlation_id": "SO123",
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e13",
-"description": "",
-"severity": "Critical",
-"operation": "LE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "update_alarm_request",
-"vim_type": "AWS",
-"alarm_update_request":
-{
-"correlation_id": "SO123",
-"alarm_uuid": "CPU_Utilization_Above_Threshold4_i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "LE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "update_alarm_request",
-"vim_type": "AWS",
-"alarm_update_request":
-{
-"correlation_id": "SO123",
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "Less",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "update_alarm_request",
-"vim_type": "AWS",
-"alarm_update_request":
-{
-"correlation_id": "SO123",
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "LE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "update_alarm_request",
-"vim_type": "AWS",
-"alarm_update_request":
-{
-"correlation_id": "SO123",
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "LE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAX"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "update_alarm_request",
-"vim_type": "AWS",
-"alarm_update_request":
-{
-"correlation_id": "SO123",
-"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "LE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_alarm_request",
-"vim_type": "AWS",
-"alarm_create_request":
-{
-"correlation_id": "SO123",
-"alarm_name": "CPU_Utilization_Above_Threshold4",
-"resource_uuid": "i-098da78cbd8304e17",
-"description": "",
-"severity": "Critical",
-"operation": "GE",
-"threshold_value": 1.5,
-"unit": "",
-"metric_name": "CPU_UTILIZATION",
-"statistic": "MAXIMUM"
-}
-}
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_metrics_request",
-"tenant_uuid": "",
-"correlation_id": "SO123",
-"vim_type": "AWS",
-"metric_create":
-{
-"metric_name": "CPU_UTILIZ",
-"metric_unit": "",
-"resource_uuid": "i-098da78cbd8304e17"
-}
-}
\ No newline at end of file
+++ /dev/null
-{
-"schema_version": "1.0",
-"schema_type": "create_metrics_request",
-"tenant_uuid": "",
-"correlation_id": "SO123",
-"vim_type": "AWS",
-"metric_create":
-{
-"metric_name": "CPU_UTILIZATION",
-"metric_unit": "",
-"resource_uuid": "i-098da78cbd8304e17"
-}
-}
\ No newline at end of file
+++ /dev/null
-from connection import Connection
-import unittest
-import sys
-import jsmin
-import json
-import os
-import time
-from jsmin import jsmin
-sys.path.append("../../test/core/")
-from test_producer import KafkaProducer
-from kafka import KafkaConsumer
-try:
- import boto
- import boto.ec2
- import boto.vpc
- import boto.ec2.cloudwatch
- import boto.ec2.connection
-except:
- exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-
-# Test Producer object to generate request
-
-producer = KafkaProducer('create_alarm_request')
-obj = Connection()
-connections = obj.setEnvironment()
-connections_res = obj.connection_instance()
-cloudwatch_conn = connections_res['cloudwatch_connection']
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-
-'''Test E2E Flow : Test cases has been tested one at a time.
-1) Commom Request is generated using request function in test_producer.py(/test/core)
-2) The request is then consumed by the comsumer (plugin)
-3) The response is sent back on the message bus in plugin_alarm.py using
- response functions in producer.py(/core/message-bus)
-4) The response is then again consumed by the unit_tests_alarms.py
- and the test cases has been applied on the response.
-'''
-
-class config_alarm_name_test(unittest.TestCase):
-
-
- def setUp(self):
- pass
- #To generate a request of testing new alarm name and new instance id in create alarm request
- def test_differentName_differentInstance(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info
- time.sleep(1)
- self.assertTrue(info['alarm_create_response']['status'])
- return
-
- #To generate a request of testing new alarm name and existing instance id in create alarm request
- def test_differentName_sameInstance(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/create_alarm_differentName_sameInstance.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid_delete1.json",'delete_alarm_request','','alarm_request')
- self.assertTrue(info['alarm_create_response']['status'])
- return
-
- #To generate a request of testing existing alarm name and new instance id in create alarm request
- def test_sameName_differentInstance(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/create_alarm_sameName_differentInstance.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid_delete2.json",'delete_alarm_request', '','alarm_request')
- self.assertTrue(info['alarm_create_response']['status'])
- return
-
- #To generate a request of testing existing alarm name and existing instance id in create alarm request
- def test_sameName_sameInstance(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/create_alarm_sameName_sameInstance.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info,"---"
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
- self.assertEqual(info, None)
- return
-
- #To generate a request of testing valid statistics in create alarm request
- def test_statisticValid(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/statistic_valid.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid_delete3.json",'delete_alarm_request', '','alarm_request')
- self.assertTrue(info['alarm_create_response']['status'])
- return
-
- #To generate a request of testing Invalid statistics in create alarm request
- def test_statisticValidNot(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/statistic_invalid.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info,"---"
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid_delete3.json",'delete_alarm_request', '','alarm_request')
- self.assertEqual(info, None)
- return
-
- #To generate a request of testing valid operation in create alarm request
- def test_operationValid(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/operation_valid.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid_delete3.json",'delete_alarm_request', '','alarm_request')
- self.assertTrue(info['alarm_create_response']['status'])
- return
-
- #To generate a request of testing Invalid operation in create alarm request
- def test_operationValidNot(self):
- time.sleep(2)
- producer.request("test_schemas/create_alarm/operation_invalid.json",'create_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "create_alarm_response":
- info = json.loads(json.loads(message.value))
- print info
- time.sleep(1)
- self.assertEqual(info,None)
- return
-
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-class update_alarm_name_test(unittest.TestCase):
-
- #To generate a request of testing valid alarm_id in update alarm request
- def test_nameValid(self):
- producer.request("test_schemas/update_alarm/update_alarm_new_alarm.json",'create_alarm_request', '','alarm_request')
- time.sleep(2)
- producer.request("test_schemas/update_alarm/name_valid.json",'update_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "update_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid_delete4.json",'delete_alarm_request', '','alarm_request')
- self.assertTrue(info['alarm_update_response']['status'])
- return
-
- #To generate a request of testing invalid alarm_id in update alarm request
- def test_nameInvalid(self):
- time.sleep(2)
- producer.request("test_schemas/update_alarm/name_invalid.json",'update_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "update_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(info,None)
- return
-
- #To generate a request of testing valid statistics in update alarm request
- def test_statisticValid(self):
- producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
- time.sleep(2)
- producer.request("test_schemas/update_alarm/statistic_valid.json",'update_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "update_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
- self.assertTrue(info['alarm_update_response']['status'])
- return
-
- #To generate a request of testing Invalid statistics in update alarm request
- def test_statisticInvalid(self):
- time.sleep(2)
- producer.request("test_schemas/update_alarm/statistic_invalid.json",'update_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "update_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(info,None)
- return
-
- #To generate a request of testing valid operation in update alarm request
- def test_operationValid(self):
- producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
- time.sleep(2)
- producer.request("test_schemas/update_alarm/operation_valid.json",'update_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "update_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
- self.assertTrue(info['alarm_update_response']['status'])
- return
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-class delete_alarm_test(unittest.TestCase):
-
- #To generate a request of testing valid alarm_id in delete alarm request
- def test_nameValid(self):
- producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
- time.sleep(2)
- producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "delete_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertTrue(info['alarm_deletion_response']['status'])
- return
-
- #To generate a request of testing Invalid alarm_id in delete alarm request
- def test_nameInvalid(self):
- time.sleep(2)
- producer.request("test_schemas/delete_alarm/name_invalid.json",'delete_alarm_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "delete_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(info,None)
- return
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-class list_alarm_test(unittest.TestCase):
-
- #To generate a request of testing valid input fields in alarm list request
- def test_valid_no_arguments(self):
- time.sleep(2)
- producer.request("test_schemas/list_alarm/list_alarm_valid_no_arguments.json",'alarm_list_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "list_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(type(info),dict)
- return
-
- #To generate a request of testing valid input fields in alarm list request
- def test_valid_one_arguments(self):
- time.sleep(2)
- producer.request("test_schemas/list_alarm/list_alarm_valid_one_arguments.json",'alarm_list_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "list_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(type(info),dict)
- return
-
- #To generate a request of testing valid input fields in alarm list request
- def test_valid_two_arguments(self):
- time.sleep(2)
- producer.request("test_schemas/list_alarm/list_alarm_valid_two_arguments.json",'alarm_list_request', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "list_alarm_response":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(type(info),dict)
- return
-
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-class alarm_details_test(unittest.TestCase):
-
- #To generate a request of testing valid input fields in acknowledge alarm
- def test_Valid(self):
- time.sleep(2)
- producer.request("test_schemas/alarm_details/acknowledge_alarm.json",'acknowledge_alarm', '','alarm_request')
- server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
-
- _consumer = KafkaConsumer(bootstrap_servers=server['server'])
- _consumer.subscribe(['alarm_response'])
-
- for message in _consumer:
- if message.key == "notify_alarm":
- info = json.loads(json.loads(json.loads(message.value)))
- print info
- time.sleep(1)
- self.assertEqual(type(info),dict)
- return
-
-if __name__ == '__main__':
-
- # Saving test reults in Log file
-
- log_file = 'log_file.txt'
- f = open(log_file, "w")
- runner = unittest.TextTestRunner(f)
- unittest.main(testRunner=runner)
- f.close()
-
- # For printing results on Console
- # unittest.main()
+++ /dev/null
-from connection import Connection
-import unittest
-import sys
-import jsmin
-import json
-import os
-import time
-from jsmin import jsmin
-sys.path.append("../../test/core/")
-from test_producer import KafkaProducer
-from kafka import KafkaConsumer
-try:
- import boto
- import boto.ec2
- import boto.vpc
- import boto.ec2.cloudwatch
- import boto.ec2.connection
-except:
- exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-
-# Test Producer object to generate request
-
-producer = KafkaProducer('')
-obj = Connection()
-connections = obj.setEnvironment()
-connections_res = obj.connection_instance()
-cloudwatch_conn = connections_res['cloudwatch_connection']
-
-# Consumer Object to consume response from message bus
-server = {'server': 'localhost:9092', 'topic': 'metric_request'}
-_consumer = KafkaConsumer(bootstrap_servers=server['server'])
-_consumer.subscribe(['metric_response'])
-
-#--------------------------------------------------------------------------------------------------------------------------------------
-
-'''Test E2E Flow : Test cases has been tested one at a time.
-1) Commom Request is generated using request function in test_producer.py(/core/message-bus)
-2) The request is then consumed by the comsumer (plugin)
-3) The response is sent back on the message bus in plugin_metrics.py using
- response functions in producer.py(/core/message-bus)
-4) The response is then again consumed by the unit_tests_metrics.py
- and the test cases has been applied on the response.
-'''
-class test_create_metrics(unittest.TestCase):
-
- def test_status_positive(self):
- time.sleep(2)
- # To generate Request of testing valid meric_name in create metrics requests
- producer.request("create_metrics/create_metric_req_valid.json",'create_metric_request', '','metric_request')
-
- for message in _consumer:
- if message.key == "create_metric_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertTrue(resp['metric_create_response']['status'])
- self.assertEqual(resp['metric_create_response']['metric_uuid'],0)
- return
-
- def test_status_negative(self):
- time.sleep(2)
- # To generate Request of testing invalid meric_name in create metrics requests
- producer.request("create_metrics/create_metric_req_invalid.json",'create_metric_request', '','metric_request')
-
- for message in _consumer:
- if message.key == "create_metric_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp['metric_create_response']['status'])
- self.assertEqual(resp['metric_create_response']['metric_uuid'],None)
- return
-
-class test_metrics_data(unittest.TestCase):
-
- def test_met_name_positive(self):
- time.sleep(2)
- # To generate Request of testing valid meric_name in read_metric_data_request
- producer.request("read_metrics_data/read_metric_name_req_valid.json",'read_metric_data_request', '','metric_request')
- for message in _consumer:
- if message.key == "read_metric_data_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertEqual(type(resp['metrics_data']),dict)
- return
-
- def test_met_name_negative(self):
- time.sleep(2)
- # To generate Request of testing invalid meric_name in read_metric_data_request
- producer.request("read_metrics_data/read_metric_name_req_invalid.json",'read_metric_data_request', '','metric_request')
- for message in _consumer:
- if message.key == "read_metric_data_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp['metrics_data'])
- return
-
- def test_coll_period_positive(self):
- # To generate Request of testing valid collection_period in read_metric_data_request
- # For AWS metric_data_stats collection period should be a multiple of 60
- time.sleep(2)
- producer.request("read_metrics_data/read_coll_period_req_valid.json",'read_metric_data_request', '','metric_request')
- for message in _consumer:
- if message.key == "read_metric_data_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertEqual(type(resp),dict)
- return
-
- def test_coll_period_negative(self):
- time.sleep(2)
- # To generate Request of testing invalid collection_period in read_metric_data_request
- producer.request("read_metrics_data/read_coll_period_req_invalid.json",'read_metric_data_request', '','metric_request')
- for message in _consumer:
- if message.key == "read_metric_data_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp['metrics_data'])
- return
-
-class test_update_metrics(unittest.TestCase):
-
- def test_upd_status_positive(self):
- time.sleep(2)
- # To generate Request of testing valid meric_name in update metrics requests
- producer.request("update_metrics/update_metric_req_valid.json",'update_metric_request', '','metric_request')
- for message in _consumer:
- if message.key == "update_metric_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertTrue(resp['metric_update_response']['status'])
- self.assertEqual(resp['metric_update_response']['metric_uuid'],0)
- return
-
- def test_upd_status_negative(self):
- time.sleep(2)
- # To generate Request of testing invalid meric_name in update metrics requests
- producer.request("update_metrics/update_metric_req_invalid.json",'update_metric_request', '','metric_request')
- for message in _consumer:
- if message.key == "update_metric_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp['metric_update_response']['status'])
- self.assertEqual(resp['metric_update_response']['metric_uuid'],None)
- return
-
-class test_delete_metrics(unittest.TestCase):
-
- def test_del_met_name_positive(self):
- time.sleep(2)
- # To generate Request of testing valid meric_name in delete metrics requests
- producer.request("delete_metrics/delete_metric_req_valid.json",'delete_metric_request', '','metric_request')
- for message in _consumer:
- if message.key == "delete_metric_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp['status'])
- return
-
- def test_del_met_name_negative(self):
- time.sleep(2)
- # To generate Request of testing invalid meric_name in delete metrics requests
- producer.request("delete_metrics/delete_metric_req_invalid.json",'delete_metric_request', '','metric_request')
- for message in _consumer:
- if message.key == "delete_metric_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp)
- return
-
-class test_list_metrics(unittest.TestCase):
-
- def test_list_met_name_positive(self):
- time.sleep(2)
- # To generate Request of testing valid meric_name in list metrics requests
- producer.request("list_metrics/list_metric_req_valid.json",'list_metric_request', '','metric_request')
- for message in _consumer:
- if message.key == "list_metrics_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertEqual(type(resp['metrics_list']),list)
- return
-
- def test_list_met_name_negitive(self):
- time.sleep(2)
- # To generate Request of testing invalid meric_name in list metrics requests
- producer.request("list_metrics/list_metric_req_invalid.json",'list_metric_request', '','metric_request')
- for message in _consumer:
- if message.key == "list_metrics_response":
- resp = json.loads(json.loads(json.loads(message.value)))
- time.sleep(1)
- self.assertFalse(resp['metrics_list'])
- return
-
-
-if __name__ == '__main__':
-
- # Saving test reults in Log file
-
- log_file = 'log_file.txt'
- f = open(log_file, "w")
- runner = unittest.TextTestRunner(f)
- unittest.main(testRunner=runner)
- f.close()
-
- # For printing results on Console
- # unittest.main()
-
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""OpenStack plugin tests."""
-
-import logging
-
-# Initialise a logger for tests
-logging.basicConfig(filename='OpenStack_tests.log',
- format='%(asctime)s %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p', filemode='a',
- level=logging.INFO)
-log = logging.getLogger(__name__)
+++ /dev/null
-# Copyright 2017 iIntel Research and Development Ireland Limited
-# **************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Tests for all alarm request message keys."""
-
-import json
-
-import logging
-
-import unittest
-
-import mock
-
-from plugins.OpenStack.Aodh import alarming as alarm_req
-from plugins.OpenStack.common import Common
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
-class Message(object):
- """A class to mock a message object value for alarm requests."""
-
- def __init__(self):
- """Initialize a mocked message instance."""
- self.topic = "alarm_request"
- self.key = None
- self.value = json.dumps({"mock_value": "mock_details"})
-
-
-class TestAlarmKeys(unittest.TestCase):
- """Integration test for alarm request keys."""
-
- def setUp(self):
- """Setup the tests for alarm request keys."""
- super(TestAlarmKeys, self).setUp()
- self.alarming = alarm_req.Alarming()
- self.alarming.common = Common()
-
- @mock.patch.object(Common, "_authenticate")
- def test_alarming_env_authentication(self, auth):
- """Test getting an auth_token and endpoint for alarm requests."""
- # if auth_token is None environment variables are used to authenticare
- message = Message()
-
- self.alarming.alarming(message, self.alarming.common, None)
-
- auth.assert_called_with()
-
- @mock.patch.object(Common, "_authenticate")
- def test_acccess_cred_auth(self, auth):
- """Test receiving auth_token from access creds."""
- message = Message()
-
- self.alarming.alarming(message, self.alarming.common, "my_auth_token")
-
- auth.assert_not_called
- self.assertEqual(self.alarming.auth_token, "my_auth_token")
-
- @mock.patch.object(alarm_req.Alarming, "delete_alarm")
- def test_delete_alarm_key(self, del_alarm):
- """Test the functionality for a create alarm request."""
- # Mock a message value and key
- message = Message()
- message.key = "delete_alarm_request"
- message.value = json.dumps({"alarm_delete_request":
- {"alarm_uuid": "my_alarm_id"}})
-
- # Call the alarming functionality and check delete request
- self.alarming.alarming(message, self.alarming.common, "my_auth_token")
-
- del_alarm.assert_called_with(mock.ANY, mock.ANY, "my_alarm_id")
-
- @mock.patch.object(alarm_req.Alarming, "list_alarms")
- def test_list_alarm_key(self, list_alarm):
- """Test the functionality for a list alarm request."""
- # Mock a message with list alarm key and value
- message = Message()
- message.key = "list_alarm_request"
- message.value = json.dumps({"alarm_list_request": "my_alarm_details"})
-
- # Call the alarming functionality and check list functionality
- self.alarming.alarming(message, self.alarming.common, "my_auth_token")
- list_alarm.assert_called_with(mock.ANY, mock.ANY, "my_alarm_details")
-
- @mock.patch.object(alarm_req.Alarming, "update_alarm_state")
- def test_ack_alarm_key(self, ack_alarm):
- """Test the functionality for an acknowledge alarm request."""
- # Mock a message with acknowledge alarm key and value
- message = Message()
- message.key = "acknowledge_alarm"
- message.value = json.dumps({"ack_details":
- {"alarm_uuid": "my_alarm_id"}})
-
- # Call alarming functionality and check acknowledge functionality
- self.alarming.alarming(message, self.alarming.common, "my_auth_token")
- ack_alarm.assert_called_with(mock.ANY, mock.ANY, "my_alarm_id")
-
- @mock.patch.object(alarm_req.Alarming, "configure_alarm")
- def test_config_alarm_key(self, config_alarm):
- """Test the functionality for a create alarm request."""
- # Mock a message with config alarm key and value
- message = Message()
- message.key = "create_alarm_request"
- message.value = json.dumps({"alarm_create_request": "alarm_details"})
-
- # Call alarming functionality and check config alarm call
- config_alarm.return_value = "my_alarm_id", True
- self.alarming.alarming(message, self.alarming.common, "my_auth_token")
- config_alarm.assert_called_with(mock.ANY, mock.ANY, "alarm_details")
+++ /dev/null
-# Copyright 2017 iIntel Research and Development Ireland Limited
-# **************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Tests for all alarm request message keys."""
-
-import json
-
-import logging
-
-import unittest
-
-import mock
-
-from plugins.OpenStack.Aodh import alarming as alarm_req
-from plugins.OpenStack.common import Common
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-auth_token = mock.ANY
-endpoint = mock.ANY
-
-
-class Response(object):
- """Mock a response message class."""
-
- def __init__(self, result):
- """Initialise the response text and status code."""
- self.text = json.dumps(result)
- self.status_code = "MOCK_STATUS_CODE"
-
-
-class TestAlarming(unittest.TestCase):
- """Tests for alarming class functions."""
-
- def setUp(self):
- """Setup for tests."""
- super(TestAlarming, self).setUp()
- self.alarming = alarm_req.Alarming()
- self.alarming.common = Common()
-
- @mock.patch.object(alarm_req.Alarming, "check_payload")
- @mock.patch.object(alarm_req.Alarming, "check_for_metric")
- @mock.patch.object(Common, "_perform_request")
- def test_config_invalid_alarm_req(self, perf_req, check_metric, check_pay):
- """Test configure an invalid alarm request."""
- # Configuring with invalid alarm name results in failure
- values = {"alarm_name": "my_alarm",
- "metric_name": "my_metric",
- "resource_uuid": "my_r_id"}
- self.alarming.configure_alarm(endpoint, auth_token, values)
- perf_req.assert_not_called
- perf_req.reset_mock()
-
- # Correct alarm_name will check for metric in Gnocchi
- # If there isn't one an alarm won;t be created
- values = {"alarm_name": "disk_write_ops",
- "metric_name": "disk_write_ops",
- "resource_uuid": "my_r_id"}
-
- check_metric.return_value = None
-
- self.alarming.configure_alarm(endpoint, auth_token, values)
- perf_req.assert_not_called
-
- @mock.patch.object(alarm_req.Alarming, "check_payload")
- @mock.patch.object(alarm_req.Alarming, "check_for_metric")
- @mock.patch.object(Common, "_perform_request")
- def test_config_valid_alarm_req(self, perf_req, check_metric, check_pay):
- """Test config a valid alarm."""
- # Correct alarm_name will check for metric in Gnocchi
- # And conform that the payload is configured correctly
- values = {"alarm_name": "disk_write_ops",
- "metric_name": "disk_write_ops",
- "resource_uuid": "my_r_id"}
-
- check_metric.return_value = "my_metric_id"
- check_pay.return_value = "my_payload"
-
- self.alarming.configure_alarm(endpoint, auth_token, values)
- perf_req.assert_called_with(
- "<ANY>/v2/alarms/", auth_token,
- req_type="post", payload="my_payload")
-
- @mock.patch.object(Common, "_perform_request")
- def test_delete_alarm_req(self, perf_req):
- """Test delete alarm request."""
- self.alarming.delete_alarm(endpoint, auth_token, "my_alarm_id")
-
- perf_req.assert_called_with(
- "<ANY>/v2/alarms/my_alarm_id", auth_token, req_type="delete")
-
- @mock.patch.object(Common, "_perform_request")
- def test_invalid_list_alarm_req(self, perf_req):
- """Test invalid list alarm_req."""
- # Request will not be performed with out a resoure_id
- list_details = {"mock_details": "invalid_details"}
- self.alarming.list_alarms(endpoint, auth_token, list_details)
-
- perf_req.assert_not_called
-
- @mock.patch.object(Common, "_perform_request")
- def test_valid_list_alarm_req(self, perf_req):
- """Test valid list alarm request."""
- # Minimum requirement for an alarm list is resource_id
- list_details = {"resource_uuid": "mock_r_id"}
- self.alarming.list_alarms(endpoint, auth_token, list_details)
-
- perf_req.assert_called_with(
- "<ANY>/v2/alarms/", auth_token, req_type="get")
- perf_req.reset_mock()
-
- # Check list with alarm_name defined
- list_details = {"resource_uuid": "mock_r_id",
- "alarm_name": "my_alarm",
- "severity": "critical"}
- self.alarming.list_alarms(endpoint, auth_token, list_details)
-
- perf_req.assert_called_with(
- "<ANY>/v2/alarms/", auth_token, req_type="get")
-
- @mock.patch.object(Common, "_perform_request")
- def test_ack_alarm_req(self, perf_req):
- """Test update alarm state for acknowledge alarm request."""
- self.alarming.update_alarm_state(endpoint, auth_token, "my_alarm_id")
-
- perf_req.assert_called_with(
- "<ANY>/v2/alarms/my_alarm_id/state", auth_token, req_type="put",
- payload=json.dumps("ok"))
-
- @mock.patch.object(alarm_req.Alarming, "check_payload")
- @mock.patch.object(Common, "_perform_request")
- def test_update_alarm_invalid(self, perf_req, check_pay):
- """Test update alarm with invalid get response."""
- values = {"alarm_uuid": "my_alarm_id"}
-
- self.alarming.update_alarm(endpoint, auth_token, values)
-
- perf_req.assert_called_with(mock.ANY, auth_token, req_type="get")
- check_pay.assert_not_called
-
- @mock.patch.object(alarm_req.Alarming, "check_payload")
- @mock.patch.object(Common, "_perform_request")
- def test_update_alarm_invalid_payload(self, perf_req, check_pay):
- """Test update alarm with invalid payload."""
- resp = Response({"name": "my_alarm",
- "state": "alarm",
- "gnocchi_resources_threshold_rule":
- {"resource_id": "my_resource_id",
- "metric": "my_metric"}})
- perf_req.return_value = resp
- check_pay.return_value = None
- values = {"alarm_uuid": "my_alarm_id"}
-
- self.alarming.update_alarm(endpoint, auth_token, values)
-
- perf_req.assert_called_with(mock.ANY, auth_token, req_type="get")
- self.assertEqual(perf_req.call_count, 1)
-
- @mock.patch.object(alarm_req.Alarming, "check_payload")
- @mock.patch.object(Common, "_perform_request")
- def test_update_alarm_valid(self, perf_req, check_pay):
- """Test valid update alarm request."""
- resp = Response({"name": "my_alarm",
- "state": "alarm",
- "gnocchi_resources_threshold_rule":
- {"resource_id": "my_resource_id",
- "metric": "my_metric"}})
- perf_req.return_value = resp
- values = {"alarm_uuid": "my_alarm_id"}
-
- self.alarming.update_alarm(endpoint, auth_token, values)
-
- check_pay.assert_called_with(values, "my_metric", "my_resource_id",
- "my_alarm", alarm_state="alarm")
-
- self.assertEqual(perf_req.call_count, 2)
- # Second call is the update request
- perf_req.assert_called_with(
- '<ANY>/v2/alarms/my_alarm_id', auth_token,
- req_type="put", payload=check_pay.return_value)
-
- def test_check_valid_payload(self):
- """Test the check payload function for a valid payload."""
- values = {"severity": "warning",
- "statistic": "COUNT",
- "threshold_value": 12,
- "operation": "GT"}
- payload = self.alarming.check_payload(
- values, "my_metric", "r_id", "alarm_name")
-
- self.assertEqual(
- json.loads(payload), {"name": "alarm_name",
- "gnocchi_resources_threshold_rule":
- {"resource_id": "r_id",
- "metric": "my_metric",
- "comparison_operator": "gt",
- "aggregation_method": "count",
- "threshold": 12,
- "resource_type": "generic"},
- "severity": "low",
- "state": "ok",
- "type": "gnocchi_resources_threshold"})
-
- def test_check_valid_state_payload(self):
- """Test the check payload function for a valid payload with state."""
- values = {"severity": "warning",
- "statistic": "COUNT",
- "threshold_value": 12,
- "operation": "GT"}
- payload = self.alarming.check_payload(
- values, "my_metric", "r_id", "alarm_name", alarm_state="alarm")
-
- self.assertEqual(
- json.loads(payload), {"name": "alarm_name",
- "gnocchi_resources_threshold_rule":
- {"resource_id": "r_id",
- "metric": "my_metric",
- "comparison_operator": "gt",
- "aggregation_method": "count",
- "threshold": 12,
- "resource_type": "generic"},
- "severity": "low",
- "state": "alarm",
- "type": "gnocchi_resources_threshold"})
-
- def test_check_invalid_payload(self):
- """Test the check payload function for an invalid payload."""
- values = {"alarm_values": "mock_invalid_details"}
- payload = self.alarming.check_payload(
- values, "my_metric", "r_id", "alarm_name")
-
- self.assertEqual(payload, None)
-
- @mock.patch.object(Common, "_perform_request")
- def test_get_alarm_state(self, perf_req):
- """Test the get alarm state function."""
- self.alarming.get_alarm_state(endpoint, auth_token, "alarm_id")
-
- perf_req.assert_called_with(
- "<ANY>/v2/alarms/alarm_id/state", auth_token, req_type="get")
-
- @mock.patch.object(Common, "get_endpoint")
- @mock.patch.object(Common, "_perform_request")
- def test_check_for_metric(self, perf_req, get_endpoint):
- """Test the check for metric function."""
- get_endpoint.return_value = "gnocchi_endpoint"
-
- self.alarming.check_for_metric(auth_token, "metric_name", "r_id")
-
- perf_req.assert_called_with(
- "gnocchi_endpoint/v1/metric/", auth_token, req_type="get")
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Tests for all common OpenStack methods."""
-
-import json
-
-import logging
-
-import unittest
-
-from keystoneclient.v3 import client
-
-import mock
-
-from plugins.OpenStack.common import Common
-from plugins.OpenStack.settings import Config
-
-import requests
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
-class Message(object):
- """Mock a message for an access credentials request."""
-
- def __init__(self):
- """Initialise the topic and value of access_cred message."""
- self.topic = "access_credentials"
- self.value = json.dumps({"mock_value": "mock_details",
- "vim_type": "OPENSTACK",
- "access_config":
- {"openstack_site": "my_site",
- "user": "my_user",
- "password": "my_password",
- "vim_tenant_name": "my_tenant"}})
-
-
-class TestCommon(unittest.TestCase):
- """Test the common class for OpenStack plugins."""
-
- def setUp(self):
- """Test Setup."""
- super(TestCommon, self).setUp()
- self.common = Common()
-
- @mock.patch.object(client, "Client")
- def test_authenticate_exists(self, key_client):
- """Testing if an authentication token already exists."""
- # If the auth_token is already generated a new one will not be creates
- self.common._auth_token = "my_auth_token"
- token = self.common._authenticate()
-
- self.assertEqual(token, "my_auth_token")
-
- @mock.patch.object(Config, "instance")
- @mock.patch.object(client, "Client")
- def test_authenticate_none(self, key_client, cfg):
- """Test generating a new authentication token."""
- # If auth_token doesn't exist one will try to be created with keystone
- # With the configuration values from the environment
- self.common._auth_token = None
- config = cfg.return_value
- url = config.OS_AUTH_URL
- user = config.OS_USERNAME
- pword = config.OS_PASSWORD
- tenant = config.OS_TENANT_NAME
-
- self.common._authenticate()
-
- key_client.assert_called_with(auth_url=url,
- username=user,
- password=pword,
- tenant_name=tenant)
- key_client.reset_mock()
-
- @mock.patch.object(client, "Client")
- def test_authenticate_access_cred(self, key_client):
- """Test generating an auth_token using access_credentials from SO."""
- # Mock valid message from SO
- self.common._auth_token = None
- message = Message()
-
- self.common._authenticate(message=message)
-
- # The class variables are set for each consifugration
- self.assertEqual(self.common.openstack_url, "my_site")
- self.assertEqual(self.common.user, "my_user")
- self.assertEqual(self.common.password, "my_password")
- self.assertEqual(self.common.tenant, "my_tenant")
- key_client.assert_called
-
- @mock.patch.object(requests, 'post')
- def test_post_req(self, post):
- """Testing a post request."""
- self.common._perform_request("url", "auth_token", req_type="post",
- payload="payload")
-
- post.assert_called_with("url", data="payload", headers=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch.object(requests, 'get')
- def test_get_req(self, get):
- """Testing a get request."""
- # Run the defualt get request without any parameters
- self.common._perform_request("url", "auth_token", req_type="get")
-
- get.assert_called_with("url", params=None, headers=mock.ANY,
- timeout=mock.ANY)
-
- # Test with some parameters specified
- get.reset_mock()
- self.common._perform_request("url", "auth_token", req_type="get",
- params="some parameters")
-
- get.assert_called_with("url", params="some parameters",
- headers=mock.ANY, timeout=mock.ANY)
-
- @mock.patch.object(requests, 'put')
- def test_put_req(self, put):
- """Testing a put request."""
- self.common._perform_request("url", "auth_token", req_type="put",
- payload="payload")
- put.assert_called_with("url", data="payload", headers=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch.object(requests, 'delete')
- def test_delete_req(self, delete):
- """Testing a delete request."""
- self.common._perform_request("url", "auth_token", req_type="delete")
-
- delete.assert_called_with("url", headers=mock.ANY, timeout=mock.ANY)
+++ /dev/null
-# Copyright 2017 iIntel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Tests for all metric request message keys."""
-
-import json
-
-import logging
-
-import unittest
-
-import mock
-
-from plugins.OpenStack.Gnocchi import metrics as metric_req
-
-from plugins.OpenStack.common import Common
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-# Mock auth_token and endpoint
-endpoint = mock.ANY
-auth_token = mock.ANY
-
-# Mock a valid metric list for some tests, and a resultant list
-metric_list = [{"name": "disk_write_ops",
- "id": "metric_id",
- "unit": "units",
- "resource_id": "r_id"}]
-result_list = ["metric_id", "r_id", "units", "disk_write_ops"]
-
-
-class Response(object):
- """Mock a response object for requests."""
-
- def __init__(self):
- """Initialise test and status code values."""
- self.text = json.dumps("mock_response_text")
- self.status_code = "STATUS_CODE"
-
-
-class TestMetricCalls(unittest.TestCase):
- """Integration test for metric request keys."""
-
- def setUp(self):
- """Setup the tests for metric request keys."""
- super(TestMetricCalls, self).setUp()
- self.metrics = metric_req.Metrics()
- self.metrics._common = Common()
-
- @mock.patch.object(metric_req.Metrics, "get_metric_name")
- @mock.patch.object(metric_req.Metrics, "get_metric_id")
- @mock.patch.object(Common, "_perform_request")
- def test_invalid_config_metric_req(
- self, perf_req, get_metric, get_metric_name):
- """Test the configure metric function, for an invalid metric."""
- # Test invalid configuration for creating a metric
- values = {"metric_details": "invalid_metric"}
-
- m_id, r_id, status = self.metrics.configure_metric(
- endpoint, auth_token, values)
-
- perf_req.assert_not_called
- self.assertEqual(m_id, None)
- self.assertEqual(r_id, None)
- self.assertEqual(status, False)
-
- # Test with an invalid metric name, will not perform request
- values = {"resource_uuid": "r_id"}
- get_metric_name.return_value = "metric_name", None
-
- m_id, r_id, status = self.metrics.configure_metric(
- endpoint, auth_token, values)
-
- perf_req.assert_not_called
- self.assertEqual(m_id, None)
- self.assertEqual(r_id, "r_id")
- self.assertEqual(status, False)
- get_metric_name.reset_mock()
-
- # If metric exists, it won't be recreated
- get_metric_name.return_value = "metric_name", "norm_name"
- get_metric.return_value = "metric_id"
-
- m_id, r_id, status = self.metrics.configure_metric(
- endpoint, auth_token, values)
-
- perf_req.assert_not_called
- self.assertEqual(m_id, "metric_id")
- self.assertEqual(r_id, "r_id")
- self.assertEqual(status, False)
-
- @mock.patch.object(metric_req.Metrics, "get_metric_name")
- @mock.patch.object(metric_req.Metrics, "get_metric_id")
- @mock.patch.object(Common, "_perform_request")
- def test_valid_config_metric_req(
- self, perf_req, get_metric, get_metric_name):
- """Test the configure metric function, for a valid metric."""
- # Test valid configuration and payload for creating a metric
- values = {"resource_uuid": "r_id",
- "metric_unit": "units"}
- get_metric_name.return_value = "metric_name", "norm_name"
- get_metric.return_value = None
- payload = {"id": "r_id",
- "metrics": {"metric_name":
- {"archive_policy_name": "high",
- "name": "metric_name",
- "unit": "units"}}}
-
- self.metrics.configure_metric(endpoint, auth_token, values)
-
- perf_req.assert_called_with(
- "<ANY>/v1/resource/generic", auth_token, req_type="post",
- payload=json.dumps(payload))
-
- @mock.patch.object(Common, "_perform_request")
- def test_delete_metric_req(self, perf_req):
- """Test the delete metric function."""
- self.metrics.delete_metric(endpoint, auth_token, "metric_id")
-
- perf_req.assert_called_with(
- "<ANY>/v1/metric/metric_id", auth_token, req_type="delete")
-
- @mock.patch.object(Common, "_perform_request")
- def test_delete_metric_invalid_status(self, perf_req):
- """Test invalid response for delete request."""
- perf_req.return_value = "404"
-
- status = self.metrics.delete_metric(endpoint, auth_token, "metric_id")
-
- self.assertEqual(status, False)
-
- @mock.patch.object(metric_req.Metrics, "response_list")
- @mock.patch.object(Common, "_perform_request")
- def test_complete_list_metric_req(self, perf_req, resp_list):
- """Test the complete list metric function."""
- # Test listing metrics without any configuration options
- values = {}
- resp = Response()
- perf_req.return_value = resp
- self.metrics.list_metrics(endpoint, auth_token, values)
-
- perf_req.assert_called_with(
- "<ANY>/v1/metric/", auth_token, req_type="get")
- resp_list.assert_called_with("mock_response_text")
-
- @mock.patch.object(metric_req.Metrics, "response_list")
- @mock.patch.object(Common, "_perform_request")
- def test_resource_list_metric_req(self, perf_req, resp_list):
- """Test the resource list metric function."""
- # Test listing metrics with a resource id specified
- values = {"resource_uuid": "resource_id"}
- resp = Response()
- perf_req.return_value = resp
- self.metrics.list_metrics(endpoint, auth_token, values)
-
- perf_req.assert_called_with(
- "<ANY>/v1/metric/", auth_token, req_type="get")
- resp_list.assert_called_with(
- "mock_response_text", resource="resource_id")
-
- @mock.patch.object(metric_req.Metrics, "response_list")
- @mock.patch.object(Common, "_perform_request")
- def test_name_list_metric_req(self, perf_req, resp_list):
- """Test the metric_name list metric function."""
- # Test listing metrics with a metric_name specified
- values = {"metric_name": "disk_write_bytes"}
- resp = Response()
- perf_req.return_value = resp
- self.metrics.list_metrics(endpoint, auth_token, values)
-
- perf_req.assert_called_with(
- "<ANY>/v1/metric/", auth_token, req_type="get")
- resp_list.assert_called_with(
- "mock_response_text", metric_name="disk_write_bytes")
-
- @mock.patch.object(metric_req.Metrics, "response_list")
- @mock.patch.object(Common, "_perform_request")
- def test_combined_list_metric_req(self, perf_req, resp_list):
- """Test the combined resource and metric list metric function."""
- # Test listing metrics with a resource id and metric name specified
- values = {"resource_uuid": "resource_id",
- "metric_name": "packets_sent"}
- resp = Response()
- perf_req.return_value = resp
- self.metrics.list_metrics(endpoint, auth_token, values)
-
- perf_req.assert_called_with(
- "<ANY>/v1/metric/", auth_token, req_type="get")
- resp_list.assert_called_with(
- "mock_response_text", resource="resource_id",
- metric_name="packets_sent")
-
- @mock.patch.object(Common, "_perform_request")
- def test_get_metric_id(self, perf_req):
- """Test get_metric_id function."""
- self.metrics.get_metric_id(endpoint, auth_token, "my_metric", "r_id")
-
- perf_req.assert_called_with(
- "<ANY>/v1/resource/generic/r_id", auth_token, req_type="get")
-
- def test_get_metric_name(self):
- """Test the result from the get_metric_name function."""
- # test with a valid metric_name
- values = {"metric_name": "disk_write_ops"}
-
- metric_name, norm_name = self.metrics.get_metric_name(values)
-
- self.assertEqual(metric_name, "disk_write_ops")
- self.assertEqual(norm_name, "disk.disk_ops")
-
- # test with an invalid metric name
- values = {"metric_name": "my_invalid_metric"}
-
- metric_name, norm_name = self.metrics.get_metric_name(values)
-
- self.assertEqual(metric_name, "my_invalid_metric")
- self.assertEqual(norm_name, None)
-
- @mock.patch.object(Common, "_perform_request")
- def test_valid_read_data_req(self, perf_req):
- """Test the read metric data function, for a valid call."""
- values = {"metric_uuid": "metric_id",
- "collection_unit": "DAY",
- "collection_period": 1}
-
- self.metrics.read_metric_data(endpoint, auth_token, values)
-
- perf_req.assert_called_once
-
- @mock.patch.object(Common, "_perform_request")
- def test_invalid_read_data_req(self, perf_req):
- """Test the read metric data function, for an invalid call."""
- # Teo empty lists wil be returned because the values are invalid
- values = {}
-
- times, data = self.metrics.read_metric_data(
- endpoint, auth_token, values)
-
- self.assertEqual(times, [])
- self.assertEqual(data, [])
-
- def test_complete_response_list(self):
- """Test the response list function for formating metric lists."""
- # Mock a list for testing purposes, with valid OSM metric
- resp_list = self.metrics.response_list(metric_list)
-
- # Check for the expected values in the resulting list
- for l in result_list:
- self.assertIn(l, resp_list[0])
-
- def test_name_response_list(self):
- """Test the response list with metric name configured."""
- # Mock the metric name to test a metric name list
- # Test with a name that is not in the list
- invalid_name = "my_metric"
- resp_list = self.metrics.response_list(
- metric_list, metric_name=invalid_name)
-
- self.assertEqual(resp_list, [])
-
- # Test with a name on the list
- valid_name = "disk_write_ops"
- resp_list = self.metrics.response_list(
- metric_list, metric_name=valid_name)
-
- # Check for the expected values in the resulting list
- for l in result_list:
- self.assertIn(l, resp_list[0])
-
- def test_resource_response_list(self):
- """Test the response list with resource_id configured."""
- # Mock a resource_id to test a resource list
- # Test with resource not on the list
- invalid_id = "mock_resource"
- resp_list = self.metrics.response_list(metric_list, resource=invalid_id)
-
- self.assertEqual(resp_list, [])
-
- # Test with a resource on the list
- valid_id = "r_id"
- resp_list = self.metrics.response_list(metric_list, resource=valid_id)
-
- # Check for the expected values in the resulting list
- for l in result_list:
- self.assertIn(l, resp_list[0])
-
- def test_combined_response_list(self):
- """Test the response list function with resource_id and metric_name."""
- # Test for a combined resource and name list
- # resource and name are on the lisat
- valid_name = "disk_write_ops"
- valid_id = "r_id"
- resp_list = self.metrics.response_list(
- metric_list, metric_name=valid_name, resource=valid_id)
-
- # Check for the expected values in the resulting list
- for l in result_list:
- self.assertIn(l, resp_list[0])
-
- # resource not on list
- invalid_id = "mock_resource"
- resp_list = self.metrics.response_list(
- metric_list, metric_name=valid_name, resource=invalid_id)
-
- self.assertEqual(resp_list, [])
-
- # metric name not on list
- invalid_name = "mock_metric"
- resp_list = self.metrics.response_list(
- metric_list, metric_name=invalid_name, resource=valid_id)
-
- self.assertEqual(resp_list, [])
+++ /dev/null
-# Copyright 2017 iIntel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Tests for all metric request message keys."""
-
-import json
-
-import logging
-
-import unittest
-
-import mock
-
-from plugins.OpenStack.Gnocchi import metrics as metric_req
-
-from plugins.OpenStack.common import Common
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
-class Message(object):
- """A class to mock a message object value for metric requests."""
-
- def __init__(self):
- """Initialize a mocked message instance."""
- self.topic = "metric_request"
- self.key = None
- self.value = json.dumps({"mock_message": "message_details"})
-
-
-class TestMetricReq(unittest.TestCase):
- """Integration test for metric request keys."""
-
- def setUp(self):
- """Setup the tests for metric request keys."""
- super(TestMetricReq, self).setUp()
- self.common = Common()
- self.metrics = metric_req.Metrics()
-
- @mock.patch.object(Common, "_authenticate")
- def test_access_cred_metric_auth(self, auth):
- """Test authentication with access credentials."""
- message = Message()
-
- self.metrics.metric_calls(message, self.common, "my_auth_token")
-
- auth.assert_not_called
- self.assertEqual(self.metrics.auth_token, "my_auth_token")
-
- @mock.patch.object(Common, "_authenticate")
- def test_env_metric_auth(self, auth):
- """Test authentication with environment variables."""
- message = Message()
-
- self.metrics.metric_calls(message, self.common, None)
-
- auth.assert_called_with()
-
- @mock.patch.object(metric_req.Metrics, "delete_metric")
- def test_delete_metric_key(self, del_metric):
- """Test the functionality for a delete metric request."""
- # Mock a message value and key
- message = Message()
- message.key = "delete_metric_request"
- message.value = json.dumps({"metric_uuid": "my_metric_id"})
-
- # Call the metric functionality and check delete request
- self.metrics.metric_calls(message, self.common, "my_auth_token")
-
- del_metric.assert_called_with(mock.ANY, mock.ANY, "my_metric_id")
-
- @mock.patch.object(metric_req.Metrics, "list_metrics")
- def test_list_metric_key(self, list_metrics):
- """Test the functionality for a list metric request."""
- # Mock a message with list metric key and value
- message = Message()
- message.key = "list_metric_request"
- message.value = json.dumps({"metrics_list_request": "metric_details"})
-
- # Call the metric functionality and check list functionality
- self.metrics.metric_calls(message, self.common, "my_auth_token")
- list_metrics.assert_called_with(mock.ANY, mock.ANY, "metric_details")
-
- @mock.patch.object(metric_req.Metrics, "read_metric_data")
- @mock.patch.object(metric_req.Metrics, "list_metrics")
- @mock.patch.object(metric_req.Metrics, "delete_metric")
- @mock.patch.object(metric_req.Metrics, "configure_metric")
- def test_update_metric_key(self, config_metric, delete_metric, list_metrics,
- read_data):
- """Test the functionality for an update metric request."""
- # Mock a message with update metric key and value
- message = Message()
- message.key = "update_metric_request"
- message.value = json.dumps({"metric_create":
- {"metric_name": "my_metric",
- "resource_uuid": "my_r_id"}})
-
- # Call metric functionality and confirm no function is called
- # Gnocchi does not support updating a metric configuration
- self.metrics.metric_calls(message, self.common, "my_auth_token")
- config_metric.assert_not_called
- list_metrics.assert_not_called
- delete_metric.assert_not_called
- read_data.assert_not_called
-
- @mock.patch.object(metric_req.Metrics, "configure_metric")
- def test_config_metric_key(self, config_metric):
- """Test the functionality for a create metric request."""
- # Mock a message with create metric key and value
- message = Message()
- message.key = "create_metric_request"
- message.value = json.dumps({"metric_create": "metric_details"})
-
- # Call metric functionality and check config metric
- config_metric.return_value = "metric_id", "resource_id", True
- self.metrics.metric_calls(message, self.common, "my_auth_token")
- config_metric.assert_called_with(mock.ANY, mock.ANY, "metric_details")
-
- @mock.patch.object(metric_req.Metrics, "read_metric_data")
- def test_read_data_key(self, read_data):
- """Test the functionality for a read metric data request."""
- # Mock a message with a read data key and value
- message = Message()
- message.key = "read_metric_data_request"
- message.value = json.dumps({"alarm_uuid": "alarm_id"})
-
- # Call metric functionality and check read data metrics
- read_data.return_value = "time_stamps", "data_values"
- self.metrics.metric_calls(message, self.common, "my_auth_token")
- read_data.assert_called_with(
- mock.ANY, mock.ANY, json.loads(message.value))
+++ /dev/null
-# Copyright 2017 iIntel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Test that the correct responses are generated for each message."""
-
-import logging
-
-import unittest
-
-import mock
-
-from plugins.OpenStack import response as resp
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
-class TestOpenStackResponse(unittest.TestCase):
- """Tests for responses generated by the OpenStack plugins."""
-
- def setUp(self):
- """Setup for testing OpenStack plugin responses."""
- super(TestOpenStackResponse, self).setUp()
- self.plugin_resp = resp.OpenStack_Response()
-
- def test_invalid_key(self):
- """Test if an invalid key is entered for a response."""
- message = self.plugin_resp.generate_response("mock_invalid_key")
- self.assertEqual(message, None)
-
- @mock.patch.object(
- resp.OpenStack_Response, "alarm_list_response")
- def test_list_alarm_resp(self, alarm_list_resp):
- """Test out a function call for a list alarm response."""
- message = self.plugin_resp.generate_response("list_alarm_response")
- self.assertEqual(alarm_list_resp.return_value, message)
-
- @mock.patch.object(
- resp.OpenStack_Response, "list_metric_response")
- def test_list_metric_resp(self, metric_list_resp):
- """Test list metric response function call."""
- message = self.plugin_resp.generate_response("list_metric_response")
- self.assertEqual(message, metric_list_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "delete_alarm_response")
- def test_delete_alarm_resp(self, del_alarm_resp):
- """Test delete alarm response function call."""
- message = self.plugin_resp.generate_response("delete_alarm_response")
- self.assertEqual(message, del_alarm_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "delete_metric_response")
- def test_delete_metric_resp(self, del_metric_resp):
- """Test the response functionality of delete metric response."""
- message = self.plugin_resp.generate_response("delete_metric_response")
- self.assertEqual(message, del_metric_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "create_alarm_response")
- def test_create_alarm_resp(self, config_alarm_resp):
- """Test create alarm response function call."""
- message = self.plugin_resp.generate_response("create_alarm_response")
- self.assertEqual(message, config_alarm_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "metric_create_response")
- def test_create_metric_resp(self, config_metric_resp):
- """Test create metric response function call."""
- message = self.plugin_resp.generate_response("create_metric_response")
- self.assertEqual(message, config_metric_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "update_alarm_response")
- def test_update_alarm_resp(self, up_alarm_resp):
- """Test update alarm response function call."""
- message = self.plugin_resp.generate_response("update_alarm_response")
- self.assertEqual(message, up_alarm_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "update_metric_response")
- def test_update_metric_resp(self, up_metric_resp):
- """Test update metric response function call."""
- message = self.plugin_resp.generate_response("update_metric_response")
- self.assertEqual(message, up_metric_resp.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "notify_alarm")
- def test_notify_alarm(self, notify_alarm):
- """Test notify alarm response function call."""
- message = self.plugin_resp.generate_response("notify_alarm")
- self.assertEqual(message, notify_alarm.return_value)
-
- @mock.patch.object(
- resp.OpenStack_Response, "read_metric_data_response")
- def test_read_metric_data_resp(self, read_data_resp):
- """Test read metric data response function call."""
- message = self.plugin_resp.generate_response(
- "read_metric_data_response")
- self.assertEqual(message, read_data_resp.return_value)
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-"""Tests for settings for OpenStack plugins configurations."""
-
-import logging
-
-import os
-
-import unittest
-
-import mock
-
-from plugins.OpenStack.settings import Config
-
-__author__ = "Helena McGough"
-
-log = logging.getLogger(__name__)
-
-
-class TestSettings(unittest.TestCase):
- """Test the settings class for OpenStack plugin configuration."""
-
- def setUp(self):
- """Test Setup."""
- super(TestSettings, self).setUp()
- self.cfg = Config.instance()
-
- def test_set_os_username(self):
- """Test reading the environment for OpenStack plugin configuration."""
- self.cfg.read_environ("my_service")
-
- self.assertEqual(self.cfg.OS_USERNAME, "my_service")
-
- @mock.patch.object(os, "environ")
- def test_read_environ(self, environ):
- """Test reading environment variables for configuration."""
- self.cfg.read_environ("my_service")
-
- # Called for each key in the configuration dictionary
- environ.assert_called_once
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
-##
-
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#__author__ = "Prithiv Mohan"
-#__date__ = "25/Sep/2017"
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#__author__ = "Prithiv Mohan"
-#__date__ = "25/Sep/2017"
-
-import sys
-import threading
-import pytest
-from kafka import KafkaConsumer, KafkaProducer
-
-def test_end_to_end(kafka_broker):
- connect_str = ':'.join([kafka_broker.host, str(kafka_broker.port)])
- producer = KafkaProducer(bootstrap_servers=connect_str,
- retries=5,
- max_block_ms=10000,
- value_serializer=str.encode)
- consumer = KafkaConsumer(bootstrap_servers=connect_str,
- group_id=None,
- consumer_timeout_ms=10000,
- auto_offset_reset='earliest',
- value_deserializer=bytes.decode)
-
- topic = 'TutorialTopic'
-
- messages = 100
- futures = []
- for i in range(messages):
- futures.append(producer.send(topic, 'msg %d' % i))
- ret = [f.get(timeout=30) for f in futures]
- assert len(ret) == messages
-
- producer.close()
-
- consumer.subscribe([topic])
- msgs = set()
- for i in range(messages):
- try:
- msgs.add(next(consumer).value)
- except StopIteration:
- break
-
- assert msgs == set(['msg %d' % i for i in range(messages)])
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-##
-"""This is a KafkaProducer with a request function to test the plugins."""
-
-import json
-
-import logging as log
-
-import os
-
-import jsmin
-
-from kafka import KafkaProducer as kaf
-
-from kafka.errors import KafkaError
-
-
-class KafkaProducer(object):
- """A KafkaProducer for testing purposes."""
-
- def __init__(self, topic):
- """Initialize a KafkaProducer and it's topic."""
- self._topic = topic
-
- if "ZOOKEEPER_URI" in os.environ:
- broker = os.getenv("ZOOKEEPER_URI")
- else:
- broker = "localhost:9092"
-
- '''
- If the zookeeper broker URI is not set in the env, by default,
- localhost container is taken as the host because an instance of
- is already running.
- '''
-
- self.producer = kaf(
- key_serializer=str.encode,
- value_serializer=lambda v: json.dumps(v).encode('ascii'),
- bootstrap_servers=broker, api_version=(0, 10))
-
- def publish(self, key, value, topic):
- """Send messages to the message bus with a defing key and topic."""
- try:
- future = self.producer.send(topic=topic, key=key, value=value)
- self.producer.flush()
- except Exception:
- log.exception("Error publishing to {} topic." .format(topic))
- raise
- try:
- record_metadata = future.get(timeout=10)
- log.debug("TOPIC:", record_metadata.topic)
- log.debug("PARTITION:", record_metadata.partition)
- log.debug("OFFSET:", record_metadata.offset)
- except KafkaError:
- pass
-
- def request(self, path, key, message, topic):
- """Test json files are loaded and sent on the message bus."""
- # External to MON
- payload_create_alarm = jsmin(open(os.path.join(path)).read())
- self.publish(key=key,
- value=json.loads(payload_create_alarm),
- topic=topic)
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You may
+# obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+"""A common KafkaConsumer for all MON plugins."""
+
+import json
+import logging
+import sys
+
+sys.path.append("/root/MON")
+
+logging.basicConfig(filename='MON_plugins.log',
+ format='%(asctime)s %(message)s',
+ datefmt='%m/%d/%Y %I:%M:%S %p', filemode='a',
+ level=logging.INFO)
+log = logging.getLogger(__name__)
+
+from kafka import KafkaConsumer
+from kafka.errors import KafkaError
+
+from plugins.OpenStack.Aodh import alarming
+from plugins.OpenStack.common import Common
+from plugins.OpenStack.Gnocchi import metrics
+
+
+# Initialize servers
+server = {'server': 'localhost:9092'}
+
+# Initialize consumers for alarms and metrics
+common_consumer = KafkaConsumer(group_id='osm_mon',
+ bootstrap_servers=server['server'])
+
+# Create OpenStack alarming and metric instances
+auth_token = None
+openstack_auth = Common()
+openstack_metrics = metrics.Metrics()
+openstack_alarms = alarming.Alarming()
+
+
+def get_vim_type(message):
+ """Get the vim type that is required by the message."""
+ try:
+ return json.loads(message.value)["vim_type"].lower()
+ except Exception as exc:
+ log.warn("vim_type is not configured correctly; %s", exc)
+ return None
+
+# Subscribe the consumer to the plugin topics
+topics = ['metric_request', 'alarm_request', 'access_credentials']
+common_consumer.subscribe(topics)
+
+try:
+ log.info("Listening for alarm_request and metric_request messages")
+ for message in common_consumer:
+ # Check the message topic
+ if message.topic == "metric_request":
+ # Check the vim desired by the message
+ vim_type = get_vim_type(message)
+ if vim_type == "openstack":
+ log.info("This message is for the OpenStack plugin.")
+ openstack_metrics.metric_calls(
+ message, openstack_auth, auth_token)
+
+ elif vim_type == "cloudwatch":
+ log.info("This message is for the CloudWatch plugin.")
+
+ elif vim_type == "vrops":
+ log.info("This message is for the vROPs plugin.")
+
+ else:
+ log.debug("vim_type is misconfigured or unsupported; %s",
+ vim_type)
+
+ elif message.topic == "alarm_request":
+ # Check the vim desired by the message
+ vim_type = get_vim_type(message)
+ if vim_type == "openstack":
+ log.info("This message is for the OpenStack plugin.")
+ openstack_alarms.alarming(message, openstack_auth, auth_token)
+
+ elif vim_type == "cloudwatch":
+ log.info("This message is for the CloudWatch plugin.")
+
+ elif vim_type == "vrops":
+ log.info("This message is for the vROPs plugin.")
+
+ else:
+ log.debug("vim_type is misconfigured or unsupported; %s",
+ vim_type)
+
+ elif message.topic == "access_credentials":
+ # Check the vim desired by the message
+ vim_type = get_vim_type(message)
+ if vim_type == "openstack":
+ log.info("This message is for the OpenStack plugin.")
+ auth_token = openstack_auth._authenticate(message=message)
+
+ elif vim_type == "cloudwatch":
+ log.info("This message is for the CloudWatch plugin.")
+
+ elif vim_type == "vrops":
+ log.info("This message is for the vROPs plugin.")
+
+ else:
+ log.debug("vim_type is misconfigured or unsupported; %s",
+ vim_type)
+
+ else:
+ log.info("This topic is not relevant to any of the MON plugins.")
+
+
+except KafkaError as exc:
+ log.warn("Exception: %s", exc)
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+##
+
+'''
+This is a kafka consumer app that reads the messages from the message bus for
+alarms and metrics responses.
+
+'''
+
+__author__ = "Prithiv Mohan"
+__date__ = "06/Sep/2017"
+
+
+from kafka import KafkaConsumer
+from kafka.errors import KafkaError
+import json
+import logging
+import logging.config
+import os
+
+
+def logging_handler(filename, mode='a+', encoding=None):
+ if not os.path.exists(filename):
+ open(filename, 'a').close()
+ return logging.FileHandler(filename, mode)
+
+log_config = {
+ 'version': 1,
+ 'formatters': {
+ 'default': {
+ 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'file': {
+ '()': logging_handler,
+ 'level': 'DEBUG',
+ 'formatter': 'default',
+ 'filename': '/var/log/osm_mon.log',
+ 'mode': 'a+',
+ 'encoding': 'utf-8',
+ },
+ },
+ 'kafka': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ },
+ 'root': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ },
+}
+
+
+logging.config.dictConfig(log_config)
+logger = logging.getLogger('kafka')
+
+if "BROKER_URI" in os.environ:
+ broker = os.getenv("BROKER_URI")
+else:
+ broker = "localhost:9092"
+
+alarm_consumer = KafkaConsumer(
+ 'alarm_response', 'osm_mon', bootstrap_servers=broker)
+metric_consumer = KafkaConsumer(
+ 'metric_response', 'osm_mon', bootstrap_servers=broker)
+try:
+ for message in alarm_consumer:
+ logger.debug(message)
+ for message in metric_consumer:
+ logger.debug(message)
+except KafkaError:
+ log.exception()
+
+alarm_consumer.subscribe('alarm_response')
+metric_consumer.subscribe('metric_response')
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+##
+'''
+This is a kafka producer app that interacts with the SO and the plugins of the
+datacenters like OpenStack, VMWare, AWS.
+'''
+
+from kafka import KafkaProducer as kaf
+from kafka.errors import KafkaError
+import logging
+import json
+# NOTE(review): 'import jsmin' is shadowed by the 'from jsmin import
+# jsmin' below and can be dropped; 'from os import listdir' appears
+# unused in this module.
+import jsmin
+import os
+from os import listdir
+from jsmin import jsmin
+
+__author__ = "Prithiv Mohan"
+__date__ = "06/Sep/2017"
+
+# Directory holding the JSON payload model files shipped with MON.
+json_path=os.path.abspath(os.pardir+"/MON/osm_mon/core/models/")
+
+class KafkaProducer(object):
+
+ def __init__(self, topic):
+
+ self._topic = topic
+
+ if "BROKER_URI" in os.environ:
+ broker = os.getenv("BROKER_URI")
+ else:
+ broker = "localhost:9092"
+
+ '''
+ If the broker URI is not set in the env, by default,
+ localhost container is taken as the host because an instance of
+ is already running.
+ '''
+
+ self.producer = kaf(
+ key_serializer=str.encode,
+ value_serializer=lambda v: json.dumps(v).encode('ascii'),
+ bootstrap_servers=broker, api_version=(0, 10))
+
+ def publish(self, key, value, topic=None):
+ try:
+ future = self.producer.send(topic=topic, key=key, value=value)
+ self.producer.flush()
+ except Exception:
+ logging.exception("Error publishing to {} topic." .format(topic))
+ raise
+ try:
+ record_metadata = future.get(timeout=10)
+ logging.debug("TOPIC:", record_metadata.topic)
+ logging.debug("PARTITION:", record_metadata.partition)
+ logging.debug("OFFSET:", record_metadata.offset)
+ except KafkaError:
+ pass
+
+ def create_alarm_request(self, key, message, topic):
+
+ # External to MON
+
+ payload_create_alarm = jsmin(
+ open(os.path.join(json_path, 'create_alarm.json')).read())
+ self.publish(key,
+ value=json.dumps(payload_create_alarm),
+ topic='alarm_request')
+
+ def create_alarm_response(self, key, message, topic):
+
+ # Internal to MON
+
+ payload_create_alarm_resp = jsmin(
+ open(os.path.join(json_path, 'create_alarm_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='alarm_response')
+
+ def acknowledge_alarm(self, key, message, topic):
+
+ # Internal to MON
+
+ payload_acknowledge_alarm = jsmin(
+ open(os.path.join(json_path, 'acknowledge_alarm.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_acknowledge_alarm),
+ topic='alarm_request')
+
+ def list_alarm_request(self, key, message, topic):
+
+ # External to MON
+
+ payload_alarm_list_req = jsmin(
+ open(os.path.join(json_path, 'list_alarm_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_alarm_list_req),
+ topic='alarm_request')
+
+ def notify_alarm(self, key, message, topic):
+
+ payload_notify_alarm = jsmin(
+ open(os.path.join(json_path, 'notify_alarm.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='alarm_response')
+
+ def list_alarm_response(self, key, message, topic):
+
+ payload_list_alarm_resp = jsmin(
+ open(os.path.join(json_path, 'list_alarm_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='alarm_response')
+
+ def update_alarm_request(self, key, message, topic):
+
+ # External to Mon
+
+ payload_update_alarm_req = jsmin(
+ open(os.path.join(json_path, 'update_alarm_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_update_alarm_req),
+ topic='alarm_request')
+
+ def update_alarm_response(self, key, message, topic):
+
+ # Internal to Mon
+
+ payload_update_alarm_resp = jsmin(
+ open(os.path.join(json_path, 'update_alarm_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='alarm_response')
+
+ def delete_alarm_request(self, key, message, topic):
+
+ # External to Mon
+
+ payload_delete_alarm_req = jsmin(
+ open(os.path.join(json_path, 'delete_alarm_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_delete_alarm_req),
+ topic='alarm_request')
+
+ def delete_alarm_response(self, key, message, topic):
+
+ # Internal to Mon
+
+ payload_delete_alarm_resp = jsmin(
+ open(os.path.join(json_path, 'delete_alarm_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='alarm_response')
+
+ def create_metrics_request(self, key, message, topic):
+
+ # External to Mon
+
+ payload_create_metrics_req = jsmin(
+ open(os.path.join(json_path, 'create_metric_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_create_metrics_req),
+ topic='metric_request')
+
+ def create_metrics_resp(self, key, message, topic):
+
+ # Internal to Mon
+
+ payload_create_metrics_resp = jsmin(
+ open(os.path.join(json_path, 'create_metric_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='metric_response')
+
+ def read_metric_data_request(self, key, message, topic):
+
+ # External to Mon
+
+ payload_read_metric_data_request = jsmin(
+ open(os.path.join(json_path, 'read_metric_data_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_read_metric_data_request),
+ topic='metric_request')
+
+ def read_metric_data_response(self, key, message, topic):
+
+ # Internal to Mon
+
+ payload_metric_data_response = jsmin(
+ open(os.path.join(json_path, 'read_metric_data_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='metric_response')
+
+ def list_metric_request(self, key, message, topic):
+
+ # External to MON
+
+ payload_metric_list_req = jsmin(
+ open(os.path.join(json_path, 'list_metric_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_metric_list_req),
+ topic='metric_request')
+
+ def list_metric_response(self, key, message, topic):
+
+ # Internal to MON
+
+ payload_metric_list_resp = jsmin(
+ open(os.path.join(json_path, 'list_metrics_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='metric_response')
+
+ def delete_metric_request(self, key, message, topic):
+
+ # External to Mon
+
+ payload_delete_metric_req = jsmin(
+ open(os.path.join(json_path, 'delete_metric_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_delete_metric_req),
+ topic='metric_request')
+
+ def delete_metric_response(self, key, message, topic):
+
+ # Internal to Mon
+
+ payload_delete_metric_resp = jsmin(
+ open(os.path.join(json_path, 'delete_metric_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='metric_response')
+
+ def update_metric_request(self, key, message, topic):
+
+ # External to Mon
+
+ payload_update_metric_req = jsmin(
+ open(os.path.join(json_path, 'update_metric_req.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_update_metric_req),
+ topic='metric_request')
+
+ def update_metric_response(self, key, message, topic):
+
+ # Internal to Mon
+
+ payload_update_metric_resp = jsmin(
+ open(os.path.join(json_path, 'update_metric_resp.json')).read())
+
+ self.publish(key,
+ value=message,
+ topic='metric_response')
+
+ def access_credentials(self, key, message, topic):
+
+ payload_access_credentials = jsmin(
+ open(os.path.join(json_path, 'access_credentials.json')).read())
+
+ self.publish(key,
+ value=json.dumps(payload_access_credentials),
+ topic='access_credentials')
--- /dev/null
+#gitkeep file to keep the initial empty directory structure.
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for CloudWatch access credentials */
+
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "access_config":
+ {
+ "aws_site": { "type": "string" },
+ "user": { "type": "string" },
+ "password": { "type": "string" },
+ "vim_tenant_name": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "aws_site",
+ "user",
+ "password",
+ "vim_tenant_name" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for OpenStack access credentials */
+
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "access_config":
+ {
+ "openstack_site": { "type" : "string" },
+ "user": { "type": "string" },
+ "password": { "type": "string" },
+ "vim_tenant_name": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "openstack_site",
+ "user",
+ "password",
+ "vim_tenant_name" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for vROPs access credentials */
+
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "access_config":
+ {
+ "vrops_site": { "type": "string" },
+ "vrops_user": { "type": "string" },
+ "vrops_password": { "type": "string" },
+ "vcloud_site": { "type": "string" },
+ "admin_username": { "type": "string" },
+ "admin_password": { "type": "string" },
+ "nsx_manager": { "type": "string" },
+ "nsx_user": { "type": "string" },
+ "nsx_password": { "type": "string" },
+ "vcenter_ip": { "type": "string" },
+ "vcenter_port": { "type": "string" },
+ "vcenter_user": { "type": "string" },
+ "vcenter_password": { "type": "string" },
+ "vim_tenant_name": { "type": "string" },
+ "orgname": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "vrops_site",
+ "vrops_user",
+ "vrops_password",
+ "vcloud_site",
+ "admin_username",
+ "admin_password",
+ "vcenter_ip",
+ "vcenter_port",
+ "vcenter_user",
+ "vcenter_password",
+ "vim_tenant_name",
+ "orgname" ]
+}
--- /dev/null
+
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+# This is the message bus schema for acknowledge_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "ack_details":
+ {
+ "alarm_uuid": { "type": "string" },
+ "resource_uuid": { "type": "string" },
+ "tenant_uuid": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "alarm_uuid",
+ "resource_uuid",
+ "tenant_uuid" ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+# This is the message bus schema to create_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string "},
+ "alarm_create_request":
+ {
+ "correlation_id": { "type": "integer" },
+ "alarm_name": { "type": "string" },
+ "metric_name": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "resource_uuid": { "type": "string" },
+ "description": { "type": "string" },
+ "severity": { "type": "string" },
+ "operation": { "type": "string" },
+ "threshold_value": { "type": "integer" },
+ "unit": { "type": "string" },
+ "statistic": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "correlation_id",
+ "alarm_name",
+ "metric_name",
+ "resource_uuid",
+ "severity",
+ "operation",
+ "threshold_value",
+ "unit",
+ "statistic" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for create_alarm response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "alarm_create_response":
+ {
+ "correlation_id": { "type": "integer" },
+ "alarm_uuid": { "type": "string" },
+ "status": { "type": "boolean" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "alarm_uuid",
+ "status" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to create_metric */
+
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "vim_type": { "type": "string" },
+ "metric_create":
+ {
+ "metric_name": { "type" : "string" },
+ "metric_unit": { "type": "string" },
+ "resource_uuid": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "vim_type",
+ "metric_name",
+ "metric_unit",
+ "resource_uuid" ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for create_metric response*/
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "metric_create_response":
+ {
+ "metric_uuid": { "type": "string" },
+ "resource_uuid": { "type": "string" },
+ "status": { "type": "boolean" }
+ },
+ "required": [ "schema_type",
+ "schema_version",
+ "correlation_id",
+ "metric_uuid",
+ "resource_uuid",
+ "status" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to delete_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "alarm_delete_request":
+ {
+ "alarm_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "alarm_uuid",
+ "correlation_id"
+ ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+ # This is the message bus schema for the delete_alarm response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "alarm_deletion_response":
+ {
+ "correlation_id": { "type": "integer" },
+ "alarm_uuid": { "type": "string" },
+ "status": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "alarm_uuid",
+ "status" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to delete_metric */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "metric_name": { "type": "string" },
+ "metric_uuid": { "type": "string" },
+ "resource_id": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "vim_type": { "type": "string" },
+ "required": [ "schema_verion",
+ "schema_type",
+ "metric_name",
+ "metric_uuid",
+ "resource_uuid",
+ "correlation_id",
+ "vim_type" ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for delete_metric_response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "metric_name": { "type": "string" },
+ "metric_uuid": { "type": "string" },
+ "resource_uuid": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "status": { "type": "boolean" },
+ "required": [ "schema_version",
+ "schema_type",
+ "metric_name",
+ "metric_uuid",
+ "resource_uuid",
+ "status",
+ "correlation_id" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+# This is the message bus schema to list_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "alarm_list_request":
+ {
+ "correlation_id": { "type": "integer" },
+ "resource_uuid": { "type": "string" },
+ "alarm_name": { "type": "string" },
+ "severity": { "type" : "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "correlation_id",
+ "resource_uuid"
+ ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for list_alarm response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "list_alarm_resp": { "$ref": "definitions.json#/notify_details" }
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to list_metric */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "metrics_list_request":
+ {
+ "metric_name": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "resource_uuid": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "vim_type",
+ "correlation_id"
+ ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for list_metric response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "vim_type": { "type": "string" },
+ "metrics_list":
+ [{
+ "type": "array",
+ "properties":{
+ "metric_name": { "type": "string" },
+ "metric_uuid": { "type": "string" },
+ "metric_unit": { "type": "string" },
+ "resource_uuid": { "type": "string" }
+ }
+ }],
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "vim_type",
+ "metric_name",
+ "metric_uuid",
+ "metric_unit",
+ "resource_uuid" ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to notify_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "definitions":
+ {
+ "notify_details":
+ {
+ "alarm_uuid": { "type": "string" },
+ "resource_uuid": { "type": "string" },
+ "description": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "severity": { "type" : "string" },
+ "status": { "type": "string" },
+ "start_date": { "type": "string" },
+ "update_date": { "type": "string" },
+ "cancel_date": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "alarm_uuid",
+ "resource_uuid",
+ "vim_type",
+ "severity",
+ "status",
+ "start_date" ]
+ }
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to read_metric_data */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "metric_name": { "type": "string" },
+ "metric_uuid": { "type": "string" },
+ "resource_uuid": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "vim_type": { "type": "string" },
+ "collection_period": { "type": "integer" },
+ "collection_unit": { "type": "string" },
+ "required": ["schema_version",
+ "schema_type",
+ "metric_name",
+ "metric_uuid",
+ "correlation_id",
+ "vim_type",
+ "collection_period",
+ "collection_unit",
+ "resource_uuid"]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for read_metric_data response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": {"type": "string" },
+ "metric_name": { "type": "string" },
+ "metric_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "resource_uuid": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "metrics_data":
+ {
+ "time_series": [{
+ "type": "array",
+ "properties":
+ { "time_stamp":
+ { "type": "integer" }}}
+ ]
+ },
+ "metrics_series": [{
+ "type": "array",
+ "properties":
+ { "data":
+ { "type":
+ ["integer",
+ "string",
+ "decimal"
+ ]
+ }
+ }
+ }
+ ],
+ "unit": { "type": "string" },
+ "required": [ "schema_version",
+ "schema_type",
+ "metric_name",
+ "metric_uuid",
+ "resource_uuid",
+ "correlation_id",
+ "time_series",
+ "metrics_series" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to update_alarm */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "vim_type": { "type": "string" },
+ "alarm_update_request":
+ {
+ "correlation_id": { "type": "integer" },
+ "alarm_uuid": { "type": "string" },
+ "metric_uuid": { "type": "string" },
+ "description": { "type": "string" },
+ "severity": { "type": "string" },
+ "operation": { "type": "string" },
+ "threshold_value": { "type": "string" },
+ "unit": { "type": "string" },
+ "statistic": { "type": "string" }
+ },
+ "required": [ "schema_version",
+                "schema_type",
+ "vim_type",
+ "correlation_id",
+ "alarm_uuid",
+ "metric_uuid" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for update_alarm response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "alarm_update_response":
+ {
+ "correlation_id": { "type": "integer" },
+ "alarm_uuid": { "type": "string" },
+ "status": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "alarm_uuid",
+ "status" ]
+}
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema to create_metric */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "tenant_uuid": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "vim_type": { "type": "string" },
+ "metric_create":
+ {
+ "metric_name": { "type": "string" },
+ "metric_unit": { "type": "string" },
+ "resource_uuid": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "vim_type",
+ "resource_uuid"
+ ]
+}
\ No newline at end of file
--- /dev/null
+/* Copyright© 2017 Intel Research and Development Ireland Limited
+ # This file is part of OSM Monitoring module
+ # All Rights Reserved to Intel Corporation
+
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+ # This is the message bus schema for update_metric response */
+
+{
+ "schema_version": { "type": "string" },
+ "schema_type": { "type": "string" },
+ "correlation_id": { "type": "integer" },
+ "metric_update_response":
+ {
+ "metric_uuid": { "type": "string" },
+ "status": { "type": "boolean" },
+ "resource_uuid": { "type": "string" }
+ },
+ "required": [ "schema_version",
+ "schema_type",
+ "correlation_id",
+ "metric_uuid",
+ "resource_uuid",
+ "status"]
+}
\ No newline at end of file
--- /dev/null
+#gitkeep file to keep the initial empty directory structure.
--- /dev/null
+##
+# Copyright 2017 xFlow Research Pvt. Ltd
+# This file is part of MON module
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: wajeeha.hamid@xflowresearch.com
+##
+
+'''
+Connecting with AWS services --CloudWatch/EC2 using Required keys
+'''
+
+__author__ = "Wajeeha Hamid"
+__date__ = "18-September-2017"
+
+import sys
+import os
+
+try:
+ import boto
+ import boto.ec2
+ import boto.vpc
+ import boto.ec2.cloudwatch
+ import boto.ec2.connection
+ import logging as log
+ from boto.ec2.cloudwatch.alarm import MetricAlarm
+ from boto.ec2.cloudwatch.dimension import Dimension
+ from boto.sns import connect_to_region
+ from boto.utils import get_instance_metadata
+
+except:
+ exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
+
+
+class Connection():
+    """Connection Establishement with AWS -- VPC/EC2/CloudWatch
+
+    Holds the boto connection objects as instance attributes; call
+    setEnvironment() before connection_instance().
+    """
+#-----------------------------------------------------------------------------------------------------------------------------
+    def setEnvironment(self):
+
+        """Credentials for connecting to AWS-CloudWatch"""
+        # Credentials come from the process environment; only the region has a
+        # fallback default ("us-west-2") when AWS_EC2_REGION is unset.
+        self.AWS_KEY = os.environ.get("AWS_ACCESS_KEY_ID")
+        self.AWS_SECRET = os.environ.get("AWS_SECRET_ACCESS_KEY")
+        self.AWS_REGION = os.environ.get("AWS_EC2_REGION","us-west-2")
+        #TOPIC = 'YOUR_TOPIC'
+#-----------------------------------------------------------------------------------------------------------------------------
+    def connection_instance(self):
+        # Opens VPC, EC2 and CloudWatch connections with the credentials set by
+        # setEnvironment(). Returns a dict with keys 'ec2_connection' and
+        # 'cloudwatch_connection'; on failure logs the error and implicitly
+        # returns None.
+        try:
+            #VPC Connection
+            self.vpc_conn = boto.vpc.connect_to_region(self.AWS_REGION,
+                aws_access_key_id=self.AWS_KEY,
+                aws_secret_access_key=self.AWS_SECRET)
+
+
+            #EC2 Connection
+            self.ec2_conn = boto.ec2.connect_to_region(self.AWS_REGION,
+                aws_access_key_id=self.AWS_KEY,
+                aws_secret_access_key=self.AWS_SECRET)
+
+
+            """ TODO : Required to add actions against alarms when needed """
+            #self.sns = connect_to_region(self.AWS_REGION)
+            #self.topics = self.sns.get_all_topics()
+            #self.topic = self.topics[u'ListTopicsResponse']['ListTopicsResult']['Topics'][0]['TopicArn']
+
+            #Cloudwatch Connection
+            self.cloudwatch_conn = boto.ec2.cloudwatch.connect_to_region(
+                self.AWS_REGION,
+                aws_access_key_id=self.AWS_KEY,
+                aws_secret_access_key=self.AWS_SECRET)
+            connection_dict = dict()
+            connection_dict['ec2_connection'] = self.ec2_conn
+            connection_dict['cloudwatch_connection'] = self.cloudwatch_conn
+            return connection_dict
+
+        except Exception as e:
+            log.error("Failed to Connect with AWS %s: ",str(e))
+
--- /dev/null
+##
+# Copyright 2017 xFlow Research Pvt. Ltd
+# This file is part of MON module
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: wajeeha.hamid@xflowresearch.com
+##
+
+''' Handling of alarms requests via BOTO 2.48 '''
+
+__author__ = "Wajeeha Hamid"
+__date__ = "18-September-2017"
+
+import sys
+import os
+import re
+import datetime
+import random
+import json
+import logging as log
+from random import randint
+from operator import itemgetter
+from connection import Connection
+
+
+try:
+ import boto
+ import boto.ec2
+ import boto.vpc
+ import boto.ec2.cloudwatch
+ import boto.ec2.connection
+except:
+ exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
+
+# Mapping from MON message-bus statistic names to boto/CloudWatch statistic names.
+STATISTICS = {
+    "AVERAGE": "Average",
+    "MINIMUM": "Minimum",
+    "MAXIMUM": "Maximum",
+    "COUNT" : "SampleCount",
+    "SUM" : "Sum"}
+
+# Mapping from MON comparison-operator codes to CloudWatch comparison symbols.
+OPERATIONS = {
+    "GE" : ">=",
+    "LE" : "<=",
+    "GT" : ">",
+    "LT" : "<",
+    "EQ" : "="}
+
+class MetricAlarm():
+    """Alarms Functionality Handler -- Carries out alarming requests and responses via BOTO.Cloudwatch """
+    # CloudWatch has no user-defined alarm IDs, so this class stores the MON
+    # alarm id inside the alarm's description as "severity;alarm_id;description"
+    # and parses it back with str.split(';') everywhere below.
+    def __init__(self):
+        # Response payload dicts reused across requests.
+        self.alarm_resp = dict()
+        self.del_resp = dict()
+
+    def config_alarm(self,cloudwatch_conn,create_info):
+        """Configure or Create a new alarm"""
+        inner_dict = dict()
+        """ Alarm Name to ID Mapping """
+        # The MON alarm id is derived as "<alarm_name>_<resource_uuid>".
+        alarm_info = create_info['alarm_create_request']
+        alarm_id = alarm_info['alarm_name'] + "_" + alarm_info['resource_uuid']
+        if self.is_present(cloudwatch_conn,alarm_id)['status'] == True:
+            alarm_id = None
+            log.debug ("Alarm already exists, Try updating the alarm using 'update_alarm_configuration()'")
+            return alarm_id
+        else:
+            try:
+                if alarm_info['statistic'] in STATISTICS:
+                    if alarm_info['operation'] in OPERATIONS:
+                        # period/evaluation_periods are fixed (60s, 1 eval);
+                        # the description encodes "severity;alarm_id;description".
+                        alarm = boto.ec2.cloudwatch.alarm.MetricAlarm(
+                            connection = cloudwatch_conn,
+                            name = alarm_info['alarm_name'] + "_" + alarm_info['resource_uuid'],
+                            metric = alarm_info['metric_name'],
+                            namespace = "AWS/EC2",
+                            statistic = STATISTICS[alarm_info['statistic']],
+                            comparison = OPERATIONS[alarm_info['operation']],
+                            threshold = alarm_info['threshold_value'],
+                            period = 60,
+                            evaluation_periods = 1,
+                            unit=alarm_info['unit'],
+                            description = alarm_info['severity'] + ";" + alarm_id + ";" + alarm_info['description'],
+                            dimensions = {'InstanceId':alarm_info['resource_uuid']},
+                            alarm_actions = None,
+                            ok_actions = None,
+                            insufficient_data_actions = None)
+
+                        """Setting Alarm Actions :
+                        alarm_actions = ['arn:aws:swf:us-west-2:465479087178:action/actions/AWS_EC2.InstanceId.Stop/1.0']"""
+
+                        status=cloudwatch_conn.put_metric_alarm(alarm)
+
+                        log.debug ("Alarm Configured Succesfully")
+                        self.alarm_resp['schema_version'] = str(create_info['schema_version'])
+                        self.alarm_resp['schema_type'] = 'create_alarm_response'
+
+                        inner_dict['correlation_id'] = str(alarm_info['correlation_id'])
+                        inner_dict['alarm_uuid'] = str(alarm_id)
+                        inner_dict['status'] = status
+
+                        self.alarm_resp['alarm_create_response'] = inner_dict
+
+                        if status == True:
+                            return self.alarm_resp
+                        else:
+                            return None
+                    else:
+                        log.error("Operation not supported")
+                        return None
+                else:
+                    log.error("Statistic not supported")
+                    return None
+            except Exception as e:
+                log.error("Alarm Configuration Failed: " + str(e))
+
+#-----------------------------------------------------------------------------------------------------------------------------
+    def update_alarm(self,cloudwatch_conn,update_info):
+
+        """Update or reconfigure an alarm"""
+        inner_dict = dict()
+        alarm_info = update_info['alarm_update_request']
+
+        """Alarm Name to ID Mapping"""
+        alarm_id = alarm_info['alarm_uuid']
+        # NOTE: 'status' first holds the is_present() lookup dict and is later
+        # reused for the boolean result of put_metric_alarm().
+        status = self.is_present(cloudwatch_conn,alarm_id)
+
+        """Verifying : Alarm exists already"""
+        if status['status'] == False:
+            alarm_id = None
+            log.debug("Alarm not found, Try creating the alarm using 'configure_alarm()'")
+            return alarm_id
+        else:
+            try:
+                if alarm_info['statistic'] in STATISTICS:
+                    if alarm_info['operation'] in OPERATIONS:
+                        # Reuses the existing alarm name and InstanceId dimension
+                        # from the found alarm; CloudWatch treats put_metric_alarm
+                        # on an existing name as an update.
+                        alarm = boto.ec2.cloudwatch.alarm.MetricAlarm(
+                            connection = cloudwatch_conn,
+                            name = status['info'].name ,
+                            metric = alarm_info['metric_name'],
+                            namespace = "AWS/EC2",
+                            statistic = STATISTICS[alarm_info['statistic']],
+                            comparison = OPERATIONS[alarm_info['operation']],
+                            threshold = alarm_info['threshold_value'],
+                            period = 60,
+                            evaluation_periods = 1,
+                            unit=alarm_info['unit'],
+                            description = alarm_info['severity'] + ";" + alarm_id + ";" + alarm_info['description'],
+                            dimensions = {'InstanceId':str(status['info'].dimensions['InstanceId']).split("'")[1]},
+                            alarm_actions = None,
+                            ok_actions = None,
+                            insufficient_data_actions = None)
+
+                        """Setting Alarm Actions :
+                        alarm_actions = ['arn:aws:swf:us-west-2:465479087178:action/actions/AWS_EC2.InstanceId.Stop/1.0']"""
+
+                        status=cloudwatch_conn.put_metric_alarm(alarm)
+                        log.debug("Alarm %s Updated ",alarm.name)
+                        self.alarm_resp['schema_version'] = str(update_info['schema_version'])
+                        self.alarm_resp['schema_type'] = 'update_alarm_response'
+
+                        inner_dict['correlation_id'] = str(alarm_info['correlation_id'])
+                        inner_dict['alarm_uuid'] = str(alarm_id)
+                        inner_dict['status'] = status
+
+                        self.alarm_resp['alarm_update_response'] = inner_dict
+                        return self.alarm_resp
+                    else:
+                        log.error("Operation not supported")
+                        return None
+                else:
+                    log.error("Statistic not supported")
+                    return None
+            except Exception as e:
+                log.error ("Error in Updating Alarm " + str(e))
+
+#-----------------------------------------------------------------------------------------------------------------------------
+    def delete_Alarm(self,cloudwatch_conn,del_info_all):
+
+        """Deletes an Alarm with specified alarm_id"""
+        inner_dict = dict()
+        del_info = del_info_all['alarm_delete_request']
+        status = self.is_present(cloudwatch_conn,del_info['alarm_uuid'])
+        try:
+            if status['status'] == True:
+                del_status=cloudwatch_conn.delete_alarms(status['info'].name)
+                self.del_resp['schema_version'] = str(del_info_all['schema_version'])
+                self.del_resp['schema_type'] = 'delete_alarm_response'
+                inner_dict['correlation_id'] = str(del_info['correlation_id'])
+                inner_dict['alarm_id'] = str(del_info['alarm_uuid'])
+                inner_dict['status'] = del_status
+                self.del_resp['alarm_deletion_response'] = inner_dict
+                return self.del_resp
+            # Alarm not found: fall through to an explicit None.
+            return None
+        except Exception as e:
+            log.error("Alarm Not Deleted: " + str(e))
+#-----------------------------------------------------------------------------------------------------------------------------
+    def alarms_list(self,cloudwatch_conn,list_info):
+
+        """Get a list of alarms that are present on a particular VIM type"""
+        # The eight 'if' branches below implement filtering on the three
+        # optional fields severity / alarm_name / resource_uuid ("" = wildcard).
+        # NOTE(review): the branches are non-exclusive plain 'if's, so an alarm
+        # matching several patterns is appended more than once -- confirm intended.
+        alarm_list = []
+        alarm_info = dict()
+        inner_dict = list_info['alarm_list_request']
+        try: #id vim
+            alarms = cloudwatch_conn.describe_alarms()
+            itr = 0
+            for alarm in alarms:
+                list_info['alarm_list_request']['alarm_uuid'] = str(alarm.description).split(';')[1]
+
+                #Severity = alarm_name = resource_uuid = ""
+                if inner_dict['severity'] == "" and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == "":
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #alarm_name = resource_uuid = ""
+                if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == "":
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #severity = resource_uuid = ""
+                if inner_dict['severity'] == "" and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == "":
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #severity = alarm_name = ""
+                if inner_dict['severity'] == "" and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #resource_uuid = ""
+                if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == "":
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #alarm_name = ""
+                if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] == "" and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #severity = ""
+                if inner_dict['severity'] == "" and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+                #Everything provided
+                if inner_dict['severity'] == str(alarm.description).split(';')[0] and inner_dict['alarm_name'] in alarm.name and inner_dict['resource_uuid'] == str(alarm.dimensions['InstanceId']).split("'")[1]:
+                    alarm_list.insert(itr,self.alarm_details(cloudwatch_conn,list_info))
+                    itr += 1
+
+            alarm_info['schema_version'] = str(list_info['schema_version'])
+            alarm_info['schema_type'] = 'list_alarm_response'
+            alarm_info['list_alarm_resp'] = alarm_list
+
+            return alarm_info
+        except Exception as e:
+            log.error("Error in Getting List : %s",str(e))
+#-----------------------------------------------------------------------------------------------------------------------------
+    def alarm_details(self,cloudwatch_conn,ack_info):
+
+        """Get an individual alarm details specified by alarm_name"""
+        # Serves two callers: 'ack_details' requests (returns a full
+        # notify_alarm envelope) and 'alarm_list_request' (returns the bare
+        # details dict). The alarm status is inferred from the CloudWatch
+        # alarm-history summary text.
+        try:
+            alarms_details=cloudwatch_conn.describe_alarm_history()
+            alarm_details_all = dict()
+            alarm_details_dict = dict()
+            ack_info_all = ack_info
+
+
+            if 'ack_details' in ack_info:
+                ack_info = ack_info['ack_details']
+            elif 'alarm_list_request' in ack_info:
+                ack_info = ack_info['alarm_list_request']
+
+            is_present = self.is_present(cloudwatch_conn,ack_info['alarm_uuid'])
+
+            for itr in range (len(alarms_details)):
+                if alarms_details[itr].name == is_present['info'].name :#name, timestamp, summary
+                    if 'created' in alarms_details[itr].summary:
+                        alarm_details_dict['status'] = "New"
+                    elif 'updated' in alarms_details[itr].summary:
+                        alarm_details_dict['status'] = "Update"
+                    elif 'deleted' in alarms_details[itr].summary:
+                        alarm_details_dict['status'] = "Canceled"
+
+                    status = alarms_details[itr].summary.split()
+                    alarms = cloudwatch_conn.describe_alarms()
+                    for alarm in alarms:
+                        if str(alarm.description).split(';')[1] == ack_info['alarm_uuid']:
+                            alarm_details_dict['alarm_uuid'] = str(ack_info['alarm_uuid'])
+                            alarm_details_dict['resource_uuid'] = str(alarm.dimensions['InstanceId']).split("'")[1]
+                            alarm_details_dict['description'] = str(alarm.description).split(';')[1]
+                            alarm_details_dict['severity'] = str(alarm.description).split(';')[0]
+                            alarm_details_dict['start_date_time'] = str(alarms_details[itr].timestamp)
+                            alarm_details_dict['vim_type'] = str(ack_info_all['vim_type'])
+                            #TODO : tenant id
+                            if 'ack_details' in ack_info_all:
+                                alarm_details_all['schema_version'] = str(ack_info_all['schema_version'])
+                                alarm_details_all['schema_type'] = 'notify_alarm'
+                                alarm_details_all['notify_details'] = alarm_details_dict
+                                return alarm_details_all
+
+                            elif 'alarm_list_request' in ack_info_all:
+                                return alarm_details_dict
+
+        except Exception as e:
+            log.error("Error getting alarm details: %s",str(e))
+#-----------------------------------------------------------------------------------------------------------------------------
+    def is_present(self,cloudwatch_conn,alarm_id):
+        """Finding alarm from already configured alarms"""
+        # Scans all alarms and matches the MON alarm id embedded as the second
+        # ';'-separated field of the description. Returns
+        # {'status': True, 'info': alarm} or {'status': False}; on a boto
+        # error logs and implicitly returns None.
+        alarm_info = dict()
+        try:
+            alarms = cloudwatch_conn.describe_alarms()
+            for alarm in alarms:
+                if str(alarm.description).split(';')[1] == alarm_id:
+                    alarm_info['status'] = True
+                    alarm_info['info'] = alarm
+                    return alarm_info
+            alarm_info['status'] = False
+            return alarm_info
+        except Exception as e:
+            log.error("Error Finding Alarm",str(e))
+#-----------------------------------------------------------------------------------------------------------------------------
+
\ No newline at end of file
--- /dev/null
+##
+# Copyright 2017 xFlow Research Pvt. Ltd
+# This file is part of MON module
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: wajeeha.hamid@xflowresearch.com
+##
+
+'''
+AWS-Plugin implements all the methods of MON to interact with AWS using the BOTO client
+'''
+
+__author__ = "Wajeeha Hamid"
+__date__ = "18-Sept-2017"
+
+import sys
+import datetime
+import json
+import logging as log
+
+try:
+ import boto
+ import boto.ec2
+ import boto.vpc
+ import boto.ec2.cloudwatch
+ import boto.ec2.connection
+except:
+ exit("Boto not avialable. Try activating your virtualenv OR `pip install boto`")
+
+
+
+class Metrics():
+
+    def createMetrics(self,cloudwatch_conn,metric_info):
+        # EC2 metrics are pre-defined by AWS, so "creating" one only validates
+        # the name; metric_uuid=0 is a placeholder (no real UUIDs in CloudWatch).
+        try:
+
+            '''createMetrics will be returning the metric_uuid=0 and
+            status=True when the metric is supported by AWS'''
+
+            supported=self.check_metric(metric_info['metric_name'])
+            metric_resp = dict()
+            metric_resp['resource_uuid'] = metric_info['resource_uuid']
+
+            if supported['status'] == True:
+                metric_resp['status'] = True
+                metric_resp['metric_uuid'] = 0
+                log.debug("Metrics Configured Succesfully : %s" , metric_resp)
+            else:
+                metric_resp['status'] = False
+                metric_resp['metric_uuid'] = None
+                log.error("Metric name is not supported")
+
+            return metric_resp
+
+        except Exception as e:
+            log.error("Metric Configuration Failed: " + str(e))
+#-----------------------------------------------------------------------------------------------------------------------------
+
+    def metricsData(self,cloudwatch_conn,data_info):
+
+        """Getting Metrics Stats for an Hour.The datapoints are
+        received after every one minute.
+        Time interval can be modified using Timedelta value"""
+        # Returns {'time_series': {...}, 'metrics_series': {...}} keyed by
+        # datapoint index, or False on unsupported metric / bad period.
+        # NOTE(review): statistic 'Maximum' and unit='Percent' are hard-coded --
+        # presumably targets CPU-style percent metrics; confirm for other units.
+
+        try:
+            # NOTE(review): metric_info is assigned but never used.
+            metric_info = dict()
+            metric_info_dict = dict()
+            timestamp_arr = {}
+            value_arr = {}
+
+            supported=self.check_metric(data_info['metric_name'])
+
+            if supported['status'] == True:
+                # collection_period must be a multiple of the fixed 60s period.
+                if int(data_info['collection_period']) % 60 == 0:
+                    metric_stats=cloudwatch_conn.get_metric_statistics(60, datetime.datetime.utcnow() - datetime.timedelta(seconds=int(data_info['collection_period'])),
+                        datetime.datetime.utcnow(),supported['metric_name'],'AWS/EC2', 'Maximum',
+                        dimensions={'InstanceId':data_info['resource_uuid']}, unit='Percent')
+                    index = 0
+                    for itr in range (len(metric_stats)):
+                        timestamp_arr[index] = str(metric_stats[itr]['Timestamp'])
+                        value_arr[index] = metric_stats[itr]['Maximum']
+                        index +=1
+                    metric_info_dict['time_series'] = timestamp_arr
+                    metric_info_dict['metrics_series'] = value_arr
+                    log.debug("Metrics Data : %s", metric_info_dict)
+                    return metric_info_dict
+                else:
+                    log.error("Collection Period should be a multiple of 60")
+                    return False
+
+            else:
+                log.error("Metric name is not supported")
+                return False
+
+        except Exception as e:
+            log.error("Error returning Metrics Data" + str(e))
+
+#-----------------------------------------------------------------------------------------------------------------------------
+    def updateMetrics(self,cloudwatch_conn,metric_info):
+
+        '''updateMetrics will be returning the metric_uuid=0 and
+        status=True when the metric is supported by AWS'''
+        # Mirror of createMetrics: AWS metrics cannot actually be updated, so
+        # this only re-validates the metric name.
+        try:
+            supported=self.check_metric(metric_info['metric_name'])
+            update_resp = dict()
+            update_resp['resource_uuid'] = metric_info['resource_uuid']
+            if supported['status'] == True:
+                update_resp['status'] = True
+                update_resp['metric_uuid'] = 0
+                log.debug("Metric Updated : %s", update_resp)
+            else:
+                update_resp['status'] = False
+                update_resp['metric_uuid'] = None
+                log.error("Metric name is not supported")
+
+            return update_resp
+
+        except Exception as e:
+            log.error("Error in Update Metrics" + str(e))
+#-----------------------------------------------------------------------------------------------------------------------------
+    def deleteMetrics(self,cloudwatch_conn,del_info):
+
+        '''Metric deletion - not supported in AWS.
+
+        Echoes the request fields back in a delete_metric_response
+        shape with status=False, or returns False for an unsupported
+        metric name.'''
+        try:
+            supported=self.check_metric(del_info['metric_name'])
+            metric_resp = dict()
+            # NOTE(review): metric_resp above is never used in this method.
+            del_resp = dict()
+            if supported['status'] == True:
+                del_resp['schema_version'] = del_info['schema_version']
+                del_resp['schema_type'] = "delete_metric_response"
+                del_resp['metric_name'] = del_info['metric_name']
+                del_resp['metric_uuid'] = del_info['metric_uuid']
+                del_resp['resource_uuid'] = del_info['resource_uuid']
+                # TODO : yet to finalize
+                del_resp['tenant_uuid'] = del_info['tenant_uuid']
+                del_resp['correlation_id'] = del_info['correlation_uuid']
+                # Always False: CloudWatch metrics cannot be deleted.
+                del_resp['status'] = False
+                log.info("Metric Deletion Not supported in AWS : %s",del_resp)
+                return del_resp
+            else:
+                log.error("Metric name is not supported")
+                return False
+
+        except Exception as e:
+            log.error(" Metric Deletion Not supported in AWS : " + str(e))
+#------------------------------------------------------------------------------------------------------------------------------------
+
+    def listMetrics(self,cloudwatch_conn ,list_info):
+
+        '''Returns the list of available AWS/EC2 metrics on which
+        alarms have been configured and the metrics are being monitored'''
+        try:
+            supported = self.check_metric(list_info['metric_name'])
+            if supported['status'] == True:
+                metrics_list = []
+                metrics_data = dict()
+                # NOTE(review): metrics_data above is never used in this method.
+
+                #To get the list of associated metrics with the alarms
+                alarms = cloudwatch_conn.describe_alarms()
+                itr = 0
+                if list_info['metric_name'] == "":
+                    # Empty metric name: list the metric of every alarm.
+                    for alarm in alarms:
+                        metrics_info = dict()
+                        # The dimension's repr contains the id in quotes; extract it.
+                        instance_id = str(alarm.dimensions['InstanceId']).split("'")[1]
+                        metrics_info['metric_name'] = str(alarm.metric)
+                        metrics_info['metric_uuid'] = 0
+                        metrics_info['metric_unit'] = str(alarm.unit)
+                        metrics_info['resource_uuid'] = instance_id
+                        metrics_list.insert(itr,metrics_info)
+                        itr += 1
+                    # NOTE(review): stray debug print (Python 2 syntax).
+                    print metrics_list
+                    return metrics_list
+                else:
+                    # Specific metric name: list only alarms on that AWS metric.
+                    for alarm in alarms:
+                        metrics_info = dict()
+                        if alarm.metric == supported['metric_name']:
+                            instance_id = str(alarm.dimensions['InstanceId']).split("'")[1]
+                            metrics_info['metric_name'] = str(alarm.metric)
+                            metrics_info['metric_uuid'] = 0
+                            metrics_info['metric_unit'] = str(alarm.unit)
+                            metrics_info['resource_uuid'] = instance_id
+                            metrics_list.insert(itr,metrics_info)
+                            itr += 1
+                    return metrics_list
+                    # NOTE(review): the log.debug below is unreachable (after return).
+                    log.debug("Metrics List : %s",metrics_list)
+            else:
+                log.error("Metric name is not supported")
+                return False
+
+        except Exception as e:
+            log.error("Error in Getting Metric List " + str(e))
+
+#------------------------------------------------------------------------------------------------------------------------------------
+
+    def check_metric(self,metric_name):
+
+        '''Check whether the metric is supported by AWS.
+
+        Maps a normalized OSM metric name to its AWS/EC2 metric name and
+        returns {'metric_name': <aws name or None>, 'status': <bool>}.'''
+        try:
+            check_resp = dict()
+            # metric_name: normalized OSM name -> native AWS/EC2 name.
+            if metric_name == 'CPU_UTILIZATION':
+                metric_name = 'CPUUtilization'
+                metric_status = True
+            elif metric_name == 'DISK_READ_OPS':
+                metric_name = 'DiskReadOps'
+                metric_status = True
+            elif metric_name == 'DISK_WRITE_OPS':
+                metric_name = 'DiskWriteOps'
+                metric_status = True
+            elif metric_name == 'DISK_READ_BYTES':
+                metric_name = 'DiskReadBytes'
+                metric_status = True
+            elif metric_name == 'DISK_WRITE_BYTES':
+                metric_name = 'DiskWriteBytes'
+                metric_status = True
+            elif metric_name == 'PACKETS_RECEIVED':
+                metric_name = 'NetworkPacketsIn'
+                metric_status = True
+            elif metric_name == 'PACKETS_SENT':
+                metric_name = 'NetworkPacketsOut'
+                metric_status = True
+            elif metric_name == "":
+                # Empty name counts as supported (listMetrics uses "" to mean
+                # "all metrics") even though a "Not Supported" line is logged.
+                metric_name = None
+                metric_status = True
+                log.info("Metric Not Supported by AWS plugin ")
+            else:
+                metric_name = None
+                metric_status = False
+                log.info("Metric Not Supported by AWS plugin ")
+            check_resp['metric_name'] = metric_name
+            #status
+            if metric_status == True:
+                check_resp['status'] = True
+            else:
+                check_resp['status'] = False
+
+            return check_resp
+
+        except Exception as e:
+            log.error("Error in Plugin Inputs %s",str(e))
+#--------------------------------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+
+
+
--- /dev/null
+##
+# Copyright 2017 xFlow Research Pvt. Ltd
+# This file is part of MON module
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: wajeeha.hamid@xflowresearch.com
+##
+
+'''
+AWS-Plugin implements all the methods of MON to interact with AWS using the BOTO client
+'''
+
+__author__ = "Wajeeha Hamid"
+__date__ = "18-September-2017"
+
+import sys
+import json
+import logging as log
+from jsmin import jsmin
+from connection import Connection
+from metric_alarms import MetricAlarm
+from metrics import Metrics
+from kafka import KafkaConsumer
+sys.path.append("../../core/message-bus")
+from producer import KafkaProducer
+
+class Plugin():
+    """Receives Alarm info from MetricAlarm and connects with the consumer/producer"""
+    def __init__ (self):
+        # Helper objects for AWS connectivity, alarm and metric handling.
+        self.conn = Connection()
+        self.metricAlarm = MetricAlarm()
+        self.metric = Metrics()
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+        self._consumer = KafkaConsumer(server['topic'], bootstrap_servers=server['server'])
+        self._consumer.subscribe(['alarm_request'])
+        self.producer = KafkaProducer('')
+#---------------------------------------------------------------------------------------------------------------------------
+    def connection(self):
+        """Connecting instances with CloudWatch"""
+        self.conn.setEnvironment()
+        # NOTE(review): self.conn is rebound from the Connection object to the
+        # dict it returns, so connection() cannot be called twice.
+        self.conn = self.conn.connection_instance()
+        self.cloudwatch_conn = self.conn['cloudwatch_connection']
+        self.ec2_conn = self.conn['ec2_connection']
+#---------------------------------------------------------------------------------------------------------------------------
+    def configure_alarm(self,alarm_info):
+        # Delegate alarm creation to MetricAlarm.
+        alarm_id = self.metricAlarm.config_alarm(self.cloudwatch_conn,alarm_info)
+        return alarm_id
+#---------------------------------------------------------------------------------------------------------------------------
+    def update_alarm_configuration(self,test):
+        # Delegate alarm update to MetricAlarm.
+        alarm_id = self.metricAlarm.update_alarm(self.cloudwatch_conn,test)
+        return alarm_id
+#---------------------------------------------------------------------------------------------------------------------------
+    def delete_alarm(self,alarm_id):
+        return self.metricAlarm.delete_Alarm(self.cloudwatch_conn,alarm_id)
+#---------------------------------------------------------------------------------------------------------------------------
+    def get_alarms_list(self,instance_id):
+        return self.metricAlarm.alarms_list(self.cloudwatch_conn,instance_id)
+#---------------------------------------------------------------------------------------------------------------------------
+    def get_ack_details(self,ack_info):
+        return self.metricAlarm.alarm_details(self.cloudwatch_conn,ack_info)
+#---------------------------------------------------------------------------------------------------------------------------
+    def get_metrics_data(self,metric_name,period,instance_id):
+        # NOTE(review): Metrics.metricsData takes (conn, data_info) elsewhere in
+        # this change; this 4-argument call looks out of date - confirm.
+        return self.metric.metricsData(self.cloudwatch_conn,metric_name,period,instance_id)
+#---------------------------------------------------------------------------------------------------------------------------
+
+    def consumer(self):
+        """Consume info from the message bus to manage alarms."""
+        try:
+            for message in self._consumer:
+                # Check the Functionality that needs to be performed: topic = 'alarms'/'metrics'/'Access_Credentials'
+                if message.topic == "alarm_request":
+                    log.info("Action required against: %s" % (message.topic))
+                    alarm_info = json.loads(message.value)
+
+                    if message.key == "create_alarm_request":
+                        if alarm_info['vim_type'] == 'AWS':
+                            alarm_inner_dict = alarm_info['alarm_create_request']
+                            metric_status = self.check_metric(alarm_inner_dict['metric_name'])
+                            if self.check_resource(alarm_inner_dict['resource_uuid']) == True and metric_status['status'] == True:
+                                log.debug ("Resource and Metrics exists")
+
+                                alarm_info['alarm_create_request']['metric_name'] = metric_status['metric_name']
+                                #Generate a valid response message, send via producer
+                                config_resp = self.configure_alarm(alarm_info) #alarm_info = message.value
+                                if config_resp == None:
+                                    log.debug("Alarm Already exists")
+                                    # NOTE(review): open(...).write(...) leaves the file
+                                    # handle unclosed, and 'file' shadows the builtin.
+                                    payload = json.dumps(config_resp)
+                                    file = open('../../core/models/create_alarm_resp.json','wb').write((payload))
+                                    self.producer.create_alarm_response(key='create_alarm_response',message=payload,topic = 'alarm_response')
+                                else:
+                                    payload = json.dumps(config_resp)
+                                    file = open('../../core/models/create_alarm_resp.json','wb').write((payload))
+
+                                    self.producer.create_alarm_response(key='create_alarm_response',message=payload,topic = 'alarm_response')
+                                    log.info("New alarm created with alarm info: %s", config_resp)
+                            else:
+                                log.error("Resource ID doesn't exists")
+                        else:
+                            log.error("Plugin inputs are incorrect")
+
+
+                    elif message.key == "acknowledge_alarm":
+                        alarm_inner_dict = alarm_info['ack_details']
+                        if alarm_info['vim_type'] == 'AWS':
+                            if self.check_resource(alarm_inner_dict['resource_uuid']) == True:
+                                alarm_info = json.loads(message.value)
+                                #Generate a valid response message, send via producer
+                                ack_details = self.get_ack_details(alarm_info)
+                                payload = json.dumps(ack_details)
+                                file = open('../../core/models/notify_alarm.json','wb').write((payload))
+                                self.producer.notify_alarm(key='notify_alarm',message=payload,topic = 'alarm_response')
+                                log.info("Acknowledge sent: %s", ack_details)
+                            else:
+                                log.error("Resource ID is Incorrect")
+                        else:
+                            log.error(" VIM type incorrect ")
+
+
+                    elif message.key == "update_alarm_request":
+                        if alarm_info['vim_type'] == 'AWS':
+                            alarm_inner_dict = alarm_info['alarm_update_request']
+                            metric_status = self.check_metric(alarm_inner_dict['metric_name'])
+
+                            if metric_status['status'] == True:
+                                log.debug ("Resource and Metrics exists")
+                                alarm_info['alarm_update_request']['metric_name'] = metric_status['metric_name']
+                                #Generate a valid response message, send via producer
+                                update_resp = self.update_alarm_configuration(alarm_info)
+                                if update_resp == None:
+                                    payload = json.dumps(update_resp)
+                                    file = open('../../core/models/update_alarm_resp.json','wb').write((payload))
+                                    self.producer.update_alarm_response(key='update_alarm_response',message=payload,topic = 'alarm_response')
+                                    log.debug("Alarm Already exists")
+                                else:
+                                    payload = json.dumps(update_resp)
+                                    file = open('../../core/models/update_alarm_resp.json','wb').write((payload))
+                                    self.producer.update_alarm_response(key='update_alarm_response',message=payload,topic = 'alarm_response')
+                                    log.info("Alarm Updated with alarm info: %s", update_resp)
+                            else:
+                                log.info ("Metric Not Supported")
+                        else:
+                            log.error(" VIM type Incorrect ")
+
+                    elif message.key == "delete_alarm_request":
+                        if alarm_info['vim_type'] == 'AWS':
+                            del_info = json.loads(message.value)
+                            #Generate a valid response message, send via producer
+                            del_resp = self.delete_alarm(del_info)
+                            payload = json.dumps(del_resp)
+                            file = open('../../core/models/delete_alarm_resp.json','wb').write((payload))
+                            self.producer.delete_alarm_response(key='delete_alarm_response',message=payload,topic = 'alarm_response')
+                            log.info("Alarm Deleted with alarm info: %s", del_resp)
+                        else:
+                            log.error(" VIM type Incorrect ")
+
+                    elif message.key == "alarm_list_request":
+                        alarm_inner_dict = alarm_info['alarm_list_request']
+                        if alarm_info['vim_type'] == 'AWS':
+                            # Empty resource_uuid means "list alarms for all resources".
+                            if self.check_resource(alarm_inner_dict['resource_uuid']) == True or alarm_inner_dict['resource_uuid'] == "":
+                                #Generate a valid response message, send via producer
+                                list_resp = self.get_alarms_list(alarm_info)#['alarm_names']
+                                payload = json.dumps(list_resp)
+                                file = open('../../core/models/list_alarm_resp.json','wb').write((payload))
+                                self.producer.list_alarm_response(key='list_alarm_response',message=payload,topic = 'alarm_response')
+                            else:
+                                log.error("Resource ID is Incorrect")
+                        else:
+                            log.error(" VIM type Incorrect ")
+
+                    else:
+                        log.debug("Unknown key, no action will be performed")
+
+                else:
+                    log.info("Message topic not relevant to this plugin: %s",
+                             message.topic)
+        except Exception as e:
+            log.error("Consumer exception: %s", str(e))
+#---------------------------------------------------------------------------------------------------------------------------
+    def check_resource(self,resource_uuid):
+        '''Finding Resource with the resource_uuid'''
+        try:
+            check_resp = dict()
+            instances = self.ec2_conn.get_all_instance_status()
+
+            #resource_id
+            # InstanceStatus repr looks like "InstanceStatus:<id>"; compare the id part.
+            for instance_id in instances:
+                instance_id = str(instance_id).split(':')[1]
+                if instance_id == resource_uuid:
+                    check_resp['resource_uuid'] = resource_uuid
+                    return True
+            return False
+
+        except Exception as e:
+            # NOTE(review): implicitly returns None (falsy) on error.
+            log.error("Error in Plugin Inputs %s",str(e))
+#---------------------------------------------------------------------------------------------------------------------------
+    def check_metric(self,metric_name):
+        ''' Checking whether the metric is supported by AWS '''
+        try:
+            check_resp = dict()
+
+            #metric_name
+            if metric_name == 'CPU_UTILIZATION':
+                metric_name = 'CPUUtilization'
+                metric_status = True
+            elif metric_name == 'DISK_READ_OPS':
+                metric_name = 'DiskReadOps'
+                metric_status = True
+            elif metric_name == 'DISK_WRITE_OPS':
+                metric_name = 'DiskWriteOps'
+                metric_status = True
+            elif metric_name == 'DISK_READ_BYTES':
+                metric_name = 'DiskReadBytes'
+                metric_status = True
+            elif metric_name == 'DISK_WRITE_BYTES':
+                metric_name = 'DiskWriteBytes'
+                metric_status = True
+            elif metric_name == 'PACKETS_RECEIVED':
+                metric_name = 'NetworkPacketsIn'
+                metric_status = True
+            elif metric_name == 'PACKETS_SENT':
+                metric_name = 'NetworkPacketsOut'
+                metric_status = True
+            else:
+                metric_name = None
+                metric_status = False
+            check_resp['metric_name'] = metric_name
+            #status
+            # NOTE(review): 'status' is only set when the metric is supported;
+            # callers reading check_resp['status'] hit a KeyError otherwise.
+            if metric_status == True:
+                check_resp['status'] = True
+            return check_resp
+        except Exception as e:
+            log.error("Error in Plugin Inputs %s",str(e))
+#---------------------------------------------------------------------------------------------------------------------------
+
+# Module entry point: connect to AWS, then block consuming alarm requests.
+# NOTE(review): runs on import - presumably intended only as a script; no
+# `if __name__ == '__main__'` guard. Confirm before importing elsewhere.
+obj = Plugin()
+obj.connection()
+obj.consumer()
--- /dev/null
+##
+# Copyright 2017 xFlow Research Pvt. Ltd
+# This file is part of MON module
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: wajeeha.hamid@xflowresearch.com
+##
+
+'''
+AWS-Plugin implements all the methods of MON to interact with AWS using the BOTO client
+'''
+
+__author__ = "Wajeeha Hamid"
+__date__ = "18-September-2017"
+
+import sys
+import json
+from connection import Connection
+from metric_alarms import MetricAlarm
+from metrics import Metrics
+sys.path.append("../../core/message_bus")
+from producer import KafkaProducer
+from kafka import KafkaConsumer
+import logging as log
+
+class plugin_metrics():
+    """Receives Alarm info from MetricAlarm and connects with the consumer/producer """
+    def __init__ (self):
+        self.conn = Connection()
+        self.metric = Metrics()
+
+        #server = {'server': 'localhost:9092', 'topic': 'metrics_request'}
+        #Initialize a Consumer object to consume message from the SO
+        self._consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
+        self._consumer.subscribe(['metric_request'])
+
+        #producer = KafkaProducer('create_metric_request')
+
+        self.producer = KafkaProducer('')
+#---------------------------------------------------------------------------------------------------------------------------
+    def connection(self):
+        try:
+            """Connecting instances with CloudWatch"""
+            # NOTE(review): self.conn is rebound from the Connection object to
+            # the dict it returns, so connection() cannot be called twice.
+            self.conn.setEnvironment()
+            self.conn = self.conn.connection_instance()
+            self.cloudwatch_conn = self.conn['cloudwatch_connection']
+            self.ec2_conn = self.conn['ec2_connection']
+
+        except Exception as e:
+            log.error("Failed to Connect with AWS %s: " + str(e))
+#---------------------------------------------------------------------------------------------------------------------------
+    def create_metric_request(self,metric_info):
+        '''Compatible API using normalized parameters'''
+        metric_resp = self.metric.createMetrics(self.cloudwatch_conn,metric_info)
+        return metric_resp
+#---------------------------------------------------------------------------------------------------------------------------
+    def update_metric_request(self,updated_info):
+        '''Compatible API using normalized parameters'''
+        update_resp = self.metric.updateMetrics(self.cloudwatch_conn,updated_info)
+        return update_resp
+#---------------------------------------------------------------------------------------------------------------------------
+    def delete_metric_request(self,delete_info):
+        '''Compatible API using normalized parameters'''
+        del_resp = self.metric.deleteMetrics(self.cloudwatch_conn,delete_info)
+        return del_resp
+#---------------------------------------------------------------------------------------------------------------------------
+    def list_metrics_request(self,list_info):
+        '''Compatible API using normalized parameters'''
+        list_resp = self.metric.listMetrics(self.cloudwatch_conn,list_info)
+        return list_resp
+#---------------------------------------------------------------------------------------------------------------------------
+    def read_metrics_data(self,list_info):
+        '''Compatible API using normalized parameters.
+        Read all metric data related to a specified metric'''
+        data_resp=self.metric.metricsData(self.cloudwatch_conn,list_info)
+        return data_resp
+#---------------------------------------------------------------------------------------------------------------------------
+
+    def consumer(self):
+        '''Consumer will consume the message from SO,
+        1) parse the message and trigger the methods ac
+        cording to keys and topics provided in request.
+
+        2) The response from plugin is saved in json format.
+
+        3) The producer object then calls the producer response
+           methods to send the response back to message bus
+        '''
+
+        try:
+            for message in self._consumer:
+                metric_info = json.loads(message.value)
+                # NOTE(review): stray debug print (Python 2 syntax).
+                print metric_info
+                metric_response = dict()
+
+                if metric_info['vim_type'] == 'AWS':
+                    log.debug ("VIM support : AWS")
+
+                    # Check the Functionality that needs to be performed: topic = 'alarms'/'metrics'/'Access_Credentials'
+                    if message.topic == "metric_request":
+                        log.info("Action required against: %s" % (message.topic))
+
+                        # NOTE(review): every branch below returns from inside the
+                        # for-loop, so the consumer stops after the first handled
+                        # request - presumably unintended for a daemon; confirm.
+                        # NOTE(review): open(...).write(...) also leaves each file
+                        # handle unclosed, and 'file' shadows the builtin.
+                        if message.key == "create_metric_request":
+                            if self.check_resource(metric_info['metric_create']['resource_uuid']) == True:
+                                metric_resp = self.create_metric_request(metric_info['metric_create']) #alarm_info = message.value
+                                metric_response['schema_version'] = metric_info['schema_version']
+                                metric_response['schema_type'] = "create_metric_response"
+                                metric_response['metric_create_response'] = metric_resp
+                                payload = json.dumps(metric_response)
+                                file = open('../../core/models/create_metric_resp.json','wb').write((payload))
+                                self.producer.create_metrics_resp(key='create_metric_response',message=payload,topic = 'metric_response')
+
+                                log.info("Metric configured: %s", metric_resp)
+                                return metric_response
+
+                        elif message.key == "update_metric_request":
+                            if self.check_resource(metric_info['metric_create']['resource_uuid']) == True:
+                                update_resp = self.update_metric_request(metric_info['metric_create'])
+                                metric_response['schema_version'] = metric_info['schema_version']
+                                metric_response['schema_type'] = "update_metric_response"
+                                metric_response['metric_update_response'] = update_resp
+                                payload = json.dumps(metric_response)
+                                print payload
+                                file = open('../../core/models/update_metric_resp.json','wb').write((payload))
+                                self.producer.update_metric_response(key='update_metric_response',message=payload,topic = 'metric_response')
+
+                                log.info("Metric Updates: %s",metric_response)
+                                return metric_response
+
+                        elif message.key == "delete_metric_request":
+                            if self.check_resource(metric_info['resource_uuid']) == True:
+                                del_resp=self.delete_metric_request(metric_info)
+                                payload = json.dumps(del_resp)
+                                file = open('../../core/models/delete_metric_resp.json','wb').write((payload))
+                                self.producer.delete_metric_response(key='delete_metric_response',message=payload,topic = 'metric_response')
+
+                                log.info("Metric Deletion Not supported in AWS : %s",del_resp)
+                                return del_resp
+
+                        elif message.key == "list_metric_request":
+                            if self.check_resource(metric_info['metrics_list_request']['resource_uuid']) == True:
+                                list_resp = self.list_metrics_request(metric_info['metrics_list_request'])
+                                metric_response['schema_version'] = metric_info['schema_version']
+                                metric_response['schema_type'] = "list_metric_response"
+                                metric_response['correlation_id'] = metric_info['metrics_list_request']['correlation_id']
+                                metric_response['vim_type'] = metric_info['vim_type']
+                                metric_response['metrics_list'] = list_resp
+                                payload = json.dumps(metric_response)
+                                file = open('../../core/models/list_metric_resp.json','wb').write((payload))
+                                self.producer.list_metric_response(key='list_metrics_response',message=payload,topic = 'metric_response')
+
+                                log.info("Metric List: %s",metric_response)
+                                return metric_response
+
+                        elif message.key == "read_metric_data_request":
+                            if self.check_resource(metric_info['resource_uuid']) == True:
+                                data_resp = self.read_metrics_data(metric_info)
+                                metric_response['schema_version'] = metric_info['schema_version']
+                                metric_response['schema_type'] = "read_metric_data_response"
+                                metric_response['metric_name'] = metric_info['metric_name']
+                                metric_response['metric_uuid'] = metric_info['metric_uuid']
+                                metric_response['correlation_id'] = metric_info['correlation_uuid']
+                                metric_response['resource_uuid'] = metric_info['resource_uuid']
+                                metric_response['tenant_uuid'] = metric_info['tenant_uuid']
+                                metric_response['metrics_data'] = data_resp
+                                payload = json.dumps(metric_response)
+                                file = open('../../core/models/read_metric_data_resp.json','wb').write((payload))
+                                self.producer.read_metric_data_response(key='read_metric_data_response',message=payload,topic = 'metric_response')
+
+                                log.info("Metric Data Response: %s",metric_response)
+                                return metric_response
+
+                        else:
+                            log.debug("Unknown key, no action will be performed")
+                    else:
+                        log.info("Message topic not relevant to this plugin: %s",
+                                 message.topic)
+                else:
+                    print "Bad VIM Request"
+        except Exception as e:
+            log.error("Consumer exception: %s", str(e))
+
+#---------------------------------------------------------------------------------------------------------------------------
+    def check_resource(self,resource_uuid):
+
+        '''Checking the resource_uuid is present in EC2 instances'''
+        try:
+            check_resp = dict()
+            instances = self.ec2_conn.get_all_instance_status()
+            status_resource = False
+
+            #resource_id
+            # NOTE(review): a later non-matching instance overwrites a previous
+            # match with False - presumably a 'break' was intended after the
+            # match; only a match on the LAST instance returns True. Confirm.
+            for instance_id in instances:
+                instance_id = str(instance_id).split(':')[1]
+                if instance_id == resource_uuid:
+                    check_resp['resource_uuid'] = resource_uuid
+                    status_resource = True
+                else:
+                    status_resource = False
+
+            #status
+            return status_resource
+
+        except Exception as e:
+            log.error("Error in Plugin Inputs %s",str(e))
+#---------------------------------------------------------------------------------------------------------------------------
+
+# Module entry point: connect to AWS, then block consuming metric requests.
+# NOTE(review): runs on import - no `if __name__ == '__main__'` guard; confirm
+# this module is only ever executed as a script.
+obj = plugin_metrics()
+obj.connection()
+obj.consumer()
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Aodh plugin for OSM MON."""
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Carry out alarming requests via Aodh API."""
+
+import json
+
+import logging
+
+from core.message_bus.producer import KafkaProducer
+
+from plugins.OpenStack.response import OpenStack_Response
+from plugins.OpenStack.settings import Config
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+# Supported OSM alarm names -> the metric each alarm must be defined against.
+# configure_alarm() rejects a request whose metric does not match this map.
+ALARM_NAMES = {
+    "average_memory_usage_above_threshold": "average_memory_utilization",
+    "disk_read_ops": "disk_read_ops",
+    "disk_write_ops": "disk_write_ops",
+    "disk_read_bytes": "disk_read_bytes",
+    "disk_write_bytes": "disk_write_bytes",
+    "net_packets_dropped": "packets_dropped",
+    "packets_in_above_threshold": "packets_received",
+    "packets_out_above_threshold": "packets_sent",
+    "cpu_utilization_above_threshold": "cpu_utilization"}
+
+# OSM severity values -> Aodh alarm severity values.
+SEVERITIES = {
+    "warning": "low",
+    "minor": "low",
+    "major": "moderate",
+    "critical": "critical",
+    "indeterminate": "critical"}
+
+# OSM statistic names -> Aodh aggregation-method names.
+STATISTICS = {
+    "average": "avg",
+    "minimum": "min",
+    "maximum": "max",
+    "count": "count",
+    "sum": "sum"}
+
+
+class Alarming(object):
+ """Carries out alarming requests and responses via Aodh API."""
+
+    def __init__(self):
+        """Create the OpenStack alarming instance."""
+        # Initialize configuration and notifications
+        config = Config.instance()
+        config.read_environ("aodh")
+
+        # Initialise authentication for API requests; populated lazily in
+        # alarming() from access credentials or environment variables.
+        self.auth_token = None
+        self.endpoint = None
+        self.common = None
+
+        # Use the Response class to generate valid json response messages
+        self._response = OpenStack_Response()
+
+        # Initialize a producer to send responses back to SO
+        self._producer = KafkaProducer("alarm_response")
+
+    def alarming(self, message, common, auth_token):
+        """Consume info from the message bus to manage alarms.
+
+        Dispatches on message.key (create/list/delete/update/acknowledge),
+        performs the Aodh request, and publishes a response on the
+        'alarm_response' topic. Refreshes the cached auth token and
+        alarming endpoint before dispatching."""
+        values = json.loads(message.value)
+        self.common = common
+
+        log.info("OpenStack alarm action required.")
+
+        # Generate and auth_token and endpoint for request
+        if auth_token is not None:
+            if self.auth_token != auth_token:
+                log.info("Auth_token for alarming set by access_credentials.")
+                self.auth_token = auth_token
+            else:
+                log.info("Auth_token has not been updated.")
+        else:
+            log.info("Using environment variables to set auth_token for Aodh.")
+            self.auth_token = self.common._authenticate()
+
+        if self.endpoint is None:
+            log.info("Generating a new endpoint for Aodh.")
+            self.endpoint = self.common.get_endpoint("alarming")
+
+        if message.key == "create_alarm_request":
+            # Configure/Update an alarm
+            alarm_details = values['alarm_create_request']
+
+            alarm_id, alarm_status = self.configure_alarm(
+                self.endpoint, self.auth_token, alarm_details)
+
+            # Generate a valid response message, send via producer
+            try:
+                if alarm_status is True:
+                    log.info("Alarm successfully created")
+
+                resp_message = self._response.generate_response(
+                    'create_alarm_response', status=alarm_status,
+                    alarm_id=alarm_id,
+                    cor_id=alarm_details['correlation_id'])
+                log.info("Response Message: %s", resp_message)
+                # NOTE(review): the key below is misspelled
+                # ('create_alarm_resonse') - confirm what consumers expect
+                # before correcting it.
+                self._producer.create_alarm_response(
+                    'create_alarm_resonse', resp_message,
+                    'alarm_response')
+            except Exception as exc:
+                log.warn("Response creation failed: %s", exc)
+
+        elif message.key == "list_alarm_request":
+            # Check for a specifed: alarm_name, resource_uuid, severity
+            # and generate the appropriate list
+            list_details = values['alarm_list_request']
+
+            alarm_list = self.list_alarms(
+                self.endpoint, self.auth_token, list_details)
+
+            try:
+                # Generate and send a list response back
+                resp_message = self._response.generate_response(
+                    'list_alarm_response', alarm_list=alarm_list,
+                    cor_id=list_details['correlation_id'])
+                log.info("Response Message: %s", resp_message)
+                self._producer.list_alarm_response(
+                    'list_alarm_response', resp_message,
+                    'alarm_response')
+            except Exception as exc:
+                # NOTE(review): exc is captured but not logged here.
+                log.warn("Failed to send a valid response back.")
+
+        elif message.key == "delete_alarm_request":
+            request_details = values['alarm_delete_request']
+            alarm_id = request_details['alarm_uuid']
+
+            resp_status = self.delete_alarm(
+                self.endpoint, self.auth_token, alarm_id)
+
+            # Generate and send a response message
+            try:
+                resp_message = self._response.generate_response(
+                    'delete_alarm_response', alarm_id=alarm_id,
+                    status=resp_status,
+                    cor_id=request_details['correlation_id'])
+                log.info("Response message: %s", resp_message)
+                self._producer.delete_alarm_response(
+                    'delete_alarm_response', resp_message,
+                    'alarm_response')
+            except Exception as exc:
+                log.warn("Failed to create delete reponse:%s", exc)
+
+        elif message.key == "acknowledge_alarm":
+            # Acknowledge that an alarm has been dealt with by the SO
+            alarm_id = values['ack_details']['alarm_uuid']
+
+            response = self.update_alarm_state(
+                self.endpoint, self.auth_token, alarm_id)
+
+            # Log if an alarm was reset; no response is produced for acks.
+            if response is True:
+                log.info("Acknowledged the alarm and cleared it.")
+            else:
+                log.warn("Failed to acknowledge/clear the alarm.")
+
+        elif message.key == "update_alarm_request":
+            # Update alarm configurations
+            alarm_details = values['alarm_update_request']
+
+            alarm_id, status = self.update_alarm(
+                self.endpoint, self.auth_token, alarm_details)
+
+            # Generate a response for an update request
+            try:
+                resp_message = self._response.generate_response(
+                    'update_alarm_response', alarm_id=alarm_id,
+                    cor_id=alarm_details['correlation_id'],
+                    status=status)
+                log.info("Response message: %s", resp_message)
+                self._producer.update_alarm_response(
+                    'update_alarm_response', resp_message,
+                    'alarm_response')
+            except Exception as exc:
+                log.warn("Failed to send an update response:%s", exc)
+
+        else:
+            log.debug("Unknown key, no action will be performed")
+
+        return
+
+    def configure_alarm(self, endpoint, auth_token, values):
+        """Create requested alarm in Aodh.
+
+        Validates that the requested alarm/metric pair is supported and
+        that the backing Gnocchi metric exists before POSTing the alarm.
+        Returns (alarm_id, True) on success, (None, False) otherwise.
+        """
+        url = "{}/v2/alarms/".format(endpoint)
+
+        # Check if the desired alarm is supported
+        alarm_name = values['alarm_name'].lower()
+        metric_name = values['metric_name'].lower()
+        resource_id = values['resource_uuid']
+
+        # ALARM_NAMES maps supported alarm names to their required metric
+        if alarm_name not in ALARM_NAMES.keys():
+            log.warn("This alarm is not supported, by a valid metric.")
+            return None, False
+        if ALARM_NAMES[alarm_name] != metric_name:
+            log.warn("This is not the correct metric for this alarm.")
+            return None, False
+
+        # Check for the required metric; alarms are only created against
+        # metrics that already exist in Gnocchi
+        metric_id = self.check_for_metric(auth_token, metric_name, resource_id)
+
+        try:
+            if metric_id is not None:
+                # Create the alarm if metric is available
+                payload = self.check_payload(values, metric_name, resource_id,
+                                             alarm_name)
+                new_alarm = self.common._perform_request(
+                    url, auth_token, req_type="post", payload=payload)
+                return json.loads(new_alarm.text)['alarm_id'], True
+            else:
+                log.warn("The required Gnocchi metric does not exist.")
+                return None, False
+
+        except Exception as exc:
+            log.warn("Failed to create the alarm: %s", exc)
+            return None, False
+
+    def delete_alarm(self, endpoint, auth_token, alarm_id):
+        """Delete alarm function.
+
+        Returns True when the DELETE request was issued and the alarm
+        existed; False when the alarm was not found (404) or the request
+        itself failed.
+        """
+        url = "{}/v2/alarms/%s".format(endpoint) % (alarm_id)
+
+        try:
+            result = self.common._perform_request(
+                url, auth_token, req_type="delete")
+            if str(result.status_code) == "404":
+                log.info("Alarm doesn't exist: %s", result.status_code)
+                # If status code is 404 alarm did not exist
+                return False
+            else:
+                return True
+
+        except Exception as exc:
+            log.warn("Failed to delete alarm: %s because %s.", alarm_id, exc)
+            return False
+
+    def list_alarms(self, endpoint, auth_token, list_details):
+        """Generate the requested list of alarms.
+
+        Filters the full Aodh alarm list by the mandatory resource_uuid,
+        and optionally by alarm name and/or severity. Returns a list of
+        stringified alarm dicts, or None on error.
+        """
+        url = "{}/v2/alarms/".format(endpoint)
+        a_list, name_list, sev_list, res_list = [], [], [], []
+
+        # TODO(mcgoughh): for now resource_id is a mandatory field
+        # Check that a resource id was supplied with the request
+        try:
+            resource = list_details['resource_uuid']
+        except KeyError as exc:
+            log.warn("Resource id not specified for list request: %s", exc)
+            return None
+
+        # Checking what fields are specified for a list request
+        try:
+            name = list_details['alarm_name'].lower()
+            if name not in ALARM_NAMES.keys():
+                log.warn("This alarm is not supported, won't be used!")
+                name = None
+        except KeyError as exc:
+            log.info("Alarm name isn't specified.")
+            name = None
+
+        try:
+            severity = list_details['severity'].lower()
+            sev = SEVERITIES[severity]
+        except KeyError as exc:
+            log.info("Severity is unspecified/incorrectly configured")
+            sev = None
+
+        # Perform the request to get the desired list
+        try:
+            result = self.common._perform_request(
+                url, auth_token, req_type="get")
+
+            if result is not None:
+                # Get list based on resource id
+                for alarm in json.loads(result.text):
+                    rule = alarm['gnocchi_resources_threshold_rule']
+                    if resource == rule['resource_id']:
+                        res_list.append(str(alarm))
+                if not res_list:
+                    log.info("No alarms for this resource")
+                    return a_list
+
+                # Generate specified listed if requested; set intersections
+                # combine the name/severity/resource filters
+                if name is not None and sev is not None:
+                    log.info("Return a list of %s alarms with %s severity.",
+                             name, sev)
+                    for alarm in json.loads(result.text):
+                        if name == alarm['name']:
+                            name_list.append(str(alarm))
+                    for alarm in json.loads(result.text):
+                        if sev == alarm['severity']:
+                            sev_list.append(str(alarm))
+                    name_sev_list = list(set(name_list).intersection(sev_list))
+                    a_list = list(set(name_sev_list).intersection(res_list))
+                elif name is not None:
+                    log.info("Returning a %s list of alarms.", name)
+                    for alarm in json.loads(result.text):
+                        if name == alarm['name']:
+                            name_list.append(str(alarm))
+                    a_list = list(set(name_list).intersection(res_list))
+                elif sev is not None:
+                    log.info("Returning %s severity alarm list.", sev)
+                    for alarm in json.loads(result.text):
+                        if sev == alarm['severity']:
+                            sev_list.append(str(alarm))
+                    a_list = list(set(sev_list).intersection(res_list))
+                else:
+                    log.info("Returning an entire list of alarms.")
+                    a_list = res_list
+            else:
+                log.info("There are no alarms!")
+
+        except Exception as exc:
+            log.info("Failed to generate required list: %s", exc)
+            return None
+
+        return a_list
+
+    def update_alarm_state(self, endpoint, auth_token, alarm_id):
+        """Set the state of an alarm to ok when ack message is received.
+
+        Returns True when the PUT succeeded, False otherwise.
+        """
+        url = "{}/v2/alarms/%s/state".format(endpoint) % alarm_id
+        # Aodh expects the new state as a JSON-encoded string body
+        payload = json.dumps("ok")
+
+        try:
+            self.common._perform_request(
+                url, auth_token, req_type="put", payload=payload)
+            return True
+        except Exception as exc:
+            log.warn("Unable to update alarm state: %s", exc)
+            return False
+
+ def update_alarm(self, endpoint, auth_token, values):
+ """Get alarm name for an alarm configuration update."""
+ # Get already existing alarm details
+ url = "{}/v2/alarms/%s".format(endpoint) % values['alarm_uuid']
+
+ # Gets current configurations about the alarm
+ try:
+ result = self.common._perform_request(
+ url, auth_token, req_type="get")
+ alarm_name = json.loads(result.text)['name']
+ rule = json.loads(result.text)['gnocchi_resources_threshold_rule']
+ alarm_state = json.loads(result.text)['state']
+ resource_id = rule['resource_id']
+ metric_name = rule['metric']
+ except Exception as exc:
+ log.warn("Failed to retreive existing alarm info: %s.\
+ Can only update OSM alarms.", exc)
+ return None, False
+
+ # Generates and check payload configuration for alarm update
+ payload = self.check_payload(values, metric_name, resource_id,
+ alarm_name, alarm_state=alarm_state)
+
+ # Updates the alarm configurations with the valid payload
+ if payload is not None:
+ try:
+ update_alarm = self.common._perform_request(
+ url, auth_token, req_type="put", payload=payload)
+
+ return json.loads(update_alarm.text)['alarm_id'], True
+ except Exception as exc:
+ log.warn("Alarm update could not be performed: %s", exc)
+ return None, False
+ return None, False
+
+    def check_payload(self, values, metric_name, resource_id,
+                      alarm_name, alarm_state=None):
+        """Check that the payload is configuration for update/create alarm.
+
+        Builds the JSON payload for a gnocchi_resources_threshold alarm.
+        Returns the payload string, or None if a required key is missing
+        or a severity/statistic value is not in the supported maps.
+        """
+        try:
+            # Check state and severity; an "indeterminate" severity forces
+            # the alarm into the "insufficient data" state
+            severity = values['severity'].lower()
+            if severity == "indeterminate":
+                alarm_state = "insufficient data"
+            if alarm_state is None:
+                alarm_state = "ok"
+
+            statistic = values['statistic'].lower()
+            # Try to configure the payload for the update/create request
+            # Can only update: threshold, operation, statistic and
+            # the severity of the alarm
+            rule = {'threshold': values['threshold_value'],
+                    'comparison_operator': values['operation'].lower(),
+                    'metric': metric_name,
+                    'resource_id': resource_id,
+                    'resource_type': 'generic',
+                    'aggregation_method': STATISTICS[statistic]}
+            payload = json.dumps({'state': alarm_state,
+                                  'name': alarm_name,
+                                  'severity': SEVERITIES[severity],
+                                  'type': 'gnocchi_resources_threshold',
+                                  'gnocchi_resources_threshold_rule': rule, })
+            return payload
+        except KeyError as exc:
+            log.warn("Alarm is not configured correctly: %s", exc)
+            return None
+
+    def get_alarm_state(self, endpoint, auth_token, alarm_id):
+        """Get the state of the alarm.
+
+        Returns the JSON-decoded state from Aodh, or None on failure.
+        """
+        url = "{}/v2/alarms/%s/state".format(endpoint) % alarm_id
+
+        try:
+            alarm_state = self.common._perform_request(
+                url, auth_token, req_type="get")
+            return json.loads(alarm_state.text)
+        except Exception as exc:
+            log.warn("Failed to get the state of the alarm:%s", exc)
+            return None
+
+    def check_for_metric(self, auth_token, m_name, r_id):
+        """Check for the alarm metric.
+
+        Scans the Gnocchi metric list for a metric matching both the
+        metric name and resource id; returns its id, or None when not
+        found or on request failure.
+        """
+        try:
+            endpoint = self.common.get_endpoint("metric")
+
+            url = "{}/v1/metric/".format(endpoint)
+            metric_list = self.common._perform_request(
+                url, auth_token, req_type="get")
+
+            for metric in json.loads(metric_list.text):
+                name = metric['name']
+                resource = metric['resource_id']
+                if (name == m_name and resource == r_id):
+                    metric_id = metric['id']
+                    log.info("The required metric exists, an alarm will be created.")
+                    return metric_id
+        except Exception as exc:
+            # NOTE(review): if no metric matches, the function falls out of
+            # the try and implicitly returns None — confirm that is intended
+            log.info("Desired Gnocchi metric not found:%s", exc)
+            return None
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Notifier class for alarm notification response."""
+
+import json
+import logging as log
+
+try:
+ import aodhclient
+except ImportError:
+ log.warn("Failed to import the aodhclient")
+
+
+from core.message_bus.producer import KafkaProducer
+
+from plugins.OpenStack.Aodh.alarming import Alarming
+from plugins.OpenStack.response import OpenStack_Response
+from plugins.OpenStack.settings import Config
+
+__author__ = "Helena McGough"
+
+# Alarm names the notifier will forward to the SO; any other alarm
+# found in Aodh is ignored by notify() below.
+ALARM_NAMES = [
+    "average_memory_usage_above_threshold",
+    "disk_read_ops",
+    "disk_write_ops",
+    "disk_read_bytes",
+    "disk_write_bytes",
+    "net_packets_dropped",
+    "packets_in_above_threshold",
+    "packets_out_above_threshold",
+    "cpu_utilization_above_threshold"]
+
+
+def register_notifier():
+    """Run the notifier instance.
+
+    Creates a Notifier bound to the shared Config singleton, configures
+    it from the environment and enters its (blocking) notify loop.
+    """
+    config = Config.instance()
+    instance = Notifier(config=config)
+    instance.config()
+    instance.notify()
+
+
+class Notifier(object):
+    """Alarm Notification class.
+
+    Polls Aodh for alarms in the "alarm" state and pushes
+    notify_alarm responses onto the Kafka bus for the SO.
+    """
+
+    def __init__(self, config):
+        """Initialize alarm notifier."""
+        log.info("Initialize the notifier for the SO.")
+        self._config = config
+        # Response formatter, Kafka producer and Aodh helper instances
+        self._response = OpenStack_Response()
+        self._producer = KafkaProducer("alarm_response")
+        self._alarming = Alarming()
+
+    def config(self):
+        """Configure the alarm notifier."""
+        log.info("Configure the notifier instance.")
+        self._config.read_environ("aodh")
+
+    def notify(self):
+        """Send alarm notifications responses to the SO.
+
+        Runs forever, repeatedly listing alarms and notifying on any
+        supported alarm currently in the "alarm" state.
+        """
+        log.info("Checking for alarm notifications")
+        auth_token, endpoint = self._alarming.authenticate()
+
+        while(1):
+            # NOTE(review): list_alarms is defined elsewhere with a third
+            # list_details parameter — confirm this two-argument call matches
+            # the signature actually in use.
+            alarm_list = self._alarming.list_alarms(endpoint, auth_token)
+            for alarm in json.loads(alarm_list):
+                alarm_id = alarm['alarm_id']
+                alarm_name = alarm['name']
+                # Send a notification response to the SO on alarm trigger
+                if alarm_name in ALARM_NAMES:
+                    alarm_state = self._alarming.get_alarm_state(
+                        endpoint, auth_token, alarm_id)
+                    if alarm_state == "alarm":
+                        # Generate and send an alarm notification response
+                        try:
+                            a_date = alarm['state_timestamp'].replace("T", " ")
+                            rule = alarm['gnocchi_resources_threshold_rule']
+                            resp_message = self._response.generate_response(
+                                'notify_alarm', a_id=alarm_id,
+                                r_id=rule['resource_id'],
+                                sev=alarm['severity'], date=a_date,
+                                state=alarm_state, vim_type="OpenStack")
+                            self._producer.notify_alarm(
+                                'notify_alarm', resp_message, 'alarm_response')
+                        except Exception as exc:
+                            log.warn("Failed to send notify response:%s", exc)
+
+# NOTE(review): if the `import aodhclient` at the top of this module failed,
+# the except branch only logs — the name `aodhclient` is then undefined and
+# this guard raises NameError. Confirm the except should bind
+# `aodhclient = None` for this check to work as intended.
+if aodhclient:
+    register_notifier()
--- /dev/null
+#gitkeep file to keep the initial empty directory structure.
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Gnocchi plugin for OSM MON."""
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Carry out OpenStack metric requests via Gnocchi API."""
+
+import datetime
+import json
+import logging
+
+import time
+
+from core.message_bus.producer import KafkaProducer
+
+from plugins.OpenStack.response import OpenStack_Response
+from plugins.OpenStack.settings import Config
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+# Maps OSM metric names (request payload values) to the corresponding
+# Gnocchi/collectd metric names.
+METRIC_MAPPINGS = {
+    "average_memory_utilization": "memory.percent",
+    "disk_read_ops": "disk.disk_ops",
+    "disk_write_ops": "disk.disk_ops",
+    "disk_read_bytes": "disk.disk_octets",
+    "disk_write_bytes": "disk.disk_octets",
+    "packets_dropped": "interface.if_dropped",
+    "packets_received": "interface.if_packets",
+    "packets_sent": "interface.if_packets",
+    "cpu_utilization": "cpu.percent",
+}
+
+# Collection-period units expressed in milliseconds; used by
+# read_metric_data to compute the measurement window.
+PERIOD_MS = {
+    "HR": 3600000,
+    "DAY": 86400000,
+    "WEEK": 604800000,
+    "MONTH": 2629746000,
+    "YEAR": 31556952000
+}
+
+
+class Metrics(object):
+    """OpenStack metric requests performed via the Gnocchi API."""
+
+    def __init__(self):
+        """Initialize the metric actions."""
+        # Configure an instance of the OpenStack metric plugin
+        config = Config.instance()
+        config.read_environ("gnocchi")
+
+        # Initialise authentication for API requests; populated lazily
+        # on the first metric_calls() invocation
+        self.auth_token = None
+        self.endpoint = None
+        self._common = None
+
+        # Use the Response class to generate valid json response messages
+        self._response = OpenStack_Response()
+
+        # Initializer a producer to send responses back to SO
+        self._producer = KafkaProducer("metric_response")
+
+    def metric_calls(self, message, common, auth_token):
+        """Consume info from the message bus to manage metric requests.
+
+        Dispatches on message.key (create/read/delete/update/list metric
+        requests), performs the Gnocchi operation and publishes the
+        corresponding response on the metric_response topic.
+        """
+        values = json.loads(message.value)
+        self._common = common
+        log.info("OpenStack metric action required.")
+
+        # Generate and auth_token and endpoint for request; a token passed
+        # in from access_credentials takes precedence over the environment
+        if auth_token is not None:
+            if self.auth_token != auth_token:
+                log.info("Auth_token for metrics set by access_credentials.")
+                self.auth_token = auth_token
+            else:
+                log.info("Auth_token has not been updated.")
+        else:
+            log.info("Using environment variables to set Gnocchi auth_token.")
+            self.auth_token = self._common._authenticate()
+
+        # The Gnocchi endpoint is resolved once and cached on the instance
+        if self.endpoint is None:
+            log.info("Generating a new endpoint for Gnocchi.")
+            self.endpoint = self._common.get_endpoint("metric")
+
+        if message.key == "create_metric_request":
+            # Configure metric
+            metric_details = values['metric_create']
+            metric_id, resource_id, status = self.configure_metric(
+                self.endpoint, self.auth_token, metric_details)
+
+            # Generate and send a create metric response
+            try:
+                resp_message = self._response.generate_response(
+                    'create_metric_response', status=status,
+                    cor_id=values['correlation_id'],
+                    metric_id=metric_id, r_id=resource_id)
+                log.info("Response messages: %s", resp_message)
+                self._producer.create_metrics_resp(
+                    'create_metric_response', resp_message,
+                    'metric_response')
+            except Exception as exc:
+                log.warn("Failed to create response: %s", exc)
+
+        elif message.key == "read_metric_data_request":
+            # Read all metric data related to a specified metric
+            timestamps, metric_data = self.read_metric_data(
+                self.endpoint, self.auth_token, values)
+
+            # Generate and send a response message
+            try:
+                resp_message = self._response.generate_response(
+                    'read_metric_data_response',
+                    m_id=values['metric_uuid'],
+                    m_name=values['metric_name'],
+                    r_id=values['resource_uuid'],
+                    cor_id=values['correlation_id'],
+                    times=timestamps, metrics=metric_data)
+                log.info("Response message: %s", resp_message)
+                self._producer.read_metric_data_response(
+                    'read_metric_data_response', resp_message,
+                    'metric_response')
+            except Exception as exc:
+                log.warn("Failed to send read metric response:%s", exc)
+
+        elif message.key == "delete_metric_request":
+            # delete the specified metric in the request
+            metric_id = values['metric_uuid']
+            status = self.delete_metric(
+                self.endpoint, self.auth_token, metric_id)
+
+            # Generate and send a response message
+            try:
+                resp_message = self._response.generate_response(
+                    'delete_metric_response', m_id=metric_id,
+                    m_name=values['metric_name'],
+                    status=status, r_id=values['resource_uuid'],
+                    cor_id=values['correlation_id'])
+                log.info("Response message: %s", resp_message)
+                self._producer.delete_metric_response(
+                    'delete_metric_response', resp_message,
+                    'metric_response')
+            except Exception as exc:
+                log.warn("Failed to send delete response:%s", exc)
+
+        elif message.key == "update_metric_request":
+            # Gnocchi doesn't support configuration updates
+            # Log and send a response back to this effect
+            log.warn("Gnocchi doesn't support metric configuration\
+                updates.")
+            req_details = values['metric_create']
+            metric_name = req_details['metric_name']
+            resource_id = req_details['resource_uuid']
+            metric_id = self.get_metric_id(
+                self.endpoint, self.auth_token, metric_name, resource_id)
+
+            # Generate and send a response message; status=False signals
+            # the update was not performed
+            try:
+                resp_message = self._response.generate_response(
+                    'update_metric_response', status=False,
+                    cor_id=values['correlation_id'],
+                    r_id=resource_id, m_id=metric_id)
+                log.info("Response message: %s", resp_message)
+                self._producer.update_metric_response(
+                    'update_metric_response', resp_message,
+                    'metric_response')
+            except Exception as exc:
+                log.warn("Failed to send an update response:%s", exc)
+
+        elif message.key == "list_metric_request":
+            list_details = values['metrics_list_request']
+
+            metric_list = self.list_metrics(
+                self.endpoint, self.auth_token, list_details)
+
+            # Generate and send a response message
+            try:
+                resp_message = self._response.generate_response(
+                    'list_metric_response', m_list=metric_list,
+                    cor_id=list_details['correlation_id'])
+                log.info("Response message: %s", resp_message)
+                self._producer.list_metric_response(
+                    'list_metric_response', resp_message,
+                    'metric_response')
+            except Exception as exc:
+                log.warn("Failed to send a list response:%s", exc)
+
+        else:
+            log.warn("Unknown key, no action will be performed.")
+
+        return
+
+    def configure_metric(self, endpoint, auth_token, values):
+        """Create the new metric in Gnocchi.
+
+        Returns (metric_id, resource_id, status): status is True only
+        when a new metric was created (appended to an existing resource
+        or attached to a newly created one).
+        """
+        try:
+            resource_id = values['resource_uuid']
+        except KeyError:
+            log.warn("Resource is not defined correctly.")
+            return None, None, False
+
+        # Check/Normalize metric name against the supported mappings
+        metric_name, norm_name = self.get_metric_name(values)
+        if norm_name is None:
+            log.warn("This metric is not supported by this plugin.")
+            return None, resource_id, False
+
+        # Check for an existing metric for this resource
+        metric_id = self.get_metric_id(
+            endpoint, auth_token, metric_name, resource_id)
+
+        if metric_id is None:
+            # Try appending metric to existing resource
+            try:
+                base_url = "{}/v1/resource/generic/%s/metric"
+                res_url = base_url.format(endpoint) % resource_id
+                payload = {metric_name: {'archive_policy_name': 'high',
+                                         'unit': values['metric_unit']}}
+                result = self._common._perform_request(
+                    res_url, auth_token, req_type="post",
+                    payload=json.dumps(payload))
+                # Get id of newly created metric
+                for row in json.loads(result.text):
+                    if row['name'] == metric_name:
+                        metric_id = row['id']
+                log.info("Appended metric to existing resource.")
+
+                return metric_id, resource_id, True
+            except Exception as exc:
+                # Gnocchi version of resource does not exist creating a new one
+                log.info("Failed to append metric to existing resource:%s",
+                         exc)
+                try:
+                    url = "{}/v1/resource/generic".format(endpoint)
+                    metric = {'name': metric_name,
+                              'archive_policy_name': 'high',
+                              'unit': values['metric_unit'], }
+
+                    resource_payload = json.dumps({'id': resource_id,
+                                                   'metrics': {
+                                                       metric_name: metric}})
+
+                    resource = self._common._perform_request(
+                        url, auth_token, req_type="post",
+                        payload=resource_payload)
+
+                    # Return the newly created resource_id for creating alarms
+                    new_resource_id = json.loads(resource.text)['id']
+                    log.info("Created new resource for metric: %s",
+                             new_resource_id)
+
+                    metric_id = self.get_metric_id(
+                        endpoint, auth_token, metric_name, new_resource_id)
+
+                    return metric_id, new_resource_id, True
+                except Exception as exc:
+                    log.warn("Failed to create a new resource:%s", exc)
+            return None, None, False
+
+        else:
+            log.info("This metric already exists for this resource.")
+
+        return metric_id, resource_id, False
+
+    def delete_metric(self, endpoint, auth_token, metric_id):
+        """Delete metric.
+
+        Returns True on a successful DELETE, False on 404 or failure.
+        """
+        url = "{}/v1/metric/%s".format(endpoint) % (metric_id)
+
+        try:
+            result = self._common._perform_request(
+                url, auth_token, req_type="delete")
+            if str(result.status_code) == "404":
+                log.warn("Failed to delete the metric.")
+                return False
+            else:
+                return True
+        except Exception as exc:
+            log.warn("Failed to carry out delete metric request:%s", exc)
+            return False
+
+    def list_metrics(self, endpoint, auth_token, values):
+        """List all metrics.
+
+        Optionally filters by metric_name and/or resource_uuid via
+        response_list(). Returns a list (possibly empty) or None on error.
+        """
+        url = "{}/v1/metric/".format(endpoint)
+
+        # Check for a specified list
+        try:
+            # Check if the metric_name was specified for the list
+            metric_name = values['metric_name'].lower()
+            if metric_name not in METRIC_MAPPINGS.keys():
+                log.warn("This metric is not supported, won't be listed.")
+                metric_name = None
+        except KeyError as exc:
+            log.info("Metric name is not specified: %s", exc)
+            metric_name = None
+
+        try:
+            resource = values['resource_uuid']
+        except KeyError as exc:
+            log.info("Resource is not specified:%s", exc)
+            resource = None
+
+        try:
+            result = self._common._perform_request(
+                url, auth_token, req_type="get")
+            metrics = json.loads(result.text)
+
+            if metrics is not None:
+                # Format the list response according to which filters
+                # were supplied
+                if metric_name is not None and resource is not None:
+                    metric_list = self.response_list(
+                        metrics, metric_name=metric_name, resource=resource)
+                    log.info("Returning an %s resource list for %s metrics",
+                             metric_name, resource)
+                elif metric_name is not None:
+                    metric_list = self.response_list(
+                        metrics, metric_name=metric_name)
+                    log.info("Returning a list of %s metrics", metric_name)
+                elif resource is not None:
+                    metric_list = self.response_list(
+                        metrics, resource=resource)
+                    log.info("Return a list of %s resource metrics", resource)
+                else:
+                    metric_list = self.response_list(metrics)
+                    log.info("Returning a complete list of metrics")
+
+                return metric_list
+            else:
+                log.info("There are no metrics available")
+                return []
+        except Exception as exc:
+            log.warn("Failed to generate any metric list. %s", exc)
+        return None
+
+    def get_metric_id(self, endpoint, auth_token, metric_name, resource_id):
+        """Check if the desired metric already exists for the resource.
+
+        Returns the metric id from the resource's metric map, or None
+        when the resource/metric does not exist.
+        """
+        url = "{}/v1/resource/generic/%s".format(endpoint) % resource_id
+
+        try:
+            # Try return the metric id if it exists
+            result = self._common._perform_request(
+                url, auth_token, req_type="get")
+            return json.loads(result.text)['metrics'][metric_name]
+        except Exception:
+            log.info("Metric doesn't exist. No metric_id available")
+            return None
+
+    def get_metric_name(self, values):
+        """Check metric name configuration and normalize.
+
+        Returns (metric_name, normalized_name); normalized_name is None
+        when the metric is not in METRIC_MAPPINGS.
+        """
+        try:
+            # Normalize metric name
+            metric_name = values['metric_name'].lower()
+            return metric_name, METRIC_MAPPINGS[metric_name]
+        except KeyError:
+            # NOTE(review): if 'metric_name' itself is missing from values,
+            # metric_name is unbound here and both statements below raise
+            # NameError — confirm the intended handling for that case.
+            log.info("Metric name %s is invalid.", metric_name)
+            return metric_name, None
+
+    def read_metric_data(self, endpoint, auth_token, values):
+        """Collectd metric measures over a specified time period.
+
+        Computes a start/stop window from collection_unit and
+        collection_period and returns (timestamps, data) lists; both are
+        empty on failure.
+        """
+        timestamps = []
+        data = []
+        try:
+            # Try and collect measures
+            metric_id = values['metric_uuid']
+            collection_unit = values['collection_unit'].upper()
+            collection_period = values['collection_period']
+
+            # Define the start and end time based on configurations
+            stop_time = time.strftime("%Y-%m-%d") + "T" + time.strftime("%X")
+            end_time = int(round(time.time() * 1000))
+            # A YEAR request ignores collection_period and uses one full year
+            if collection_unit == 'YEAR':
+                diff = PERIOD_MS[collection_unit]
+            else:
+                diff = collection_period * PERIOD_MS[collection_unit]
+            s_time = (end_time - diff) / 1000.0
+            start_time = datetime.datetime.fromtimestamp(s_time).strftime(
+                '%Y-%m-%dT%H:%M:%S.%f')
+            base_url = "{}/v1/metric/%(0)s/measures?start=%(1)s&stop=%(2)s"
+            url = base_url.format(endpoint) % {
+                "0": metric_id, "1": start_time, "2": stop_time}
+
+            # Perform metric data request
+            metric_data = self._common._perform_request(
+                url, auth_token, req_type="get")
+
+            # Generate a list of the requested timestamps and data;
+            # each measure row is [timestamp, granularity, value]
+            for r in json.loads(metric_data.text):
+                timestamp = r[0].replace("T", " ")
+                timestamps.append(timestamp)
+                data.append(r[2])
+
+            return timestamps, data
+        except Exception as exc:
+            log.warn("Failed to gather specified measures: %s", exc)
+        return timestamps, data
+
+    def response_list(self, metric_list, metric_name=None, resource=None):
+        """Create the appropriate lists for a list response.
+
+        Builds stringified metric dicts restricted to OSM-known metrics,
+        then intersects the name/resource sub-lists according to which
+        filters were supplied.
+        """
+        resp_list, name_list, res_list = [], [], []
+
+        # Create required lists
+        for row in metric_list:
+            # Only list OSM metrics
+            if row['name'] in METRIC_MAPPINGS.keys():
+                metric = {"metric_name": row['name'],
+                          "metric_uuid": row['id'],
+                          "metric_unit": row['unit'],
+                          "resource_uuid": row['resource_id']}
+                resp_list.append(str(metric))
+            # Generate metric_name specific list
+            if metric_name is not None:
+                if row['name'] == metric_name:
+                    metric = {"metric_name": row['name'],
+                              "metric_uuid": row['id'],
+                              "metric_unit": row['unit'],
+                              "resource_uuid": row['resource_id']}
+                    name_list.append(str(metric))
+            # Generate resource specific list
+            if resource is not None:
+                if row['resource_id'] == resource:
+                    metric = {"metric_name": row['name'],
+                              "metric_uuid": row['id'],
+                              "metric_unit": row['unit'],
+                              "resource_uuid": row['resource_id']}
+                    res_list.append(str(metric))
+
+        # Join required lists
+        if metric_name is not None and resource is not None:
+            return list(set(res_list).intersection(name_list))
+        elif metric_name is not None:
+            return name_list
+        elif resource is not None:
+            # Intersection with resp_list restricts to OSM-known metrics
+            return list(set(res_list).intersection(resp_list))
+        else:
+            return resp_list
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""OpenStack plugin for OSM MON."""
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Common methods for the OpenStack plugins."""
+import json
+
+import logging
+
+from keystoneclient.v3 import client
+
+from plugins.OpenStack.settings import Config
+
+import requests
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class Common(object):
+    """Common calls for Gnocchi/Aodh plugins.
+
+    Wraps Keystone authentication, service-endpoint lookup and raw HTTP
+    requests shared by the OpenStack metric/alarm plugins.
+    """
+
+    def __init__(self):
+        """Create the common instance."""
+        # Cached token and keystone client; populated by _authenticate()
+        self._auth_token = None
+        self._ks = None
+        # Credentials, optionally supplied via SO access_credentials
+        self.openstack_url = None
+        self.user = None
+        self.password = None
+        self.tenant = None
+
+    def _authenticate(self, message=None):
+        """Authenticate and/or renew the authentication token.
+
+        Credentials from a SO access_config message take precedence;
+        otherwise environment-derived Config values are used. Returns the
+        token, or None when both attempts fail. Note the cached token is
+        returned without any expiry check.
+        """
+        if self._auth_token is not None:
+            return self._auth_token
+
+        if message is not None:
+            values = json.loads(message.value)['access_config']
+            self.openstack_url = values['openstack_site']
+            self.user = values['user']
+            self.password = values['password']
+            self.tenant = values['vim_tenant_name']
+
+            try:
+                # try to authenticate with supplied access_credentials
+                self._ks = client.Client(auth_url=self.openstack_url,
+                                         username=self.user,
+                                         password=self.password,
+                                         tenant_name=self.tenant)
+                self._auth_token = self._ks.auth_token
+                log.info("Authenticating with access_credentials from SO.")
+                return self._auth_token
+            except Exception as exc:
+                log.warn("Authentication failed with access_credentials: %s",
+                         exc)
+
+        else:
+            log.info("Access_credentials were not sent from SO.")
+
+        # If there are no access_credentials or they fail use env variables
+        try:
+            cfg = Config.instance()
+            self._ks = client.Client(auth_url=cfg.OS_AUTH_URL,
+                                     username=cfg.OS_USERNAME,
+                                     password=cfg.OS_PASSWORD,
+                                     tenant_name=cfg.OS_TENANT_NAME)
+            log.info("Authenticating with environment varialbles.")
+            self._auth_token = self._ks.auth_token
+        except Exception as exc:
+
+            log.warn("Authentication failed: %s", exc)
+
+            self._auth_token = None
+
+        return self._auth_token
+
+    def get_endpoint(self, service_type):
+        """Get the endpoint for Gnocchi/Aodh.
+
+        Looks up the internal URL for the given service type in the
+        Keystone service catalog (region hard-coded to RegionOne).
+        Returns None on failure.
+        """
+        try:
+            return self._ks.service_catalog.url_for(
+                service_type=service_type,
+                endpoint_type='internalURL',
+                region_name='RegionOne')
+        except Exception as exc:
+            log.warning("Failed to retreive endpoint for service due to: %s",
+                        exc)
+            return None
+
+    @classmethod
+    def _perform_request(cls, url, auth_token,
+                         req_type=None, payload=None, params=None):
+        """Perform the POST/PUT/GET/DELETE request.
+
+        req_type selects the HTTP verb (anything other than put/get/delete
+        falls through to POST). Returns the requests.Response even on an
+        HTTP error status; errors are only logged at debug level.
+        """
+        # request headers
+        headers = {'X-Auth-Token': auth_token,
+                   'Content-type': 'application/json'}
+        # perform request and return its result
+        if req_type == "put":
+            response = requests.put(
+                url, data=payload, headers=headers,
+                timeout=1)
+        elif req_type == "get":
+            response = requests.get(
+                url, params=params, headers=headers, timeout=1)
+        elif req_type == "delete":
+            response = requests.delete(
+                url, headers=headers, timeout=1)
+        else:
+            response = requests.post(
+                url, data=payload, headers=headers,
+                timeout=1)
+
+        # Raises exception if there was an error
+        try:
+            response.raise_for_status()
+        # pylint: disable=broad-except
+        except Exception:
+            # Log out the result of the request for debugging purpose
+            # NOTE(review): the '%s, %d' specifiers appear swapped —
+            # status_code is the integer and text the string; logging will
+            # report a formatting error instead of the intended message.
+            log.debug(
+                'Result: %s, %d',
+                response.status_code, response.text)
+        return response
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Generate valid responses to send back to the SO."""
+
+import json
+import logging
+log = logging.getLogger(__name__)
+
+__author__ = "Helena McGough"
+
+schema_version = "1.0"
+
+
+class OpenStack_Response(object):
+    """Generates responses for SO from OpenStack plugins.
+
+    Every builder returns a json-encoded string that follows the MON
+    response schema (schema_version "1.0"); generate_response()
+    dispatches on the request key to the matching builder method.
+    """
+
+    def __init__(self):
+        """Initialize OpenStack Response instance."""
+
+    def generate_response(self, key, **kwargs):
+        """Make call to appropriate response function.
+
+        :param key: schema_type of the message being answered.
+        :param kwargs: fields forwarded verbatim to the chosen builder.
+        :return: json string, or None for an unrecognised key.
+        """
+        if key == "list_alarm_response":
+            message = self.alarm_list_response(**kwargs)
+        elif key == "create_alarm_response":
+            message = self.create_alarm_response(**kwargs)
+        elif key == "delete_alarm_response":
+            message = self.delete_alarm_response(**kwargs)
+        elif key == "update_alarm_response":
+            message = self.update_alarm_response(**kwargs)
+        elif key == "create_metric_response":
+            message = self.metric_create_response(**kwargs)
+        elif key == "read_metric_data_response":
+            message = self.read_metric_data_response(**kwargs)
+        elif key == "delete_metric_response":
+            message = self.delete_metric_response(**kwargs)
+        elif key == "update_metric_response":
+            message = self.update_metric_response(**kwargs)
+        elif key == "list_metric_response":
+            message = self.list_metric_response(**kwargs)
+        elif key == "notify_alarm":
+            message = self.notify_alarm(**kwargs)
+        else:
+            log.warn("Failed to generate a valid response message.")
+            message = None
+
+        return message
+
+    def alarm_list_response(self, **kwargs):
+        """Generate the response for an alarm list request."""
+        alarm_list_resp = {"schema_version": schema_version,
+                           "schema_type": "list_alarm_response",
+                           "correlation_id": kwargs['cor_id'],
+                           "list_alarm_resp": kwargs['alarm_list']}
+        return json.dumps(alarm_list_resp)
+
+    def create_alarm_response(self, **kwargs):
+        """Generate a response for a create alarm request."""
+        create_alarm_resp = {"schema_version": schema_version,
+                             "schema_type": "create_alarm_response",
+                             "alarm_create_response": {
+                                 "correlation_id": kwargs['cor_id'],
+                                 "alarm_uuid": kwargs['alarm_id'],
+                                 "status": kwargs['status']}}
+        return json.dumps(create_alarm_resp)
+
+    def delete_alarm_response(self, **kwargs):
+        """Generate a response for a delete alarm request."""
+        delete_alarm_resp = {"schema_version": schema_version,
+                             "schema_type": "alarm_deletion_response",
+                             "alarm_deletion_response": {
+                                 "correlation_id": kwargs['cor_id'],
+                                 "alarm_uuid": kwargs['alarm_id'],
+                                 "status": kwargs['status']}}
+        return json.dumps(delete_alarm_resp)
+
+    def update_alarm_response(self, **kwargs):
+        """Generate a response for an update alarm request."""
+        update_alarm_resp = {"schema_version": schema_version,
+                             "schema_type": "update_alarm_response",
+                             "alarm_update_response": {
+                                 "correlation_id": kwargs['cor_id'],
+                                 "alarm_uuid": kwargs['alarm_id'],
+                                 "status": kwargs['status']}}
+        return json.dumps(update_alarm_resp)
+
+    def metric_create_response(self, **kwargs):
+        """Generate a response for a create metric request."""
+        create_metric_resp = {"schema_version": schema_version,
+                              "schema_type": "create_metric_response",
+                              "correlation_id": kwargs['cor_id'],
+                              "metric_create_response": {
+                                  "metric_uuid": kwargs['metric_id'],
+                                  "resource_uuid": kwargs['r_id'],
+                                  "status": kwargs['status']}}
+        return json.dumps(create_metric_resp)
+
+    def read_metric_data_response(self, **kwargs):
+        """Generate a response for a read metric data request."""
+        read_metric_data_resp = {"schema_version": schema_version,
+                                 "schema_type": "read_metric_data_response",
+                                 "metric_name": kwargs['m_name'],
+                                 "metric_uuid": kwargs['m_id'],
+                                 "resource_uuid": kwargs['r_id'],
+                                 "correlation_id": kwargs['cor_id'],
+                                 "metrics_data": {
+                                     "time_series": kwargs['times'],
+                                     "metrics_series": kwargs['metrics']}}
+        return json.dumps(read_metric_data_resp)
+
+    def delete_metric_response(self, **kwargs):
+        """Generate a response for a delete metric request."""
+        delete_metric_resp = {"schema_version": schema_version,
+                              "schema_type": "delete_metric_response",
+                              "metric_name": kwargs['m_name'],
+                              "metric_uuid": kwargs['m_id'],
+                              "resource_uuid": kwargs['r_id'],
+                              "correlation_id": kwargs['cor_id'],
+                              "status": kwargs['status']}
+        return json.dumps(delete_metric_resp)
+
+    def update_metric_response(self, **kwargs):
+        """Generate a response for an update metric request."""
+        update_metric_resp = {"schema_version": schema_version,
+                              "schema_type": "update_metric_response",
+                              "correlation_id": kwargs['cor_id'],
+                              "metric_update_response": {
+                                  "metric_uuid": kwargs['m_id'],
+                                  "status": kwargs['status'],
+                                  "resource_uuid": kwargs['r_id']}}
+        return json.dumps(update_metric_resp)
+
+    def list_metric_response(self, **kwargs):
+        """Generate a response for a list metric request."""
+        list_metric_resp = {"schema_version": schema_version,
+                            "schema_type": "list_metric_response",
+                            "correlation_id": kwargs['cor_id'],
+                            "metrics_list": kwargs['m_list']}
+        return json.dumps(list_metric_resp)
+
+    def notify_alarm(self, **kwargs):
+        """Generate a response to send alarm notifications."""
+        notify_alarm_resp = {"schema_version": schema_version,
+                             "schema_type": "notify_alarm",
+                             "notify_details": {
+                                 "alarm_uuid": kwargs['a_id'],
+                                 "resource_uuid": kwargs['r_id'],
+                                 "vim_type": kwargs['vim_type'],
+                                 "severity": kwargs['sev'],
+                                 "status": kwargs['state'],
+                                 "start_date": kwargs['date']}}
+        return json.dumps(notify_alarm_resp)
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Configurations for the OpenStack plugins."""
+
+import logging
+import os
+
+from collections import namedtuple
+
+from plugins.OpenStack.singleton import Singleton
+
+import six
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class BadConfigError(Exception):
+    """Configuration exception.
+
+    Raised by CfgParam.value() when a raw value cannot be coerced to
+    the declared parameter type.
+    """
+
+    pass
+
+
+class CfgParam(namedtuple('CfgParam', ['key', 'default', 'data_type'])):
+    """Configuration parameter definition.
+
+    A (key, default, data_type) triple; data_type is a callable used
+    to coerce a raw (string) value into the parameter's type.
+    """
+
+    def value(self, data):
+        """Convert a string to the parameter type.
+
+        :raises BadConfigError: when the conversion fails.
+        """
+        try:
+            return self.data_type(data)
+        except (ValueError, TypeError):
+            raise BadConfigError(
+                'Invalid value "%s" for configuration parameter "%s"' % (
+                    data, self.key))
+
+
+@Singleton
+class Config(object):
+    """Plugin configuration.
+
+    Holds the OpenStack credential/endpoint settings, initialised from
+    defaults and optionally overridden from the environment through
+    read_environ().
+    """
+
+    _configuration = [
+        CfgParam('OS_AUTH_URL', None, six.text_type),
+        CfgParam('OS_IDENTITY_API_VERSION', "3", six.text_type),
+        CfgParam('OS_USERNAME', None, six.text_type),
+        CfgParam('OS_PASSWORD', "password", six.text_type),
+        CfgParam('OS_TENANT_NAME', "service", six.text_type),
+    ]
+
+    _config_dict = {cfg.key: cfg for cfg in _configuration}
+    _config_keys = _config_dict.keys()
+
+    def __init__(self):
+        """Set the default values."""
+        for cfg in self._configuration:
+            setattr(self, cfg.key, cfg.default)
+
+    def read_environ(self, service):
+        """Check the appropriate environment variables and update defaults.
+
+        :param service: service name, used as OS_USERNAME (the default
+            username for an OpenStack service account is its name).
+
+        Bug fix: the previous version returned from inside the loop as
+        soon as it reached any non-special key, so -- depending on the
+        arbitrary dict key order -- some environment variables were
+        never read.  Now every key is processed before returning.
+        """
+        try:
+            for key in self._config_keys:
+                if (key == "OS_IDENTITY_API_VERSION" or key == "OS_PASSWORD"):
+                    setattr(self, key, str(os.environ[key]))
+                elif (key == "OS_AUTH_URL"):
+                    # The keystone v3 endpoint is derived from the base url.
+                    setattr(self, key, str(os.environ[key]) + "/v3")
+                elif (key == "OS_USERNAME"):
+                    # Default username for a service is it's name
+                    setattr(self, 'OS_USERNAME', service)
+                # OS_TENANT_NAME keeps its configured default.
+            log.info("Configuration complete!")
+        except KeyError as exc:
+            # A required environment variable is missing; keep whatever
+            # was configured so far and warn the operator.
+            log.warn("Failed to configure plugin: %s", exc)
+            log.warn("Try re-authenticating your OpenStack deployment.")
+        return
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Simple singleton class."""
+
+from __future__ import unicode_literals
+
+__author__ = "Helena McGough"
+
+
+class Singleton(object):
+    """Simple singleton class (used as a class decorator).
+
+    Wraps a class so that instance() always returns the same, lazily
+    created object.
+    """
+
+    def __init__(self, decorated):
+        """Initialize singleton instance.
+
+        :param decorated: the class being wrapped.
+        """
+        self._decorated = decorated
+
+    def instance(self):
+        """Return singleton instance, creating it on first access."""
+        try:
+            return self._instance
+        except AttributeError:
+            # First call: instantiate the decorated class and cache it.
+            self._instance = self._decorated()
+            return self._instance
--- /dev/null
+# -*- coding: utf-8 -*-
+
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Plugins for OSM MON."""
--- /dev/null
+#gitkeep file to keep the initial empty directory structure.
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: osslegalrouting@vmware.com
+##
+
+"""
+vROPs Kafka Consumer that consumes the request messages
+"""
+
+
+from kafka import KafkaConsumer
+from kafka.errors import KafkaError
+import logging as log
+
+class vROP_KafkaConsumer(object):
+    """
+    Kafka Consumer for vROPs
+    """
+
+    def __init__(self, topics=None, broker_uri=None):
+        """
+        Method to initialize KafkaConsumer
+        Args:
+            broker_uri - hostname:port uri of Kafka broker
+            topics - list of topics to subscribe
+        Returns:
+            None
+        Raises:
+            Exception: when the consumer cannot be created/subscribed.
+        """
+        # Avoid the mutable default argument ([]): a list default would
+        # be shared by every instance created without 'topics'.
+        if topics is None:
+            topics = []
+
+        if broker_uri is None:
+            self.broker = '0.0.0.0:9092'
+        else:
+            self.broker = broker_uri
+
+        self.topic = topics
+        print ("vROPs Consumer started, Broker URI: {}".format(self.broker))
+        print ("Subscribed Topics {}".format(self.topic))
+        try:
+            self.vrops_consumer = KafkaConsumer(bootstrap_servers=self.broker)
+            self.vrops_consumer.subscribe(self.topic)
+        except Exception as exp:
+            msg = "fail to create consumer for topic {} with broker {} Error : {}"\
+                .format(self.topic, self.broker, exp)
+            log.error(msg)
+            raise Exception(msg)
+
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: osslegalrouting@vmware.com
+##
+
+"""
+Monitoring metrics & creating Alarm definitions in vROPs
+"""
+
+import requests
+import logging
+from pyvcloud.vcloudair import VCA
+from xml.etree import ElementTree as XmlElementTree
+import traceback
+import time
+import json
+from OpenSSL.crypto import load_certificate, FILETYPE_PEM
+import os
+import datetime
+from socket import getfqdn
+
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+# Map MANO threshold operators to their vROPs equivalents.
+OPERATION_MAPPING = {'GE':'GT_EQ', 'LE':'LT_EQ', 'GT':'GT', 'LT':'LT', 'EQ':'EQ'}
+# Map MANO severities to vROPs severities (MINOR has no direct vROPs match).
+severity_mano2vrops = {'WARNING':'WARNING', 'MINOR':'WARNING', 'MAJOR':"IMMEDIATE",\
+                    'CRITICAL':'CRITICAL', 'INDETERMINATE':'UNKNOWN'}
+# Metric collection periods expressed in milliseconds.
+PERIOD_MSEC = {'HR':3600000,'DAY':86400000,'WEEK':604800000,'MONTH':2678400000,'YEAR':31536000000}
+
+#To Do - Add actual webhook url & certificate
+#SSL_CERTIFICATE_FILE_NAME = 'vROPs_Webservice/SSL_certificate/www.vrops_webservice.com.cert'
+#webhook_url = "https://mano-dev-1:8080/notify/" #for testing
+# Webhook receiver for vROPs alarm notifications, served on this host.
+webhook_url = "https://" + getfqdn() + ":8080/notify/"
+SSL_CERTIFICATE_FILE_NAME = ('vROPs_Webservice/SSL_certificate/' + getfqdn() + ".cert")
+#SSL_CERTIFICATE_FILE_NAME = 'vROPs_Webservice/SSL_certificate/10.172.137.214.cert' #for testing
+
+# Plugin-local configuration file holding per-alarm default parameters.
+MODULE_DIR = os.path.dirname(__file__)
+CONFIG_FILE_NAME = 'vrops_config.xml'
+CONFIG_FILE_PATH = os.path.join(MODULE_DIR, CONFIG_FILE_NAME)
+SSL_CERTIFICATE_FILE_PATH = os.path.join(MODULE_DIR, SSL_CERTIFICATE_FILE_NAME)
+
+class MonPlugin():
+ """MON Plugin class for vROPs telemetry plugin
+ """
+    def __init__(self):
+        """Constructor of MON plugin
+        Params:
+            'access_config': dictionary with VIM access information based on VIM type.
+            This contains a consolidate version of VIM & monitoring tool config at creation and
+            particular VIM config at their attachment.
+            For VIM type: 'vmware',
+            access_config - {'vrops_site':<>, 'vrops_user':<>, 'vrops_password':<>,
+                            'vcloud-site':<>,'admin_username':<>,'admin_password':<>,
+                            'nsx_manager':<>,'nsx_user':<>,'nsx_password':<>,
+                            'vcenter_ip':<>,'vcenter_port':<>,'vcenter_user':<>,'vcenter_password':<>,
+                            'vim_tenant_name':<>,'orgname':<>}
+
+        #To Do
+        Returns: Raise an exception if some needed parameter is missing, but it must not do any connectivity
+            check against the VIM
+        """
+        self.logger = logging.getLogger('PluginReceiver.MonPlugin')
+        self.logger.setLevel(logging.DEBUG)
+
+        # Access settings come from the plugin's own vrops_config.xml file
+        # (section 'Access_Config'), not from the SO message.
+        access_config = self.get_default_Params('Access_Config')
+        self.access_config = access_config
+        if not bool(access_config):
+            self.logger.error("Access configuration not provided in vROPs Config file")
+            raise KeyError("Access configuration not provided in vROPs Config file")
+
+        try:
+            self.vrops_site = access_config['vrops_site']
+            self.vrops_user = access_config['vrops_user']
+            self.vrops_password = access_config['vrops_password']
+            self.vcloud_site = access_config['vcloud-site']
+            self.admin_username = access_config['admin_username']
+            self.admin_password = access_config['admin_password']
+            # NOTE(review): 'tenant_id' is required here but is not listed
+            # in the access_config example above -- confirm the config
+            # file actually provides it.
+            self.tenant_id = access_config['tenant_id']
+        except KeyError as exp:
+            self.logger.error("Check Access configuration in vROPs Config file: {}".format(exp))
+            raise KeyError("Check Access configuration in vROPs Config file: {}".format(exp))
+
+
+    def configure_alarm(self, config_dict=None):
+        """Configures or creates a new alarm using the input parameters in config_dict
+        Params:
+        "alarm_name": Alarm name in string format
+        "description": Description of alarm in string format
+        "resource_uuid": Resource UUID for which alarm needs to be configured. in string format
+        "Resource type": String resource type: 'VDU' or 'host'
+        "Severity": 'WARNING', 'MINOR', 'MAJOR', 'CRITICAL'
+        "metric_name": Metric key in string format
+        "operation": One of ('GE', 'LE', 'GT', 'LT', 'EQ')
+        "threshold_value": Defines the threshold (up to 2 fraction digits) that,
+            if crossed, will trigger the alarm.
+        "unit": Unit of measurement in string format
+        "statistic": AVERAGE, MINIMUM, MAXIMUM, COUNT, SUM
+
+        Default parameters for each alarm are read from the plugin specific config file.
+        Dict of default parameters is as follows:
+        default_params keys = {'cancel_cycles','wait_cycles','resource_kind','adapter_kind',
+                            'alarm_type','alarm_subType',impact}
+
+        Returns the UUID of created alarm or None
+        """
+        # Avoid a shared mutable default argument; an omitted config_dict
+        # behaves exactly like the previous '= {}' default.
+        if config_dict is None:
+            config_dict = {}
+        alarm_def = None
+        #1) get alarm & metrics parameters from plugin specific file
+        def_a_params = self.get_default_Params(config_dict['alarm_name'])
+        if not def_a_params:
+            self.logger.warn("Alarm not supported: {}".format(config_dict['alarm_name']))
+            return None
+        metric_key_params = self.get_default_Params(config_dict['metric_name'])
+        if not metric_key_params:
+            self.logger.warn("Metric not supported: {}".format(config_dict['metric_name']))
+            return None
+        #2) create symptom definition
+        vrops_alarm_name = def_a_params['vrops_alarm']+ '-' + config_dict['resource_uuid']
+        # Cycle counts are scaled to vROPs' 300s evaluation interval.
+        symptom_params ={'cancel_cycles': (def_a_params['cancel_period']/300)*def_a_params['cancel_cycles'],
+                        'wait_cycles': (def_a_params['period']/300)*def_a_params['evaluation'],
+                        'resource_kind_key': def_a_params['resource_kind'],
+                        'adapter_kind_key': def_a_params['adapter_kind'],
+                        'symptom_name':vrops_alarm_name,
+                        'severity': severity_mano2vrops[config_dict['severity']],
+                        'metric_key':metric_key_params['metric_key'],
+                        'operation':OPERATION_MAPPING[config_dict['operation']],
+                        'threshold_value':config_dict['threshold_value']}
+        symptom_uuid = self.create_symptom(symptom_params)
+        if symptom_uuid is not None:
+            self.logger.info("Symptom defined: {} with ID: {}".format(symptom_params['symptom_name'],symptom_uuid))
+        else:
+            self.logger.warn("Failed to create Symptom: {}".format(symptom_params['symptom_name']))
+            return None
+        #3) create alert definition
+        #To Do - Get type & subtypes for all 5 alarms
+        alarm_params = {'name':vrops_alarm_name,
+                        'description':config_dict['description']\
+                        if config_dict['description'] is not None else config_dict['alarm_name'],
+                        'adapterKindKey':def_a_params['adapter_kind'],
+                        'resourceKindKey':def_a_params['resource_kind'],
+                        'waitCycles':1, 'cancelCycles':1,
+                        'type':def_a_params['alarm_type'], 'subType':def_a_params['alarm_subType'],
+                        'severity':severity_mano2vrops[config_dict['severity']],
+                        'symptomDefinitionId':symptom_uuid,
+                        'impact':def_a_params['impact']}
+
+        alarm_def = self.create_alarm_definition(alarm_params)
+        if alarm_def is None:
+            self.logger.warn("Failed to create Alert: {}".format(alarm_params['name']))
+            return None
+
+        self.logger.info("Alarm defined: {} with ID: {}".format(alarm_params['name'],alarm_def))
+
+        #4) Find vm_moref_id from vApp uuid in vCD
+        vm_moref_id = self.get_vm_moref_id(config_dict['resource_uuid'])
+        if vm_moref_id is None:
+            self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(config_dict['resource_uuid']))
+            return None
+
+        #5) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
+        resource_id = self.get_vm_resource_id(vm_moref_id)
+        if resource_id is None:
+            self.logger.warn("Failed to find resource in vROPs: {}".format(config_dict['resource_uuid']))
+            return None
+
+        #6) Configure alarm notification for a particular VM using it's resource_id
+        notification_id = self.create_alarm_notification_rule(vrops_alarm_name, alarm_def, resource_id)
+        if notification_id is None:
+            return None
+        else:
+            alarm_def_uuid = alarm_def.split('-', 1)[1]
+            self.logger.info("Alarm defination created with notification: {} with ID: {}"\
+                    .format(alarm_params['name'],alarm_def_uuid))
+            #Return alarm defination UUID by removing 'AlertDefinition' from UUID
+            return (alarm_def_uuid)
+
+    def get_default_Params(self, metric_alarm_name):
+        """
+        Read the default config parameters from plugin specific file stored with plugin file.
+        Params:
+            metric_alarm_name: Name of the alarm, whose config params to be read from the config file.
+        Returns:
+            a_params: dict of default parameters ({} when the alarm/metric
+            tag is not present in the config file).
+        Raises:
+            IOError: when the config file cannot be read.
+        """
+        a_params = {}
+        try:
+            source = open(CONFIG_FILE_PATH, 'r')
+        except IOError as exp:
+            msg = ("Could not read Config file: {}, \nException: {}"\
+                .format(CONFIG_FILE_PATH, exp))
+            self.logger.error(msg)
+            raise IOError(msg)
+
+        # 'with' guarantees the file is closed even when the XML parsing
+        # raises (the previous version leaked the handle in that case).
+        with source:
+            tree = XmlElementTree.parse(source)
+            alarms = tree.getroot()
+            for alarm in alarms:
+                if alarm.tag == metric_alarm_name:
+                    for param in alarm:
+                        # Numeric parameters.
+                        if param.tag in ("period", "evaluation", "cancel_period", "alarm_type",\
+                                        "cancel_cycles", "alarm_subType"):
+                            a_params[param.tag] = int(param.text)
+                        # Boolean parameters.
+                        elif param.tag in ("enabled", "repeat"):
+                            if(param.text.lower() == "true"):
+                                a_params[param.tag] = True
+                            else:
+                                a_params[param.tag] = False
+                        # Everything else is kept as a string.
+                        else:
+                            a_params[param.tag] = param.text
+        return a_params
+
+
+    def create_symptom(self, symptom_params):
+        """Create Symptom definition for an alarm
+        Params:
+        symptom_params: Dict of parameters required for defining a symptom as follows
+            cancel_cycles
+            wait_cycles
+            resource_kind_key = "VirtualMachine"
+            adapter_kind_key = "VMWARE"
+            symptom_name = Test_Memory_Usage_TooHigh
+            severity
+            metric_key
+            operation = GT_EQ
+            threshold_value = 85
+        Returns the uuid of Symptom definition, or None on any failure
+        (HTTP status other than 201, or an unexpected exception).
+        """
+        symptom_id = None
+
+        try:
+            api_url = '/suite-api/api/symptomdefinitions'
+            headers = {'Content-Type': 'application/xml'}
+            # vROPs symptom-definition request body; all placeholders are
+            # filled from symptom_params below.
+            data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                        <ops:symptom-definition cancelCycles="{0:s}" waitCycles="{1:s}"
+                            resourceKindKey="{2:s}" adapterKindKey="{3:s}"
+                            xmlns:xs="http://www.w3.org/2001/XMLSchema"
+                            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                            xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
+                            <ops:name>{4:s}</ops:name>
+                            <ops:state severity="{5:s}">
+                                <ops:condition xsi:type="ops:htCondition">
+                                    <ops:key>{6:s}</ops:key>
+                                    <ops:operator>{7:s}</ops:operator>
+                                    <ops:value>{8:s}</ops:value>
+                                    <ops:valueType>NUMERIC</ops:valueType>
+                                    <ops:instanced>false</ops:instanced>
+                                    <ops:thresholdType>STATIC</ops:thresholdType>
+                                </ops:condition>
+                            </ops:state>
+                        </ops:symptom-definition>"""\
+                        .format(str(symptom_params['cancel_cycles']),str(symptom_params['wait_cycles']),
+                                symptom_params['resource_kind_key'], symptom_params['adapter_kind_key'],
+                                symptom_params['symptom_name'],symptom_params['severity'],
+                                symptom_params['metric_key'],symptom_params['operation'],
+                                str(symptom_params['threshold_value']))
+
+            resp = requests.post(self.vrops_site + api_url,
+                                 auth=(self.vrops_user, self.vrops_password),
+                                 headers=headers,
+                                 verify = False,
+                                 data=data)
+
+            if resp.status_code != 201:
+                self.logger.warn("Failed to create Symptom definition: {}, response {}"\
+                        .format(symptom_params['symptom_name'], resp.content))
+                return None
+
+            # The created symptom's id comes back as an attribute of the
+            # response's root element.
+            symptom_xmlroot = XmlElementTree.fromstring(resp.content)
+            if symptom_xmlroot is not None and 'id' in symptom_xmlroot.attrib:
+                symptom_id = symptom_xmlroot.attrib['id']
+
+            return symptom_id
+
+        except Exception as exp:
+            # Falls through and implicitly returns None after logging.
+            self.logger.warn("Error creating symptom definition : {}\n{}"\
+                    .format(exp, traceback.format_exc()))
+
+
+    def create_alarm_definition(self, alarm_params):
+        """
+        Create an alarm definition in vROPs
+        Params:
+            'name': Alarm Name,
+            'description':Alarm description,
+            'adapterKindKey': Adapter type in vROPs "VMWARE",
+            'resourceKindKey':Resource type in vROPs "VirtualMachine",
+            'waitCycles': No of wait cycles,
+            'cancelCycles': No of cancel cycles,
+            'type': Alarm type,
+            'subType': Alarm subtype,
+            'severity': Severity in vROPs "CRITICAL",
+            'symptomDefinitionId':symptom Definition uuid,
+            'impact': impact 'risk'
+        Returns:
+            'alarm_uuid': returns alarm uuid, or None on failure
+        """
+
+        alarm_uuid = None
+
+        try:
+            api_url = '/suite-api/api/alertdefinitions'
+            headers = {'Content-Type': 'application/xml'}
+            # NOTE(review): waitCycles/cancelCycles are hard-coded to 1 in
+            # this template; the values carried in alarm_params are not
+            # substituted -- confirm this is intentional.
+            data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                        <ops:alert-definition xmlns:xs="http://www.w3.org/2001/XMLSchema"
+                            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                            xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
+                            <ops:name>{0:s}</ops:name>
+                            <ops:description>{1:s}</ops:description>
+                            <ops:adapterKindKey>{2:s}</ops:adapterKindKey>
+                            <ops:resourceKindKey>{3:s}</ops:resourceKindKey>
+                            <ops:waitCycles>1</ops:waitCycles>
+                            <ops:cancelCycles>1</ops:cancelCycles>
+                            <ops:type>{4:s}</ops:type>
+                            <ops:subType>{5:s}</ops:subType>
+                            <ops:states>
+                                <ops:state severity="{6:s}">
+                                    <ops:symptom-set>
+                                        <ops:symptomDefinitionIds>
+                                            <ops:symptomDefinitionId>{7:s}</ops:symptomDefinitionId>
+                                        </ops:symptomDefinitionIds>
+                                        <ops:relation>SELF</ops:relation>
+                                        <ops:aggregation>ALL</ops:aggregation>
+                                        <ops:symptomSetOperator>AND</ops:symptomSetOperator>
+                                    </ops:symptom-set>
+                                    <ops:impact>
+                                        <ops:impactType>BADGE</ops:impactType>
+                                        <ops:detail>{8:s}</ops:detail>
+                                    </ops:impact>
+                                </ops:state>
+                            </ops:states>
+                        </ops:alert-definition>"""\
+                        .format(alarm_params['name'],alarm_params['description'],
+                                alarm_params['adapterKindKey'],alarm_params['resourceKindKey'],
+                                str(alarm_params['type']),str(alarm_params['subType']),
+                                alarm_params['severity'],alarm_params['symptomDefinitionId'],
+                                alarm_params['impact'])
+
+            resp = requests.post(self.vrops_site + api_url,
+                                 auth=(self.vrops_user, self.vrops_password),
+                                 headers=headers,
+                                 verify = False,
+                                 data=data)
+
+            if resp.status_code != 201:
+                self.logger.warn("Failed to create Alarm definition: {}, response {}"\
+                        .format(alarm_params['name'], resp.content))
+                return None
+
+            # The alert id lives in a namespaced <id> child; strip the
+            # namespace with the split on '}'.
+            alarm_xmlroot = XmlElementTree.fromstring(resp.content)
+            for child in alarm_xmlroot:
+                if child.tag.split("}")[1] == 'id':
+                    alarm_uuid = child.text
+
+            return alarm_uuid
+
+        except Exception as exp:
+            # Falls through and implicitly returns None after logging.
+            self.logger.warn("Error creating alarm definition : {}\n{}".format(exp, traceback.format_exc()))
+
+
+    def configure_rest_plugin(self):
+        """
+        Creates REST Plug-in for vROPs outbound alerts
+
+        Returns Plugin ID, or None on failure
+        """
+        plugin_id = None
+        plugin_name = 'MON_module_REST_Plugin'
+        plugin_id = self.check_if_plugin_configured(plugin_name)
+
+        #If REST plugin not configured, configure it
+        if plugin_id is not None:
+            return plugin_id
+        else:
+            try:
+                # 'with' ensures the certificate file handle is closed
+                # (the previous open(...).read() leaked it).
+                with open(SSL_CERTIFICATE_FILE_PATH, "rb") as cert_file:
+                    cert_file_string = cert_file.read()
+            except IOError as exp:
+                msg = ("Could not read SSL certificate file: {}".format(SSL_CERTIFICATE_FILE_PATH))
+                self.logger.error(msg)
+                raise IOError(msg)
+            cert = load_certificate(FILETYPE_PEM, cert_file_string)
+            certificate = cert.digest("sha1")
+            api_url = '/suite-api/api/alertplugins'
+            headers = {'Content-Type': 'application/xml'}
+            data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                        <ops:notification-plugin version="0" xmlns:xs="http://www.w3.org/2001/XMLSchema"
+                            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                            xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
+                            <ops:pluginTypeId>RestPlugin</ops:pluginTypeId>
+                            <ops:name>{0:s}</ops:name>
+                            <ops:configValues>
+                                <ops:configValue name="Url">{1:s}</ops:configValue>
+                                <ops:configValue name="Content-type">application/json</ops:configValue>
+                                <ops:configValue name="Certificate">{2:s}</ops:configValue>
+                                <ops:configValue name="ConnectionCount">20</ops:configValue>
+                            </ops:configValues>
+                        </ops:notification-plugin>""".format(plugin_name, webhook_url, certificate)
+
+            resp = requests.post(self.vrops_site + api_url,
+                                 auth=(self.vrops_user, self.vrops_password),
+                                 headers=headers,
+                                 verify = False,
+                                 data=data)
+
+            # '!=' instead of 'is not': identity comparison with an int
+            # literal is CPython-cache dependent and a SyntaxWarning.
+            if resp.status_code != 201:
+                self.logger.warn("Failed to create REST Plugin: {} for url: {}, \nresponse code: {},"\
+                            "\nresponse content: {}".format(plugin_name, webhook_url,\
+                            resp.status_code, resp.content))
+                return None
+
+            plugin_xmlroot = XmlElementTree.fromstring(resp.content)
+            if plugin_xmlroot is not None:
+                for child in plugin_xmlroot:
+                    if child.tag.split("}")[1] == 'pluginId':
+                        plugin_id = plugin_xmlroot.find('{http://webservice.vmware.com/vRealizeOpsMgr/1.0/}pluginId').text
+
+            if plugin_id is None:
+                self.logger.warn("Failed to get REST Plugin ID for {}, url: {}".format(plugin_name, webhook_url))
+                return None
+            else:
+                self.logger.info("Created REST Plugin: {} with ID : {} for url: {}".format(plugin_name, plugin_id, webhook_url))
+                status = self.enable_rest_plugin(plugin_id, plugin_name)
+                if status is False:
+                    self.logger.warn("Failed to enable created REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
+                    return None
+                else:
+                    self.logger.info("Enabled REST Plugin: {} for url: {}".format(plugin_name, webhook_url))
+                    return plugin_id
+
+    def check_if_plugin_configured(self, plugin_name):
+        """Check if the REST plugin is already created
+        Returns: plugin_id: if already created, None: if needs to be created
+        """
+        plugin_id = None
+        #Find the REST Plugin id details for - MON_module_REST_Plugin
+        api_url = '/suite-api/api/alertplugins'
+        headers = {'Accept': 'application/xml'}
+        namespace = {'params':"http://webservice.vmware.com/vRealizeOpsMgr/1.0/"}
+
+        resp = requests.get(self.vrops_site + api_url,
+                            auth=(self.vrops_user, self.vrops_password),
+                            verify = False, headers = headers)
+
+        # '!=' instead of 'is not': identity comparison with an int
+        # literal is CPython-cache dependent and a SyntaxWarning.
+        if resp.status_code != 200:
+            self.logger.warn("Failed to REST GET Alarm plugin details \nResponse code: {}\nResponse content: {}"\
+                    .format(resp.status_code, resp.content))
+            return None
+
+        # Look for specific plugin & parse pluginId for 'MON_module_REST_Plugin'
+        xmlroot_resp = XmlElementTree.fromstring(resp.content)
+        for notify_plugin in xmlroot_resp.findall('params:notification-plugin',namespace):
+            if notify_plugin.find('params:name',namespace) is not None and\
+                notify_plugin.find('params:pluginId',namespace) is not None:
+                if notify_plugin.find('params:name',namespace).text == plugin_name:
+                    plugin_id = notify_plugin.find('params:pluginId',namespace).text
+
+        if plugin_id is None:
+            self.logger.warn("REST plugin {} not found".format('MON_module_REST_Plugin'))
+            return None
+        else:
+            self.logger.info("Found REST Plugin: {}".format(plugin_name))
+            return plugin_id
+
+
+ def enable_rest_plugin(self, plugin_id, plugin_name):
+ """
+ Enable the REST plugin using plugin_id
+ Params: plugin_id: plugin ID string that is to be enabled
+ Returns: status (Boolean) - True for success, False for failure
+ """
+
+ if plugin_id is None or plugin_name is None:
+ self.logger.debug("enable_rest_plugin() : Plugin ID or plugin_name not provided for {} plugin"\
+ .format(plugin_name))
+ return False
+
+ try:
+ api_url = "/suite-api/api/alertplugins/{}/enable/True".format(plugin_id)
+
+ resp = requests.put(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False)
+
+ if resp.status_code is not 204:
+ self.logger.warn("Failed to enable REST plugin {}. \nResponse code {}\nResponse Content: {}"\
+ .format(plugin_name, resp.status_code, resp.content))
+ return False
+
+ self.logger.info("Enabled REST plugin {}.".format(plugin_name))
+ return True
+
+ except Exception as exp:
+ self.logger.warn("Error enabling REST plugin for {} plugin: Exception: {}\n{}"\
+ .format(plugin_name, exp, traceback.format_exc()))
+
+ def create_alarm_notification_rule(self, alarm_name, alarm_id, resource_id):
+ """
+ Create notification rule for each alarm
+ Params:
+ alarm_name
+ alarm_id
+ resource_id
+
+ Returns:
+ notification_id: notification_id or None
+ """
+ notification_name = 'notify_' + alarm_name
+ notification_id = None
+ plugin_name = 'MON_module_REST_Plugin'
+
+ #1) Find the REST Plugin id details for - MON_module_REST_Plugin
+ plugin_id = self.check_if_plugin_configured(plugin_name)
+ if plugin_id is None:
+ self.logger.warn("Failed to get REST plugin_id for : {}".format('MON_module_REST_Plugin'))
+ return None
+
+ #2) Create Alarm notification rule
+ api_url = '/suite-api/api/notifications/rules'
+ headers = {'Content-Type': 'application/xml'}
+ data = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+ <ops:notification-rule xmlns:xs="http://www.w3.org/2001/XMLSchema"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:ops="http://webservice.vmware.com/vRealizeOpsMgr/1.0/">
+ <ops:name>{0:s}</ops:name>
+ <ops:pluginId>{1:s}</ops:pluginId>
+ <ops:resourceFilter resourceId="{2:s}">
+ <ops:matchResourceIdOnly>true</ops:matchResourceIdOnly>
+ </ops:resourceFilter>
+ <ops:alertDefinitionIdFilters>
+ <ops:values>{3:s}</ops:values>
+ </ops:alertDefinitionIdFilters>
+ </ops:notification-rule>"""\
+ .format(notification_name, plugin_id, resource_id, alarm_id)
+
+ resp = requests.post(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ headers=headers,
+ verify = False,
+ data=data)
+
+ if resp.status_code is not 201:
+ self.logger.warn("Failed to create Alarm notification rule {} for {} alarm."\
+ "\nResponse code: {}\nResponse content: {}"\
+ .format(notification_name, alarm_name, resp.status_code, resp.content))
+ return None
+
+ #parse notification id from response
+ xmlroot_resp = XmlElementTree.fromstring(resp.content)
+ if xmlroot_resp is not None and 'id' in xmlroot_resp.attrib:
+ notification_id = xmlroot_resp.attrib.get('id')
+
+ self.logger.info("Created Alarm notification rule {} for {} alarm.".format(notification_name, alarm_name))
+ return notification_id
+
+ def get_vm_moref_id(self, vapp_uuid):
+ """
+ Get the moref_id of given VM
+ """
+ try:
+ if vapp_uuid:
+ vm_details = self.get_vapp_details_rest(vapp_uuid)
+ if vm_details and "vm_vcenter_info" in vm_details:
+ vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+
+ self.logger.info("Found vm_moref_id: {} for vApp UUID: {}".format(vm_moref_id, vapp_uuid))
+ return vm_moref_id
+
+ except Exception as exp:
+ self.logger.warn("Error occurred while getting VM moref ID for VM : {}\n{}"\
+ .format(exp, traceback.format_exc()))
+
+
+ def get_vapp_details_rest(self, vapp_uuid=None):
+ """
+ Method retrieve vapp detail from vCloud director
+
+ Args:
+ vapp_uuid - is vapp identifier.
+
+ Returns:
+ Returns VM MOref ID or return None
+ """
+
+ parsed_respond = {}
+ vca = None
+
+ vca = self.connect_as_admin()
+
+ if not vca:
+ self.logger.warn("connect() to vCD is failed")
+ if vapp_uuid is None:
+ return None
+
+ url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
+ get_vapp_restcall = ''.join(url_list)
+
+ if vca.vcloud_session and vca.vcloud_session.organization:
+ response = requests.get(get_vapp_restcall,
+ headers=vca.vcloud_session.get_vcloud_headers(),
+ verify=vca.verify)
+
+ if response.status_code != 200:
+ self.logger.warn("REST API call {} failed. Return status code {}"\
+ .format(get_vapp_restcall, response.content))
+ return parsed_respond
+
+ try:
+ xmlroot_respond = XmlElementTree.fromstring(response.content)
+
+ namespaces = {'vm': 'http://www.vmware.com/vcloud/v1.5',
+ "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+ "xmlns":"http://www.vmware.com/vcloud/v1.5"
+ }
+
+ # parse children section for other attrib
+ children_section = xmlroot_respond.find('vm:Children/', namespaces)
+ if children_section is not None:
+ vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+ if vCloud_extension_section is not None:
+ vm_vcenter_info = {}
+ vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+ vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+ if vmext is not None:
+ vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+ parsed_respond["vm_vcenter_info"]= vm_vcenter_info
+
+ except Exception as exp :
+ self.logger.warn("Error occurred calling rest api for getting vApp details: {}\n{}"\
+ .format(exp, traceback.format_exc()))
+
+ return parsed_respond
+
+
+    def connect_as_admin(self):
+        """ Method connect as pvdc admin user to vCloud director.
+        There are certain actions that can be done only by provider vdc admin user.
+        Organization creation / provider network creation etc.
+
+        Returns:
+            The return vca object that later can be used to connect to vcloud director as admin for provider vdc
+        """
+
+        self.logger.info("Logging in to a VCD org as admin.")
+
+        vca_admin = VCA(host=self.vcloud_site,
+                        username=self.admin_username,
+                        service_type='standalone',
+                        version='5.9',
+                        verify=False,
+                        log=False)
+        # First login with password against the System org to obtain a token
+        result = vca_admin.login(password=self.admin_password, org='System')
+        if not result:
+            self.logger.warn("Can't connect to a vCloud director as: {}".format(self.admin_username))
+        # NOTE(review): second login re-binds the session via token & org_url; it is
+        # attempted even if the first login failed — confirm that is intended
+        result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
+        if result is True:
+            self.logger.info("Successfully logged to a vcloud direct org: {} as user: {}"\
+                    .format('System', self.admin_username))
+
+        return vca_admin
+
+
+ def get_vm_resource_id(self, vm_moref_id):
+ """ Find resource ID in vROPs using vm_moref_id
+ """
+ if vm_moref_id is None:
+ return None
+
+ api_url = '/suite-api/api/resources'
+ headers = {'Accept': 'application/xml'}
+ namespace = {'params':"http://webservice.vmware.com/vRealizeOpsMgr/1.0/"}
+
+ resp = requests.get(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+
+ if resp.status_code is not 200:
+ self.logger.warn("Failed to get resource details from vROPs for {}\nResponse code:{}\nResponse Content: {}"\
+ .format(vm_moref_id, resp.status_code, resp.content))
+ return None
+
+ try:
+ xmlroot_respond = XmlElementTree.fromstring(resp.content)
+ for resource in xmlroot_respond.findall('params:resource',namespace):
+ if resource is not None:
+ resource_key = resource.find('params:resourceKey',namespace)
+ if resource_key is not None:
+ if resource_key.find('params:adapterKindKey',namespace).text == 'VMWARE' and \
+ resource_key.find('params:resourceKindKey',namespace).text == 'VirtualMachine':
+ for child in resource_key:
+ if child.tag.split('}')[1]=='resourceIdentifiers':
+ resourceIdentifiers = child
+ for r_id in resourceIdentifiers:
+ if r_id.find('params:value',namespace).text == vm_moref_id:
+ self.logger.info("Found Resource ID : {} in vROPs for {}"\
+ .format(resource.attrib['identifier'], vm_moref_id))
+ return resource.attrib['identifier']
+ except Exception as exp:
+ self.logger.warn("Error in parsing {}\n{}".format(exp, traceback.format_exc()))
+
+
+ def get_metrics_data(self, metric={}):
+ """Get an individual metric's data of a resource.
+ Params:
+ 'metric_name': Normalized name of metric (string)
+ 'resource_uuid': Resource UUID (string)
+ 'period': Time period in Period Unit for which metrics data to be collected from
+ Monitoring tool from now.
+ 'period_unit': Period measurement unit can be one of 'HR', 'DAY', 'MONTH', 'YEAR'
+
+ Return a dict that contains:
+ 'metric_name': Normalized name of metric (string)
+ 'resource_uuid': Resource UUID (string)
+ 'tenant_id': tenent id name in which the resource is present in string format
+ 'metrics_data': Dictionary containing time_series & metric_series data.
+ 'time_series': List of individual time stamp values in msec
+ 'metric_series': List of individual metrics data values
+ Raises an exception upon error or when network is not found
+ """
+ return_data = {}
+ return_data['schema_version'] = 1.0
+ return_data['schema_type'] = 'read_metric_data_response'
+ return_data['metric_name'] = metric['metric_name']
+ #To do - No metric_uuid in vROPs, thus returning '0'
+ return_data['metric_uuid'] = '0'
+ return_data['correlation_id'] = metric['correlation_id']
+ return_data['resource_uuid'] = metric['resource_uuid']
+ return_data['metrics_data'] = {'time_series':[], 'metric_series':[]}
+ #To do - Need confirmation about uuid & id
+ if 'tenant_uuid' in metric and metric['tenant_uuid'] is not None:
+ return_data['tenant_uuid'] = metric['tenant_uuid']
+ else:
+ return_data['tenant_uuid'] = None
+ return_data['unit'] = None
+ #return_data['tenant_id'] = self.tenant_id
+ #self.logger.warn("return_data: {}".format(return_data))
+
+ #1) Get metric details from plugin specific file & format it into vROPs metrics
+ metric_key_params = self.get_default_Params(metric['metric_name'])
+
+ if not metric_key_params:
+ self.logger.warn("Metric not supported: {}".format(metric['metric_name']))
+ #To Do: Return message
+ return return_data
+
+ return_data['unit'] = metric_key_params['unit']
+
+ #2) Find the resource id in vROPs based on OSM resource_uuid
+ #2.a) Find vm_moref_id from vApp uuid in vCD
+ vm_moref_id = self.get_vm_moref_id(metric['resource_uuid'])
+ if vm_moref_id is None:
+ self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(config_dict['resource_uuid']))
+ return return_data
+ #2.b) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
+ resource_id = self.get_vm_resource_id(vm_moref_id)
+ if resource_id is None:
+ self.logger.warn("Failed to find resource in vROPs: {}".format(config_dict['resource_uuid']))
+ return return_data
+
+ #3) Calculate begin & end time for period & period unit
+ end_time = int(round(time.time() * 1000))
+ if metric['collection_unit'] == 'YR':
+ time_diff = PERIOD_MSEC[metric['collection_unit']]
+ else:
+ time_diff = metric['collection_period']* PERIOD_MSEC[metric['collection_unit']]
+ begin_time = end_time - time_diff
+
+ #4) Get the metrics data
+ self.logger.info("metric_key_params['metric_key'] = {}".format(metric_key_params['metric_key']))
+ self.logger.info("end_time: {}, begin_time: {}".format(end_time, begin_time))
+
+ url_list = ['/suite-api/api/resources/', resource_id, '/stats?statKey=',\
+ metric_key_params['metric_key'], '&begin=', str(begin_time),'&end=',str(end_time)]
+ api_url = ''.join(url_list)
+ headers = {'Accept': 'application/json'}
+
+ resp = requests.get(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+
+ if resp.status_code is not 200:
+ self.logger.warn("Failed to retrive Metric data from vROPs for {}\nResponse code:{}\nResponse Content: {}"\
+ .format(metric['metric_name'], resp.status_code, resp.content))
+ return return_data
+
+ #5) Convert to required format
+ metrics_data = {}
+ json_data = json.loads(resp.content)
+ for resp_key,resp_val in json_data.iteritems():
+ if resp_key == 'values':
+ data = json_data['values'][0]
+ for data_k,data_v in data.iteritems():
+ if data_k == 'stat-list':
+ stat_list = data_v
+ for stat_list_k,stat_list_v in stat_list.iteritems():
+ for stat_keys,stat_vals in stat_list_v[0].iteritems():
+ if stat_keys == 'timestamps':
+ metrics_data['time_series'] = stat_list_v[0]['timestamps']
+ if stat_keys == 'data':
+ metrics_data['metric_series'] = stat_list_v[0]['data']
+
+ return_data['metrics_data'] = metrics_data
+
+ return return_data
+
+ def update_alarm_configuration(self, new_alarm_config):
+ """Update alarm configuration (i.e. Symptom & alarm) as per request
+ """
+ #1) Get Alarm details from it's uuid & find the symptom defination
+ alarm_details_json, alarm_details = self.get_alarm_defination_details(new_alarm_config['alarm_uuid'])
+ if alarm_details_json is None:
+ return None
+
+ try:
+ #2) Update the symptom defination
+ if alarm_details['alarm_id'] is not None and alarm_details['symptom_definition_id'] is not None:
+ symptom_defination_id = alarm_details['symptom_definition_id']
+ else:
+ self.logger.info("Symptom Defination ID not found for {}".format(new_alarm_config['alarm_uuid']))
+ return None
+
+ symptom_uuid = self.update_symptom_defination(symptom_defination_id, new_alarm_config)
+
+ #3) Update the alarm defination & Return UUID if successful update
+ if symptom_uuid is None:
+ self.logger.info("Symptom Defination details not found for {}"\
+ .format(new_alarm_config['alarm_uuid']))
+ return None
+ else:
+ alarm_uuid = self.reconfigure_alarm(alarm_details_json, new_alarm_config)
+ if alarm_uuid is None:
+ return None
+ else:
+ return alarm_uuid
+ except:
+ self.logger.error("Exception while updating alarm: {}".format(traceback.format_exc()))
+
+ def get_alarm_defination_details(self, alarm_uuid):
+ """Get alarm details based on alarm UUID
+ """
+ if alarm_uuid is None:
+ self.logger.warn("get_alarm_defination_details: Alarm UUID not provided")
+ return None, None
+
+ alarm_details = {}
+ json_data = {}
+ api_url = '/suite-api/api/alertdefinitions/AlertDefinition-'
+ headers = {'Accept': 'application/json'}
+
+ resp = requests.get(self.vrops_site + api_url + alarm_uuid,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+
+ if resp.status_code is not 200:
+ self.logger.warn("Alarm to be updated not found: {}\nResponse code:{}\nResponse Content: {}"\
+ .format(alarm_uuid, resp.status_code, resp.content))
+ return None, None
+
+ try:
+ json_data = json.loads(resp.content)
+ if json_data['id'] is not None:
+ alarm_details['alarm_id'] = json_data['id']
+ alarm_details['alarm_name'] = json_data['name']
+ alarm_details['adapter_kind'] = json_data['adapterKindKey']
+ alarm_details['resource_kind'] = json_data['resourceKindKey']
+ alarm_details['type'] = json_data['type']
+ alarm_details['sub_type'] = json_data['subType']
+ alarm_details['symptom_definition_id'] = json_data['states'][0]['base-symptom-set']['symptomDefinitionIds'][0]
+ except exception as exp:
+ self.logger.warn("Exception while retriving alarm defination details: {}".format(exp))
+ return None, None
+
+ return json_data, alarm_details
+
+
+ def update_symptom_defination(self, symptom_uuid, new_alarm_config):
+ """Update symptom defination based on new alarm input configuration
+ """
+ #1) Get symptom defination details
+ symptom_details = self.get_symptom_defination_details(symptom_uuid)
+ #print "\n\nsymptom_details: {}".format(symptom_details)
+ if symptom_details is None:
+ return None
+
+ if new_alarm_config.has_key('severity') and new_alarm_config['severity'] is not None:
+ symptom_details['state']['severity'] = severity_mano2vrops[new_alarm_config['severity']]
+ if new_alarm_config.has_key('operation') and new_alarm_config['operation'] is not None:
+ symptom_details['state']['condition']['operator'] = OPERATION_MAPPING[new_alarm_config['operation']]
+ if new_alarm_config.has_key('threshold_value') and new_alarm_config['threshold_value'] is not None:
+ symptom_details['state']['condition']['value'] = new_alarm_config['threshold_value']
+ #Find vrops metric key from metric_name, if required
+ """
+ if new_alarm_config.has_key('metric_name') and new_alarm_config['metric_name'] is not None:
+ metric_key_params = self.get_default_Params(new_alarm_config['metric_name'])
+ if not metric_key_params:
+ self.logger.warn("Metric not supported: {}".format(config_dict['metric_name']))
+ return None
+ symptom_details['state']['condition']['key'] = metric_key_params['metric_key']
+ """
+ self.logger.info("Fetched Symptom details : {}".format(symptom_details))
+
+ api_url = '/suite-api/api/symptomdefinitions'
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
+ data = json.dumps(symptom_details)
+ resp = requests.put(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ headers=headers,
+ verify = False,
+ data=data)
+
+ if resp.status_code != 200:
+ self.logger.warn("Failed to update Symptom definition: {}, response {}"\
+ .format(symptom_uuid, resp.content))
+ return None
+
+
+ if symptom_uuid is not None:
+ self.logger.info("Symptom defination updated {} for alarm: {}"\
+ .format(symptom_uuid, new_alarm_config['alarm_uuid']))
+ return symptom_uuid
+ else:
+ self.logger.warn("Failed to update Symptom Defination {} for : {}"\
+ .format(symptom_uuid, new_alarm_config['alarm_uuid']))
+ return None
+
+
+ def get_symptom_defination_details(self, symptom_uuid):
+ """Get symptom defination details
+ """
+ symptom_details = {}
+ if symptom_uuid is None:
+ self.logger.warn("get_symptom_defination_details: Symptom UUID not provided")
+ return None
+
+ api_url = '/suite-api/api/symptomdefinitions/'
+ headers = {'Accept': 'application/json'}
+
+ resp = requests.get(self.vrops_site + api_url + symptom_uuid,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+
+ if resp.status_code is not 200:
+ self.logger.warn("Symptom defination not found {} \nResponse code:{}\nResponse Content: {}"\
+ .format(symptom_uuid, resp.status_code, resp.content))
+ return None
+
+ symptom_details = json.loads(resp.content)
+ #print "New symptom Details: {}".format(symptom_details)
+ return symptom_details
+
+
+ def reconfigure_alarm(self, alarm_details_json, new_alarm_config):
+ """Reconfigure alarm defination as per input
+ """
+ if new_alarm_config.has_key('severity') and new_alarm_config['severity'] is not None:
+ alarm_details_json['states'][0]['severity'] = new_alarm_config['severity']
+ if new_alarm_config.has_key('description') and new_alarm_config['description'] is not None:
+ alarm_details_json['description'] = new_alarm_config['description']
+
+ api_url = '/suite-api/api/alertdefinitions'
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
+ data = json.dumps(alarm_details_json)
+ resp = requests.put(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ headers=headers,
+ verify = False,
+ data=data)
+
+ if resp.status_code != 200:
+ self.logger.warn("Failed to create Symptom definition: {}, response code {}, response content: {}"\
+ .format(symptom_uuid, resp.status_code, resp.content))
+ return None
+ else:
+ parsed_alarm_details = json.loads(resp.content)
+ alarm_def_uuid = parsed_alarm_details['id'].split('-', 1)[1]
+ self.logger.info("Successfully updated Alarm defination: {}".format(alarm_def_uuid))
+ return alarm_def_uuid
+
+    def delete_alarm_configuration(self, delete_alarm_req_dict):
+        """Delete complete alarm configuration.
+
+        Removes, in order: the notification rule, the alarm definition and
+        the symptom definition tied to the alarm. Aborts (returning None) at
+        the first step that fails.
+
+        Params:
+            delete_alarm_req_dict: dict containing 'alarm_uuid'
+
+        Returns:
+            the deleted alarm UUID on success, None on any failure
+        """
+        if delete_alarm_req_dict['alarm_uuid'] is None:
+            self.logger.info("delete_alarm_configuration: Alarm UUID not provided")
+            return None
+        #1) Get alarm & symptom defination details
+        alarm_details_json, alarm_details = self.get_alarm_defination_details(delete_alarm_req_dict['alarm_uuid'])
+        if alarm_details is None or alarm_details_json is None:
+            return None
+
+        #2) Delete alarm notification rule
+        rule_id = self.delete_notification_rule(alarm_details['alarm_name'])
+        if rule_id is None:
+            return None
+
+        #3) Delete alarm configuration
+        alarm_id = self.delete_alarm_defination(alarm_details['alarm_id'])
+        if alarm_id is None:
+            return None
+
+        #4) Delete alarm symptom
+        symptom_id = self.delete_symptom_definition(alarm_details['symptom_definition_id'])
+        if symptom_id is None:
+            return None
+        else:
+            self.logger.info("Completed deleting alarm configuration: {}"\
+                    .format(delete_alarm_req_dict['alarm_uuid']))
+            return delete_alarm_req_dict['alarm_uuid']
+
+ def delete_notification_rule(self, alarm_name):
+ """Deleted notification rule defined for a particular alarm
+ """
+ rule_id = self.get_notification_rule_id_by_alarm_name(alarm_name)
+ if rule_id is None:
+ return None
+ else:
+ api_url = '/suite-api/api/notifications/rules/'
+ headers = {'Accept':'application/json'}
+ resp = requests.delete(self.vrops_site + api_url + rule_id,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+ if resp.status_code is not 204:
+ self.logger.warn("Failed to delete notification rules for {}".format(alarm_name))
+ return None
+ else:
+ self.logger.info("Deleted notification rules for {}".format(alarm_name))
+ return rule_id
+
+ def get_notification_rule_id_by_alarm_name(self, alarm_name):
+ """Find created Alarm notification rule id by alarm name
+ """
+ alarm_notify_id = 'notify_' + alarm_name
+ api_url = '/suite-api/api/notifications/rules'
+ headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
+ resp = requests.get(self.vrops_site + api_url,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+
+ if resp.status_code is not 200:
+ self.logger.warn("Failed to get notification rules details for {}"\
+ .format(delete_alarm_req_dict['alarm_name']))
+ return None
+
+ notifications = json.loads(resp.content)
+ if notifications is not None and notifications.has_key('notification-rule'):
+ notifications_list = notifications['notification-rule']
+ for dict in notifications_list:
+ if dict['name'] is not None and dict['name'] == alarm_notify_id:
+ notification_id = dict['id']
+ self.logger.info("Found Notification id to be deleted: {} for {}"\
+ .format(notification_id, alarm_name))
+ return notification_id
+
+ self.logger.warn("Notification id to be deleted not found for {}"\
+ .format(notification_id, alarm_name))
+ return None
+
+ def delete_alarm_defination(self, alarm_id):
+ """Delete created Alarm defination
+ """
+ api_url = '/suite-api/api/alertdefinitions/'
+ headers = {'Accept':'application/json'}
+ resp = requests.delete(self.vrops_site + api_url + alarm_id,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+ if resp.status_code is not 204:
+ self.logger.warn("Failed to delete alarm definition {}".format(alarm_id))
+ return None
+ else:
+ self.logger.info("Deleted alarm definition {}".format(alarm_id))
+ return alarm_id
+
+ def delete_symptom_definition(self, symptom_id):
+ """Delete symptom defination
+ """
+ api_url = '/suite-api/api/symptomdefinitions/'
+ headers = {'Accept':'application/json'}
+ resp = requests.delete(self.vrops_site + api_url + symptom_id,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+ if resp.status_code is not 204:
+ self.logger.warn("Failed to delete symptom definition {}".format(symptom_id))
+ return None
+ else:
+ self.logger.info("Deleted symptom definition {}".format(symptom_id))
+ return symptom_id
+
+
+ def verify_metric_support(self, metric_info):
+ """Verify, if Metric is supported by vROPs plugin, verify metric unit & return status
+ Returns:
+ status: True if supported, False if not supported
+ """
+ status = False
+ if 'metric_name' not in metric_info:
+ self.logger.debug("Metric name not provided: {}".format(metric_info))
+ return status
+ metric_key_params = self.get_default_Params(metric_info['metric_name'])
+ if not metric_key_params:
+ self.logger.warn("Metric not supported: {}".format(metric_info['metric_name']))
+ return status
+ else:
+ #If Metric is supported, verify optional metric unit & return status
+ if 'metric_unit' in metric_info:
+ if metric_key_params.get('unit') == metric_info['metric_unit']:
+ self.logger.info("Metric is supported with unit: {}".format(metric_info['metric_name']))
+ status = True
+ else:
+ self.logger.debug("Metric supported but there is unit mismatch for: {}."\
+ "Supported unit: {}"\
+ .format(metric_info['metric_name'],metric_key_params['unit']))
+ status = True
+ return status
+
+    def get_triggered_alarms_list(self, list_alarm_input):
+        """Get list of triggered alarms on a resource based on alarm input request.
+
+        Params:
+            list_alarm_input: dict with 'resource_uuid' of the RO/SO resource
+
+        Returns:
+            list of triggered-alarm dicts (empty list when none can be resolved)
+        """
+        #TO Do - Need to add filtering of alarms based on Severity & alarm name
+
+        triggered_alarms_list = []
+        if list_alarm_input['resource_uuid'] is None:
+            return triggered_alarms_list
+
+        #1)Find vROPs resource ID using RO resource UUID
+        vrops_resource_id = self.get_vrops_resourceid_from_ro_uuid(list_alarm_input['resource_uuid'])
+        if vrops_resource_id is None:
+            return triggered_alarms_list
+
+        #2)Get triggered alarms on particular resource
+        triggered_alarms_list = self.get_triggered_alarms_on_resource(list_alarm_input['resource_uuid'], vrops_resource_id)
+        return triggered_alarms_list
+
+ def get_vrops_resourceid_from_ro_uuid(self, ro_resource_uuid):
+ """Fetch vROPs resource ID using resource UUID from RO/SO
+ """
+ #1) Find vm_moref_id from vApp uuid in vCD
+ vm_moref_id = self.get_vm_moref_id(ro_resource_uuid)
+ if vm_moref_id is None:
+ self.logger.warn("Failed to find vm morefid for vApp in vCD: {}".format(ro_resource_uuid))
+ return None
+
+ #2) Based on vm_moref_id, find VM's corresponding resource_id in vROPs to set notification
+ vrops_resource_id = self.get_vm_resource_id(vm_moref_id)
+ if vrops_resource_id is None:
+ self.logger.warn("Failed to find resource in vROPs: {}".format(ro_resource_uuid))
+ return None
+ return vrops_resource_id
+
+
+ def get_triggered_alarms_on_resource(self, ro_resource_uuid, vrops_resource_id):
+ """Get triggered alarms on particular resource & return list of dictionary of alarms
+ """
+ resource_alarms = []
+ api_url = '/suite-api/api/alerts?resourceId='
+ headers = {'Accept':'application/json'}
+ resp = requests.get(self.vrops_site + api_url + vrops_resource_id,
+ auth=(self.vrops_user, self.vrops_password),
+ verify = False, headers = headers)
+
+ if resp.status_code is not 200:
+ self.logger.warn("Failed to get notification rules details for {}"\
+ .format(delete_alarm_req_dict['alarm_name']))
+ return None
+
+ all_alerts = json.loads(resp.content)
+ if all_alerts.has_key('alerts'):
+ if not all_alerts['alerts']:
+ self.logger.info("No alarms present on resource {}".format(ro_resource_uuid))
+ return resource_alarms
+ all_alerts_list = all_alerts['alerts']
+ for alarm in all_alerts_list:
+ #self.logger.info("Triggered Alarm {}".format(alarm))
+ if alarm['alertDefinitionName'] is not None and\
+ len(alarm['alertDefinitionName'].split('-', 1)) == 2:
+ if alarm['alertDefinitionName'].split('-', 1)[1] == ro_resource_uuid:
+ alarm_instance = {}
+ alarm_instance['alarm_uuid'] = alarm['alertDefinitionId'].split('-', 1)[1]
+ alarm_instance['resource_uuid'] = ro_resource_uuid
+ alarm_instance['alarm_instance_uuid'] = alarm['alertId']
+ alarm_instance['vim_type'] = 'VMware'
+ #find severity of alarm
+ severity = None
+ for key,value in severity_mano2vrops.iteritems():
+ if value == alarm['alertLevel']:
+ severity = key
+ if severity is None:
+ severity = 'INDETERMINATE'
+ alarm_instance['severity'] = severity
+ alarm_instance['status'] = alarm['status']
+ alarm_instance['start_date'] = self.convert_date_time(alarm['startTimeUTC'])
+ alarm_instance['update_date'] = self.convert_date_time(alarm['updateTimeUTC'])
+ alarm_instance['cancel_date'] = self.convert_date_time(alarm['cancelTimeUTC'])
+ self.logger.info("Triggered Alarm on resource {}".format(alarm_instance))
+ resource_alarms.append(alarm_instance)
+ if not resource_alarms:
+ self.logger.info("No alarms present on resource {}".format(ro_resource_uuid))
+ return resource_alarms
+
+ def convert_date_time(self, date_time):
+ """Convert the input UTC time in msec to OSM date time format
+ """
+ date_time_formatted = '0000-00-00T00:00:00'
+ if date_time != 0:
+ complete_datetime = datetime.datetime.fromtimestamp(date_time/1000.0).isoformat('T')
+ date_time_formatted = complete_datetime.split('.',1)[0]
+ return date_time_formatted
+
+
--- /dev/null
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: osslegalrouting@vmware.com
+##
+
+"""
+Monitoring plugin receiver that consumes the request messages &
+responds using producer for vROPs
+"""
+
+import sys
+from mon_plugin_vrops import MonPlugin
+from kafka_consumer_vrops import vROP_KafkaConsumer
+#Core producer
+sys.path.append("../../core/message_bus")
+from producer import KafkaProducer
+#from core.message_bus.producer import KafkaProducer
+import json
+import logging
+import traceback
+import os
+from xml.etree import ElementTree as XmlElementTree
+
+req_config_params = ('vrops_site', 'vrops_user', 'vrops_password',
+ 'vcloud-site','admin_username','admin_password',
+ 'vcenter_ip','vcenter_port','vcenter_user','vcenter_password',
+ 'vim_tenant_name','orgname','tenant_id')
+MODULE_DIR = os.path.dirname(__file__)
+CONFIG_FILE_NAME = 'vrops_config.xml'
+CONFIG_FILE_PATH = os.path.join(MODULE_DIR, CONFIG_FILE_NAME)
+
+def set_logger():
+    """Set Logger.
+
+    Attaches a file handler (writing to mon_vrops_log.log in the parent
+    directory of this module) to the root logger, so that all module-level
+    loggers inherit it.
+    """
+    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
+    logger = logging.getLogger()
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    handler = logging.FileHandler(os.path.join(BASE_DIR,"mon_vrops_log.log"))
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+
+class PluginReceiver():
+ """MON Plugin receiver receiving request messages & responding using producer for vROPs
+ telemetry plugin
+ """
+    def __init__(self):
+        """Constructor of PluginReceiver.
+
+        Subscribes the Kafka consumer to the request topics and creates one
+        core producer per response topic.
+        """
+
+        topics = ['alarm_request', 'metric_request', 'access_credentials']
+
+        self.logger = logging.getLogger('PluginReceiver')
+        self.logger.setLevel(logging.DEBUG)
+
+        #To Do - Add broker uri
+        #NOTE(review): broker_uri is None here — presumably the consumer falls
+        #back to its own default broker; confirm against vROP_KafkaConsumer
+        broker_uri = None
+        #self.mon_plugin = MonPlugin()
+        self.consumer = vROP_KafkaConsumer(topics, broker_uri)
+        #Core producer
+        self.producer_alarms = KafkaProducer('alarm_response')
+        self.producer_metrics = KafkaProducer('metric_response')
+        self.producer_access_credentials = KafkaProducer('vim_access_credentials_response')
+
+
+ def consume(self):
+ """Consume the message, act on it & respond
+ """
+ try:
+ for message in self.consumer.vrops_consumer:
+ vim_type = None
+ self.logger.info("Message received:\nTopic={}:{}:{}:\nKey={}\nValue={}"\
+ .format(message.topic, message.partition, message.offset, message.key, message.value))
+ message_values = json.loads(message.value)
+ if message_values.has_key('vim_type') and message_values['vim_type'] is not None:
+ vim_type = message_values['vim_type'].lower()
+ if vim_type == 'vmware':
+ self.logger.info("Action required for: {}".format(message.topic))
+ if message.topic == 'alarm_request':
+ if message.key == "create_alarm_request":
+ config_alarm_info = json.loads(message.value)
+ alarm_uuid = self.create_alarm(config_alarm_info['alarm_create_request'])
+ self.logger.info("Alarm created with alarm uuid: {}".format(alarm_uuid))
+ #Publish message using producer
+ self.publish_create_alarm_status(alarm_uuid, config_alarm_info)
+ elif message.key == "update_alarm_request":
+ update_alarm_info = json.loads(message.value)
+ alarm_uuid = self.update_alarm(update_alarm_info['alarm_update_request'])
+ self.logger.info("Alarm defination updated : alarm uuid: {}".format(alarm_uuid))
+ #Publish message using producer
+ self.publish_update_alarm_status(alarm_uuid, update_alarm_info)
+ elif message.key == "delete_alarm_request":
+ delete_alarm_info = json.loads(message.value)
+ alarm_uuid = self.delete_alarm(delete_alarm_info['alarm_delete_request'])
+ self.logger.info("Alarm defination deleted : alarm uuid: {}".format(alarm_uuid))
+ #Publish message using producer
+ self.publish_delete_alarm_status(alarm_uuid, delete_alarm_info)
+ elif message.key == "list_alarm_request":
+ request_input = json.loads(message.value)
+ triggered_alarm_list = self.list_alarms(request_input['alarm_list_request'])
+ #Publish message using producer
+ self.publish_list_alarm_response(triggered_alarm_list, request_input)
+ elif message.topic == 'metric_request':
+ if message.key == "read_metric_data_request":
+ metric_request_info = json.loads(message.value)
+ mon_plugin_obj = MonPlugin()
+ metrics_data = mon_plugin_obj.get_metrics_data(metric_request_info)
+ self.logger.info("Collected Metrics Data: {}".format(metrics_data))
+ #Publish message using producer
+ self.publish_metrics_data_status(metrics_data)
+ elif message.key == "create_metric_request":
+ metric_info = json.loads(message.value)
+ metric_status = self.verify_metric(metric_info['metric_create'])
+ #Publish message using producer
+ self.publish_create_metric_response(metric_info, metric_status)
+ elif message.key == "update_metric_request":
+ metric_info = json.loads(message.value)
+ metric_status = self.verify_metric(metric_info['metric_create'])
+ #Publish message using producer
+ self.publish_update_metric_response(metric_info, metric_status)
+ elif message.key == "delete_metric_request":
+ metric_info = json.loads(message.value)
+ #Deleting Metric Data is not allowed. Publish status as False
+ self.logger.warn("Deleting Metric is not allowed: {}".format(metric_info['metric_name']))
+ #Publish message using producer
+ self.publish_delete_metric_response(metric_info)
+ elif message.topic == 'access_credentials':
+ if message.key == "vim_access_credentials":
+ access_info = json.loads(message.value)
+ access_update_status = self.update_access_credentials(access_info['access_config'])
+ self.publish_access_update_response(access_update_status, access_info)
+
+ except:
+ self.logger.error("Exception in receiver: {}".format(traceback.format_exc()))
+
+
+ def create_alarm(self, config_alarm_info):
+ """Create alarm using vROPs plugin
+ """
+ mon_plugin = MonPlugin()
+ plugin_uuid = mon_plugin.configure_rest_plugin()
+ alarm_uuid = mon_plugin.configure_alarm(config_alarm_info)
+ return alarm_uuid
+
+ def publish_create_alarm_status(self, alarm_uuid, config_alarm_info):
+ """Publish create alarm status using producer
+ """
+ topic = 'alarm_response'
+ msg_key = 'create_alarm_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"create_alarm_response",
+ "alarm_create_response":
+ {"correlation_id":config_alarm_info["alarm_create_request"]["correlation_id"],
+ "alarm_uuid":alarm_uuid,
+ "status": True if alarm_uuid else False
+ }
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+ def update_alarm(self, update_alarm_info):
+ """Updare already created alarm
+ """
+ mon_plugin = MonPlugin()
+ alarm_uuid = mon_plugin.update_alarm_configuration(update_alarm_info)
+ return alarm_uuid
+
+ def publish_update_alarm_status(self, alarm_uuid, update_alarm_info):
+ """Publish update alarm status requests using producer
+ """
+ topic = 'alarm_response'
+ msg_key = 'update_alarm_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"update_alarm_response",
+ "alarm_update_response":
+ {"correlation_id":update_alarm_info["alarm_update_request"]["correlation_id"],
+ "alarm_uuid":alarm_uuid,
+ "status": True if alarm_uuid else False
+ }
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+ def delete_alarm(self, delete_alarm_info):
+ """Delete alarm configuration
+ """
+ mon_plugin = MonPlugin()
+ alarm_uuid = mon_plugin.delete_alarm_configuration(delete_alarm_info)
+ return alarm_uuid
+
+ def publish_delete_alarm_status(self, alarm_uuid, delete_alarm_info):
+ """Publish update alarm status requests using producer
+ """
+ topic = 'alarm_response'
+ msg_key = 'delete_alarm_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"delete_alarm_response",
+ "alarm_deletion_response":
+ {"correlation_id":delete_alarm_info["alarm_delete_request"]["correlation_id"],
+ "alarm_uuid":alarm_uuid,
+ "status": True if alarm_uuid else False
+ }
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+
+ def publish_metrics_data_status(self, metrics_data):
+ """Publish the requested metric data using producer
+ """
+ topic = 'metric_response'
+ msg_key = 'read_metric_data_response'
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, metrics_data))
+ #Core producer
+ self.producer_metrics.publish(key=msg_key, value=json.dumps(metrics_data), topic=topic)
+
+
+ def verify_metric(self, metric_info):
+ """Verify if metric is supported or not
+ """
+ mon_plugin = MonPlugin()
+ metric_key_status = mon_plugin.verify_metric_support(metric_info)
+ return metric_key_status
+
+ def publish_create_metric_response(self, metric_info, metric_status):
+ """Publish create metric response
+ """
+ topic = 'metric_response'
+ msg_key = 'create_metric_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"create_metric_response",
+ "correlation_id":metric_info['correlation_id'],
+ "metric_create_response":
+ {
+ "metric_uuid":0,
+ "resource_uuid":metric_info['metric_create']['resource_uuid'],
+ "status":metric_status
+ }
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+ def publish_update_metric_response(self, metric_info, metric_status):
+ """Publish update metric response
+ """
+ topic = 'metric_response'
+ msg_key = 'update_metric_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"metric_update_response",
+ "correlation_id":metric_info['correlation_id'],
+ "metric_update_response":
+ {
+ "metric_uuid":0,
+ "resource_uuid":metric_info['metric_create']['resource_uuid'],
+ "status":metric_status
+ }
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+ def publish_delete_metric_response(self, metric_info):
+ """
+ """
+ topic = 'metric_response'
+ msg_key = 'delete_metric_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"delete_metric_response",
+ "correlation_id":metric_info['correlation_id'],
+ "metric_name":metric_info['metric_name'],
+ "metric_uuid":0,
+ "resource_uuid":metric_info['resource_uuid'],
+ "tenant_uuid":metric_info['tenant_uuid'],
+ "status":False
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+ def list_alarms(self, list_alarm_input):
+ """Collect list of triggered alarms based on input
+ """
+ mon_plugin = MonPlugin()
+ triggered_alarms = mon_plugin.get_triggered_alarms_list(list_alarm_input)
+ return triggered_alarms
+
+
+ def publish_list_alarm_response(self, triggered_alarm_list, list_alarm_input):
+ """Publish list of triggered alarms
+ """
+ topic = 'alarm_response'
+ msg_key = 'list_alarm_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"list_alarm_response",
+ "correlation_id":list_alarm_input['alarm_list_request']['correlation_id'],
+ #"resource_uuid":list_alarm_input['alarm_list_request']['resource_uuid'],
+ "list_alarm_resp":triggered_alarm_list
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core producer
+ self.producer_alarms.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+
+ def update_access_credentials(self, access_info):
+ """Verify if all the required access config params are provided and
+ updates access config in default vrops config file
+ """
+ update_status = False
+ wr_status = False
+ #Check if all the required config params are passed in request
+ if not all (keys in access_info for keys in req_config_params):
+ self.logger.debug("All required Access Config Parameters not provided")
+ self.logger.debug("List of required Access Config Parameters: {}".format(req_config_params))
+ self.logger.debug("List of given Access Config Parameters: {}".format(access_info))
+ return update_status
+
+ wr_status = self.write_access_config(access_info)
+ return wr_status #True/False
+
+ def write_access_config(self, access_info):
+ """Write access configuration to vROPs config file.
+ """
+ wr_status = False
+ try:
+ tree = XmlElementTree.parse(CONFIG_FILE_PATH)
+ root = tree.getroot()
+ alarmParams = {}
+ for config in root:
+ if config.tag == 'Access_Config':
+ for param in config:
+ for key,val in access_info.iteritems():
+ if param.tag == key:
+ #print param.tag, val
+ param.text = val
+
+ tree.write(CONFIG_FILE_PATH)
+ wr_status = True
+ except Exception as exp:
+ self.logger.warn("Failed to update Access Config Parameters: {}".format(exp))
+
+ return wr_status
+
+
+ def publish_access_update_response(self, access_update_status, access_info_req):
+ """Publish access update response
+ """
+ topic = 'access_credentials'
+ msg_key = 'vim_access_credentials_response'
+ response_msg = {"schema_version":1.0,
+ "schema_type":"vim_access_credentials_response",
+ "correlation_id":access_info_req['access_config']['correlation_id'],
+ "status":access_update_status
+ }
+ self.logger.info("Publishing response:\nTopic={}\nKey={}\nValue={}"\
+ .format(topic, msg_key, response_msg))
+ #Core Add producer
+ self.producer_access_credentials.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)
+
+def main():
+ #log.basicConfig(filename='mon_vrops_log.log',level=log.DEBUG)
+ set_logger()
+ plugin_rcvr = PluginReceiver()
+ plugin_rcvr.consume()
+
+if __name__ == "__main__":
+ main()
+
--- /dev/null
+#!/usr/bin/env bash
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact:  osslegalrouting@vmware.com
+##
+
+#Installs dependencies, generates a self-signed SSL certificate named after
+#this host's FQDN, and starts the vrops_webservice in the background.
+BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+SSL_Cert_Dir="${BASEDIR}/SSL_certificate"
+#The certificate CN and file names are derived from this host's FQDN
+THISHOST=$(hostname -f)
+Domain_Name="${THISHOST}"
+#Domain_Name="www.vrops_webservice.com"
+WebServiceFile="${BASEDIR}/vrops_webservice"
+
+echo '
+ #################################################################
+ #####             Installing Require Packages             #####
+ #################################################################'
+
+#Function to install packages using apt-get
+function install_packages(){
+    [ -x /usr/bin/apt-get ] && apt-get install -y $*
+
+    #check that every requested package was properly installed
+    for PACKAGE in $*
+    do
+        PACKAGE_INSTALLED="no"
+        [ -x /usr/bin/apt-get ] && dpkg -l $PACKAGE &>> /dev/null && PACKAGE_INSTALLED="yes"
+        if [ "$PACKAGE_INSTALLED" = "no" ]
+        then
+            echo "failed to install package '$PACKAGE'. Revise network connectivity and try again" >&2
+            exit 1
+        fi
+    done
+ }
+
+apt-get update  # To get the latest package lists
+
+#NOTE(review): $_DISTRO is never set in this script — it is presumably
+#exported by the calling installer; confirm, otherwise neither branch runs
+[ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-jsonschema python-requests libxml2-dev libxslt-dev python-dev python-pip openssl"
+[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "python-jsonschema python-requests libxslt-devel libxml2-devel python-devel python-pip openssl"
+#The only way to install python-bottle on Centos7 is with easy_install or pip
+[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
+
+#required for vmware connector. TODO: move this to a separate opt-in install script
+sudo pip install --upgrade pip
+sudo pip install cherrypy
+
+echo '
+ #################################################################
+ #####             Genrate SSL Certificate             #####
+ #################################################################'
+#Create SSL Certificate folder and file
+mkdir "${SSL_Cert_Dir}"
+
+#Self-signed 2048-bit RSA certificate, valid for ~10 years, CN = host FQDN
+openssl genrsa -out "${SSL_Cert_Dir}/${Domain_Name}".key 2048
+openssl req -new -x509 -key "${SSL_Cert_Dir}/${Domain_Name}".key -out "${SSL_Cert_Dir}/${Domain_Name}".cert -days 3650 -subj /CN="${Domain_Name}"
+
+echo '
+ #################################################################
+ #####             Start Web Service             #####
+ #################################################################'
+
+#Run the webservice detached so the installer can finish
+nohup python "${WebServiceFile}" &
+
+echo '
+ #################################################################
+ #####             Done             #####
+ #################################################################'
+
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: osslegalrouting@vmware.com
+##
+
+"""
+ Webservice for vRealize Operations (vROPs) to post/notify alarms details.
+
+"""
+__author__ = "Arpita Kate"
+__date__ = "$15-Sept-2017 16:09:29$"
+__version__ = '0.1'
+
+
+from bottle import (ServerAdapter, route, run, server_names, redirect, default_app,
+ request, response, template, debug, TEMPLATE_PATH , static_file)
+from socket import getfqdn
+from datetime import datetime
+from xml.etree import ElementTree as ET
+import logging
+import os
+import json
+import sys
+import requests
+sys.path.append("../../../core/message_bus")
+from producer import KafkaProducer
+#from core.message_bus.producer import KafkaProducer
+
+#Prefer the standalone cheroot server package; fall back to the server
+#bundled inside older cherrypy releases when cheroot is not installed
+try:
+    from cheroot.wsgi import Server as WSGIServer
+    from cheroot.ssl.pyopenssl import pyOpenSSLAdapter
+except ImportError:
+    from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer
+    from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
+
+#Set Constants
+BASE_DIR = os.path.dirname(os.path.dirname(__file__))
+CERT_DIR = os.path.join(BASE_DIR, "SSL_certificate")
+#Certificate/key files are looked up by this host's FQDN
+certificate_name = getfqdn() + ".cert"
+key_name = getfqdn() + ".key"
+CERTIFICATE = os.path.join(CERT_DIR, certificate_name)
+KEY = os.path.join(CERT_DIR, key_name)
+#CERTIFICATE = os.path.join(CERT_DIR, "www.vrops_webservice.com.cert")
+#KEY = os.path.join(CERT_DIR, "www.vrops_webservice.com.key")
+CONFIG_FILE = os.path.join(BASE_DIR, '../vrops_config.xml')
+#Severity Mapping from vROPs to OSM
+VROPS_SEVERITY_TO_OSM_MAPPING = {
+    "ALERT_CRITICALITY_LEVEL_CRITICAL":"CRITICAL",
+    "ALERT_CRITICALITY_LEVEL_WARNING":"WARNING",
+    "ALERT_CRITICALITY_LEVEL_IMMEDIATE":"MAJOR",
+    "ALERT_CRITICALITY_LEVEL_INFO":"INDETERMINATE",
+    "ALERT_CRITICALITY_LEVEL_AUTO":"INDETERMINATE",
+    "ALERT_CRITICALITY_LEVEL_UNKNOWN":"INDETERMINATE",
+    "ALERT_CRITICALITY_LEVEL_NONE":"INDETERMINATE"
+    }
+
+#Set logger
+logger = logging.getLogger('vROPs_Webservice')
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+hdlr = logging.FileHandler(os.path.join(BASE_DIR,"vrops_webservice.log"))
+hdlr.setFormatter(formatter)
+logger.addHandler(hdlr)
+logger.setLevel(logging.DEBUG)
+
+
+def format_datetime(str_date):
+ """
+ Method to format datetime
+ Args:
+ str_date - datetime string
+ Returns:
+ formated datetime
+ """
+ date_fromat = "%Y-%m-%dT%H:%M:%S"
+ formated_datetime = None
+ try:
+ datetime_obj = datetime.fromtimestamp(float(str_date)/1000.)
+ formated_datetime = datetime_obj.strftime(date_fromat)
+ except Exception as exp:
+ logger.error('Exception: {} occured while converting date {} into format {}'.format(
+ exp,str_date, date_fromat))
+
+ return formated_datetime
+
+def get_alarm_config():
+ """
+ Method to get configuration parameters
+ Args:
+ None
+ Returns:
+ dictionary of config parameters
+ """
+ alarm_config = {}
+ try:
+ xml_content = ET.parse(CONFIG_FILE)
+ alarms = xml_content.getroot()
+ for alarm in alarms:
+ if alarm.tag == 'Access_Config':
+ for param in alarm:
+ alarm_config[param.tag] = param.text
+ except Exception as exp:
+ logger.error('Exception: {} occured while parsing config file.'.format(exp))
+
+ return alarm_config
+
+def get_alarm_definationID(alarm_uuid):
+ """
+ Method to get alarm/alert defination ID
+ Args:
+ alarm_uuid : UUID of alarm
+ Returns:
+ alarm defination ID
+ """
+ alarm_definationID = None
+ if alarm_uuid :
+ try:
+ access_config = get_alarm_config()
+ headers = {'Accept': 'application/json'}
+ api_url = '{}/suite-api/api/alerts/{}'.format(access_config.get('vrops_site'), alarm_uuid)
+ api_response = requests.get(
+ api_url,
+ auth=(access_config.get('vrops_user'), access_config.get('vrops_password')),
+ verify = False, headers = headers
+ )
+
+ if api_response.status_code == 200:
+ data = api_response.json()
+ if data.get("alertDefinitionId") is not None:
+ alarm_definationID = '-'.join(data.get("alertDefinitionId").split('-')[1:])
+ else:
+ logger.error("Failed to get alert definition ID for alarm {}".format(alarm_uuid))
+ except Exception as exp:
+ logger.error( "Exception occured while getting alert definition ID for alarm : {}".format(exp, alarm_uuid))
+
+ return alarm_definationID
+
+
+@route('/notify/<alarmID>', method='POST')
+def notify_alarm(alarmID):
+ """
+ Method notify alarm details by publishing message at Kafka message bus
+ Args:
+ alarmID - Name of alarm
+ Returns:
+ response code
+ """
+ logger.info("Request:{} from:{} {} {} ".format(request, request.remote_addr, request.method, request.url))
+ response.headers['Content-Type'] = 'application/json'
+ try:
+ postdata = json.loads(request.body.read())
+ notify_details = {}
+ alaram_config = get_alarm_config()
+ #Parse noditfy data
+ notify_details['alarm_uuid'] = get_alarm_definationID(postdata.get('alertId'))
+ notify_details['description'] = postdata.get('info')
+ notify_details['alarm_instance_uuid'] = alarmID
+ notify_details['resource_uuid'] = '-'.join(postdata.get('alertName').split('-')[1:])
+ notify_details['tenant_uuid'] = alaram_config.get('tenant_id')
+ notify_details['vim_type'] = "VMware"
+ notify_details['severity'] = VROPS_SEVERITY_TO_OSM_MAPPING.get(postdata.get('criticality'), 'INDETERMINATE')
+ notify_details['status'] = postdata.get('status')
+ if postdata.get('startDate'):
+ notify_details['start_date_time'] = format_datetime(postdata.get('startDate'))
+ if postdata.get('updateDate'):
+ notify_details['update_date_time'] = format_datetime(postdata.get('updateDate'))
+ if postdata.get('cancelDate'):
+ notify_details['cancel_date_time'] = format_datetime(postdata.get('cancelDate'))
+
+ alarm_details = {'schema_version': 1.0,
+ 'schema_type': "notify_alarm",
+ 'notify_details': notify_details
+ }
+ alarm_data = json.dumps(alarm_details)
+ logger.info("Alarm details: {}".format(alarm_data))
+
+ #Publish Alarm details
+ kafkaMsgProducer = KafkaProducer()
+ kafkaMsgProducer.publish(topic='alarm_response', key='notify_alarm', value=alarm_data)
+
+ #return 201 on Success
+ response.status = 201
+
+ except Exception as exp:
+ logger.error('Exception: {} occured while notifying alarm {}.'.format(exp, alarmID))
+ #return 500 on Error
+ response.status = 500
+
+ return response
+
+
+class SSLWebServer(ServerAdapter):
+    """
+    CherryPy web server with SSL support.
+    """
+
+    def run(self, handler):
+        """
+        Runs a CherryPy Server using the SSL certificate.
+
+        Args:
+            handler: the WSGI application to serve (bottle's default app).
+        """
+        server = WSGIServer((self.host, self.port), handler)
+        #Wrap the listening socket with the host's self-signed cert/key
+        server.ssl_adapter = pyOpenSSLAdapter(
+            certificate=CERTIFICATE,
+            private_key=KEY,
+            # certificate_chain="intermediate_cert.crt"
+        )
+
+        try:
+            #NOTE(review): server.start() serves until stopped, so the info
+            #log below is only reached after the serve loop returns — confirm
+            server.start()
+            logger.info("Started vROPs Web Service")
+        except Exception as exp:
+            server.stop()
+            logger.error("Exception: {} Stopped vROPs Web Service".format(exp))
+
+
+if __name__ == "__main__":
+ #Start SSL Web Service
+ logger.info("Start vROPs Web Service")
+ app = default_app()
+ server_names['sslwebserver'] = SSLWebServer
+ run(app=app,host=getfqdn(), port=8080, server='sslwebserver')
+
+
+
--- /dev/null
+<!--
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: osslegalrouting@vmware.com
+##
+-->
+<alarmsDefaultConfig>
+ <Average_Memory_Usage_Above_Threshold>
+ <vrops_alarm>Avg_Mem_Usage_Above_Thr</vrops_alarm>
+ <period>300</period>
+ <evaluation>2</evaluation>
+ <cancel_period>300</cancel_period>
+ <cancel_cycles>2</cancel_cycles>
+ <enabled>true</enabled>
+ <repeat>false</repeat>
+ <action>acknowledge</action>
+ <resource_kind>VirtualMachine</resource_kind>
+ <adapter_kind>VMWARE</adapter_kind>
+ <alarm_type>16</alarm_type>
+ <alarm_subType>19</alarm_subType>
+ <impact>risk</impact>
+ <unit>%</unit>
+ </Average_Memory_Usage_Above_Threshold>
+ <Read_Latency_Above_Threshold>
+ <vrops_alarm>RD_Latency_Above_Thr</vrops_alarm>
+ <period>300</period>
+ <evaluation>3</evaluation>
+ <cancel_period>300</cancel_period>
+ <cancel_cycles>3</cancel_cycles>
+ <enabled>true</enabled>
+ <repeat>false</repeat>
+ <action>acknowledge</action>
+ <resource_kind>VirtualMachine</resource_kind>
+ <adapter_kind>VMWARE</adapter_kind>
+ <alarm_type>18</alarm_type>
+ <alarm_subType>19</alarm_subType>
+ <impact>risk</impact>
+ <unit>msec</unit>
+ </Read_Latency_Above_Threshold>
+ <Write_Latency_Above_Threshold>
+ <vrops_alarm>WR_Latency_Above_Thr</vrops_alarm>
+ <period>300</period>
+ <evaluation>3</evaluation>
+ <cancel_period>300</cancel_period>
+ <cancel_cycles>3</cancel_cycles>
+ <enabled>true</enabled>
+ <repeat>false</repeat>
+ <action>acknowledge</action>
+ <resource_kind>VirtualMachine</resource_kind>
+ <adapter_kind>VMWARE</adapter_kind>
+ <alarm_type>18</alarm_type>
+ <alarm_subType>19</alarm_subType>
+ <impact>risk</impact>
+ <unit>msec</unit>
+ </Write_Latency_Above_Threshold>
+ <Net_Packets_Dropped>
+ <vrops_alarm>Net_Packets_Dropped</vrops_alarm>
+ <period>300</period>
+ <evaluation>1</evaluation>
+ <cancel_period>300</cancel_period>
+ <cancel_cycles>1</cancel_cycles>
+ <enabled>true</enabled>
+ <repeat>false</repeat>
+ <action>acknowledge</action>
+ <resource_kind>VirtualMachine</resource_kind>
+ <adapter_kind>VMWARE</adapter_kind>
+ <alarm_type>19</alarm_type>
+ <alarm_subType>19</alarm_subType>
+ <impact>risk</impact>
+ <unit>nos</unit>
+ </Net_Packets_Dropped>
+ <CPU_Utilization_Above_Threshold>
+ <vrops_alarm>CPU_Utilization_Above_Thr</vrops_alarm>
+ <period>300</period>
+ <evaluation>1</evaluation>
+ <cancel_period>300</cancel_period>
+ <cancel_cycles>1</cancel_cycles>
+ <enabled>true</enabled>
+ <repeat>false</repeat>
+ <action>acknowledge</action>
+ <resource_kind>VirtualMachine</resource_kind>
+ <adapter_kind>VMWARE</adapter_kind>
+ <alarm_type>16</alarm_type>
+ <alarm_subType>19</alarm_subType>
+ <impact>risk</impact>
+ <unit>msec</unit>
+ </CPU_Utilization_Above_Threshold>
+ <AVERAGE_MEMORY_UTILIZATION>
+ <metric_key>mem|usage_average</metric_key>
+ <unit>%</unit>
+ </AVERAGE_MEMORY_UTILIZATION>
+ <CPU_UTILIZATION>
+ <metric_key>cpu|usage_average</metric_key>
+ <unit>%</unit>
+ </CPU_UTILIZATION>
+ <READ_LATENCY_0>
+ <metric_key>virtualDisk:scsi0:0|totalReadLatency_average</metric_key>
+ <unit>msec</unit>
+ </READ_LATENCY_0>
+ <WRITE_LATENCY_0>
+ <metric_key>virtualDisk:scsi0:0|totalWriteLatency_average</metric_key>
+ <unit>msec</unit>
+ </WRITE_LATENCY_0>
+ <READ_LATENCY_1>
+ <metric_key>virtualDisk:scsi0:1|totalReadLatency_average</metric_key>
+ <unit>msec</unit>
+ </READ_LATENCY_1>
+ <WRITE_LATENCY_1>
+ <metric_key>virtualDisk:scsi0:1|totalWriteLatency_average</metric_key>
+ <unit>msec</unit>
+ </WRITE_LATENCY_1>
+ <PACKETS_DROPPED_0>
+ <metric_key>net:4000|dropped</metric_key>
+ <unit>nos</unit>
+ </PACKETS_DROPPED_0>
+ <PACKETS_DROPPED_1>
+ <metric_key>net:4001|dropped</metric_key>
+ <unit>nos</unit>
+ </PACKETS_DROPPED_1>
+ <PACKETS_DROPPED_2>
+ <metric_key>net:4002|dropped</metric_key>
+ <unit>nos</unit>
+ </PACKETS_DROPPED_2>
+ <PACKETS_RECEIVED>
+ <metric_key>net:Aggregate of all instances|packetsRxPerSec</metric_key>
+ <unit>nos</unit>
+ </PACKETS_RECEIVED>
+ <PACKETS_SENT>
+ <metric_key>net:Aggregate of all instances|packetsTxPerSec</metric_key>
+ <unit>nos</unit>
+ </PACKETS_SENT>
+ <Access_Config>
+ <vrops_site>https://192.169.241.123</vrops_site>
+ <vrops_user>Admin</vrops_user>
+ <vrops_password>VMware1!</vrops_password>
+ <vcloud-site>https://mano-vcd-1.corp.local</vcloud-site>
+ <admin_username>administrator</admin_username>
+ <admin_password>VMware1!</admin_password>
+ <nsx_manager>https://192.169.241.104</nsx_manager>
+ <nsx_user>admin</nsx_user>
+ <nsx_password>VMware1!</nsx_password>
+ <vcenter_ip>192.169.241.103</vcenter_ip>
+ <vcenter_port>443</vcenter_port>
+ <vcenter_user>administrator@vsphere.local</vcenter_user>
+ <vcenter_password>VMware1!</vcenter_password>
+ <vim_tenant_name>Org2-VDC-PVDC1</vim_tenant_name>
+ <orgname>Org2</orgname>
+ <tenant_id>Org2-VDC-PVDC1</tenant_id>
+ </Access_Config>
+</alarmsDefaultConfig>
+
+
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+#gitkeep file to keep the initial empty directory structure.
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "alarm_ack",
+"vim_type": "AWS",
+"ack_details":
+{
+"alarm_uuid": "CPU_Utilization_i-098da78cbd8304e17",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": ""
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold1",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold",
+"resource_uuid": "i-09462760703837b26",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold2",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "Greaterthan",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold2",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold2",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAX"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold2",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_metrics_request",
+"tenant_uuid": "",
+"correlation_id": "SO123",
+"vim_type": "AWS",
+"metric_create":
+{
+"metric_name": "CPU_UTILIZ",
+"metric_unit": "",
+"resource_uuid": "i-098da78cbd8304e17"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_metrics_request",
+"tenant_uuid": "",
+"correlation_id": "SO123",
+"vim_type": "AWS",
+"metric_create":
+{
+"metric_name": "CPU_UTILIZATION",
+"metric_unit": "",
+"resource_uuid": "i-098da78cbd8304e17"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_alarm_request",
+"vim_type": "AWS",
+"alarm_delete_request":
+{
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e16",
+"correlation_id": "SO123"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_alarm_request",
+"vim_type": "AWS",
+"alarm_delete_request":
+{
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
+"correlation_id": "SO123"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_alarm_request",
+"vim_type": "AWS",
+"alarm_delete_request":
+{
+"alarm_uuid": "CPU_Utilization_Above_Threshold1_i-098da78cbd8304e17",
+"correlation_id": "SO123"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_alarm_request",
+"vim_type": "AWS",
+"alarm_delete_request":
+{
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-09462760703837b26",
+"correlation_id": "SO123"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_alarm_request",
+"vim_type": "AWS",
+"alarm_delete_request":
+{
+"alarm_uuid": "CPU_Utilization_Above_Threshold2_i-098da78cbd8304e17",
+"correlation_id": "SO123"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_alarm_request",
+"vim_type": "AWS",
+"alarm_delete_request":
+{
+"alarm_uuid": "CPU_Utilization_Above_Threshold4_i-098da78cbd8304e17",
+"correlation_id": "SO123"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_metric_data_request",
+"metric_name": "CPU_UTILIATION",
+"metric_uuid": "",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": "",
+"correlation_uuid": "S0123",
+"vim_type": "AWS"
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "delete_metric_data_request",
+"metric_name": "CPU_UTILIZATION",
+"metric_uuid": "",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": "",
+"correlation_uuid": "S0123",
+"vim_type": "AWS"
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "list_alarm_request",
+"vim_type": "AWS",
+"alarm_list_request":
+{
+"correlation_id": "SO123",
+"resource_uuid": "",
+"alarm_name": "",
+"severity": ""
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "list_alarm_request",
+"vim_type": "AWS",
+"alarm_list_request":
+{
+"correlation_id": "SO123",
+"resource_uuid": "i-098da78cbd8304e17",
+"alarm_name": "",
+"severity": ""
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "list_alarm_request",
+"vim_type": "AWS",
+"alarm_list_request":
+{
+"correlation_id": "SO123",
+"resource_uuid": "i-098da78cbd8304e17",
+"alarm_name": "",
+"severity": "Critical"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "list_metrics_request",
+"vim_type": "AWS",
+"metrics_list_request":
+{
+"metric_name": "CPU_UTILZATION",
+"correlation_id": "SO123",
+"resource_uuid": "i-098da78cbd8304e17"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "list_metrics_request",
+"vim_type": "AWS",
+"metrics_list_request":
+{
+"metric_name": "CPU_UTILIZATION",
+"correlation_id": "SO123",
+"resource_uuid": "i-098da78cbd8304e17"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "read_metric_data_request",
+"metric_name": "CPU_UTILIZATION",
+"metric_uuid": "0",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": "",
+"correlation_uuid": "SO123",
+"vim_type":"AWS",
+"collection_period":"3500" ,
+"collection_unit": ""
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "read_metric_data_request",
+"metric_name": "CPU_UTILIZATION",
+"metric_uuid": "0",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": "",
+"correlation_uuid": "SO123",
+"vim_type":"AWS",
+"collection_period":"3600" ,
+"collection_unit": ""
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "read_metric_data_request",
+"metric_name": "CPU_UTLIZATION",
+"metric_uuid": "0",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": "",
+"correlation_uuid": "SO123",
+"vim_type":"AWS",
+"collection_period":"3600" ,
+"collection_unit": ""
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "read_metric_data_request",
+"metric_name": "CPU_UTILIZATION",
+"metric_uuid": "0",
+"resource_uuid": "i-098da78cbd8304e17",
+"tenant_uuid": "",
+"correlation_uuid": "SO123",
+"vim_type":"AWS",
+"collection_period":"3600" ,
+"collection_unit": ""
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "update_alarm_request",
+"vim_type": "AWS",
+"alarm_update_request":
+{
+"correlation_id": "SO123",
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e13",
+"description": "",
+"severity": "Critical",
+"operation": "LE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "update_alarm_request",
+"vim_type": "AWS",
+"alarm_update_request":
+{
+"correlation_id": "SO123",
+"alarm_uuid": "CPU_Utilization_Above_Threshold4_i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "LE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "update_alarm_request",
+"vim_type": "AWS",
+"alarm_update_request":
+{
+"correlation_id": "SO123",
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "Less",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "update_alarm_request",
+"vim_type": "AWS",
+"alarm_update_request":
+{
+"correlation_id": "SO123",
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "LE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "update_alarm_request",
+"vim_type": "AWS",
+"alarm_update_request":
+{
+"correlation_id": "SO123",
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "LE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAX"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "update_alarm_request",
+"vim_type": "AWS",
+"alarm_update_request":
+{
+"correlation_id": "SO123",
+"alarm_uuid": "CPU_Utilization_Above_Threshold_i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "LE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_alarm_request",
+"vim_type": "AWS",
+"alarm_create_request":
+{
+"correlation_id": "SO123",
+"alarm_name": "CPU_Utilization_Above_Threshold4",
+"resource_uuid": "i-098da78cbd8304e17",
+"description": "",
+"severity": "Critical",
+"operation": "GE",
+"threshold_value": 1.5,
+"unit": "",
+"metric_name": "CPU_UTILIZATION",
+"statistic": "MAXIMUM"
+}
+}
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_metrics_request",
+"tenant_uuid": "",
+"correlation_id": "SO123",
+"vim_type": "AWS",
+"metric_create":
+{
+"metric_name": "CPU_UTILIZ",
+"metric_unit": "",
+"resource_uuid": "i-098da78cbd8304e17"
+}
+}
\ No newline at end of file
--- /dev/null
+{
+"schema_version": "1.0",
+"schema_type": "create_metrics_request",
+"tenant_uuid": "",
+"correlation_id": "SO123",
+"vim_type": "AWS",
+"metric_create":
+{
+"metric_name": "CPU_UTILIZATION",
+"metric_unit": "",
+"resource_uuid": "i-098da78cbd8304e17"
+}
+}
\ No newline at end of file
--- /dev/null
+from connection import Connection
+import unittest
+import sys
+import jsmin
+import json
+import os
+import time
+from jsmin import jsmin
+sys.path.append("../../test/core/")
+from test_producer import KafkaProducer
+from kafka import KafkaConsumer
+try:
+    import boto
+    import boto.ec2
+    import boto.vpc
+    import boto.ec2.cloudwatch
+    import boto.ec2.connection
+except ImportError:
+    # Catch only ImportError: the original bare `except:` would also swallow
+    # SystemExit/KeyboardInterrupt and unrelated startup errors, hiding the
+    # real cause behind the "Boto not available" message.
+    exit("Boto not available. Try activating your virtualenv OR `pip install boto`")
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+
+# Test Producer object to generate request
+
+# Shared, module-level test fixtures created once at import time.
+# KafkaProducer comes from test_producer.py (see sys.path insert above).
+producer = KafkaProducer('create_alarm_request')
+obj = Connection()
+# setEnvironment() presumably loads AWS credentials/region for boto --
+# TODO(review): confirm against connection.py.
+connections = obj.setEnvironment()
+connections_res = obj.connection_instance()
+# CloudWatch connection handle; not used directly by the tests below but
+# establishes that the AWS connection can be built before tests run.
+cloudwatch_conn = connections_res['cloudwatch_connection']
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+
+'''Test E2E Flow : Test cases have been tested one at a time.
+1) Common Request is generated using request function in test_producer.py(/test/core)
+2) The request is then consumed by the consumer (plugin)
+3) The response is sent back on the message bus in plugin_alarm.py using
+   response functions in producer.py(/core/message-bus)
+4) The response is then again consumed by the unit_tests_alarms.py
+   and the test cases are applied on the response.
+'''
+
+class config_alarm_name_test(unittest.TestCase):
+    # E2E tests for create_alarm_request: each test publishes a fixture JSON
+    # on the 'alarm_request' topic and inspects the plugin's
+    # 'create_alarm_response' message on 'alarm_response'.
+    # NOTE(review): the `return` sits at for-loop level, so each test
+    # consumes only the FIRST message; the assertion runs only if that
+    # message has the expected key -- confirm this is intentional.
+
+
+    def setUp(self):
+        pass
+    #To generate a request of testing new alarm name and new instance id in create alarm request
+    def test_differentName_differentInstance(self):
+        time.sleep(2)
+        producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                # Response value is double-encoded JSON, hence two loads.
+                info = json.loads(json.loads(message.value))
+                print info
+                time.sleep(1)
+                self.assertTrue(info['alarm_create_response']['status'])
+            return
+
+    #To generate a request of testing new alarm name and existing instance id in create alarm request
+    def test_differentName_sameInstance(self):
+        time.sleep(2)
+        producer.request("test_schemas/create_alarm/create_alarm_differentName_sameInstance.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info
+                time.sleep(1)
+                # Clean up: delete the alarm created by this test.
+                producer.request("test_schemas/delete_alarm/name_valid_delete1.json",'delete_alarm_request','','alarm_request')
+                self.assertTrue(info['alarm_create_response']['status'])
+            return
+
+    #To generate a request of testing existing alarm name and new instance id in create alarm request
+    def test_sameName_differentInstance(self):
+        time.sleep(2)
+        producer.request("test_schemas/create_alarm/create_alarm_sameName_differentInstance.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid_delete2.json",'delete_alarm_request', '','alarm_request')
+                self.assertTrue(info['alarm_create_response']['status'])
+            return
+
+    #To generate a request of testing existing alarm name and existing instance id in create alarm request
+    def test_sameName_sameInstance(self):
+        time.sleep(2)
+        producer.request("test_schemas/create_alarm/create_alarm_sameName_sameInstance.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info,"---"
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
+                # Duplicate name+instance must be rejected: no response payload.
+                self.assertEqual(info, None)
+            return
+
+    #To generate a request of testing valid statistics in create alarm request
+    def test_statisticValid(self):
+        time.sleep(2)
+        producer.request("test_schemas/create_alarm/statistic_valid.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid_delete3.json",'delete_alarm_request', '','alarm_request')
+                self.assertTrue(info['alarm_create_response']['status'])
+            return
+
+    #To generate a request of testing invalid statistics in create alarm request
+    def test_statisticValidNot(self):
+        time.sleep(2)
+        # statistic_invalid.json carries statistic "MAX" (not a valid value).
+        producer.request("test_schemas/create_alarm/statistic_invalid.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info,"---"
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid_delete3.json",'delete_alarm_request', '','alarm_request')
+                self.assertEqual(info, None)
+            return
+
+    #To generate a request of testing valid operation in create alarm request
+    def test_operationValid(self):
+        time.sleep(2)
+        producer.request("test_schemas/create_alarm/operation_valid.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid_delete3.json",'delete_alarm_request', '','alarm_request')
+                self.assertTrue(info['alarm_create_response']['status'])
+            return
+
+    #To generate a request of testing invalid operation in create alarm request
+    def test_operationValidNot(self):
+        time.sleep(2)
+        # operation_invalid.json carries operation "Greaterthan" (not valid).
+        producer.request("test_schemas/create_alarm/operation_invalid.json",'create_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "create_alarm_response":
+                info = json.loads(json.loads(message.value))
+                print info
+                time.sleep(1)
+                self.assertEqual(info,None)
+            return
+
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+class update_alarm_name_test(unittest.TestCase):
+    # E2E tests for update_alarm_request. Responses here are triple-encoded
+    # JSON (three nested json.loads), unlike the create-alarm responses.
+
+    #To generate a request of testing valid alarm_id in update alarm request
+    def test_nameValid(self):
+        # Pre-create the alarm that the update request will target.
+        producer.request("test_schemas/update_alarm/update_alarm_new_alarm.json",'create_alarm_request', '','alarm_request')
+        time.sleep(2)
+        producer.request("test_schemas/update_alarm/name_valid.json",'update_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "update_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                # Clean up the alarm created above.
+                producer.request("test_schemas/delete_alarm/name_valid_delete4.json",'delete_alarm_request', '','alarm_request')
+                self.assertTrue(info['alarm_update_response']['status'])
+            return
+
+    #To generate a request of testing invalid alarm_id in update alarm request
+    def test_nameInvalid(self):
+        time.sleep(2)
+        producer.request("test_schemas/update_alarm/name_invalid.json",'update_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "update_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(info,None)
+            return
+
+    #To generate a request of testing valid statistics in update alarm request
+    def test_statisticValid(self):
+        producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
+        time.sleep(2)
+        producer.request("test_schemas/update_alarm/statistic_valid.json",'update_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "update_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
+                self.assertTrue(info['alarm_update_response']['status'])
+            return
+
+    #To generate a request of testing invalid statistics in update alarm request
+    def test_statisticInvalid(self):
+        time.sleep(2)
+        producer.request("test_schemas/update_alarm/statistic_invalid.json",'update_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "update_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(info,None)
+            return
+
+    #To generate a request of testing valid operation in update alarm request
+    def test_operationValid(self):
+        producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
+        time.sleep(2)
+        producer.request("test_schemas/update_alarm/operation_valid.json",'update_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "update_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
+                self.assertTrue(info['alarm_update_response']['status'])
+            return
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+class delete_alarm_test(unittest.TestCase):
+    # E2E tests for delete_alarm_request against an alarm created on the fly.
+
+    #To generate a request of testing valid alarm_id in delete alarm request
+    def test_nameValid(self):
+        # Create an alarm first so the delete request has a real target.
+        producer.request("test_schemas/create_alarm/create_alarm_differentName_differentInstance.json",'create_alarm_request', '','alarm_request')
+        time.sleep(2)
+        producer.request("test_schemas/delete_alarm/name_valid.json",'delete_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "delete_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertTrue(info['alarm_deletion_response']['status'])
+            return
+
+    #To generate a request of testing invalid alarm_id in delete alarm request
+    def test_nameInvalid(self):
+        time.sleep(2)
+        producer.request("test_schemas/delete_alarm/name_invalid.json",'delete_alarm_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "delete_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(info,None)
+            return
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+class list_alarm_test(unittest.TestCase):
+    # E2E tests for alarm list requests with 0, 1 and 2 filter arguments;
+    # only the response type (dict) is asserted, not its contents.
+
+    #To generate a request of testing valid input fields in alarm list request
+    def test_valid_no_arguments(self):
+        time.sleep(2)
+        producer.request("test_schemas/list_alarm/list_alarm_valid_no_arguments.json",'alarm_list_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "list_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(type(info),dict)
+            return
+
+    #To generate a request of testing valid input fields in alarm list request
+    def test_valid_one_arguments(self):
+        time.sleep(2)
+        # Fixture filters by resource_uuid only.
+        producer.request("test_schemas/list_alarm/list_alarm_valid_one_arguments.json",'alarm_list_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "list_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(type(info),dict)
+            return
+
+    #To generate a request of testing valid input fields in alarm list request
+    def test_valid_two_arguments(self):
+        time.sleep(2)
+        # Fixture filters by resource_uuid and severity.
+        producer.request("test_schemas/list_alarm/list_alarm_valid_two_arguments.json",'alarm_list_request', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "list_alarm_response":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(type(info),dict)
+            return
+
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+class alarm_details_test(unittest.TestCase):
+    # E2E test for acknowledge_alarm: expects the plugin to emit a
+    # notify_alarm message on 'alarm_response'.
+
+    #To generate a request of testing valid input fields in acknowledge alarm
+    def test_Valid(self):
+        time.sleep(2)
+        producer.request("test_schemas/alarm_details/acknowledge_alarm.json",'acknowledge_alarm', '','alarm_request')
+        server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
+
+        _consumer = KafkaConsumer(bootstrap_servers=server['server'])
+        _consumer.subscribe(['alarm_response'])
+
+        for message in _consumer:
+            if message.key == "notify_alarm":
+                info = json.loads(json.loads(json.loads(message.value)))
+                print info
+                time.sleep(1)
+                self.assertEqual(type(info),dict)
+            return
+
+if __name__ == '__main__':
+
+    # Saving test results in Log file
+
+    log_file = 'log_file.txt'
+    f = open(log_file, "w")
+    try:
+        runner = unittest.TextTestRunner(f)
+        # exit=False stops unittest.main() from raising SystemExit, which in
+        # the original made f.close() unreachable and risked losing buffered
+        # log output; the finally block now always closes the file.
+        unittest.main(testRunner=runner, exit=False)
+    finally:
+        f.close()
+
+    # For printing results on Console
+    # unittest.main()
--- /dev/null
+from connection import Connection
+import unittest
+import sys
+import jsmin
+import json
+import os
+import time
+from jsmin import jsmin
+sys.path.append("../../test/core/")
+from test_producer import KafkaProducer
+from kafka import KafkaConsumer
+try:
+    import boto
+    import boto.ec2
+    import boto.vpc
+    import boto.ec2.cloudwatch
+    import boto.ec2.connection
+except ImportError:
+    # Catch only ImportError: the original bare `except:` would also swallow
+    # SystemExit/KeyboardInterrupt and unrelated startup errors, hiding the
+    # real cause behind the "Boto not available" message.
+    exit("Boto not available. Try activating your virtualenv OR `pip install boto`")
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+
+# Test Producer object to generate request
+
+# Shared, module-level test fixtures created once at import time.
+producer = KafkaProducer('')
+obj = Connection()
+# setEnvironment() presumably loads AWS credentials/region for boto --
+# TODO(review): confirm against connection.py.
+connections = obj.setEnvironment()
+connections_res = obj.connection_instance()
+cloudwatch_conn = connections_res['cloudwatch_connection']
+
+# Consumer Object to consume response from message bus; shared by every
+# test class in this file (unlike the alarm tests, which build one per test).
+server = {'server': 'localhost:9092', 'topic': 'metric_request'}
+_consumer = KafkaConsumer(bootstrap_servers=server['server'])
+_consumer.subscribe(['metric_response'])
+
+#--------------------------------------------------------------------------------------------------------------------------------------
+
+'''Test E2E Flow : Test cases have been tested one at a time.
+1) Common Request is generated using request function in test_producer.py(/core/message-bus)
+2) The request is then consumed by the consumer (plugin)
+3) The response is sent back on the message bus in plugin_metrics.py using
+   response functions in producer.py(/core/message-bus)
+4) The response is then again consumed by the unit_tests_metrics.py
+   and the test cases are applied on the response.
+'''
+class test_create_metrics(unittest.TestCase):
+    # E2E tests for create_metrics_request; responses are triple-encoded
+    # JSON. NOTE(review): the `return` at for-loop level means only the
+    # first consumed message is examined -- confirm this is intentional.
+
+    def test_status_positive(self):
+        time.sleep(2)
+        # To generate Request of testing valid metric_name in create metrics requests
+        producer.request("create_metrics/create_metric_req_valid.json",'create_metric_request', '','metric_request')
+
+        for message in _consumer:
+            if message.key == "create_metric_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertTrue(resp['metric_create_response']['status'])
+                # metric_uuid 0 is what this plugin returns on success.
+                self.assertEqual(resp['metric_create_response']['metric_uuid'],0)
+            return
+
+    def test_status_negative(self):
+        time.sleep(2)
+        # To generate Request of testing invalid metric_name in create metrics requests
+        producer.request("create_metrics/create_metric_req_invalid.json",'create_metric_request', '','metric_request')
+
+        for message in _consumer:
+            if message.key == "create_metric_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertFalse(resp['metric_create_response']['status'])
+                self.assertEqual(resp['metric_create_response']['metric_uuid'],None)
+            return
+
+class test_metrics_data(unittest.TestCase):
+    # E2E tests for read_metric_data_request: metric-name and
+    # collection-period validation, positive and negative.
+
+    def test_met_name_positive(self):
+        time.sleep(2)
+        # To generate Request of testing valid metric_name in read_metric_data_request
+        producer.request("read_metrics_data/read_metric_name_req_valid.json",'read_metric_data_request', '','metric_request')
+        for message in _consumer:
+            if message.key == "read_metric_data_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertEqual(type(resp['metrics_data']),dict)
+            return
+
+    def test_met_name_negative(self):
+        time.sleep(2)
+        # To generate Request of testing invalid metric_name in read_metric_data_request
+        producer.request("read_metrics_data/read_metric_name_req_invalid.json",'read_metric_data_request', '','metric_request')
+        for message in _consumer:
+            if message.key == "read_metric_data_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertFalse(resp['metrics_data'])
+            return
+
+    def test_coll_period_positive(self):
+        # To generate Request of testing valid collection_period in read_metric_data_request
+        # For AWS metric_data_stats collection period should be a multiple of 60
+        time.sleep(2)
+        producer.request("read_metrics_data/read_coll_period_req_valid.json",'read_metric_data_request', '','metric_request')
+        for message in _consumer:
+            if message.key == "read_metric_data_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertEqual(type(resp),dict)
+            return
+
+    def test_coll_period_negative(self):
+        time.sleep(2)
+        # To generate Request of testing invalid collection_period in read_metric_data_request
+        # (fixture uses 3500, which is not a multiple of 60)
+        producer.request("read_metrics_data/read_coll_period_req_invalid.json",'read_metric_data_request', '','metric_request')
+        for message in _consumer:
+            if message.key == "read_metric_data_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertFalse(resp['metrics_data'])
+            return
+
+class test_update_metrics(unittest.TestCase):
+    # E2E tests for update_metric_request, positive and negative.
+
+    def test_upd_status_positive(self):
+        time.sleep(2)
+        # To generate Request of testing valid metric_name in update metrics requests
+        producer.request("update_metrics/update_metric_req_valid.json",'update_metric_request', '','metric_request')
+        for message in _consumer:
+            if message.key == "update_metric_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertTrue(resp['metric_update_response']['status'])
+                # metric_uuid 0 is what this plugin returns on success.
+                self.assertEqual(resp['metric_update_response']['metric_uuid'],0)
+            return
+
+    def test_upd_status_negative(self):
+        time.sleep(2)
+        # To generate Request of testing invalid metric_name in update metrics requests
+        producer.request("update_metrics/update_metric_req_invalid.json",'update_metric_request', '','metric_request')
+        for message in _consumer:
+            if message.key == "update_metric_response":
+                resp = json.loads(json.loads(json.loads(message.value)))
+                time.sleep(1)
+                self.assertFalse(resp['metric_update_response']['status'])
+                self.assertEqual(resp['metric_update_response']['metric_uuid'],None)
+            return
+
+class test_delete_metrics(unittest.TestCase):
+
+ def test_del_met_name_positive(self):
+ time.sleep(2)
+        # To generate Request of testing valid metric_name in delete metrics requests
+ producer.request("delete_metrics/delete_metric_req_valid.json",'delete_metric_request', '','metric_request')
+ for message in _consumer:
+ if message.key == "delete_metric_response":
+ resp = json.loads(json.loads(json.loads(message.value)))
+ time.sleep(1)
+ self.assertFalse(resp['status'])
+ return
+
+ def test_del_met_name_negative(self):
+ time.sleep(2)
+        # To generate Request of testing invalid metric_name in delete metrics requests
+ producer.request("delete_metrics/delete_metric_req_invalid.json",'delete_metric_request', '','metric_request')
+ for message in _consumer:
+ if message.key == "delete_metric_response":
+ resp = json.loads(json.loads(json.loads(message.value)))
+ time.sleep(1)
+ self.assertFalse(resp)
+ return
+
+class test_list_metrics(unittest.TestCase):
+
+ def test_list_met_name_positive(self):
+ time.sleep(2)
+        # To generate Request of testing valid metric_name in list metrics requests
+ producer.request("list_metrics/list_metric_req_valid.json",'list_metric_request', '','metric_request')
+ for message in _consumer:
+ if message.key == "list_metrics_response":
+ resp = json.loads(json.loads(json.loads(message.value)))
+ time.sleep(1)
+ self.assertEqual(type(resp['metrics_list']),list)
+ return
+
+ def test_list_met_name_negitive(self):
+ time.sleep(2)
+        # To generate Request of testing invalid metric_name in list metrics requests
+ producer.request("list_metrics/list_metric_req_invalid.json",'list_metric_request', '','metric_request')
+ for message in _consumer:
+ if message.key == "list_metrics_response":
+ resp = json.loads(json.loads(json.loads(message.value)))
+ time.sleep(1)
+ self.assertFalse(resp['metrics_list'])
+ return
+
+
+if __name__ == '__main__':
+
+    # Saving test results in Log file
+
+ log_file = 'log_file.txt'
+ f = open(log_file, "w")
+ runner = unittest.TextTestRunner(f)
+ unittest.main(testRunner=runner)
+ f.close()
+
+ # For printing results on Console
+ # unittest.main()
+
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""OpenStack plugin tests."""
+
+import logging
+
+# Initialise a logger for tests
+logging.basicConfig(filename='OpenStack_tests.log',
+ format='%(asctime)s %(message)s',
+ datefmt='%m/%d/%Y %I:%M:%S %p', filemode='a',
+ level=logging.INFO)
+log = logging.getLogger(__name__)
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# **************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Tests for all alarm request message keys."""
+
+import json
+
+import logging
+
+import unittest
+
+import mock
+
+from plugins.OpenStack.Aodh import alarming as alarm_req
+from plugins.OpenStack.common import Common
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class Message(object):
+ """A class to mock a message object value for alarm requests."""
+
+ def __init__(self):
+ """Initialize a mocked message instance."""
+ self.topic = "alarm_request"
+ self.key = None
+ self.value = json.dumps({"mock_value": "mock_details"})
+
+
+class TestAlarmKeys(unittest.TestCase):
+ """Integration test for alarm request keys."""
+
+ def setUp(self):
+ """Setup the tests for alarm request keys."""
+ super(TestAlarmKeys, self).setUp()
+ self.alarming = alarm_req.Alarming()
+ self.alarming.common = Common()
+
+ @mock.patch.object(Common, "_authenticate")
+ def test_alarming_env_authentication(self, auth):
+ """Test getting an auth_token and endpoint for alarm requests."""
+        # if auth_token is None environment variables are used to authenticate
+ message = Message()
+
+ self.alarming.alarming(message, self.alarming.common, None)
+
+ auth.assert_called_with()
+
+ @mock.patch.object(Common, "_authenticate")
+ def test_acccess_cred_auth(self, auth):
+ """Test receiving auth_token from access creds."""
+ message = Message()
+
+ self.alarming.alarming(message, self.alarming.common, "my_auth_token")
+
+ auth.assert_not_called
+ self.assertEqual(self.alarming.auth_token, "my_auth_token")
+
+ @mock.patch.object(alarm_req.Alarming, "delete_alarm")
+ def test_delete_alarm_key(self, del_alarm):
+ """Test the functionality for a create alarm request."""
+ # Mock a message value and key
+ message = Message()
+ message.key = "delete_alarm_request"
+ message.value = json.dumps({"alarm_delete_request":
+ {"alarm_uuid": "my_alarm_id"}})
+
+ # Call the alarming functionality and check delete request
+ self.alarming.alarming(message, self.alarming.common, "my_auth_token")
+
+ del_alarm.assert_called_with(mock.ANY, mock.ANY, "my_alarm_id")
+
+ @mock.patch.object(alarm_req.Alarming, "list_alarms")
+ def test_list_alarm_key(self, list_alarm):
+ """Test the functionality for a list alarm request."""
+ # Mock a message with list alarm key and value
+ message = Message()
+ message.key = "list_alarm_request"
+ message.value = json.dumps({"alarm_list_request": "my_alarm_details"})
+
+ # Call the alarming functionality and check list functionality
+ self.alarming.alarming(message, self.alarming.common, "my_auth_token")
+ list_alarm.assert_called_with(mock.ANY, mock.ANY, "my_alarm_details")
+
+ @mock.patch.object(alarm_req.Alarming, "update_alarm_state")
+ def test_ack_alarm_key(self, ack_alarm):
+ """Test the functionality for an acknowledge alarm request."""
+ # Mock a message with acknowledge alarm key and value
+ message = Message()
+ message.key = "acknowledge_alarm"
+ message.value = json.dumps({"ack_details":
+ {"alarm_uuid": "my_alarm_id"}})
+
+ # Call alarming functionality and check acknowledge functionality
+ self.alarming.alarming(message, self.alarming.common, "my_auth_token")
+ ack_alarm.assert_called_with(mock.ANY, mock.ANY, "my_alarm_id")
+
+ @mock.patch.object(alarm_req.Alarming, "configure_alarm")
+ def test_config_alarm_key(self, config_alarm):
+ """Test the functionality for a create alarm request."""
+ # Mock a message with config alarm key and value
+ message = Message()
+ message.key = "create_alarm_request"
+ message.value = json.dumps({"alarm_create_request": "alarm_details"})
+
+ # Call alarming functionality and check config alarm call
+ config_alarm.return_value = "my_alarm_id", True
+ self.alarming.alarming(message, self.alarming.common, "my_auth_token")
+ config_alarm.assert_called_with(mock.ANY, mock.ANY, "alarm_details")
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# **************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Tests for all alarm request message keys."""
+
+import json
+
+import logging
+
+import unittest
+
+import mock
+
+from plugins.OpenStack.Aodh import alarming as alarm_req
+from plugins.OpenStack.common import Common
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+auth_token = mock.ANY
+endpoint = mock.ANY
+
+
+class Response(object):
+ """Mock a response message class."""
+
+ def __init__(self, result):
+ """Initialise the response text and status code."""
+ self.text = json.dumps(result)
+ self.status_code = "MOCK_STATUS_CODE"
+
+
+class TestAlarming(unittest.TestCase):
+ """Tests for alarming class functions."""
+
+ def setUp(self):
+ """Setup for tests."""
+ super(TestAlarming, self).setUp()
+ self.alarming = alarm_req.Alarming()
+ self.alarming.common = Common()
+
+ @mock.patch.object(alarm_req.Alarming, "check_payload")
+ @mock.patch.object(alarm_req.Alarming, "check_for_metric")
+ @mock.patch.object(Common, "_perform_request")
+ def test_config_invalid_alarm_req(self, perf_req, check_metric, check_pay):
+ """Test configure an invalid alarm request."""
+ # Configuring with invalid alarm name results in failure
+ values = {"alarm_name": "my_alarm",
+ "metric_name": "my_metric",
+ "resource_uuid": "my_r_id"}
+ self.alarming.configure_alarm(endpoint, auth_token, values)
+ perf_req.assert_not_called
+ perf_req.reset_mock()
+
+ # Correct alarm_name will check for metric in Gnocchi
+        # If there isn't one an alarm won't be created
+ values = {"alarm_name": "disk_write_ops",
+ "metric_name": "disk_write_ops",
+ "resource_uuid": "my_r_id"}
+
+ check_metric.return_value = None
+
+ self.alarming.configure_alarm(endpoint, auth_token, values)
+ perf_req.assert_not_called
+
+ @mock.patch.object(alarm_req.Alarming, "check_payload")
+ @mock.patch.object(alarm_req.Alarming, "check_for_metric")
+ @mock.patch.object(Common, "_perform_request")
+ def test_config_valid_alarm_req(self, perf_req, check_metric, check_pay):
+ """Test config a valid alarm."""
+ # Correct alarm_name will check for metric in Gnocchi
+        # And confirm that the payload is configured correctly
+ values = {"alarm_name": "disk_write_ops",
+ "metric_name": "disk_write_ops",
+ "resource_uuid": "my_r_id"}
+
+ check_metric.return_value = "my_metric_id"
+ check_pay.return_value = "my_payload"
+
+ self.alarming.configure_alarm(endpoint, auth_token, values)
+ perf_req.assert_called_with(
+ "<ANY>/v2/alarms/", auth_token,
+ req_type="post", payload="my_payload")
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_delete_alarm_req(self, perf_req):
+ """Test delete alarm request."""
+ self.alarming.delete_alarm(endpoint, auth_token, "my_alarm_id")
+
+ perf_req.assert_called_with(
+ "<ANY>/v2/alarms/my_alarm_id", auth_token, req_type="delete")
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_invalid_list_alarm_req(self, perf_req):
+ """Test invalid list alarm_req."""
+        # Request will not be performed without a resource_id
+ list_details = {"mock_details": "invalid_details"}
+ self.alarming.list_alarms(endpoint, auth_token, list_details)
+
+ perf_req.assert_not_called
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_valid_list_alarm_req(self, perf_req):
+ """Test valid list alarm request."""
+ # Minimum requirement for an alarm list is resource_id
+ list_details = {"resource_uuid": "mock_r_id"}
+ self.alarming.list_alarms(endpoint, auth_token, list_details)
+
+ perf_req.assert_called_with(
+ "<ANY>/v2/alarms/", auth_token, req_type="get")
+ perf_req.reset_mock()
+
+ # Check list with alarm_name defined
+ list_details = {"resource_uuid": "mock_r_id",
+ "alarm_name": "my_alarm",
+ "severity": "critical"}
+ self.alarming.list_alarms(endpoint, auth_token, list_details)
+
+ perf_req.assert_called_with(
+ "<ANY>/v2/alarms/", auth_token, req_type="get")
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_ack_alarm_req(self, perf_req):
+ """Test update alarm state for acknowledge alarm request."""
+ self.alarming.update_alarm_state(endpoint, auth_token, "my_alarm_id")
+
+ perf_req.assert_called_with(
+ "<ANY>/v2/alarms/my_alarm_id/state", auth_token, req_type="put",
+ payload=json.dumps("ok"))
+
+ @mock.patch.object(alarm_req.Alarming, "check_payload")
+ @mock.patch.object(Common, "_perform_request")
+ def test_update_alarm_invalid(self, perf_req, check_pay):
+ """Test update alarm with invalid get response."""
+ values = {"alarm_uuid": "my_alarm_id"}
+
+ self.alarming.update_alarm(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(mock.ANY, auth_token, req_type="get")
+ check_pay.assert_not_called
+
+ @mock.patch.object(alarm_req.Alarming, "check_payload")
+ @mock.patch.object(Common, "_perform_request")
+ def test_update_alarm_invalid_payload(self, perf_req, check_pay):
+ """Test update alarm with invalid payload."""
+ resp = Response({"name": "my_alarm",
+ "state": "alarm",
+ "gnocchi_resources_threshold_rule":
+ {"resource_id": "my_resource_id",
+ "metric": "my_metric"}})
+ perf_req.return_value = resp
+ check_pay.return_value = None
+ values = {"alarm_uuid": "my_alarm_id"}
+
+ self.alarming.update_alarm(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(mock.ANY, auth_token, req_type="get")
+ self.assertEqual(perf_req.call_count, 1)
+
+ @mock.patch.object(alarm_req.Alarming, "check_payload")
+ @mock.patch.object(Common, "_perform_request")
+ def test_update_alarm_valid(self, perf_req, check_pay):
+ """Test valid update alarm request."""
+ resp = Response({"name": "my_alarm",
+ "state": "alarm",
+ "gnocchi_resources_threshold_rule":
+ {"resource_id": "my_resource_id",
+ "metric": "my_metric"}})
+ perf_req.return_value = resp
+ values = {"alarm_uuid": "my_alarm_id"}
+
+ self.alarming.update_alarm(endpoint, auth_token, values)
+
+ check_pay.assert_called_with(values, "my_metric", "my_resource_id",
+ "my_alarm", alarm_state="alarm")
+
+ self.assertEqual(perf_req.call_count, 2)
+ # Second call is the update request
+ perf_req.assert_called_with(
+ '<ANY>/v2/alarms/my_alarm_id', auth_token,
+ req_type="put", payload=check_pay.return_value)
+
+ def test_check_valid_payload(self):
+ """Test the check payload function for a valid payload."""
+ values = {"severity": "warning",
+ "statistic": "COUNT",
+ "threshold_value": 12,
+ "operation": "GT"}
+ payload = self.alarming.check_payload(
+ values, "my_metric", "r_id", "alarm_name")
+
+ self.assertEqual(
+ json.loads(payload), {"name": "alarm_name",
+ "gnocchi_resources_threshold_rule":
+ {"resource_id": "r_id",
+ "metric": "my_metric",
+ "comparison_operator": "gt",
+ "aggregation_method": "count",
+ "threshold": 12,
+ "resource_type": "generic"},
+ "severity": "low",
+ "state": "ok",
+ "type": "gnocchi_resources_threshold"})
+
+ def test_check_valid_state_payload(self):
+ """Test the check payload function for a valid payload with state."""
+ values = {"severity": "warning",
+ "statistic": "COUNT",
+ "threshold_value": 12,
+ "operation": "GT"}
+ payload = self.alarming.check_payload(
+ values, "my_metric", "r_id", "alarm_name", alarm_state="alarm")
+
+ self.assertEqual(
+ json.loads(payload), {"name": "alarm_name",
+ "gnocchi_resources_threshold_rule":
+ {"resource_id": "r_id",
+ "metric": "my_metric",
+ "comparison_operator": "gt",
+ "aggregation_method": "count",
+ "threshold": 12,
+ "resource_type": "generic"},
+ "severity": "low",
+ "state": "alarm",
+ "type": "gnocchi_resources_threshold"})
+
+ def test_check_invalid_payload(self):
+ """Test the check payload function for an invalid payload."""
+ values = {"alarm_values": "mock_invalid_details"}
+ payload = self.alarming.check_payload(
+ values, "my_metric", "r_id", "alarm_name")
+
+ self.assertEqual(payload, None)
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_get_alarm_state(self, perf_req):
+ """Test the get alarm state function."""
+ self.alarming.get_alarm_state(endpoint, auth_token, "alarm_id")
+
+ perf_req.assert_called_with(
+ "<ANY>/v2/alarms/alarm_id/state", auth_token, req_type="get")
+
+ @mock.patch.object(Common, "get_endpoint")
+ @mock.patch.object(Common, "_perform_request")
+ def test_check_for_metric(self, perf_req, get_endpoint):
+ """Test the check for metric function."""
+ get_endpoint.return_value = "gnocchi_endpoint"
+
+ self.alarming.check_for_metric(auth_token, "metric_name", "r_id")
+
+ perf_req.assert_called_with(
+ "gnocchi_endpoint/v1/metric/", auth_token, req_type="get")
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Tests for all common OpenStack methods."""
+
+import json
+
+import logging
+
+import unittest
+
+from keystoneclient.v3 import client
+
+import mock
+
+from plugins.OpenStack.common import Common
+from plugins.OpenStack.settings import Config
+
+import requests
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class Message(object):
+ """Mock a message for an access credentials request."""
+
+ def __init__(self):
+ """Initialise the topic and value of access_cred message."""
+ self.topic = "access_credentials"
+ self.value = json.dumps({"mock_value": "mock_details",
+ "vim_type": "OPENSTACK",
+ "access_config":
+ {"openstack_site": "my_site",
+ "user": "my_user",
+ "password": "my_password",
+ "vim_tenant_name": "my_tenant"}})
+
+
+class TestCommon(unittest.TestCase):
+ """Test the common class for OpenStack plugins."""
+
+ def setUp(self):
+ """Test Setup."""
+ super(TestCommon, self).setUp()
+ self.common = Common()
+
+ @mock.patch.object(client, "Client")
+ def test_authenticate_exists(self, key_client):
+ """Testing if an authentication token already exists."""
+        # If the auth_token is already generated a new one will not be created
+ self.common._auth_token = "my_auth_token"
+ token = self.common._authenticate()
+
+ self.assertEqual(token, "my_auth_token")
+
+ @mock.patch.object(Config, "instance")
+ @mock.patch.object(client, "Client")
+ def test_authenticate_none(self, key_client, cfg):
+ """Test generating a new authentication token."""
+ # If auth_token doesn't exist one will try to be created with keystone
+ # With the configuration values from the environment
+ self.common._auth_token = None
+ config = cfg.return_value
+ url = config.OS_AUTH_URL
+ user = config.OS_USERNAME
+ pword = config.OS_PASSWORD
+ tenant = config.OS_TENANT_NAME
+
+ self.common._authenticate()
+
+ key_client.assert_called_with(auth_url=url,
+ username=user,
+ password=pword,
+ tenant_name=tenant)
+ key_client.reset_mock()
+
+ @mock.patch.object(client, "Client")
+ def test_authenticate_access_cred(self, key_client):
+ """Test generating an auth_token using access_credentials from SO."""
+ # Mock valid message from SO
+ self.common._auth_token = None
+ message = Message()
+
+ self.common._authenticate(message=message)
+
+        # The class variables are set for each configuration
+ self.assertEqual(self.common.openstack_url, "my_site")
+ self.assertEqual(self.common.user, "my_user")
+ self.assertEqual(self.common.password, "my_password")
+ self.assertEqual(self.common.tenant, "my_tenant")
+ key_client.assert_called
+
+ @mock.patch.object(requests, 'post')
+ def test_post_req(self, post):
+ """Testing a post request."""
+ self.common._perform_request("url", "auth_token", req_type="post",
+ payload="payload")
+
+ post.assert_called_with("url", data="payload", headers=mock.ANY,
+ timeout=mock.ANY)
+
+ @mock.patch.object(requests, 'get')
+ def test_get_req(self, get):
+ """Testing a get request."""
+        # Run the default get request without any parameters
+ self.common._perform_request("url", "auth_token", req_type="get")
+
+ get.assert_called_with("url", params=None, headers=mock.ANY,
+ timeout=mock.ANY)
+
+ # Test with some parameters specified
+ get.reset_mock()
+ self.common._perform_request("url", "auth_token", req_type="get",
+ params="some parameters")
+
+ get.assert_called_with("url", params="some parameters",
+ headers=mock.ANY, timeout=mock.ANY)
+
+ @mock.patch.object(requests, 'put')
+ def test_put_req(self, put):
+ """Testing a put request."""
+ self.common._perform_request("url", "auth_token", req_type="put",
+ payload="payload")
+ put.assert_called_with("url", data="payload", headers=mock.ANY,
+ timeout=mock.ANY)
+
+ @mock.patch.object(requests, 'delete')
+ def test_delete_req(self, delete):
+ """Testing a delete request."""
+ self.common._perform_request("url", "auth_token", req_type="delete")
+
+ delete.assert_called_with("url", headers=mock.ANY, timeout=mock.ANY)
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Tests for all metric request message keys."""
+
+import json
+
+import logging
+
+import unittest
+
+import mock
+
+from plugins.OpenStack.Gnocchi import metrics as metric_req
+
+from plugins.OpenStack.common import Common
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+# Mock auth_token and endpoint
+endpoint = mock.ANY
+auth_token = mock.ANY
+
+# Mock a valid metric list for some tests, and a resultant list
+metric_list = [{"name": "disk_write_ops",
+ "id": "metric_id",
+ "unit": "units",
+ "resource_id": "r_id"}]
+result_list = ["metric_id", "r_id", "units", "disk_write_ops"]
+
+
+class Response(object):
+ """Mock a response object for requests."""
+
+ def __init__(self):
+ """Initialise test and status code values."""
+ self.text = json.dumps("mock_response_text")
+ self.status_code = "STATUS_CODE"
+
+
+class TestMetricCalls(unittest.TestCase):
+ """Integration test for metric request keys."""
+
+ def setUp(self):
+ """Setup the tests for metric request keys."""
+ super(TestMetricCalls, self).setUp()
+ self.metrics = metric_req.Metrics()
+ self.metrics._common = Common()
+
+ @mock.patch.object(metric_req.Metrics, "get_metric_name")
+ @mock.patch.object(metric_req.Metrics, "get_metric_id")
+ @mock.patch.object(Common, "_perform_request")
+ def test_invalid_config_metric_req(
+ self, perf_req, get_metric, get_metric_name):
+ """Test the configure metric function, for an invalid metric."""
+ # Test invalid configuration for creating a metric
+ values = {"metric_details": "invalid_metric"}
+
+ m_id, r_id, status = self.metrics.configure_metric(
+ endpoint, auth_token, values)
+
+ perf_req.assert_not_called
+ self.assertEqual(m_id, None)
+ self.assertEqual(r_id, None)
+ self.assertEqual(status, False)
+
+ # Test with an invalid metric name, will not perform request
+ values = {"resource_uuid": "r_id"}
+ get_metric_name.return_value = "metric_name", None
+
+ m_id, r_id, status = self.metrics.configure_metric(
+ endpoint, auth_token, values)
+
+ perf_req.assert_not_called
+ self.assertEqual(m_id, None)
+ self.assertEqual(r_id, "r_id")
+ self.assertEqual(status, False)
+ get_metric_name.reset_mock()
+
+ # If metric exists, it won't be recreated
+ get_metric_name.return_value = "metric_name", "norm_name"
+ get_metric.return_value = "metric_id"
+
+ m_id, r_id, status = self.metrics.configure_metric(
+ endpoint, auth_token, values)
+
+ perf_req.assert_not_called
+ self.assertEqual(m_id, "metric_id")
+ self.assertEqual(r_id, "r_id")
+ self.assertEqual(status, False)
+
+ @mock.patch.object(metric_req.Metrics, "get_metric_name")
+ @mock.patch.object(metric_req.Metrics, "get_metric_id")
+ @mock.patch.object(Common, "_perform_request")
+ def test_valid_config_metric_req(
+ self, perf_req, get_metric, get_metric_name):
+ """Test the configure metric function, for a valid metric."""
+ # Test valid configuration and payload for creating a metric
+ values = {"resource_uuid": "r_id",
+ "metric_unit": "units"}
+ get_metric_name.return_value = "metric_name", "norm_name"
+ get_metric.return_value = None
+ payload = {"id": "r_id",
+ "metrics": {"metric_name":
+ {"archive_policy_name": "high",
+ "name": "metric_name",
+ "unit": "units"}}}
+
+ self.metrics.configure_metric(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/resource/generic", auth_token, req_type="post",
+ payload=json.dumps(payload))
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_delete_metric_req(self, perf_req):
+ """Test the delete metric function."""
+ self.metrics.delete_metric(endpoint, auth_token, "metric_id")
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/metric/metric_id", auth_token, req_type="delete")
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_delete_metric_invalid_status(self, perf_req):
+ """Test invalid response for delete request."""
+ perf_req.return_value = "404"
+
+ status = self.metrics.delete_metric(endpoint, auth_token, "metric_id")
+
+ self.assertEqual(status, False)
+
+ @mock.patch.object(metric_req.Metrics, "response_list")
+ @mock.patch.object(Common, "_perform_request")
+ def test_complete_list_metric_req(self, perf_req, resp_list):
+ """Test the complete list metric function."""
+ # Test listing metrics without any configuration options
+ values = {}
+ resp = Response()
+ perf_req.return_value = resp
+ self.metrics.list_metrics(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/metric/", auth_token, req_type="get")
+ resp_list.assert_called_with("mock_response_text")
+
+ @mock.patch.object(metric_req.Metrics, "response_list")
+ @mock.patch.object(Common, "_perform_request")
+ def test_resource_list_metric_req(self, perf_req, resp_list):
+ """Test the resource list metric function."""
+ # Test listing metrics with a resource id specified
+ values = {"resource_uuid": "resource_id"}
+ resp = Response()
+ perf_req.return_value = resp
+ self.metrics.list_metrics(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/metric/", auth_token, req_type="get")
+ resp_list.assert_called_with(
+ "mock_response_text", resource="resource_id")
+
+ @mock.patch.object(metric_req.Metrics, "response_list")
+ @mock.patch.object(Common, "_perform_request")
+ def test_name_list_metric_req(self, perf_req, resp_list):
+ """Test the metric_name list metric function."""
+ # Test listing metrics with a metric_name specified
+ values = {"metric_name": "disk_write_bytes"}
+ resp = Response()
+ perf_req.return_value = resp
+ self.metrics.list_metrics(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/metric/", auth_token, req_type="get")
+ resp_list.assert_called_with(
+ "mock_response_text", metric_name="disk_write_bytes")
+
+ @mock.patch.object(metric_req.Metrics, "response_list")
+ @mock.patch.object(Common, "_perform_request")
+ def test_combined_list_metric_req(self, perf_req, resp_list):
+ """Test the combined resource and metric list metric function."""
+ # Test listing metrics with a resource id and metric name specified
+ values = {"resource_uuid": "resource_id",
+ "metric_name": "packets_sent"}
+ resp = Response()
+ perf_req.return_value = resp
+ self.metrics.list_metrics(endpoint, auth_token, values)
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/metric/", auth_token, req_type="get")
+ resp_list.assert_called_with(
+ "mock_response_text", resource="resource_id",
+ metric_name="packets_sent")
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_get_metric_id(self, perf_req):
+ """Test get_metric_id function."""
+ self.metrics.get_metric_id(endpoint, auth_token, "my_metric", "r_id")
+
+ perf_req.assert_called_with(
+ "<ANY>/v1/resource/generic/r_id", auth_token, req_type="get")
+
+ def test_get_metric_name(self):
+ """Test the result from the get_metric_name function."""
+ # test with a valid metric_name
+ values = {"metric_name": "disk_write_ops"}
+
+ metric_name, norm_name = self.metrics.get_metric_name(values)
+
+ self.assertEqual(metric_name, "disk_write_ops")
+ self.assertEqual(norm_name, "disk.disk_ops")
+
+ # test with an invalid metric name
+ values = {"metric_name": "my_invalid_metric"}
+
+ metric_name, norm_name = self.metrics.get_metric_name(values)
+
+ self.assertEqual(metric_name, "my_invalid_metric")
+ self.assertEqual(norm_name, None)
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_valid_read_data_req(self, perf_req):
+ """Test the read metric data function, for a valid call."""
+ values = {"metric_uuid": "metric_id",
+ "collection_unit": "DAY",
+ "collection_period": 1}
+
+ self.metrics.read_metric_data(endpoint, auth_token, values)
+
+ perf_req.assert_called_once
+
+ @mock.patch.object(Common, "_perform_request")
+ def test_invalid_read_data_req(self, perf_req):
+ """Test the read metric data function, for an invalid call."""
+        # Two empty lists will be returned because the values are invalid
+ values = {}
+
+ times, data = self.metrics.read_metric_data(
+ endpoint, auth_token, values)
+
+ self.assertEqual(times, [])
+ self.assertEqual(data, [])
+
+ def test_complete_response_list(self):
+ """Test the response list function for formating metric lists."""
+ # Mock a list for testing purposes, with valid OSM metric
+ resp_list = self.metrics.response_list(metric_list)
+
+ # Check for the expected values in the resulting list
+ for l in result_list:
+ self.assertIn(l, resp_list[0])
+
+ def test_name_response_list(self):
+ """Test the response list with metric name configured."""
+ # Mock the metric name to test a metric name list
+ # Test with a name that is not in the list
+ invalid_name = "my_metric"
+ resp_list = self.metrics.response_list(
+ metric_list, metric_name=invalid_name)
+
+ self.assertEqual(resp_list, [])
+
+ # Test with a name on the list
+ valid_name = "disk_write_ops"
+ resp_list = self.metrics.response_list(
+ metric_list, metric_name=valid_name)
+
+ # Check for the expected values in the resulting list
+ for l in result_list:
+ self.assertIn(l, resp_list[0])
+
+ def test_resource_response_list(self):
+ """Test the response list with resource_id configured."""
+ # Mock a resource_id to test a resource list
+ # Test with resource not on the list
+ invalid_id = "mock_resource"
+ resp_list = self.metrics.response_list(metric_list, resource=invalid_id)
+
+ self.assertEqual(resp_list, [])
+
+ # Test with a resource on the list
+ valid_id = "r_id"
+ resp_list = self.metrics.response_list(metric_list, resource=valid_id)
+
+ # Check for the expected values in the resulting list
+ for l in result_list:
+ self.assertIn(l, resp_list[0])
+
+ def test_combined_response_list(self):
+ """Test the response list function with resource_id and metric_name."""
+ # Test for a combined resource and name list
+ # resource and name are on the list
+ valid_name = "disk_write_ops"
+ valid_id = "r_id"
+ resp_list = self.metrics.response_list(
+ metric_list, metric_name=valid_name, resource=valid_id)
+
+ # Check for the expected values in the resulting list
+ for l in result_list:
+ self.assertIn(l, resp_list[0])
+
+ # resource not on list
+ invalid_id = "mock_resource"
+ resp_list = self.metrics.response_list(
+ metric_list, metric_name=valid_name, resource=invalid_id)
+
+ self.assertEqual(resp_list, [])
+
+ # metric name not on list
+ invalid_name = "mock_metric"
+ resp_list = self.metrics.response_list(
+ metric_list, metric_name=invalid_name, resource=valid_id)
+
+ self.assertEqual(resp_list, [])
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Tests for all metric request message keys."""
+
+import json
+
+import logging
+
+import unittest
+
+import mock
+
+from plugins.OpenStack.Gnocchi import metrics as metric_req
+
+from plugins.OpenStack.common import Common
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class Message(object):
+ """A class to mock a message object value for metric requests."""
+
+ def __init__(self):
+ """Initialize a mocked message instance."""
+ self.topic = "metric_request"
+ self.key = None
+ self.value = json.dumps({"mock_message": "message_details"})
+
+
+class TestMetricReq(unittest.TestCase):
+ """Integration test for metric request keys."""
+
+ def setUp(self):
+ """Setup the tests for metric request keys."""
+ super(TestMetricReq, self).setUp()
+ self.common = Common()
+ self.metrics = metric_req.Metrics()
+
+ @mock.patch.object(Common, "_authenticate")
+ def test_access_cred_metric_auth(self, auth):
+ """Test authentication with access credentials."""
+ message = Message()
+
+ self.metrics.metric_calls(message, self.common, "my_auth_token")
+
+ auth.assert_not_called
+ self.assertEqual(self.metrics.auth_token, "my_auth_token")
+
+ @mock.patch.object(Common, "_authenticate")
+ def test_env_metric_auth(self, auth):
+ """Test authentication with environment variables."""
+ message = Message()
+
+ self.metrics.metric_calls(message, self.common, None)
+
+ auth.assert_called_with()
+
+ @mock.patch.object(metric_req.Metrics, "delete_metric")
+ def test_delete_metric_key(self, del_metric):
+ """Test the functionality for a delete metric request."""
+ # Mock a message value and key
+ message = Message()
+ message.key = "delete_metric_request"
+ message.value = json.dumps({"metric_uuid": "my_metric_id"})
+
+ # Call the metric functionality and check delete request
+ self.metrics.metric_calls(message, self.common, "my_auth_token")
+
+ del_metric.assert_called_with(mock.ANY, mock.ANY, "my_metric_id")
+
+ @mock.patch.object(metric_req.Metrics, "list_metrics")
+ def test_list_metric_key(self, list_metrics):
+ """Test the functionality for a list metric request."""
+ # Mock a message with list metric key and value
+ message = Message()
+ message.key = "list_metric_request"
+ message.value = json.dumps({"metrics_list_request": "metric_details"})
+
+ # Call the metric functionality and check list functionality
+ self.metrics.metric_calls(message, self.common, "my_auth_token")
+ list_metrics.assert_called_with(mock.ANY, mock.ANY, "metric_details")
+
+ @mock.patch.object(metric_req.Metrics, "read_metric_data")
+ @mock.patch.object(metric_req.Metrics, "list_metrics")
+ @mock.patch.object(metric_req.Metrics, "delete_metric")
+ @mock.patch.object(metric_req.Metrics, "configure_metric")
+ def test_update_metric_key(self, config_metric, delete_metric, list_metrics,
+ read_data):
+ """Test the functionality for an update metric request."""
+ # Mock a message with update metric key and value
+ message = Message()
+ message.key = "update_metric_request"
+ message.value = json.dumps({"metric_create":
+ {"metric_name": "my_metric",
+ "resource_uuid": "my_r_id"}})
+
+ # Call metric functionality and confirm no function is called
+ # Gnocchi does not support updating a metric configuration
+ self.metrics.metric_calls(message, self.common, "my_auth_token")
+ config_metric.assert_not_called
+ list_metrics.assert_not_called
+ delete_metric.assert_not_called
+ read_data.assert_not_called
+
+ @mock.patch.object(metric_req.Metrics, "configure_metric")
+ def test_config_metric_key(self, config_metric):
+ """Test the functionality for a create metric request."""
+ # Mock a message with create metric key and value
+ message = Message()
+ message.key = "create_metric_request"
+ message.value = json.dumps({"metric_create": "metric_details"})
+
+ # Call metric functionality and check config metric
+ config_metric.return_value = "metric_id", "resource_id", True
+ self.metrics.metric_calls(message, self.common, "my_auth_token")
+ config_metric.assert_called_with(mock.ANY, mock.ANY, "metric_details")
+
+ @mock.patch.object(metric_req.Metrics, "read_metric_data")
+ def test_read_data_key(self, read_data):
+ """Test the functionality for a read metric data request."""
+ # Mock a message with a read data key and value
+ message = Message()
+ message.key = "read_metric_data_request"
+ message.value = json.dumps({"alarm_uuid": "alarm_id"})
+
+ # Call metric functionality and check read data metrics
+ read_data.return_value = "time_stamps", "data_values"
+ self.metrics.metric_calls(message, self.common, "my_auth_token")
+ read_data.assert_called_with(
+ mock.ANY, mock.ANY, json.loads(message.value))
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Test that the correct responses are generated for each message."""
+
+import logging
+
+import unittest
+
+import mock
+
+from plugins.OpenStack import response as resp
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class TestOpenStackResponse(unittest.TestCase):
+ """Tests for responses generated by the OpenStack plugins."""
+
+ def setUp(self):
+ """Setup for testing OpenStack plugin responses."""
+ super(TestOpenStackResponse, self).setUp()
+ self.plugin_resp = resp.OpenStack_Response()
+
+ def test_invalid_key(self):
+ """Test if an invalid key is entered for a response."""
+ message = self.plugin_resp.generate_response("mock_invalid_key")
+ self.assertEqual(message, None)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "alarm_list_response")
+ def test_list_alarm_resp(self, alarm_list_resp):
+ """Test out a function call for a list alarm response."""
+ message = self.plugin_resp.generate_response("list_alarm_response")
+ self.assertEqual(alarm_list_resp.return_value, message)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "list_metric_response")
+ def test_list_metric_resp(self, metric_list_resp):
+ """Test list metric response function call."""
+ message = self.plugin_resp.generate_response("list_metric_response")
+ self.assertEqual(message, metric_list_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "delete_alarm_response")
+ def test_delete_alarm_resp(self, del_alarm_resp):
+ """Test delete alarm response function call."""
+ message = self.plugin_resp.generate_response("delete_alarm_response")
+ self.assertEqual(message, del_alarm_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "delete_metric_response")
+ def test_delete_metric_resp(self, del_metric_resp):
+ """Test the response functionality of delete metric response."""
+ message = self.plugin_resp.generate_response("delete_metric_response")
+ self.assertEqual(message, del_metric_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "create_alarm_response")
+ def test_create_alarm_resp(self, config_alarm_resp):
+ """Test create alarm response function call."""
+ message = self.plugin_resp.generate_response("create_alarm_response")
+ self.assertEqual(message, config_alarm_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "metric_create_response")
+ def test_create_metric_resp(self, config_metric_resp):
+ """Test create metric response function call."""
+ message = self.plugin_resp.generate_response("create_metric_response")
+ self.assertEqual(message, config_metric_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "update_alarm_response")
+ def test_update_alarm_resp(self, up_alarm_resp):
+ """Test update alarm response function call."""
+ message = self.plugin_resp.generate_response("update_alarm_response")
+ self.assertEqual(message, up_alarm_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "update_metric_response")
+ def test_update_metric_resp(self, up_metric_resp):
+ """Test update metric response function call."""
+ message = self.plugin_resp.generate_response("update_metric_response")
+ self.assertEqual(message, up_metric_resp.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "notify_alarm")
+ def test_notify_alarm(self, notify_alarm):
+ """Test notify alarm response function call."""
+ message = self.plugin_resp.generate_response("notify_alarm")
+ self.assertEqual(message, notify_alarm.return_value)
+
+ @mock.patch.object(
+ resp.OpenStack_Response, "read_metric_data_response")
+ def test_read_metric_data_resp(self, read_data_resp):
+ """Test read metric data response function call."""
+ message = self.plugin_resp.generate_response(
+ "read_metric_data_response")
+ self.assertEqual(message, read_data_resp.return_value)
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+"""Tests for settings for OpenStack plugins configurations."""
+
+import logging
+
+import os
+
+import unittest
+
+import mock
+
+from plugins.OpenStack.settings import Config
+
+__author__ = "Helena McGough"
+
+log = logging.getLogger(__name__)
+
+
+class TestSettings(unittest.TestCase):
+ """Test the settings class for OpenStack plugin configuration."""
+
+ def setUp(self):
+ """Test Setup."""
+ super(TestSettings, self).setUp()
+ self.cfg = Config.instance()
+
+ def test_set_os_username(self):
+ """Test reading the environment for OpenStack plugin configuration."""
+ self.cfg.read_environ("my_service")
+
+ self.assertEqual(self.cfg.OS_USERNAME, "my_service")
+
+ @mock.patch.object(os, "environ")
+ def test_read_environ(self, environ):
+ """Test reading environment variables for configuration."""
+ self.cfg.read_environ("my_service")
+
+ # Called for each key in the configuration dictionary
+ environ.assert_called_once
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: helena.mcgough@intel.com or adrian.hoban@intel.com
+##
+
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+#__author__ = "Prithiv Mohan"
+#__date__ = "25/Sep/2017"
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+
+#__author__ = "Prithiv Mohan"
+#__date__ = "25/Sep/2017"
+
+import sys
+import threading
+import pytest
+from kafka import KafkaConsumer, KafkaProducer
+
+def test_end_to_end(kafka_broker):
+ connect_str = ':'.join([kafka_broker.host, str(kafka_broker.port)])
+ producer = KafkaProducer(bootstrap_servers=connect_str,
+ retries=5,
+ max_block_ms=10000,
+ value_serializer=str.encode)
+ consumer = KafkaConsumer(bootstrap_servers=connect_str,
+ group_id=None,
+ consumer_timeout_ms=10000,
+ auto_offset_reset='earliest',
+ value_deserializer=bytes.decode)
+
+ topic = 'TutorialTopic'
+
+ messages = 100
+ futures = []
+ for i in range(messages):
+ futures.append(producer.send(topic, 'msg %d' % i))
+ ret = [f.get(timeout=30) for f in futures]
+ assert len(ret) == messages
+
+ producer.close()
+
+ consumer.subscribe([topic])
+ msgs = set()
+ for i in range(messages):
+ try:
+ msgs.add(next(consumer).value)
+ except StopIteration:
+ break
+
+ assert msgs == set(['msg %d' % i for i in range(messages)])
--- /dev/null
+# Copyright 2017 Intel Research and Development Ireland Limited
+# *************************************************************
+
+# This file is part of OSM Monitoring module
+# All Rights Reserved to Intel Corporation
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
+##
+"""This is a KafkaProducer with a request function to test the plugins."""
+
+import json
+
+import logging as log
+
+import os
+
+import jsmin
+
+from kafka import KafkaProducer as kaf
+
+from kafka.errors import KafkaError
+
+
+class KafkaProducer(object):
+ """A KafkaProducer for testing purposes."""
+
+ def __init__(self, topic):
+ """Initialize a KafkaProducer and its topic."""
+ self._topic = topic
+
+ if "ZOOKEEPER_URI" in os.environ:
+ broker = os.getenv("ZOOKEEPER_URI")
+ else:
+ broker = "localhost:9092"
+
+ '''
+ If the zookeeper broker URI is not set in the env, by default,
+ localhost container is taken as the host because an instance
+ of Kafka is already running.
+ '''
+
+ self.producer = kaf(
+ key_serializer=str.encode,
+ value_serializer=lambda v: json.dumps(v).encode('ascii'),
+ bootstrap_servers=broker, api_version=(0, 10))
+
+ def publish(self, key, value, topic):
+ """Send messages to the message bus with a defined key and topic."""
+ try:
+ future = self.producer.send(topic=topic, key=key, value=value)
+ self.producer.flush()
+ except Exception:
+ log.exception("Error publishing to {} topic." .format(topic))
+ raise
+ try:
+ record_metadata = future.get(timeout=10)
+ log.debug("TOPIC:", record_metadata.topic)
+ log.debug("PARTITION:", record_metadata.partition)
+ log.debug("OFFSET:", record_metadata.offset)
+ except KafkaError:
+ pass
+
+ def request(self, path, key, message, topic):
+ """Test json files are loaded and sent on the message bus."""
+ # External to MON
+ payload_create_alarm = jsmin(open(os.path.join(path)).read())
+ self.publish(key=key,
+ value=json.loads(payload_create_alarm),
+ topic=topic)
__date__ = "14/Sep/2017"
from setuptools import setup
from os import system
-_name = 'osm-mon'
+_name = 'osm_mon'
_version = '1.0'
_description = 'OSM Monitoring Module'
_author = 'Prithiv Mohan'
license = _license,
packages = [_name],
package_dir = {_name: _name},
- package_data = {_name: ['osm-mon/core/message_bus/*.py', 'osm-mon/core/models/*.json',
- 'osm-mon/plugins/OpenStack/Aodh/*.py', 'osm-mon/plugins/OpenStack/Gnocchi/*.py',
- 'osm-mon/plugins/vRealiseOps/*', 'osm-mon/plugins/CloudWatch/*']},
+ package_data = {_name: ['osm_mon/core/message_bus/*.py', 'osm_mon/core/models/*.json',
+ 'osm_mon/plugins/OpenStack/Aodh/*.py', 'osm_mon/plugins/OpenStack/Gnocchi/*.py',
+ 'osm_mon/plugins/vRealiseOps/*', 'osm_mon/plugins/CloudWatch/*']},
data_files = [('/etc/systemd/system/', ['scripts/kafka.sh']),
],
- scripts=['osm-mon/plugins/vRealiseOps/vROPs_Webservice/vrops_webservice',
- 'kafkad', 'osm-mon/core/message_bus/common_consumer'],
+ scripts=['osm_mon/plugins/vRealiseOps/vROPs_Webservice/vrops_webservice',
+ 'kafkad', 'osm_mon/core/message_bus/common_consumer'],
include_package_data=True,
)
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#__author__ = "Prithiv Mohan"
-#__date__ = "25/Sep/2017"
-
-
-#!/bin/sh
-rm -rf pool
-rm -rf dists
-mkdir -p pool/MON
-mv .build/*.deb pool/MON/
-mkdir -p dists/unstable/MON/binary-amd64/
-apt-ftparchive packages pool/MON > dists/unstable/MON/binary-amd64/Packages
-gzip -9fk dists/unstable/MON/binary-amd64/Packages
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#__author__ = "Prithiv Mohan"
-#__date__ = "14/Sep/2017"
-
-#!/bin/bash
-make clean all BRANCH=master
-make package
+++ /dev/null
-# Copyright 2017 Intel Research and Development Ireland Limited
-# *************************************************************
-
-# This file is part of OSM Monitoring module
-# All Rights Reserved to Intel Corporation
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: prithiv.mohan@intel.com or adrian.hoban@intel.com
-
-#__author__ = "Prithiv Mohan"
-#__date__ = "14/Sep/2017"
-
-#!/bin/bash
-echo "UNITTEST"