From: Helena McGough
Date: Wed, 20 Sep 2017 16:42:22 +0000 (+0100)
Subject: Minor OpenStack plugin updates
X-Git-Tag: v4.0.0~79
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=refs%2Fchanges%2F74%2F2274%2F1;p=osm%2FMON.git

Minor OpenStack plugin updates

- Updated requirements.txt for the plugin
- Updated authentication process
- Refactored create metric process
- Refactored the create alarm process
- Updated the alarm notifier process

Signed-off-by: Helena McGough
---

diff --git a/plugins/OpenStack/Aodh/alarming.py b/plugins/OpenStack/Aodh/alarming.py
index b5e4a3e..c54881a 100644
--- a/plugins/OpenStack/Aodh/alarming.py
+++ b/plugins/OpenStack/Aodh/alarming.py
@@ -33,32 +33,30 @@ from plugins.OpenStack.response import OpenStack_Response
 
 __author__ = "Helena McGough"
 
-ALARM_NAMES = [
-    "Average_Memory_Usage_Above_Threshold",
-    "Read_Latency_Above_Threshold",
-    "Write_Latency_Above_Threshold",
-    "DISK_READ_OPS",
-    "DISK_WRITE_OPS",
-    "DISK_READ_BYTES",
-    "DISK_WRITE_BYTES",
-    "Net_Packets_Dropped",
-    "Packets_in_Above_Threshold",
-    "Packets_out_Above_Threshold",
-    "CPU_Utilization_Above_Threshold"]
+ALARM_NAMES = {
+    "average_memory_usage_above_threshold": "average_memory_utilization",
+    "disk_read_ops": "disk_read_ops",
+    "disk_write_ops": "disk_write_ops",
+    "disk_read_bytes": "disk_read_bytes",
+    "disk_write_bytes": "disk_write_bytes",
+    "net_packets_dropped": "packets_dropped",
+    "packets_in_above_threshold": "packets_received",
+    "packets_out_above_threshold": "packets_sent",
+    "cpu_utilization_above_threshold": "cpu_utilization"}
 
 SEVERITIES = {
-    "WARNING": "low",
-    "MINOR": "low",
-    "MAJOR": "moderate",
-    "CRITICAL": "critical",
-    "INDETERMINATE": "critical"}
+    "warning": "low",
+    "minor": "low",
+    "major": "moderate",
+    "critical": "critical",
+    "indeterminate": "critical"}
 
 STATISTICS = {
-    "AVERAGE": "avg",
-    "MINIMUM": "min",
-    "MAXIMUM": "max",
-    "COUNT": "count",
-    "SUM": "sum"}
+    "average": "avg",
+    "minimum": "min",
+    "maximum": "max",
+    "count": "count",
+    "sum": "sum"}
 
 
 class Alarming(object):
@@ -93,7 +91,7 @@ class Alarming(object):
         log.info("Alarm action required: %s" % (message.topic))
 
         # Generate and auth_token and endpoint for request
-        auth_token, endpoint = self.authenticate(values)
+        auth_token, endpoint = self.authenticate()
 
         if message.key == "create_alarm_request":
             # Configure/Update an alarm
@@ -119,7 +117,7 @@ class Alarming(object):
             # and generate the appropriate list
             list_details = values['alarm_list_request']
             try:
-                name = list_details['alarm_name']
+                name = list_details['alarm_name'].lower()
                 alarm_list = self.list_alarms(
                     endpoint, auth_token, alarm_name=name)
             except Exception as a_name:
@@ -132,11 +130,11 @@ class Alarming(object):
                 log.debug("No resource id specified for this list:\
                           %s", r_id)
                 try:
-                    severe = list_details['severity']
+                    severe = list_details['severity'].lower()
                     alarm_list = self.list_alarms(
                         endpoint, auth_token, severity=severe)
                 except Exception as exc:
-                    log.warn("No severity specified for list: %s.\
+                    log.info("No severity specified for list: %s.\
                              will return full list.", exc)
                     alarm_list = self.list_alarms(
                         endpoint, auth_token)
@@ -216,23 +214,34 @@ class Alarming(object):
         url = "{}/v2/alarms/".format(endpoint)
 
         # Check if the desired alarm is supported
-        alarm_name = values['alarm_name']
-        if alarm_name not in ALARM_NAMES:
+        alarm_name = values['alarm_name'].lower()
+        metric_name = values['metric_name'].lower()
+        resource_id = values['resource_uuid']
+
+        if alarm_name not in ALARM_NAMES.keys():
             log.warn("This alarm is not supported, by a valid metric.")
             return None, False
+        if ALARM_NAMES[alarm_name] != metric_name:
+            log.warn("This is not the correct metric for this alarm.")
+            return None, False
+
+        # Check for the required metric
+        metric_id = self.check_for_metric(auth_token, metric_name, resource_id)
 
         try:
-            metric_name = values['metric_name']
-            resource_id = values['resource_uuid']
-            # Check the payload for the desired alarm
-            payload = self.check_payload(values, metric_name, resource_id,
-                                         alarm_name)
-            new_alarm = self._common._perform_request(
-                url, auth_token, req_type="post", payload=payload)
-
-            return json.loads(new_alarm.text)['alarm_id'], True
+            if metric_id is not None:
+                # Create the alarm if metric is available
+                payload = self.check_payload(values, metric_name, resource_id,
+                                             alarm_name)
+                new_alarm = self._common._perform_request(
+                    url, auth_token, req_type="post", payload=payload)
+                return json.loads(new_alarm.text)['alarm_id'], True
+            else:
+                log.warn("The required Gnocchi metric does not exist.")
+                return None, False
+
         except Exception as exc:
-            log.warn("Alarm creation could not be performed: %s", exc)
+            log.warn("Failed to create the alarm: %s", exc)
             return None, False
 
     def delete_alarm(self, endpoint, auth_token, alarm_id):
@@ -310,7 +319,7 @@ class Alarming(object):
             metric_name = rule['metric']
         except Exception as exc:
             log.warn("Failed to retreive existing alarm info: %s.\
-                     Can only update OSM created alarms.", exc)
+                     Can only update OSM alarms.", exc)
             return None, False
 
         # Generates and check payload configuration for alarm update
@@ -334,13 +343,13 @@ class Alarming(object):
         """Check that the payload is configuration for update/create alarm."""
         try:
             # Check state and severity
-            severity = values['severity']
-            if severity == "INDETERMINATE":
+            severity = values['severity'].lower()
+            if severity == "indeterminate":
                 alarm_state = "insufficient data"
             if alarm_state is None:
                 alarm_state = "ok"
 
-            statistic = values['statistic']
+            statistic = values['statistic'].lower()
             # Try to configure the payload for the update/create request
             # Can only update: threshold, operation, statistic and
             # the severity of the alarm
@@ -360,20 +369,16 @@ class Alarming(object):
             log.warn("Alarm is not configured correctly: %s", exc)
         return None
 
-    def authenticate(self, values):
+    def authenticate(self):
         """Generate an authentication token and endpoint for alarm request."""
         try:
             # Check for a tenant_id
-            auth_token = self._common._authenticate(
-                tenant_id=values['tenant_uuid'])
-            endpoint = self._common.get_endpoint("alarming")
-        except Exception as exc:
-            log.warn("Tenant ID is not specified. Will use a generic\
-                     authentication: %s", exc)
             auth_token = self._common._authenticate()
             endpoint = self._common.get_endpoint("alarming")
-
-        return auth_token, endpoint
+            return auth_token, endpoint
+        except Exception as exc:
+            log.warn("Authentication to Keystone failed:%s", exc)
+            return None, None
 
     def get_alarm_state(self, endpoint, auth_token, alarm_id):
         """Get the state of the alarm."""
@@ -386,3 +391,23 @@ class Alarming(object):
         except Exception as exc:
             log.warn("Failed to get the state of the alarm:%s", exc)
             return None
+
+    def check_for_metric(self, auth_token, m_name, r_id):
+        """Check for the alarm metric."""
+        try:
+            endpoint = self._common.get_endpoint("metric")
+
+            url = "{}/v1/metric/".format(endpoint)
+            metric_list = self._common._perform_request(
+                url, auth_token, req_type="get")
+
+            for metric in json.loads(metric_list.text):
+                name = metric['name']
+                resource = metric['resource_id']
+                if (name == m_name and resource == r_id):
+                    metric_id = metric['id']
+            log.info("The required metric exists, an alarm will be created.")
+            return metric_id
+        except Exception as exc:
+            log.info("Desired Gnocchi metric not found:%s", exc)
+            return None
diff --git a/plugins/OpenStack/Aodh/notifier.py b/plugins/OpenStack/Aodh/notifier.py
index 2068f03..aa95215 100644
--- a/plugins/OpenStack/Aodh/notifier.py
+++ b/plugins/OpenStack/Aodh/notifier.py
@@ -26,47 +26,61 @@ import logging as log
 
 from core.message_bus.producer import KafkaProducer
 
+from plugins.OpenStack.Aodh.alarming import Alarming
 from plugins.OpenStack.response import OpenStack_Response
-from plugins.OpenStack.singleton import Singleton
+from plugins.OpenStack.settings import Config
 
 __author__ = "Helena McGough"
 
 ALARM_NAMES = [
-    "Average_Memory_Usage_Above_Threshold",
-    "Read_Latency_Above_Threshold",
-    "Write_Latency_Above_Threshold",
-    "DISK_READ_OPS",
-    "DISK_WRITE_OPS",
-    "DISK_READ_BYTES",
-    "DISK_WRITE_BYTES",
-    "Net_Packets_Dropped",
-    "Packets_in_Above_Threshold",
-    "Packets_out_Above_Threshold",
-    "CPU_Utilization_Above_Threshold"]
-
-
-@Singleton
+    "average_memory_usage_above_threshold",
+    "disk_read_ops",
+    "disk_write_ops",
+    "disk_read_bytes",
+    "disk_write_bytes",
+    "net_packets_dropped",
+    "packets_in_above_threshold",
+    "packets_out_above_threshold",
+    "cpu_utilization_above_threshold"]
+
+
+def register_notifier():
+    """Run the notifier instance."""
+    config = Config.instance()
+    instance = Notifier(config=config)
+    instance.config()
+    instance.notify()
+
+
 class Notifier(object):
     """Alarm Notification class."""
 
-    def __init__(self):
+    def __init__(self, config):
         """Initialize alarm notifier."""
+        log.info("Initialize the notifier for the SO.")
+        self._config = config
         self._response = OpenStack_Response()
-        self._producer = KafkaProducer("alarm_response")
+        self._alarming = Alarming()
+
+    def config(self):
+        """Configure the alarm notifier."""
+        log.info("Configure the notifier instance.")
+        self._config.read_environ("aodh")
 
-    def notify(self, alarming):
+    def notify(self):
         """Send alarm notifications responses to the SO."""
-        auth_token, endpoint = alarming.authenticate(None)
+        log.info("Checking for alarm notifications")
+        auth_token, endpoint = self._alarming.authenticate()
 
         while(1):
-            alarm_list = json.loads(alarming.list_alarms(endpoint, auth_token))
-            for alarm in alarm_list:
+            alarm_list = self._alarming.list_alarms(endpoint, auth_token)
+            for alarm in json.loads(alarm_list):
                 alarm_id = alarm['alarm_id']
                 alarm_name = alarm['name']
 
                 # Send a notification response to the SO on alarm trigger
                 if alarm_name in ALARM_NAMES:
-                    alarm_state = alarming.get_alarm_state(
+                    alarm_state = self._alarming.get_alarm_state(
                         endpoint, auth_token, alarm_id)
                     if alarm_state == "alarm":
                         # Generate and send an alarm notification response
@@ -82,3 +96,5 @@ class Notifier(object):
                             'notify_alarm', resp_message, 'alarm_response')
                     except Exception as exc:
                         log.warn("Failed to send notify response:%s", exc)
+
+register_notifier()
diff --git a/plugins/OpenStack/Aodh/plugin_instance.py b/plugins/OpenStack/Aodh/plugin_instance.py
index 847d44b..ade14f3 100644
--- a/plugins/OpenStack/Aodh/plugin_instance.py
+++ b/plugins/OpenStack/Aodh/plugin_instance.py
@@ -24,7 +24,6 @@ import logging as log
 
 from plugins.OpenStack.Aodh.alarming import Alarming
-from plugins.OpenStack.Aodh.notifier import Notifier
 from plugins.OpenStack.settings import Config
 
 __author__ = "Helena McGough"
 
@@ -34,10 +33,9 @@ def register_plugin():
     """Register the plugin."""
     # Initialize configuration and notifications
    config = Config.instance()
-    notifier = Notifier.instance()
 
     # Intialize plugin
-    instance = Plugin(config=config, notifier=notifier)
+    instance = Plugin(config=config)
     instance.config()
     instance.alarm()
 
@@ -45,12 +43,11 @@ def register_plugin():
 class Plugin(object):
     """Aodh plugin for OSM MON."""
 
-    def __init__(self, config, notifier):
+    def __init__(self, config):
        """Plugin instance."""
         log.info("Initialze the plugin instance.")
         self._config = config
         self._alarming = Alarming()
-        self._notifier = notifier
 
     def config(self):
         """Configure plugin."""
@@ -62,11 +59,4 @@ class Plugin(object):
         log.info("Begin alarm functionality.")
         self._alarming.alarming()
 
-    def notify(self):
-        """Send notifications to the SO."""
-        # TODO(mcgoughh): Run simultaneously so that notifications
-        # can be sent while messages are being consumed
-        log.info("Sending Openstack notifications to the SO.")
-        self._notifier.notify(self._alarming)
-
 register_plugin()
diff --git a/plugins/OpenStack/Gnocchi/metrics.py b/plugins/OpenStack/Gnocchi/metrics.py
index ca6f47a..2004c38 100644
--- a/plugins/OpenStack/Gnocchi/metrics.py
+++ b/plugins/OpenStack/Gnocchi/metrics.py
@@ -36,15 +36,15 @@ from plugins.OpenStack.response import OpenStack_Response
 __author__ = "Helena McGough"
 
 METRIC_MAPPINGS = {
-    "AVERAGE_MEMORY_UTILIZATION": "memory.percent",
-    "DISK_READ_OPS": "disk.disk_ops",
-    "DISK_WRITE_OPS": "disk.disk_ops",
-    "DISK_READ_BYTES": "disk.disk_octets",
-    "DISK_WRITE_BYTES": "disk.disk_octets",
-    "PACKETS_DROPPED": "interface.if_dropped",
-    "PACKETS_RECEIVED": "interface.if_packets",
-    "PACKETS_SENT": "interface.if_packets",
-    "CPU_UTILIZATION": "cpu.percent",
+    "average_memory_utilization": "memory.percent",
+    "disk_read_ops": "disk.disk_ops",
+    "disk_write_ops": "disk.disk_ops",
+    "disk_read_bytes": "disk.disk_octets",
+    "disk_write_bytes": "disk.disk_octets",
+    "packets_dropped": "interface.if_dropped",
+    "packets_received": "interface.if_packets",
+    "packets_sent": "interface.if_packets",
+    "cpu_utilization": "cpu.percent",
 }
 
 PERIOD_MS = {
@@ -86,7 +86,7 @@ class Metrics(object):
 
             if vim_type == "openstack":
                 # Generate auth_token and endpoint
-                auth_token, endpoint = self.authenticate(values)
+                auth_token, endpoint = self.authenticate()
 
                 if message.key == "create_metric_request":
                     # Configure metric
@@ -211,41 +211,55 @@ class Metrics(object):
             endpoint, auth_token, metric_name, resource_id)
 
         if metric_id is None:
-            # Need to create a new version of the resource for gnocchi to
-            # create the new metric based on that resource
-            url = "{}/v1/resource/generic".format(endpoint)
+            # Try appending metric to existing resource
             try:
-                # Try to create a new resource for the new metric
-                metric = {'name': metric_name,
-                          'archive_policy_name': 'high',
-                          'unit': values['metric_unit'], }
-
-                resource_payload = json.dumps({'id': resource_id,
-                                               'metrics': {
-                                                   metric_name: metric}})
-
-                new_resource = self._common._perform_request(
-                    url, auth_token, req_type="post", payload=resource_payload)
-
-                resource_id = json.loads(new_resource.text)['id']
-            except Exception as exc:
-                # Append new metric to existing resource
-                log.debug("This resource already exists:%s, appending metric.",
-                          exc)
                 base_url = "{}/v1/resource/generic/%s/metric"
                 res_url = base_url.format(endpoint) % resource_id
                 payload = {metric_name: {'archive_policy_name': 'high',
                                          'unit': values['metric_unit']}}
-                self._common._perform_request(
+                result = self._common._perform_request(
                     res_url, auth_token, req_type="post",
                     payload=json.dumps(payload))
+                # Get id of newly created metric
+                for row in json.loads(result.text):
+                    if row['name'] == metric_name:
+                        metric_id = row['id']
+                log.info("Appended metric to existing resource.")
+
+                return metric_id, resource_id, True
+            except Exception as exc:
+                # Gnocchi version of resource does not exist creating a new one
+                log.info("Failed to append metric to existing resource:%s",
+                         exc)
+                try:
+                    url = "{}/v1/resource/generic".format(endpoint)
+                    metric = {'name': metric_name,
+                              'archive_policy_name': 'high',
+                              'unit': values['metric_unit'], }
+
+                    resource_payload = json.dumps({'id': resource_id,
+                                                   'metrics': {
+                                                       metric_name: metric}})
+
+                    resource = self._common._perform_request(
+                        url, auth_token, req_type="post",
+                        payload=resource_payload)
+
+                    # Return the newly created resource_id for creating alarms
+                    new_resource_id = json.loads(resource.text)['id']
+                    log.info("Created new resource for metric: %s",
+                             new_resource_id)
 
-            metric_id = self.get_metric_id(
-                endpoint, auth_token, metric_name, resource_id)
-            return metric_id, resource_id, True
+                    metric_id = self.get_metric_id(
+                        endpoint, auth_token, metric_name, new_resource_id)
+
+                    return metric_id, new_resource_id, True
+                except Exception as exc:
+                    log.warn("Failed to create a new resource:%s", exc)
+                    return None, None, False
 
         else:
-            log.debug("This metric already exists for this resource.")
+            log.info("This metric already exists for this resource.")
 
         return metric_id, resource_id, False
@@ -318,17 +332,17 @@ class Metrics(object):
                 url, auth_token, req_type="get")
             return json.loads(result.text)['metrics'][metric_name]
         except Exception:
-            log.debug("Metric doesn't exist. No metric_id available")
+            log.info("Metric doesn't exist. No metric_id available")
             return None
 
     def get_metric_name(self, values):
         """Check metric name configuration and normalize."""
         try:
             # Normalize metric name
-            metric_name = values['metric_name']
+            metric_name = values['metric_name'].lower()
             return metric_name, METRIC_MAPPINGS[metric_name]
         except KeyError:
-            log.warn("Metric name %s is invalid.", metric_name)
+            log.info("Metric name %s is invalid.", metric_name)
             return metric_name, None
 
     def read_metric_data(self, endpoint, auth_token, values):
@@ -370,20 +384,17 @@ class Metrics(object):
             log.warn("Failed to gather specified measures: %s", exc)
         return timestamps, data
 
-    def authenticate(self, values):
+    def authenticate(self):
         """Generate an authentication token and endpoint for metric request."""
         try:
             # Check for a tenant_id
-            auth_token = self._common._authenticate(
-                tenant_id=values['tenant_uuid'])
-            endpoint = self._common.get_endpoint("metric")
-        except KeyError:
-            log.warn("Tenant ID is not specified. Will use a generic\
-                     authentication.")
             auth_token = self._common._authenticate()
             endpoint = self._common.get_endpoint("metric")
+            return auth_token, endpoint
+        except Exception as exc:
+            log.warn("Authentication to Keystone failed: %s", exc)
 
-        return auth_token, endpoint
+            return None, None
 
     def response_list(self, metric_list, metric_name=None, resource=None):
         """Create the appropriate lists for a list response."""
diff --git a/plugins/OpenStack/common.py b/plugins/OpenStack/common.py
index 25cca45..eed122d 100644
--- a/plugins/OpenStack/common.py
+++ b/plugins/OpenStack/common.py
@@ -41,7 +41,7 @@ class Common(object):
         self._endpoint = None
         self._ks = None
 
-    def _authenticate(self, tenant_id=None):
+    def _authenticate(self):
         """Authenticate and/or renew the authentication token."""
         if self._auth_token is not None:
             return self._auth_token
diff --git a/plugins/OpenStack/settings.py b/plugins/OpenStack/settings.py
index e7b06e2..4a443aa 100644
--- a/plugins/OpenStack/settings.py
+++ b/plugins/OpenStack/settings.py
@@ -21,7 +21,7 @@
 ##
 """Configurations for the OpenStack plugins."""
 
-from __future__ import unicode_literals
+#from __future__ import unicode_literals
 
 import logging as log
 import os
@@ -77,14 +77,19 @@ class Config(object):
     def read_environ(self, service):
         """Check the appropriate environment variables and update defaults."""
         for key in self._config_keys:
-            if (key == "OS_IDENTITY_API_VERSION" or key == "OS_PASSWORD"):
-                val = str(os.environ[key])
-                setattr(self, key, val)
-            elif (key == "OS_AUTH_URL"):
-                val = str(os.environ[key]) + "/v3"
-                setattr(self, key, val)
-            else:
-                # Default username for a service is it's name
-                setattr(self, 'OS_USERNAME', service)
-        log.info("Configuration complete!")
-        return
+            try:
+                if (key == "OS_IDENTITY_API_VERSION" or key == "OS_PASSWORD"):
+                    val = str(os.environ[key])
+                    setattr(self, key, val)
+                elif (key == "OS_AUTH_URL"):
+                    val = str(os.environ[key]) + "/v3"
+                    setattr(self, key, val)
+                else:
+                    # Default username for a service is it's name
+                    setattr(self, 'OS_USERNAME', service)
+                log.info("Configuration complete!")
+                return
+            except KeyError as exc:
+                log.warn("Falied to configure plugin: %s", exc)
+                log.warn("Try re-authenticating your OpenStack deployment.")
+                return
diff --git a/requirements.txt b/requirements.txt
index b5de8b4..0f2ad0f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,3 +18,4 @@ pyopenssl
 python-requests
 cherrypy
 python-bottle
+six
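
Taken together, the alarming changes mean an alarm is now only created once the matching Gnocchi metric is found on the target resource. The sketch below walks that flow end to end with python-requests; the endpoint URLs, the helper name and the gnocchi_resources_threshold payload are illustrative stand-ins (the plugin builds its real payload in check_payload() and sends requests through Common._perform_request()), so read it as an outline of the behaviour rather than as plugin code.

    import json

    import requests

    # Trimmed copies of the plugin's mappings (see alarming.py above).
    ALARM_NAMES = {"cpu_utilization_above_threshold": "cpu_utilization",
                   "average_memory_usage_above_threshold": "average_memory_utilization"}
    SEVERITIES = {"warning": "low", "critical": "critical"}
    STATISTICS = {"average": "avg", "maximum": "max"}

    # Illustrative endpoints; the plugin gets these from Common.get_endpoint().
    AODH_URL = "http://aodh.example:8042"
    GNOCCHI_URL = "http://gnocchi.example:8041"


    def create_alarm_sketch(auth_token, alarm_name, metric_name, resource_id,
                            threshold, statistic="average", severity="warning"):
        """Create an Aodh threshold alarm only if the Gnocchi metric exists."""
        headers = {"X-Auth-Token": auth_token,
                   "Content-Type": "application/json"}

        # The requested alarm must map onto the metric it is meant to watch.
        if ALARM_NAMES.get(alarm_name.lower()) != metric_name.lower():
            return None

        # Equivalent of check_for_metric(): is the metric defined on the resource?
        metrics = requests.get(GNOCCHI_URL + "/v1/metric/", headers=headers).json()
        if not any(m['name'] == metric_name.lower() and m['resource_id'] == resource_id
                   for m in metrics):
            return None

        # Simplified stand-in for check_payload(): a gnocchi_resources_threshold rule.
        payload = {
            "name": alarm_name.lower(),
            "type": "gnocchi_resources_threshold",
            "gnocchi_resources_threshold_rule": {
                "metric": metric_name.lower(),
                "resource_id": resource_id,
                "resource_type": "generic",
                "aggregation_method": STATISTICS.get(statistic.lower(), "avg"),
                "threshold": threshold,
                "comparison_operator": "gt",
            },
            "severity": SEVERITIES.get(severity.lower(), "low"),
        }
        response = requests.post(AODH_URL + "/v2/alarms/", headers=headers,
                                 data=json.dumps(payload))
        return response.json()['alarm_id']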
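
On the configuration side, the reworked Config.read_environ() now logs a warning and returns when an expected variable is missing instead of raising KeyError, and the notifier configures itself with read_environ("aodh"). A minimal sketch of the environment it expects, with placeholder values only:

    import os

    # Placeholder values; point these at a real Keystone before starting the plugin.
    os.environ["OS_AUTH_URL"] = "http://keystone.example:5000"  # "/v3" is appended
    os.environ["OS_PASSWORD"] = "password"
    os.environ["OS_IDENTITY_API_VERSION"] = "3"
    # OS_USERNAME is not read from the environment; read_environ() defaults it
    # to the service name it was given, e.g. "aodh" for the notifier above.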