from osm_mon.core.database import DatabaseManager
from osm_mon.core.message_bus.producer import KafkaProducer
from osm_mon.core.settings import Config
+from osm_mon.plugins.OpenStack.Gnocchi.metrics import METRIC_MAPPINGS
from osm_mon.plugins.OpenStack.common import Common
from osm_mon.plugins.OpenStack.response import OpenStack_Response
log = logging.getLogger(__name__)
-ALARM_NAMES = {
- "average_memory_usage_above_threshold": "average_memory_utilization",
- "disk_read_ops": "disk_read_ops",
- "disk_write_ops": "disk_write_ops",
- "disk_read_bytes": "disk_read_bytes",
- "disk_write_bytes": "disk_write_bytes",
- "net_packets_dropped": "packets_dropped",
- "packets_in_above_threshold": "packets_received",
- "packets_out_above_threshold": "packets_sent",
- "cpu_utilization_above_threshold": "cpu_utilization"}
-
-METRIC_MAPPINGS = {
- "average_memory_utilization": "memory.percent",
- "disk_read_ops": "disk.read.requests",
- "disk_write_ops": "disk.write.requests",
- "disk_read_bytes": "disk.read.bytes",
- "disk_write_bytes": "disk.write.bytes",
- "packets_dropped": "interface.if_dropped",
- "packets_received": "interface.if_packets",
- "packets_sent": "interface.if_packets",
- "cpu_utilization": "cpu_util",
-}
-
SEVERITIES = {
"warning": "low",
"minor": "low",
log.warning("Failed to create the alarm: %s", exc)
return None, False
- def alarming(self, message):
+ def alarming(self, message, vim_uuid):
"""Consume info from the message bus to manage alarms."""
try:
values = json.loads(message.value)
values = yaml.safe_load(message.value)
log.info("OpenStack alarm action required.")
- vim_uuid = values['vim_uuid']
auth_token = Common.get_auth_token(vim_uuid)
alarm_endpoint, metric_endpoint, auth_token, alarm_details, vim_config)
# Generate a valid response message, send via producer
+ if alarm_status is True:
+ log.info("Alarm successfully created")
+ self._database_manager.save_alarm(alarm_id,
+ vim_uuid,
+ alarm_details['threshold_value'],
+ alarm_details['operation'].lower(),
+ alarm_details['metric_name'].lower(),
+ alarm_details['vdu_name'].lower(),
+ alarm_details['vnf_member_index'].lower(),
+ alarm_details['ns_id'].lower()
+ )
try:
- if alarm_status is True:
- log.info("Alarm successfully created")
- self._database_manager.save_alarm(alarm_id, vim_uuid)
-
resp_message = self._response.generate_response(
'create_alarm_response', status=alarm_status,
alarm_id=alarm_id,
# Checking what fields are specified for a list request
try:
name = list_details['alarm_name'].lower()
- if name not in ALARM_NAMES.keys():
- log.warning("This alarm is not supported, won't be used!")
- name = None
except KeyError as exc:
log.info("Alarm name isn't specified.")
name = None
resource_id = rule['resource_id']
metric_name = [key for key, value in six.iteritems(METRIC_MAPPINGS) if value == rule['metric']][0]
except Exception as exc:
- log.warning("Failed to retrieve existing alarm info: %s.\
- Can only update OSM alarms.", exc)
+ log.exception("Failed to retrieve existing alarm info. Can only update OSM alarms.")
return None, False
# Generates and check payload configuration for alarm update
return json.loads(update_alarm.text)['alarm_id'], True
except Exception as exc:
- log.warning("Alarm update could not be performed: %s", exc)
- return None, False
+ log.exception("Alarm update could not be performed: ")
return None, False
def check_payload(self, values, metric_name, resource_id,
def check_for_metric(self, auth_token, metric_endpoint, m_name, r_id):
    """Check that the desired Gnocchi metric exists for a resource.

    Queries the Gnocchi resource directly (GET /v1/resource/generic/<id>)
    instead of paging through the full metric list, then looks up the
    metric by its Gnocchi name.

    :param auth_token: OpenStack authentication token.
    :param metric_endpoint: base URL of the Gnocchi (metric) service.
    :param m_name: OSM metric name (key into METRIC_MAPPINGS).
    :param r_id: Gnocchi resource id (VM uuid) to inspect.
    :return: the Gnocchi metric id, or None when the metric (or the
        resource itself) cannot be found.
    """
    try:
        url = "{}/v1/resource/generic/{}".format(metric_endpoint, r_id)
        result = Common.perform_request(
            url, auth_token, req_type="get")
        resource = json.loads(result.text)
        # 'metrics' maps Gnocchi metric names -> metric ids; a single
        # dict.get avoids the double lookup of get()-then-[].
        metric_id = resource['metrics'].get(METRIC_MAPPINGS[m_name])
        if metric_id is None:
            log.info("Desired Gnocchi metric not found")
        return metric_id
    except Exception as exc:
        # Resource missing, malformed response, or request failure all
        # mean the metric cannot be confirmed; callers treat None as
        # "do not create the alarm".
        log.info("Desired Gnocchi metric not found:%s", exc)
        return None
\ No newline at end of file