From: Benjamin Diaz
Date: Mon, 8 Oct 2018 18:34:12 +0000 (-0300)
Subject: Refactors code in OpenStack plugin
X-Git-Tag: v5.0.0~24
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=refs%2Fchanges%2F51%2F6651%2F4;p=osm%2FMON.git

Refactors code in OpenStack plugin

Continues the refactoring process on the OpenStack metrics plugin.
Delegates exception handling to the main metric handler, removing
exception management from individual function calls.
Extracts response generation code into a single method.
Adds a 'status' param to message responses where it was missing.
Modifies tests accordingly.

Signed-off-by: Benjamin Diaz
Change-Id: I6ad27734f37708f5ccbef13e322723225545db4e
---

diff --git a/osm_mon/plugins/OpenStack/Aodh/alarming.py b/osm_mon/plugins/OpenStack/Aodh/alarming.py
index b71700e..453e77c 100644
--- a/osm_mon/plugins/OpenStack/Aodh/alarming.py
+++ b/osm_mon/plugins/OpenStack/Aodh/alarming.py
@@ -23,6 +23,7 @@
 
 import json
 import logging
+from json import JSONDecodeError
 
 import six
 import yaml
@@ -75,7 +76,7 @@ class Alarming(object):
         """
         try:
             values = json.loads(message.value)
-        except ValueError:
+        except JSONDecodeError:
             values = yaml.safe_load(message.value)
 
         log.info("OpenStack alarm action required.")
@@ -199,6 +200,7 @@ class Alarming(object):
             if metric_name not in METRIC_MAPPINGS.keys():
                 raise KeyError("Metric {} is not supported.".format(metric_name))
 
+            # FIXME
             if 'granularity' in vim_config and 'granularity' not in values:
                 values['granularity'] = vim_config['granularity']
             payload = self.check_payload(values, metric_name, resource_id,
diff --git a/osm_mon/plugins/OpenStack/Aodh/notifier.py b/osm_mon/plugins/OpenStack/Aodh/notifier.py
index e14ba7a..c74a73f 100644
--- a/osm_mon/plugins/OpenStack/Aodh/notifier.py
+++ b/osm_mon/plugins/OpenStack/Aodh/notifier.py
@@ -107,7 +107,7 @@ class NotifierHandler(BaseHTTPRequestHandler):
         # Generate and send response
         resp_message = response.generate_response(
             'notify_alarm',
-            a_id=alarm_id,
+            alarm_id=alarm_id,
             vdu_name=alarm.vdu_name,
             vnf_member_index=alarm.vnf_member_index,
             ns_id=alarm.ns_id,
diff --git a/osm_mon/plugins/OpenStack/Gnocchi/metrics.py b/osm_mon/plugins/OpenStack/Gnocchi/metrics.py
index 825671e..7bfbc47 100644
--- a/osm_mon/plugins/OpenStack/Gnocchi/metrics.py
+++ b/osm_mon/plugins/OpenStack/Gnocchi/metrics.py
@@ -25,6 +25,7 @@
 import datetime
 import json
 import logging
 import time
+from json import JSONDecodeError
 
 import six
 import yaml
@@ -76,9 +77,11 @@ class Metrics(object):
         log.info("OpenStack metric action required.")
         try:
             values = json.loads(message.value)
-        except ValueError:
+        except JSONDecodeError:
             values = yaml.safe_load(message.value)
 
+        log.info("OpenStack metric action required.")
+
         if 'metric_name' in values and values['metric_name'] not in METRIC_MAPPINGS.keys():
             raise ValueError('Metric ' + values['metric_name'] + ' is not supported.')
 
@@ -89,129 +92,120 @@ class Metrics(object):
         auth_token = Common.get_auth_token(vim_uuid, verify_ssl=verify_ssl)
 
         if message.key == "create_metric_request":
-            # Configure metric
             metric_details = values['metric_create_request']
-            metric_id, resource_id, status = self.configure_metric(
-                endpoint, auth_token, metric_details, verify_ssl)
-
-            # Generate and send a create metric response
+            status = False
+            metric_id = None
+            resource_id = None
             try:
-                resp_message = self._response.generate_response(
-                    'create_metric_response',
-                    status=status,
-                    cor_id=metric_details['correlation_id'],
-                    metric_id=metric_id,
-                    r_id=resource_id)
-                log.info("Response messages: %s", resp_message)
-                self._producer.publish_metrics_response(
-                    'create_metric_response', resp_message)
-            except Exception as exc:
-                log.warning("Failed to create response: %s", exc)
+                # Configure metric
+                metric_id, resource_id = self.configure_metric(endpoint, auth_token, metric_details, verify_ssl)
+                log.info("Metric successfully created")
+                status = True
+            except Exception as e:
+                log.exception("Error creating metric")
+                raise e
+            finally:
+                self._generate_and_send_response('create_metric_response',
+                                                 metric_details['correlation_id'],
+                                                 status=status,
+                                                 metric_id=metric_id,
+                                                 resource_id=resource_id)
 
         elif message.key == "read_metric_data_request":
-            # Read all metric data related to a specified metric
-            timestamps, metric_data = self.read_metric_data(endpoint, auth_token, values, verify_ssl)
-
-            # Generate and send a response message
+            metric_id = None
+            timestamps = []
+            metric_data = []
+            status = False
             try:
                 metric_id = self.get_metric_id(endpoint, auth_token, METRIC_MAPPINGS[values['metric_name']],
                                                values['resource_uuid'], verify_ssl)
-                resp_message = self._response.generate_response(
-                    'read_metric_data_response',
-                    m_id=metric_id,
-                    m_name=values['metric_name'],
-                    r_id=values['resource_uuid'],
-                    cor_id=values['correlation_id'],
-                    times=timestamps,
-                    metrics=metric_data)
-                log.info("Response message: %s", resp_message)
-                self._producer.read_metric_data_response(
-                    'read_metric_data_response', resp_message)
-            except Exception as exc:
-                log.warning("Failed to create response: %s", exc)
+                # Read all metric data related to a specified metric
+                timestamps, metric_data = self.read_metric_data(endpoint, auth_token, values, verify_ssl)
+                log.info("Metric data collected successfully")
+                status = True
+            except Exception as e:
+                log.exception("Error reading metric data")
+                raise e
+            finally:
+                self._generate_and_send_response('read_metric_data_response',
+                                                 values['correlation_id'],
+                                                 status=status,
+                                                 metric_id=metric_id,
+                                                 metric_name=values['metric_name'],
+                                                 resource_id=values['resource_uuid'],
+                                                 times=timestamps,
+                                                 metrics=metric_data)
 
         elif message.key == "delete_metric_request":
-            # delete the specified metric in the request
-            metric_id = self.get_metric_id(endpoint, auth_token, METRIC_MAPPINGS[values['metric_name']],
-                                           values['resource_uuid'], verify_ssl)
-            status = self.delete_metric(
-                endpoint, auth_token, metric_id, verify_ssl)
-
-            # Generate and send a response message
+            metric_id = None
+            status = False
             try:
-                resp_message = self._response.generate_response(
-                    'delete_metric_response',
-                    m_id=metric_id,
-                    m_name=values['metric_name'],
-                    status=status,
-                    r_id=values['resource_uuid'],
-                    cor_id=values['correlation_id'])
-                log.info("Response message: %s", resp_message)
-                self._producer.delete_metric_response(
-                    'delete_metric_response', resp_message)
-            except Exception as exc:
-                log.warning("Failed to create response: %s", exc)
+                # delete the specified metric in the request
+                metric_id = self.get_metric_id(endpoint, auth_token, METRIC_MAPPINGS[values['metric_name']],
+                                               values['resource_uuid'], verify_ssl)
+                self.delete_metric(
+                    endpoint, auth_token, metric_id, verify_ssl)
+                log.info("Metric deleted successfully")
+                status = True
+
+            except Exception as e:
+                log.exception("Error deleting metric")
+                raise e
+            finally:
+                self._generate_and_send_response('delete_metric_response',
+                                                 values['correlation_id'],
+                                                 metric_id=metric_id,
+                                                 metric_name=values['metric_name'],
+                                                 status=status,
+                                                 resource_id=values['resource_uuid'])
 
         elif message.key == "update_metric_request":
             # Gnocchi doesn't support configuration updates
             # Log and send a response back to this effect
             log.warning("Gnocchi doesn't support metric configuration updates.")
-            req_details = values['metric_create_request']
+            req_details = values['metric_update_request']
             metric_name = req_details['metric_name']
             resource_id = req_details['resource_uuid']
             metric_id = self.get_metric_id(endpoint, auth_token, metric_name, resource_id, verify_ssl)
-
-            # Generate and send a response message
-            try:
-                resp_message = self._response.generate_response(
-                    'update_metric_response',
-                    status=False,
-                    cor_id=req_details['correlation_id'],
-                    r_id=resource_id,
-                    m_id=metric_id)
-                log.info("Response message: %s", resp_message)
-                self._producer.update_metric_response(
-                    'update_metric_response', resp_message)
-            except Exception as exc:
-                log.warning("Failed to create response: %s", exc)
+            self._generate_and_send_response('update_metric_response',
+                                             req_details['correlation_id'],
+                                             status=False,
+                                             resource_id=resource_id,
+                                             metric_id=metric_id)
 
         elif message.key == "list_metric_request":
             list_details = values['metrics_list_request']
-
-            metric_list = self.list_metrics(
-                endpoint, auth_token, list_details, verify_ssl)
-
-            # Generate and send a response message
+            metric_list = []
+            status = False
             try:
-                resp_message = self._response.generate_response(
-                    'list_metric_response',
-                    m_list=metric_list,
-                    cor_id=list_details['correlation_id'])
-                log.info("Response message: %s", resp_message)
-                self._producer.list_metric_response(
-                    'list_metric_response', resp_message)
-            except Exception as exc:
-                log.warning("Failed to create response: %s", exc)
+                metric_list = self.list_metrics(
+                    endpoint, auth_token, list_details, verify_ssl)
+                log.info("Metrics listed successfully")
+                status = True
+            except Exception as e:
+                log.exception("Error listing metrics")
+                raise e
+            finally:
+                self._generate_and_send_response('list_metric_response',
                                                 list_details['correlation_id'],
+                                                 status=status,
+                                                 metric_list=metric_list)
 
         else:
             log.warning("Unknown key %s, no action will be performed.", message.key)
 
     def configure_metric(self, endpoint, auth_token, values, verify_ssl):
         """Create the new metric in Gnocchi."""
-        try:
-            resource_id = values['resource_uuid']
-        except KeyError:
-            log.warning("resource_uuid field is missing.")
-            return None, None, False
+        required_fields = ['resource_uuid', 'metric_name']
+        for field in required_fields:
+            if field not in values:
+                raise ValueError("Missing field: " + field)
 
-        try:
-            metric_name = values['metric_name'].lower()
-        except KeyError:
-            log.warning("metric_name field is missing.")
-            return None, None, False
+        resource_id = values['resource_uuid']
+        metric_name = values['metric_name'].lower()
 
         # Check for an existing metric for this resource
         metric_id = self.get_metric_id(
@@ -236,64 +230,52 @@ class Metrics(object):
                     metric_id = row['id']
                     log.info("Appended metric to existing resource.")
 
-                return metric_id, resource_id, True
+                return metric_id, resource_id
             except Exception as exc:
                 # Gnocchi version of resource does not exist creating a new one
                 log.info("Failed to append metric to existing resource:%s", exc)
-                try:
-                    url = "{}/v1/resource/generic".format(endpoint)
-                    metric = {'name': metric_name,
-                              'archive_policy_name': 'high',
-                              'unit': values['metric_unit'], }
-
-                    resource_payload = json.dumps({'id': resource_id,
-                                                   'metrics': {
-                                                       metric_name: metric}}, sort_keys=True)
-
-                    resource = Common.perform_request(
-                        url,
-                        auth_token,
-                        req_type="post",
-                        payload=resource_payload,
-                        verify_ssl=verify_ssl)
-
-                    # Return the newly created resource_id for creating alarms
-                    new_resource_id = json.loads(resource.text)['id']
-                    log.info("Created new resource for metric: %s",
-                             new_resource_id)
-
-                    metric_id = self.get_metric_id(
-                        endpoint, auth_token, metric_name, new_resource_id, verify_ssl)
-
-                    return metric_id, new_resource_id, True
-                except Exception as exc:
-                    log.warning("Failed to create a new resource: %s", exc)
-                    return None, None, False
+                url = "{}/v1/resource/generic".format(endpoint)
+                metric = {'name': metric_name,
+                          'archive_policy_name': 'high',
+                          'unit': values['metric_unit'], }
 
-        else:
-            log.info("This metric already exists for this resource.")
+                resource_payload = json.dumps({'id': resource_id,
+                                               'metrics': {
+                                                   metric_name: metric}}, sort_keys=True)
+
+                resource = Common.perform_request(
+                    url,
+                    auth_token,
+                    req_type="post",
+                    payload=resource_payload,
+                    verify_ssl=verify_ssl)
+
+                # Return the newly created resource_id for creating alarms
+                new_resource_id = json.loads(resource.text)['id']
+                log.info("Created new resource for metric: %s",
+                         new_resource_id)
 
-        return metric_id, resource_id, False
+                metric_id = self.get_metric_id(
+                    endpoint, auth_token, metric_name, new_resource_id, verify_ssl)
+
+                return metric_id, new_resource_id
+
+        else:
+            raise ValueError("Metric already exists for this resource")
 
     def delete_metric(self, endpoint, auth_token, metric_id, verify_ssl):
         """Delete metric."""
         url = "{}/v1/metric/%s".format(endpoint) % metric_id
 
-        try:
-            result = Common.perform_request(
-                url,
-                auth_token,
-                req_type="delete",
-                verify_ssl=verify_ssl)
-            if str(result.status_code) == "404":
-                log.warning("Failed to delete the metric.")
-                return False
-            else:
-                return True
-        except Exception as exc:
-            log.warning("Failed to delete metric: %s", exc)
-            return False
+        result = Common.perform_request(
+            url,
+            auth_token,
+            req_type="delete",
+            verify_ssl=verify_ssl)
+        if not str(result.status_code).startswith("2"):
+            log.warning("Failed to delete the metric.")
+            raise ValueError("Error deleting metric. Gnocchi API responded with code " + str(result.status_code))
 
     def list_metrics(self, endpoint, auth_token, values, verify_ssl):
         """List all metrics."""
@@ -307,77 +289,73 @@ class Metrics(object):
 
         if 'resource_uuid' in values:
             resource = values['resource_uuid']
 
-        try:
-            if resource:
-                url = "{}/v1/resource/generic/{}".format(endpoint, resource)
-                result = Common.perform_request(
-                    url, auth_token, req_type="get", verify_ssl=verify_ssl)
-                resource_data = json.loads(result.text)
-                metrics = resource_data['metrics']
-
-                if metric_name:
-                    if metrics.get(METRIC_MAPPINGS[metric_name]):
-                        metric_id = metrics[METRIC_MAPPINGS[metric_name]]
-                        url = "{}/v1/metric/{}".format(endpoint, metric_id)
-                        result = Common.perform_request(
-                            url, auth_token, req_type="get", verify_ssl=verify_ssl)
-                        metric_list = json.loads(result.text)
-                        log.info("Returning an %s resource list for %s metrics",
-                                 metric_name, resource)
-                        return metric_list
-                    else:
-                        log.info("Metric {} not found for {} resource".format(metric_name, resource))
-                        return None
-                else:
-                    metric_list = []
-                    for k, v in metrics.items():
-                        url = "{}/v1/metric/{}".format(endpoint, v)
-                        result = Common.perform_request(
-                            url, auth_token, req_type="get", verify_ssl=verify_ssl)
-                        metric = json.loads(result.text)
-                        metric_list.append(metric)
-                    if metric_list:
-                        log.info("Return a list of %s resource metrics", resource)
-                        return metric_list
-                    else:
-                        log.info("There are no metrics available")
-                        return []
-            else:
-                url = "{}/v1/metric?sort=name:asc".format(endpoint)
-                result = Common.perform_request(
-                    url, auth_token, req_type="get", verify_ssl=verify_ssl)
-                metrics = []
-                metrics_partial = json.loads(result.text)
-                for metric in metrics_partial:
-                    metrics.append(metric)
-
-                while len(json.loads(result.text)) > 0:
-                    last_metric_id = metrics_partial[-1]['id']
-                    url = "{}/v1/metric?sort=name:asc&marker={}".format(endpoint, last_metric_id)
-                    result = Common.perform_request(
-                        url, auth_token, req_type="get", verify_ssl=verify_ssl)
-                    if len(json.loads(result.text)) > 0:
-                        metrics_partial = json.loads(result.text)
-                        for metric in metrics_partial:
-                            metrics.append(metric)
-
-                if metrics is not None:
-                    # Format the list response
-                    if metric_name is not None:
-                        metric_list = self.response_list(
-                            metrics, metric_name=metric_name)
-                        log.info("Returning a list of %s metrics", metric_name)
-                    else:
-                        metric_list = self.response_list(metrics)
-                        log.info("Returning a complete list of metrics")
-                    return metric_list
-                else:
-                    log.info("There are no metrics available")
-                    return []
-        except Exception as exc:
log.exception("Failed to list metrics. %s", exc) - return None + else: + url = "{}/v1/metric?sort=name:asc".format(endpoint) + result = Common.perform_request( + url, auth_token, req_type="get", verify_ssl=verify_ssl) + metrics = [] + metrics_partial = json.loads(result.text) + for metric in metrics_partial: + metrics.append(metric) + + while len(json.loads(result.text)) > 0: + last_metric_id = metrics_partial[-1]['id'] + url = "{}/v1/metric?sort=name:asc&marker={}".format(endpoint, last_metric_id) + result = Common.perform_request( + url, auth_token, req_type="get", verify_ssl=verify_ssl) + if len(json.loads(result.text)) > 0: + metrics_partial = json.loads(result.text) + for metric in metrics_partial: + metrics.append(metric) + + if metrics is not None: + # Format the list response + if metric_name is not None: + metric_list = self.response_list( + metrics, metric_name=metric_name) + log.info("Returning a list of %s metrics", metric_name) + else: + metric_list = self.response_list(metrics) + log.info("Returning a complete list of metrics") + return metric_list + else: + log.info("There are no metrics available") + return [] def get_metric_id(self, endpoint, auth_token, metric_name, resource_id, verify_ssl): """Check if the desired metric already exists for the resource.""" @@ -390,53 +368,49 @@ class Metrics(object): req_type="get", verify_ssl=verify_ssl) return json.loads(result.text)['metrics'][metric_name] - except KeyError: - log.warning("Metric doesn't exist. No metric_id available") - return None + except KeyError as e: + log.error("Metric doesn't exist. No metric_id available") + raise e def read_metric_data(self, endpoint, auth_token, values, verify_ssl): """Collect metric measures over a specified time period.""" timestamps = [] data = [] - try: - # get metric_id - metric_id = self.get_metric_id(endpoint, auth_token, METRIC_MAPPINGS[values['metric_name']], - values['resource_uuid'], verify_ssl) - # Try and collect measures - collection_unit = values['collection_unit'].upper() - collection_period = values['collection_period'] - - # Define the start and end time based on configurations - # FIXME: Local timezone may differ from timezone set in Gnocchi, causing discrepancies in measures - stop_time = time.strftime("%Y-%m-%d") + "T" + time.strftime("%X") - end_time = int(round(time.time() * 1000)) - if collection_unit == 'YEAR': - diff = PERIOD_MS[collection_unit] - else: - diff = collection_period * PERIOD_MS[collection_unit] - s_time = (end_time - diff) / 1000.0 - start_time = datetime.datetime.fromtimestamp(s_time).strftime( - '%Y-%m-%dT%H:%M:%S.%f') - base_url = "{}/v1/metric/%(0)s/measures?start=%(1)s&stop=%(2)s" - url = base_url.format(endpoint) % { - "0": metric_id, "1": start_time, "2": stop_time} - - # Perform metric data request - metric_data = Common.perform_request( - url, - auth_token, - req_type="get", - verify_ssl=verify_ssl) - - # Generate a list of the requested timestamps and data - for r in json.loads(metric_data.text): - timestamp = r[0].replace("T", " ") - timestamps.append(timestamp) - data.append(r[2]) + # get metric_id + metric_id = self.get_metric_id(endpoint, auth_token, METRIC_MAPPINGS[values['metric_name']], + values['resource_uuid'], verify_ssl) + # Try and collect measures + collection_unit = values['collection_unit'].upper() + collection_period = values['collection_period'] + + # Define the start and end time based on configurations + # FIXME: Local timezone may differ from timezone set in Gnocchi, causing discrepancies in measures + stop_time = 
time.strftime("%Y-%m-%d") + "T" + time.strftime("%X") + end_time = int(round(time.time() * 1000)) + if collection_unit == 'YEAR': + diff = PERIOD_MS[collection_unit] + else: + diff = collection_period * PERIOD_MS[collection_unit] + s_time = (end_time - diff) / 1000.0 + start_time = datetime.datetime.fromtimestamp(s_time).strftime( + '%Y-%m-%dT%H:%M:%S.%f') + base_url = "{}/v1/metric/%(0)s/measures?start=%(1)s&stop=%(2)s" + url = base_url.format(endpoint) % { + "0": metric_id, "1": start_time, "2": stop_time} + + # Perform metric data request + metric_data = Common.perform_request( + url, + auth_token, + req_type="get", + verify_ssl=verify_ssl) + + # Generate a list of the requested timestamps and data + for r in json.loads(metric_data.text): + timestamp = r[0].replace("T", " ") + timestamps.append(timestamp) + data.append(r[2]) - return timestamps, data - except Exception as exc: - log.warning("Failed to gather specified measures: %s", exc) return timestamps, data def response_list(self, metric_list, metric_name=None, resource=None): @@ -483,3 +457,14 @@ class Metrics(object): return res_list else: return resp_list + + def _generate_and_send_response(self, key, correlation_id, **kwargs): + try: + resp_message = self._response.generate_response( + key, cor_id=correlation_id, **kwargs) + log.info("Response Message: %s", resp_message) + self._producer.publish_metrics_response( + key, resp_message) + except Exception as e: + log.exception("Response creation failed:") + raise e diff --git a/osm_mon/plugins/OpenStack/response.py b/osm_mon/plugins/OpenStack/response.py index 8d328f0..5f1529a 100644 --- a/osm_mon/plugins/OpenStack/response.py +++ b/osm_mon/plugins/OpenStack/response.py @@ -108,7 +108,7 @@ class OpenStack_Response(object): "correlation_id": kwargs['cor_id'], "metric_create_response": { "metric_uuid": kwargs['metric_id'], - "resource_uuid": kwargs['r_id'], + "resource_uuid": kwargs['resource_id'], "status": kwargs['status']}} return json.dumps(create_metric_resp) @@ -116,10 +116,11 @@ class OpenStack_Response(object): """Generate a response for a read metric data request.""" read_metric_data_resp = {"schema_version": schema_version, "schema_type": "read_metric_data_response", - "metric_name": kwargs['m_name'], - "metric_uuid": kwargs['m_id'], - "resource_uuid": kwargs['r_id'], + "metric_name": kwargs['metric_name'], + "metric_uuid": kwargs['metric_id'], + "resource_uuid": kwargs['resource_id'], "correlation_id": kwargs['cor_id'], + "status": kwargs['status'], "metrics_data": { "time_series": kwargs['times'], "metrics_series": kwargs['metrics']}} @@ -129,9 +130,9 @@ class OpenStack_Response(object): """Generate a response for a delete metric request.""" delete_metric_resp = {"schema_version": schema_version, "schema_type": "delete_metric_response", - "metric_name": kwargs['m_name'], - "metric_uuid": kwargs['m_id'], - "resource_uuid": kwargs['r_id'], + "metric_name": kwargs['metric_name'], + "metric_uuid": kwargs['metric_id'], + "resource_uuid": kwargs['resource_id'], "correlation_id": kwargs['cor_id'], "status": kwargs['status']} return json.dumps(delete_metric_resp) @@ -142,9 +143,9 @@ class OpenStack_Response(object): "schema_type": "update_metric_response", "correlation_id": kwargs['cor_id'], "metric_update_response": { - "metric_uuid": kwargs['m_id'], + "metric_uuid": kwargs['metric_id'], "status": kwargs['status'], - "resource_uuid": kwargs['r_id']}} + "resource_uuid": kwargs['resource_id']}} return json.dumps(update_metric_resp) def list_metric_response(self, **kwargs): @@ 
@@ -152,7 +153,8 @@ class OpenStack_Response(object):
         list_metric_resp = {"schema_version": schema_version,
                             "schema_type": "list_metric_response",
                             "correlation_id": kwargs['cor_id'],
-                            "metrics_list": kwargs['m_list']}
+                            "status": kwargs['status'],
+                            "metrics_list": kwargs['metric_list']}
         return json.dumps(list_metric_resp)
 
     def notify_alarm(self, **kwargs):
@@ -160,7 +162,7 @@ class OpenStack_Response(object):
         notify_alarm_resp = {"schema_version": schema_version,
                              "schema_type": "notify_alarm",
                              "notify_details": {
-                                 "alarm_uuid": kwargs['a_id'],
+                                 "alarm_uuid": kwargs['alarm_id'],
                                  "vdu_name": kwargs['vdu_name'],
                                  "vnf_member_index": kwargs['vnf_member_index'],
                                  "ns_id": kwargs['ns_id'],
diff --git a/osm_mon/test/OpenStack/integration/test_metric_integration.py b/osm_mon/test/OpenStack/integration/test_metric_integration.py
index eb672da..344ccbd 100644
--- a/osm_mon/test/OpenStack/integration/test_metric_integration.py
+++ b/osm_mon/test/OpenStack/integration/test_metric_integration.py
@@ -94,13 +94,13 @@ class MetricIntegrationTest(unittest.TestCase):
         for message in self.req_consumer:
             if message.key == "create_metric_request":
                 # A valid metric is created
-                config_metric.return_value = "metric_id", "resource_id", True
+                config_metric.return_value = "metric_id", "resource_id"
                 self.metric_req.metric_calls(message, 'test_id')
 
                 # A response message is generated and sent by MON's producer
                 resp.assert_called_with(
                     'create_metric_response', status=True, cor_id=123,
-                    metric_id="metric_id", r_id="resource_id")
+                    metric_id="metric_id", resource_id="resource_id")
 
                 return
         self.fail("No message received in consumer")
@@ -133,8 +133,8 @@ class MetricIntegrationTest(unittest.TestCase):
 
                 # A response message is generated and sent by MON's producer
                 resp.assert_called_with(
-                    'delete_metric_response', m_id='1',
-                    m_name="cpu_utilization", status=True, r_id="resource_id",
+                    'delete_metric_response', metric_id='1',
+                    metric_name="cpu_utilization", status=True, resource_id="resource_id",
                     cor_id=123)
 
                 return
@@ -169,9 +169,9 @@ class MetricIntegrationTest(unittest.TestCase):
 
                 # A response message is generated and sent by MON's producer
                 resp.assert_called_with(
-                    'read_metric_data_response', m_id='1',
-                    m_name="cpu_utilization", r_id="resource_id", cor_id=123, times=[],
-                    metrics=[])
+                    'read_metric_data_response', metric_id='1',
+                    metric_name="cpu_utilization", resource_id="resource_id", cor_id=123, times=[],
+                    metrics=[], status=True)
 
                 return
         self.fail("No message received in consumer")
@@ -204,7 +204,7 @@ class MetricIntegrationTest(unittest.TestCase):
 
                 # A response message is generated and sent by MON's producer
                 resp.assert_called_with(
-                    'list_metric_response', m_list=[], cor_id=123)
+                    'list_metric_response', metric_list=[], cor_id=123, status=True)
 
                 return
         self.fail("No message received in consumer")
@@ -216,7 +216,7 @@ class MetricIntegrationTest(unittest.TestCase):
     def test_update_metrics_req(self, resp, get_id, get_creds, perf_req):
         """Test Gnocchi update metric request message from KafkaProducer."""
         # Set-up message, producer and consumer for tests
-        payload = {"metric_create_request": {"metric_name": "my_metric",
+        payload = {"metric_update_request": {"metric_name": "my_metric",
                                              "correlation_id": 123,
                                              "resource_uuid": "resource_id",
                                              }}
@@ -238,7 +238,7 @@ class MetricIntegrationTest(unittest.TestCase):
                 # No metric update has taken place
                 resp.assert_called_with(
                     'update_metric_response', status=False, cor_id=123,
-                    r_id="resource_id", m_id="metric_id")
+                    resource_id="resource_id", metric_id="metric_id")
 
                 return
self.fail("No message received in consumer") diff --git a/osm_mon/test/OpenStack/integration/test_notify_alarm.py b/osm_mon/test/OpenStack/integration/test_notify_alarm.py index 0841446..6fedf69 100644 --- a/osm_mon/test/OpenStack/integration/test_notify_alarm.py +++ b/osm_mon/test/OpenStack/integration/test_notify_alarm.py @@ -115,8 +115,9 @@ class MockNotifierHandler(BaseHTTPRequestHandler): # Try generate and send response try: resp_message = self._response.generate_response( - 'notify_alarm', a_id=alarm_id, - r_id=resource_id, + 'notify_alarm', + alarm_id=alarm_id, + resource_id=resource_id, sev=values['severity'], date=a_date, state=values['current'], vim_type="OpenStack") self._producer.publish_alarm_response( @@ -183,7 +184,7 @@ class AlarmNotificationTest(unittest.TestCase): self.assertEqual(response.status_code, 200) # A response message is generated with the following details resp.assert_called_with( - "notify_alarm", a_id="my_alarm_id", r_id="my_resource_id", + "notify_alarm", alarm_id="my_alarm_id", resource_id="my_resource_id", sev="critical", date='dd-mm-yyyy 00:00', state="current_state", vim_type="OpenStack") diff --git a/osm_mon/test/OpenStack/unit/test_metric_calls.py b/osm_mon/test/OpenStack/unit/test_metric_calls.py index 3f89a91..de2d13b 100644 --- a/osm_mon/test/OpenStack/unit/test_metric_calls.py +++ b/osm_mon/test/OpenStack/unit/test_metric_calls.py @@ -83,29 +83,26 @@ class TestMetricCalls(unittest.TestCase): # Test invalid configuration for creating a metric values = {"metric_details": "invalid_metric"} - m_id, r_id, status = self.metrics.configure_metric( - endpoint, auth_token, values, verify_ssl=False) + with self.assertRaises(ValueError): + self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False) perf_req.assert_not_called() - self.assertEqual(status, False) # Test with an invalid metric name, will not perform request values = {"resource_uuid": "r_id"} - m_id, r_id, status = self.metrics.configure_metric( - endpoint, auth_token, values, verify_ssl=False) + with self.assertRaises(ValueError): + self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False) perf_req.assert_not_called() - self.assertEqual(status, False) # If metric exists, it won't be recreated get_metric.return_value = "metric_id" - m_id, r_id, status = self.metrics.configure_metric( - endpoint, auth_token, values, verify_ssl=False) + with self.assertRaises(ValueError): + self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False) perf_req.assert_not_called() - self.assertEqual(status, False) @mock.patch.object(metric_req.Metrics, "get_metric_id") @mock.patch.object(Common, "perform_request") @@ -136,6 +133,10 @@ class TestMetricCalls(unittest.TestCase): @mock.patch.object(Common, "perform_request") def test_delete_metric_req(self, perf_req): """Test the delete metric function.""" + mock_response = Response() + mock_response.status_code = 200 + perf_req.return_value = mock_response + self.metrics.delete_metric(endpoint, auth_token, "metric_id", verify_ssl=False) perf_req.assert_called_with( @@ -146,9 +147,8 @@ class TestMetricCalls(unittest.TestCase): """Test invalid response for delete request.""" perf_req.return_value = type('obj', (object,), {"status_code": "404"}) - status = self.metrics.delete_metric(endpoint, auth_token, "metric_id", verify_ssl=False) - - self.assertEqual(status, False) + with self.assertRaises(ValueError): + self.metrics.delete_metric(endpoint, auth_token, "metric_id", verify_ssl=False) 
 
     @mock.patch.object(metric_req.Metrics, "response_list")
     @mock.patch.object(Common, "perform_request")
@@ -206,7 +206,9 @@ class TestMetricCalls(unittest.TestCase):
     @mock.patch.object(Common, "perform_request")
     def test_get_metric_id(self, perf_req):
         """Test get_metric_id function."""
-        perf_req.return_value = type('obj', (object,), {'text': '{"alarm_id":"1"}'})
+        mock_response = Response()
+        mock_response.text = json.dumps({'metrics': {'my_metric': 'id'}})
+        perf_req.return_value = mock_response
 
         self.metrics.get_metric_id(endpoint, auth_token, "my_metric", "r_id", verify_ssl=False)
 
         perf_req.assert_called_with(
@@ -230,15 +232,11 @@ class TestMetricCalls(unittest.TestCase):
 
     @mock.patch.object(Common, "perform_request")
     def test_invalid_read_data_req(self, perf_req):
-        """Test the read metric data function, for an invalid call."""
-        # Teo empty lists wil be returned because the values are invalid
+        """Test the read metric data function for an invalid call."""
         values = {}
 
-        times, data = self.metrics.read_metric_data(
-            endpoint, auth_token, values, verify_ssl=False)
-
-        self.assertEqual(times, [])
-        self.assertEqual(data, [])
+        with self.assertRaises(KeyError):
+            self.metrics.read_metric_data(endpoint, auth_token, values, verify_ssl=False)
 
     def test_complete_response_list(self):
         """Test the response list function for formatting metric lists."""
diff --git a/osm_mon/test/OpenStack/unit/test_metric_req.py b/osm_mon/test/OpenStack/unit/test_metric_req.py
index 7bb81c9..f66be72 100644
--- a/osm_mon/test/OpenStack/unit/test_metric_req.py
+++ b/osm_mon/test/OpenStack/unit/test_metric_req.py
@@ -35,6 +35,15 @@ from osm_mon.plugins.OpenStack.common import Common
 log = logging.getLogger(__name__)
 
 
+class Response(object):
+    """Mock a response object for requests."""
+
+    def __init__(self):
+        """Initialise test and status code values."""
+        self.text = json.dumps([{"id": "test_id"}])
+        self.status_code = "STATUS_CODE"
+
+
 class Message(object):
     """A class to mock a message object value for metric requests."""
 
@@ -99,19 +108,14 @@ class TestMetricReq(unittest.TestCase):
 
     @mock.patch.object(Common, "get_auth_token", mock.Mock())
     @mock.patch.object(Common, 'get_endpoint', mock.Mock())
-    @mock.patch.object(metric_req.Metrics, "read_metric_data")
-    @mock.patch.object(metric_req.Metrics, "list_metrics")
-    @mock.patch.object(metric_req.Metrics, "delete_metric")
-    @mock.patch.object(metric_req.Metrics, "configure_metric")
     @mock.patch.object(AuthManager, "get_credentials")
     @mock.patch.object(Common, "perform_request")
-    def test_update_metric_key(self, perf_req, get_creds, config_metric, delete_metric, list_metrics,
-                               read_data):
+    def test_update_metric_key(self, perf_req, get_creds):
         """Test the functionality for an update metric request."""
         # Mock a message with update metric key and value
         message = Message()
         message.key = "update_metric_request"
-        message.value = json.dumps({"metric_create_request":
+        message.value = json.dumps({"metric_update_request":
                                         {"correlation_id": 1,
                                          "metric_name": "my_metric",
                                          "resource_uuid": "my_r_id"}})
 
         get_creds.return_value = type('obj', (object,), {
             'config': '{"insecure":true}'
         })
-        perf_req.return_value = type('obj', (object,), {'text': '{"metric_id":"1"}'})
+        mock_response = Response()
+        mock_response.text = json.dumps({'metrics': {'my_metric': 'id'}})
+        perf_req.return_value = mock_response
 
         # Call metric functionality and confirm no function is called
         # Gnocchi does not support updating a metric configuration
         self.metrics.metric_calls(message, 'test_id')
-        config_metric.assert_not_called()
-        list_metrics.assert_not_called()
-        delete_metric.assert_not_called()
-        read_data.assert_not_called()
 
     @mock.patch.object(Common, "get_auth_token", mock.Mock())
     @mock.patch.object(Common, 'get_endpoint', mock.Mock())
@@ -142,7 +144,7 @@ class TestMetricReq(unittest.TestCase):
         message.value = json.dumps({"metric_create_request": {"correlation_id": 123}})
         get_credentials.return_value = type('obj', (object,), {'config': '{"insecure":true}'})
         # Call metric functionality and check config metric
-        config_metric.return_value = "metric_id", "resource_id", True
+        config_metric.return_value = "metric_id", "resource_id"
         self.metrics.metric_calls(message, 'test_id')
         config_metric.assert_called_with(mock.ANY, mock.ANY, {"correlation_id": 123}, False)
 
     @mock.patch.object(Common, "get_auth_token", mock.Mock())
     @mock.patch.object(Common, 'get_endpoint', mock.Mock())
     @mock.patch.object(metric_req.Metrics, "read_metric_data")
     @mock.patch.object(AuthManager, "get_credentials")
-    def test_read_data_key(self, get_creds, read_data):
+    @mock.patch.object(Common, "perform_request")
+    def test_read_data_key(self, perf_req, get_creds, read_data):
         """Test the functionality for a read metric data request."""
         # Mock a message with a read data key and value
         message = Message()
         message.key = "read_metric_data_request"
-        message.value = json.dumps({"alarm_uuid": "alarm_id"})
+        message.value = json.dumps({"correlation_id": 123, "metric_name": "cpu_utilization", "resource_uuid": "uuid"})
 
         get_creds.return_value = type('obj', (object,), {
             'config': '{"insecure":true}'
         })
 
+        mock_response = Response()
+        mock_response.text = json.dumps({'metrics': {'cpu_util': 'id'}})
+        perf_req.return_value = mock_response
+
         # Call metric functionality and check read data metrics
         read_data.return_value = "time_stamps", "data_values"
         self.metrics.metric_calls(message, 'test_id')
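
Editor's note (illustrative, not part of the patch): the control flow this
refactor applies to every branch of metric_calls() can be sketched in
isolation as follows. MetricHandler and its helpers below are hypothetical
stand-ins for the Metrics class, its response builder and its Kafka producer;
only the shape of the logic, raise on error but always publish exactly one
response carrying a 'status' flag from the finally block, mirrors the patch.

import json
import logging

log = logging.getLogger(__name__)


class MetricHandler(object):
    """Hypothetical, simplified stand-in for the refactored Metrics class."""

    def handle_create_metric(self, request):
        status = False
        metric_id = None
        try:
            # Work that may raise; the exception propagates to the caller...
            metric_id = self._configure_metric(request)
            status = True
        except Exception:
            log.exception("Error creating metric")
            raise
        finally:
            # ...but a response is still generated and sent exactly once.
            self._generate_and_send_response('create_metric_response',
                                             request['correlation_id'],
                                             status=status,
                                             metric_id=metric_id)

    def _configure_metric(self, request):
        # Stand-in for Metrics.configure_metric(): validate, then create.
        if 'metric_name' not in request:
            raise ValueError("Missing field: metric_name")
        return "metric-uuid"

    def _generate_and_send_response(self, key, correlation_id, **kwargs):
        # Stand-in for the extracted helper: build the response message once
        # and hand it to the producer (here it is only logged).
        resp_message = json.dumps(dict(kwargs, schema_type=key,
                                       correlation_id=correlation_id))
        log.info("Response message: %s", resp_message)


if __name__ == '__main__':
    MetricHandler().handle_create_metric({'correlation_id': 123,
                                          'metric_name': 'cpu_utilization'})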