X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_mon%2Ftest%2FOpenStack%2Funit%2Ftest_metric_calls.py;h=b71ca722246dfff7e35d4a601d0e6741d3531c25;hb=93699898c51364cde193d8d441f4aed45670e7bf;hp=501413391514d3d64ddee4c727bb4329b7aec303;hpb=5154baa6fd0a33b46ef5fedb0e954c96f82fae03;p=osm%2FMON.git

diff --git a/osm_mon/test/OpenStack/unit/test_metric_calls.py b/osm_mon/test/OpenStack/unit/test_metric_calls.py
index 5014133..b71ca72 100644
--- a/osm_mon/test/OpenStack/unit/test_metric_calls.py
+++ b/osm_mon/test/OpenStack/unit/test_metric_calls.py
@@ -22,15 +22,13 @@
 """Tests for all metric request message keys."""
 
 import json
-
 import logging
-
 import unittest
 
 import mock
 
-from osm_mon.plugins.OpenStack.Gnocchi import metrics as metric_req
-
+from osm_mon.core.auth import AuthManager
+from osm_mon.plugins.OpenStack.Gnocchi import metric_handler as metric_req
 from osm_mon.plugins.OpenStack.common import Common
 
 log = logging.getLogger(__name__)
@@ -60,6 +58,8 @@ def perform_request_side_effect(*args, **kwargs):
     resp = Response()
     if 'marker' in args[0]:
         resp.text = json.dumps([])
+    if 'resource/generic' in args[0]:
+        resp.text = json.dumps({'metrics': {'cpu_util': 'test_id'}})
     return resp
 
 
@@ -69,173 +69,149 @@ class TestMetricCalls(unittest.TestCase):
     def setUp(self):
         """Setup the tests for metric request keys."""
         super(TestMetricCalls, self).setUp()
-        self.metrics = metric_req.Metrics()
+        self.metrics = metric_req.OpenstackMetricHandler()
         self.metrics._common = Common()
 
-    @mock.patch.object(metric_req.Metrics, "get_metric_name")
-    @mock.patch.object(metric_req.Metrics, "get_metric_id")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "get_metric_id")
     @mock.patch.object(Common, "perform_request")
     def test_invalid_config_metric_req(
-            self, perf_req, get_metric, get_metric_name):
+            self, perf_req, get_metric):
         """Test the configure metric function, for an invalid metric."""
         # Test invalid configuration for creating a metric
         values = {"metric_details": "invalid_metric"}
 
-        m_id, r_id, status = self.metrics.configure_metric(
-            endpoint, auth_token, values)
+        with self.assertRaises(ValueError):
+            self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_not_called()
-        self.assertEqual(m_id, None)
-        self.assertEqual(r_id, None)
-        self.assertEqual(status, False)
 
         # Test with an invalid metric name, will not perform request
         values = {"resource_uuid": "r_id"}
-        get_metric_name.return_value = "metric_name", None
 
-        m_id, r_id, status = self.metrics.configure_metric(
-            endpoint, auth_token, values)
+        with self.assertRaises(ValueError):
+            self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_not_called()
-        self.assertEqual(m_id, None)
-        self.assertEqual(r_id, "r_id")
-        self.assertEqual(status, False)
-        get_metric_name.reset_mock()
 
         # If metric exists, it won't be recreated
-        get_metric_name.return_value = "metric_name", "norm_name"
         get_metric.return_value = "metric_id"
 
-        m_id, r_id, status = self.metrics.configure_metric(
-            endpoint, auth_token, values)
+        with self.assertRaises(ValueError):
+            self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_not_called()
-        self.assertEqual(m_id, "metric_id")
-        self.assertEqual(r_id, "r_id")
-        self.assertEqual(status, False)
 
-    @mock.patch.object(metric_req.Metrics, "get_metric_name")
-    @mock.patch.object(metric_req.Metrics, "get_metric_id")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "get_metric_id")
     @mock.patch.object(Common, "perform_request")
+    @mock.patch.object(AuthManager, "get_credentials")
     def test_valid_config_metric_req(
-            self, perf_req, get_metric, get_metric_name):
+            self, get_creds, perf_req, get_metric):
        """Test the configure metric function, for a valid metric."""
         # Test valid configuration and payload for creating a metric
+        get_creds.return_value = type('obj', (object,), {'config': '{"insecure":true}'})
         values = {"resource_uuid": "r_id",
-                  "metric_unit": "units"}
-        get_metric_name.return_value = "norm_name", "metric_name"
+                  "metric_unit": "units",
+                  "metric_name": "cpu_util"}
         get_metric.return_value = None
         payload = {"id": "r_id",
-                   "metrics": {"metric_name":
+                   "metrics": {"cpu_util":
                                {"archive_policy_name": "high",
-                                "name": "metric_name",
+                                "name": "cpu_util",
                                 "unit": "units"}}}
-        perf_req.return_value = type('obj', (object,), {'text': '{"id":"1"}'})
+        perf_req.return_value = type('obj', (object,), {'text': '{"metrics":{"cpu_util":1}, "id":1}'})
 
-        self.metrics.configure_metric(endpoint, auth_token, values)
+        self.metrics.configure_metric(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_called_with(
-            "/v1/resource/generic", auth_token, req_type="post",
+            "/v1/resource/generic", auth_token, req_type="post", verify_ssl=False,
             payload=json.dumps(payload, sort_keys=True))
 
     @mock.patch.object(Common, "perform_request")
     def test_delete_metric_req(self, perf_req):
         """Test the delete metric function."""
-        self.metrics.delete_metric(endpoint, auth_token, "metric_id")
+        mock_response = Response()
+        mock_response.status_code = 200
+        perf_req.return_value = mock_response
+
+        self.metrics.delete_metric(endpoint, auth_token, "metric_id", verify_ssl=False)
 
         perf_req.assert_called_with(
-            "/v1/metric/metric_id", auth_token, req_type="delete")
+            "/v1/metric/metric_id", auth_token, req_type="delete", verify_ssl=False)
 
     @mock.patch.object(Common, "perform_request")
     def test_delete_metric_invalid_status(self, perf_req):
         """Test invalid response for delete request."""
         perf_req.return_value = type('obj', (object,), {"status_code": "404"})
 
-        status = self.metrics.delete_metric(endpoint, auth_token, "metric_id")
-
-        self.assertEqual(status, False)
+        with self.assertRaises(ValueError):
+            self.metrics.delete_metric(endpoint, auth_token, "metric_id", verify_ssl=False)
 
-    @mock.patch.object(metric_req.Metrics, "response_list")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "response_list")
     @mock.patch.object(Common, "perform_request")
     def test_complete_list_metric_req(self, perf_req, resp_list):
         """Test the complete list metric function."""
         # Test listing metrics without any configuration options
         values = {}
         perf_req.side_effect = perform_request_side_effect
-        self.metrics.list_metrics(endpoint, auth_token, values)
+        self.metrics.list_metrics(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_any_call(
-            "/v1/metric?sort=name:asc", auth_token, req_type="get")
+            "/v1/metric?sort=name:asc", auth_token, req_type="get", verify_ssl=False)
         resp_list.assert_called_with([{u'id': u'test_id'}])
 
-    @mock.patch.object(metric_req.Metrics, "response_list")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "response_list")
     @mock.patch.object(Common, "perform_request")
     def test_resource_list_metric_req(self, perf_req, resp_list):
         """Test the resource list metric function."""
         # Test listing metrics with a resource id specified
         values = {"resource_uuid": "resource_id"}
         perf_req.side_effect = perform_request_side_effect
-        self.metrics.list_metrics(endpoint, auth_token, values)
+        self.metrics.list_metrics(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_any_call(
-            "/v1/resource/generic/resource_id", auth_token, req_type="get")
+            "/v1/metric/test_id", auth_token, req_type="get", verify_ssl=False)
 
-    @mock.patch.object(metric_req.Metrics, "response_list")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "response_list")
     @mock.patch.object(Common, "perform_request")
     def test_name_list_metric_req(self, perf_req, resp_list):
         """Test the metric_name list metric function."""
         # Test listing metrics with a metric_name specified
         values = {"metric_name": "disk_write_bytes"}
         perf_req.side_effect = perform_request_side_effect
-        self.metrics.list_metrics(endpoint, auth_token, values)
+        self.metrics.list_metrics(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_any_call(
-            "/v1/metric?sort=name:asc", auth_token, req_type="get")
+            "/v1/metric?sort=name:asc", auth_token, req_type="get", verify_ssl=False)
         resp_list.assert_called_with(
             [{u'id': u'test_id'}], metric_name="disk_write_bytes")
 
-    @mock.patch.object(metric_req.Metrics, "response_list")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "response_list")
     @mock.patch.object(Common, "perform_request")
     def test_combined_list_metric_req(self, perf_req, resp_list):
         """Test the combined resource and metric list metric function."""
         # Test listing metrics with a resource id and metric name specified
         values = {"resource_uuid": "resource_id",
-                  "metric_name": "packets_sent"}
+                  "metric_name": "cpu_utilization"}
         perf_req.side_effect = perform_request_side_effect
-        self.metrics.list_metrics(endpoint, auth_token, values)
+        self.metrics.list_metrics(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_any_call(
-            "/v1/resource/generic/resource_id", auth_token, req_type="get")
+            "/v1/metric/test_id", auth_token, req_type="get", verify_ssl=False)
 
     @mock.patch.object(Common, "perform_request")
     def test_get_metric_id(self, perf_req):
         """Test get_metric_id function."""
-        self.metrics.get_metric_id(endpoint, auth_token, "my_metric", "r_id")
+        mock_response = Response()
+        mock_response.text = json.dumps({'metrics': {'my_metric': 'id'}})
+        perf_req.return_value = mock_response
+        self.metrics.get_metric_id(endpoint, auth_token, "my_metric", "r_id", verify_ssl=False)
 
         perf_req.assert_called_with(
-            "/v1/resource/generic/r_id", auth_token, req_type="get")
-
-    def test_get_metric_name(self):
-        """Test the result from the get_metric_name function."""
-        # test with a valid metric_name
-        values = {"metric_name": "disk_write_ops"}
+            "/v1/resource/generic/r_id", auth_token, req_type="get", verify_ssl=False)
 
-        metric_name, norm_name = self.metrics.get_metric_name(values)
-
-        self.assertEqual(metric_name, "disk_write_ops")
-        self.assertEqual(norm_name, "disk.write.requests")
-
-        # test with an invalid metric name
-        values = {"metric_name": "my_invalid_metric"}
-
-        metric_name, norm_name = self.metrics.get_metric_name(values)
-
-        self.assertEqual(metric_name, "my_invalid_metric")
-        self.assertEqual(norm_name, None)
-
-    @mock.patch.object(metric_req.Metrics, "get_metric_id")
+    @mock.patch.object(metric_req.OpenstackMetricHandler, "get_metric_id")
     @mock.patch.object(Common, "perform_request")
     def test_valid_read_data_req(self, perf_req, get_metric):
         """Test the read metric data function, for a valid call."""
@@ -247,24 +223,20 @@ class TestMetricCalls(unittest.TestCase):
         perf_req.return_value = type('obj', (object,), {'text': '{"metric_data":"[]"}'})
         get_metric.return_value = "metric_id"
-        self.metrics.read_metric_data(endpoint, auth_token, values)
+        self.metrics.read_metric_data(endpoint, auth_token, values, verify_ssl=False)
 
         perf_req.assert_called_once()
 
     @mock.patch.object(Common, "perform_request")
     def test_invalid_read_data_req(self, perf_req):
-        """Test the read metric data function, for an invalid call."""
-        # Teo empty lists wil be returned because the values are invalid
+        """Test the read metric data function for an invalid call."""
         values = {}
 
-        times, data = self.metrics.read_metric_data(
-            endpoint, auth_token, values)
-
-        self.assertEqual(times, [])
-        self.assertEqual(data, [])
+        with self.assertRaises(KeyError):
+            self.metrics.read_metric_data(endpoint, auth_token, values, verify_ssl=False)
 
     def test_complete_response_list(self):
-        """Test the response list function for formating metric lists."""
+        """Test the response list function for formatting metric lists."""
         # Mock a list for testing purposes, with valid OSM metric
         resp_list = self.metrics.response_list(metric_list)
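
The updated tests above lean on one pattern: Common.perform_request is patched with a side-effect function that inspects the requested URL and hands back a canned response, so OpenstackMetricHandler can resolve metric ids without a live Gnocchi endpoint. The following is a minimal, self-contained sketch of that pattern, not code from the OSM MON repository; the names FakeCommon, fake_perform_request, and ExampleTest are illustrative only.

    import json
    import unittest
    import mock


    class Response(object):
        """Fake requests-style response object (stand-in for the tests' Response)."""
        def __init__(self, text="", status_code=200):
            self.text = text
            self.status_code = status_code


    class FakeCommon(object):
        """Illustrative stand-in for the plugin's Common helper."""
        def perform_request(self, url, auth_token, req_type=None, verify_ssl=True):
            raise RuntimeError("should be patched out in tests")


    def fake_perform_request(url, auth_token, **kwargs):
        """Return a different canned payload depending on the URL requested."""
        if 'resource/generic' in url:
            # Resource lookup: map metric names to ids, as the real API would.
            return Response(json.dumps({'metrics': {'cpu_util': 'test_id'}}))
        # Metric listing: one fake metric entry.
        return Response(json.dumps([{'id': 'test_id'}]))


    class ExampleTest(unittest.TestCase):
        @mock.patch.object(FakeCommon, "perform_request")
        def test_side_effect_dispatch(self, perf_req):
            # The side effect routes each call through fake_perform_request,
            # so assertions can check both the URL and the verify_ssl flag.
            perf_req.side_effect = fake_perform_request
            common = FakeCommon()

            resp = common.perform_request("/v1/resource/generic/r_id", "token",
                                          req_type="get", verify_ssl=False)

            self.assertEqual(json.loads(resp.text)['metrics']['cpu_util'], 'test_id')
            perf_req.assert_called_with("/v1/resource/generic/r_id", "token",
                                        req_type="get", verify_ssl=False)


    if __name__ == '__main__':
        unittest.main()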