-rw-r--r--  yardstick/benchmark/core/report.py                         122
-rw-r--r--  yardstick/common/nsb_report.css                             19
-rw-r--r--  yardstick/common/nsb_report.html.j2                         52
-rw-r--r--  yardstick/common/nsb_report.js                              73
-rw-r--r--  yardstick/tests/functional/benchmark/core/__init__.py        0
-rw-r--r--  yardstick/tests/functional/benchmark/core/test_report.py   114
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_report.py         139
7 files changed, 379 insertions(+), 140 deletions(-)
diff --git a/yardstick/benchmark/core/report.py b/yardstick/benchmark/core/report.py
index 0bc392fe5..0819cd497 100644
--- a/yardstick/benchmark/core/report.py
+++ b/yardstick/benchmark/core/report.py
@@ -10,13 +10,12 @@
 """ Handler for yardstick command 'report' """
 
-import ast
 import re
+import six
 import uuid
 
 import jinja2
 from api.utils import influx
-from oslo_utils import encodeutils
 from oslo_utils import uuidutils
 
 from yardstick.common import constants as consts
 from yardstick.common.utils import cliargs
@@ -55,11 +54,9 @@ class JSTree(object):
     def format_for_jstree(self, data):
         """Format the data into the required format for jsTree.
 
-        The data format expected is a list of key-value pairs which represent
-        the data and label for each metric e.g.:
+        The data format expected is a list of metric names e.g.:
 
-        [{'data': [0, ], 'label': 'tg__0.DropPackets'},
-         {'data': [548, ], 'label': 'tg__0.LatencyAvg.5'},]
+        ['tg__0.DropPackets', 'tg__0.LatencyAvg.5']
 
         This data is converted into the format required for jsTree to group
         and display the metrics in a hierarchial fashion, including creating a
@@ -76,8 +73,8 @@ class JSTree(object):
         self._created_nodes = ['#']
         self.jstree_data = []
 
-        for item in data:
-            self._create_node(item["label"])
+        for metric in data:
+            self._create_node(metric)
 
         return self.jstree_data
 
@@ -115,10 +112,10 @@ class Report(object):
         else:
             raise KeyError("Test case not found.")
 
-    def _get_tasks(self):
-        task_cmd = "select * from \"%s\" where task_id= '%s'"
-        task_query = task_cmd % (self.yaml_name, self.task_id)
-        query_exec = influx.query(task_query)
+    def _get_metrics(self):
+        metrics_cmd = "select * from \"%s\" where task_id = '%s'"
+        metrics_query = metrics_cmd % (self.yaml_name, self.task_id)
+        query_exec = influx.query(metrics_query)
         if query_exec:
             return query_exec
         else:
@@ -132,38 +129,72 @@ class Report(object):
         """
         self._validate(args.yaml_name[0], args.task_id[0])
 
-        self.db_fieldkeys = self._get_fieldkeys()
+        db_fieldkeys = self._get_fieldkeys()
+        # list of dicts of:
+        # - PY2: unicode key and unicode value
+        # - PY3: str key and str value
 
-        self.db_task = self._get_tasks()
+        db_metrics = self._get_metrics()
+        # list of dicts of:
+        # - PY2: unicode key and { None | unicode | float | long | int } value
+        # - PY3: str key and { None | str | float | int } value
 
-        field_keys = []
-        datasets = []
-        table_vals = {}
+        # extract fieldKey entries, and convert them to str where needed
+        field_keys = [key if isinstance(key, str)       # PY3: already str
+                      else key.encode('utf8')           # PY2: unicode to str
+                      for key in
+                      [field['fieldKey'] for field in db_fieldkeys]]
 
-        field_keys = [encodeutils.to_utf8(field['fieldKey'])
-                      for field in self.db_fieldkeys]
+        # extract timestamps
+        self.Timestamp = []
+        for metric in db_metrics:
+            metric_time = metric['time']                # in RFC3339 format
+            if not isinstance(metric_time, str):
+                metric_time = metric_time.encode('utf8')    # PY2: unicode to str
+            metric_time = metric_time[11:]              # skip date, keep time
+            head, _, tail = metric_time.partition('.')  # split HH:MM:SS and nsZ
+            metric_time = head + '.' + tail[:6]         # join HH:MM:SS and .us
+            self.Timestamp.append(metric_time)          # HH:MM:SS.micros
+
+        # prepare return values
+        datasets = []
+        table_vals = {'Timestamp': self.Timestamp}
 
+        # extract and convert field values
         for key in field_keys:
-            self.Timestamp = []
             values = []
-            for task in self.db_task:
-                task_time = encodeutils.to_utf8(task['time'])
-                if not isinstance(task_time, str):
-                    task_time = str(task_time, 'utf8')
-                if not isinstance(key, str):
-                    key = str(key, 'utf8')
-                task_time = task_time[11:]
-                head, _, tail = task_time.partition('.')
-                task_time = head + "." + tail[:6]
-                self.Timestamp.append(task_time)
-                if task[key] is None:
-                    values.append(None)
-                elif isinstance(task[key], (int, float)):
-                    values.append(task[key])
+            for metric in db_metrics:
+                val = metric.get(key, None)
+                if val is None:
+                    # keep explicit None or missing entry as is
+                    pass
+                elif isinstance(val, (int, float)):
+                    # keep plain int or float as is
+                    pass
+                elif six.PY2 and isinstance(val,
+                        long):  # pylint: disable=undefined-variable
+                    # PY2: long value would be rendered with trailing L,
+                    # which JS does not support, so convert it to float
+                    val = float(val)
+                elif isinstance(val, six.string_types):
+                    s = val
+                    if not isinstance(s, str):
+                        s = s.encode('utf8')            # PY2: unicode to str
+                    try:
+                        # convert until failure
+                        val = s
+                        val = float(s)
+                        val = int(s)
+                        if six.PY2 and isinstance(val,
+                                long):  # pylint: disable=undefined-variable
+                            val = float(val)            # PY2: long to float
+                    except ValueError:
+                        pass
                 else:
-                    values.append(ast.literal_eval(task[key]))
+                    raise ValueError("Cannot convert %r" % val)
+                values.append(val)
             datasets.append({'label': key, 'data': values})
-            table_vals['Timestamp'] = self.Timestamp
             table_vals[key] = values
 
         return datasets, table_vals
@@ -197,8 +228,14 @@ class Report(object):
     @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
     def generate_nsb(self, args):
         """Start NSB report generation."""
-        datasets, table_vals = self._generate_common(args)
-        jstree_data = JSTree().format_for_jstree(datasets)
+        _, report_data = self._generate_common(args)
+        report_time = report_data.pop('Timestamp')
+        report_keys = sorted(report_data, key=str.lower)
+        report_tree = JSTree().format_for_jstree(report_keys)
+        report_meta = {
+            "testcase": self.yaml_name,
+            "task_id": self.task_id,
+        }
 
         template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
         template_environment = jinja2.Environment(
@@ -207,10 +244,11 @@ class Report(object):
             lstrip_blocks=True)
 
         context = {
-            "Timestamps": self.Timestamp,
-            "task_id": self.task_id,
-            "table": table_vals,
-            "jstree_nodes": jstree_data,
+            "report_meta": report_meta,
+            "report_data": report_data,
+            "report_time": report_time,
+            "report_keys": report_keys,
+            "report_tree": report_tree,
         }
 
         template_html = template_environment.get_template("nsb_report.html.j2")
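For quick reference, here is a minimal Python 3-only sketch of the two conversions done in _generate_common() above: RFC3339 timestamps are truncated (not rounded) to HH:MM:SS with microsecond precision, and string field values are coerced by trying float, then int, and keeping the last conversion that succeeded. The helper names and the sample record are invented for this illustration; the patch itself works on whole InfluxDB result rows and also covers the PY2 unicode/long cases.

    # Sketch only (PY3); trim_time/coerce and the sample record are not part of the patch.

    def trim_time(rfc3339):
        """'2018-08-20T16:49:26.372662016Z' -> '16:49:26.372662'"""
        t = rfc3339[11:]                      # drop the date part
        head, _, tail = t.partition('.')      # 'HH:MM:SS', '372662016Z'
        return head + '.' + tail[:6]          # keep microseconds, drop ns and 'Z'

    def coerce(val):
        """Keep None/int/float as is; convert numeric strings; keep other strings."""
        if val is None or isinstance(val, (int, float)):
            return val
        out = val
        try:
            out = float(val)                  # '7.89' -> 7.89
            out = int(val)                    # '1011' -> 1011 (raises for '7.89')
        except ValueError:
            pass                              # keep the last successful conversion
        return out

    sample = {'time': '2018-08-20T16:49:26.372662016Z', 'metric1': '7.89'}
    print(trim_time(sample['time']))          # 16:49:26.372662
    print(coerce(sample['metric1']))          # 7.89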
diff --git a/yardstick/common/nsb_report.css b/yardstick/common/nsb_report.css
index 2beb91c53..667f865a5 100644
--- a/yardstick/common/nsb_report.css
+++ b/yardstick/common/nsb_report.css
@@ -9,21 +9,26 @@
  ******************************************************************************/
 
 body {
-    font-size: 16pt;
+    font-family: Frutiger, "Helvetica Neue", Helvetica, Arial, sans-serif;
+}
+
+header {
+    padding-top: 5px;
+    text-align: center;
+    font-weight: bold;
 }
 
-table {
+#tblMetrics {
     overflow-y: scroll;
     height: 360px;
     display: block;
 }
 
-header {
-    font-family: Frutiger, "Helvetica Neue", Helvetica, Arial, sans-serif;
-    clear: left;
-    text-align: center;
+#cnvGraph {
+    width: 100%;
+    height: 500px;
 }
 
-.control-pane {
+#divTree {
     font-size: 10pt;
 }
diff --git a/yardstick/common/nsb_report.html.j2 b/yardstick/common/nsb_report.html.j2
index a3087d746..aa90253f8 100644
--- a/yardstick/common/nsb_report.html.j2
+++ b/yardstick/common/nsb_report.html.j2
@@ -29,53 +29,45 @@
   </head>
   <body>
-    <div class="container" style="width:80%">
+    <div class="container-fluid">
       <div class="row">
-        <header class="jumbotron">
-          <h1>Yardstick User Interface</h1>
-          <h4>Report of {{task_id}} Generated</h4>
+        <header>
+          Testcase: {{report_meta.testcase}}<br>
+          Task-ID: {{report_meta.task_id}}<br>
         </header>
       </div>
       <div class="row">
-        <div class="col-md-2 control-pane">
-          <div id="data_selector"></div>
+        <div class="col-md-2">
+          <div id="divTree"></div>
         </div>
-        <div class="col-md-10 data-pane">
-          <canvas id="cnvGraph" style="width: 100%; height: 500px"></canvas>
+        <div class="col-md-10">
+          <canvas id="cnvGraph"></canvas>
        </div>
      </div>
      <div class="row">
        <div class="col-md-12 table-responsive">
-          <table class="table table-hover"></table>
+          <table id="tblMetrics" class="table table-condensed table-hover"></table>
        </div>
      </div>
    </div>
    <script>
-      var arr, jstree_data, timestamps;
-      arr = {{table|safe}};
-      timestamps = {{Timestamps|safe}};
-      jstree_data = {{jstree_nodes|safe}};
+      // Injected metrics, timestamps, keys and hierarchy
+      var report_data = {{report_data|safe}};
+      var report_time = {{report_time|safe}};
+      var report_keys = {{report_keys|safe}};
+      var report_tree = {{report_tree|safe}};
 
+      // Wait for DOM to be loaded
       $(function() {
-        create_table(arr);
-        create_tree(jstree_data);
-        var objGraph = create_graph($('#cnvGraph'), timestamps);
+        var tblMetrics = $('#tblMetrics');
+        var cnvGraph = $('#cnvGraph');
+        var divTree = $('#divTree');
 
-        $('#data_selector').on('check_node.jstree uncheck_node.jstree', function(e, data) {
-          var selected_datasets = [];
-          for (var i = 0; i < data.selected.length; i++) {
-            var node = data.instance.get_node(data.selected[i]);
-            if (node.children.length == 0) {
-              var dataset = {
-                label: node.id,
-                data: arr[node.id],
-              };
-              selected_datasets.push(dataset);
-            }
-          }
-          update_graph(objGraph, selected_datasets);
-        });
+        create_table(tblMetrics, report_data, report_time, report_keys);
+        var objGraph = create_graph(cnvGraph, report_time);
+        create_tree(divTree, report_tree);
+        handle_tree(divTree, tblMetrics, objGraph, report_data, report_time);
       });
     </script>
   </body>
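The template above only consumes the five report_* context keys injected by generate_nsb(). A minimal sketch of that hand-off, using an inline Jinja2 template string and made-up sample values rather than the real nsb_report.html.j2:

    # Sketch only: inline template and sample values are illustrative.
    import jinja2

    template = jinja2.Environment(
        trim_blocks=True, lstrip_blocks=True).from_string(
        "Testcase: {{report_meta.testcase}}\n"
        "Task-ID: {{report_meta.task_id}}\n"
        "var report_time = {{report_time|safe}};\n"
        "var report_keys = {{report_keys|safe}};\n")

    print(template.render(
        report_meta={"testcase": "fake_name",
                     "task_id": "9cbe74b6-df09-4535-8bdc-dc3a43b8a4e2"},
        report_time=['16:49:26.372662', '16:49:27.374208'],
        report_keys=['metric1', 'metric2'],
    ))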
diff --git a/yardstick/common/nsb_report.js b/yardstick/common/nsb_report.js
index cc5e14ee7..4de1c8e78 100644
--- a/yardstick/common/nsb_report.js
+++ b/yardstick/common/nsb_report.js
@@ -10,9 +10,9 @@
 var None = null;
 
-function create_tree(jstree_data)
+function create_tree(divTree, jstree_data)
 {
-    $('#data_selector').jstree({
+    divTree.jstree({
         plugins: ['checkbox'],
         checkbox: {
             three_state: false,
@@ -29,34 +29,33 @@ function create_tree(jstree_data)
     });
 }
 
-// may need to pass timestamps too...
-function create_table(table_data)
+function create_table(tblMetrics, table_data, timestamps, table_keys)
 {
-    var tab, tr, td, tn, tbody, keys, key, curr_data, val;
-    // create table
-    tab = document.getElementsByTagName('table')[0];
-    tbody = document.createElement('tbody');
+    var tbody = $('<tbody></tbody>');
+    var tr0 = $('<tr></tr>');
+    var th0 = $('<th></th>');
+    var td0 = $('<td></td>');
+    var tr;
+
+    // create table headings using timestamps
+    tr = tr0.clone().append(th0.clone().text('Timestamp'));
+    timestamps.forEach(function(t) {
+        tr.append(th0.clone().text(t));
+    });
+    tbody.append(tr);
 
     // for each metric
-    keys = Object.keys(table_data);
-    for (var i = 0; i < keys.length; i++) {
-        key = keys[i];
-        tr = document.createElement('tr');
-        td = document.createElement('td');
-        tn = document.createTextNode(key);
-        td.appendChild(tn);
-        tr.appendChild(td);
+    table_keys.forEach(function(key) {
+        tr = tr0.clone().append(td0.clone().text(key));
         // add each piece of data as its own column
-        curr_data = table_data[key];
-        for (var j = 0; j < curr_data.length; j++) {
-            val = curr_data[j];
-            td = document.createElement('td');
-            tn = document.createTextNode(val === None ? '' : val);
-            td.appendChild(tn);
-            tr.appendChild(td);
-        }
-        tbody.appendChild(tr);
-    }
-    tab.appendChild(tbody);
+        table_data[key].forEach(function(val) {
+            tr.append(td0.clone().text(val === None ? '' : val));
+        });
+        tbody.append(tr);
+    });
+
+    // re-create table
+    tblMetrics.empty().append(tbody);
 }
 
 function create_graph(cnvGraph, timestamps)
@@ -144,3 +143,23 @@ function update_graph(objGraph, datasets)
     objGraph.data.datasets = datasets;
     objGraph.update();
 }
+
+function handle_tree(divTree, tblMetrics, objGraph, table_data, timestamps)
+{
+    divTree.on('check_node.jstree uncheck_node.jstree', function(e, data) {
+        var selected_keys = [];
+        var selected_datasets = [];
+        data.selected.forEach(function(sel) {
+            var node = data.instance.get_node(sel);
+            if (node.children.length == 0) {
+                selected_keys.push(node.id);
+                selected_datasets.push({
+                    label: node.id,
+                    data: table_data[node.id],
+                });
+            }
+        });
+        create_table(tblMetrics, table_data, timestamps, selected_keys);
+        update_graph(objGraph, selected_datasets);
+    });
+}
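create_tree() and handle_tree() above consume the report_tree node list built by the JSTree class in report.py. The following is a hypothetical, simplified re-implementation (for illustration only, not the patch's code) of how such a parent/id node list can be derived from dotted metric names; its flat-metric output matches the tree_exp fixture in the functional test below.

    # Sketch only: format_for_jstree here is a stand-in for JSTree in report.py.
    def format_for_jstree(metrics):
        created = ['#']
        nodes = []
        for metric in metrics:
            parent = '#'
            parts = metric.split('.')
            for i, part in enumerate(parts):
                node_id = '.'.join(parts[:i + 1])
                if node_id not in created:
                    created.append(node_id)
                    nodes.append({'parent': parent, 'text': part, 'id': node_id})
                parent = node_id
        return nodes

    print(format_for_jstree(['tg__0.DropPackets', 'tg__0.LatencyAvg.5']))
    # [{'parent': '#', 'text': 'tg__0', 'id': 'tg__0'},
    #  {'parent': 'tg__0', 'text': 'DropPackets', 'id': 'tg__0.DropPackets'},
    #  {'parent': 'tg__0', 'text': 'LatencyAvg', 'id': 'tg__0.LatencyAvg'},
    #  {'parent': 'tg__0.LatencyAvg', 'text': '5', 'id': 'tg__0.LatencyAvg.5'}]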
diff --git a/yardstick/tests/functional/benchmark/core/__init__.py b/yardstick/tests/functional/benchmark/core/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/functional/benchmark/core/__init__.py
diff --git a/yardstick/tests/functional/benchmark/core/test_report.py b/yardstick/tests/functional/benchmark/core/test_report.py
new file mode 100644
index 000000000..5f060dd1e
--- /dev/null
+++ b/yardstick/tests/functional/benchmark/core/test_report.py
@@ -0,0 +1,114 @@
+##############################################################################
+# Copyright (c) 2018 Intel Corporation.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import ast
+import tempfile
+import unittest
+
+import mock
+from six.moves import configparser
+
+from yardstick.benchmark import core
+from yardstick.benchmark.core import report
+from yardstick.cmd.commands import change_osloobj_to_paras
+
+
+GOOD_YAML_NAME = 'fake_name'
+GOOD_TASK_ID = "9cbe74b6-df09-4535-8bdc-dc3a43b8a4e2"
+GOOD_DB_FIELDKEYS = [
+    {u'fieldKey': u'metric1', u'fieldType': u'integer'},
+    {u'fieldKey': u'metric4', u'fieldType': u'integer'},
+    {u'fieldKey': u'metric2', u'fieldType': u'integer'},
+    {u'fieldKey': u'metric3', u'fieldType': u'integer'},
+]
+GOOD_DB_METRICS = [
+    {u'time': u'2018-08-20T16:49:26.372662016Z',
+     u'metric1': 1, u'metric2': 0, u'metric3': 8, u'metric4': 5},
+    {u'time': u'2018-08-20T16:49:27.374208000Z',
+     u'metric1': 1, u'metric2': 1, u'metric3': 5, u'metric4': 4},
+    {u'time': u'2018-08-20T16:49:28.375742976Z',
+     u'metric1': 2, u'metric2': 2, u'metric3': 3, u'metric4': 3},
+    {u'time': u'2018-08-20T16:49:29.377299968Z',
+     u'metric1': 3, u'metric2': 3, u'metric3': 2, u'metric4': 2},
+    {u'time': u'2018-08-20T16:49:30.378252032Z',
+     u'metric1': 5, u'metric2': 4, u'metric3': 1, u'metric4': 1},
+    {u'time': u'2018-08-20T16:49:30.379359421Z',
+     u'metric1': 8, u'metric2': 5, u'metric3': 1, u'metric4': 0},
+]
+
+yardstick_config = """
+[DEFAULT]
+dispatcher = influxdb
+"""
+
+
+def my_query(query_sql):
+    get_fieldkeys_cmd = 'show field keys'
+    get_metrics_cmd = 'select * from'
+
+    if get_fieldkeys_cmd in query_sql:
+        return GOOD_DB_FIELDKEYS
+    elif get_metrics_cmd in query_sql:
+        return GOOD_DB_METRICS
+    return []
+
+
+class ReportTestCase(unittest.TestCase):
+
+    @mock.patch.object(report.influx, 'query', new=my_query)
+    @mock.patch.object(configparser.ConfigParser,
+                       'read', side_effect=mock.mock_open(read_data=yardstick_config))
+    def test_report_generate_nsb_simple(self, *args):
+        tmpfile = tempfile.NamedTemporaryFile(delete=True)
+
+        args = core.Param({"task_id": [GOOD_TASK_ID], "yaml_name": [GOOD_YAML_NAME]})
+        params = change_osloobj_to_paras(args)
+
+        with mock.patch.object(report.consts, 'DEFAULT_HTML_FILE', tmpfile.name):
+            report.Report().generate_nsb(params)
+
+        data_act = None
+        time_act = None
+        keys_act = None
+        tree_act = None
+        with open(tmpfile.name) as f:
+            for l in f.readlines():
+                if "var report_data = {" in l:
+                    data_act = ast.literal_eval(l.strip()[18:-1])
+                elif "var report_time = [" in l:
+                    time_act = ast.literal_eval(l.strip()[18:-1])
+                elif "var report_keys = [" in l:
+                    keys_act = ast.literal_eval(l.strip()[18:-1])
+                elif "var report_tree = [" in l:
+                    tree_act = ast.literal_eval(l.strip()[18:-1])
+
+        data_exp = {
+            'metric1': [1, 1, 2, 3, 5, 8],
+            'metric2': [0, 1, 2, 3, 4, 5],
+            'metric3': [8, 5, 3, 2, 1, 1],
+            'metric4': [5, 4, 3, 2, 1, 0],
+        }
+        time_exp = [
+            '16:49:26.372662', '16:49:27.374208', '16:49:28.375742',
+            '16:49:29.377299', '16:49:30.378252', '16:49:30.379359',
+        ]
+        keys_exp = [
+            'metric1', 'metric2', 'metric3', 'metric4',
+        ]
+        tree_exp = [
+            {'parent': '#', 'text': 'metric1', 'id': 'metric1'},
+            {'parent': '#', 'text': 'metric2', 'id': 'metric2'},
+            {'parent': '#', 'text': 'metric3', 'id': 'metric3'},
+            {'parent': '#', 'text': 'metric4', 'id': 'metric4'},
+        ]
+
+        self.assertEqual(data_exp, data_act)
+        self.assertEqual(time_exp, time_act)
+        self.assertEqual(keys_exp, keys_act)
+        self.assertEqual(tree_exp, tree_act)
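A note on the [18:-1] slices in the test above: each injected statement starts with an 18-character prefix such as "var report_data = " and ends with ";", so stripping both leaves a literal that ast.literal_eval() can parse. A small self-contained check (the sample line is made up):

    # Sketch: why the functional test slices with [18:-1] before ast.literal_eval().
    import ast

    line = "var report_keys = ['metric1', 'metric2'];"
    prefix = "var report_keys = "               # all four report_* prefixes are 18 chars
    assert len(prefix) == 18
    print(ast.literal_eval(line.strip()[18:-1]))   # ['metric1', 'metric2'], ';' dropped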
diff --git a/yardstick/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py
index 11d017ff0..4683c26b0 100644
--- a/yardstick/tests/unit/benchmark/core/test_report.py
+++ b/yardstick/tests/unit/benchmark/core/test_report.py
@@ -9,6 +9,7 @@
 ##############################################################################
 
 import mock
+import six
 import unittest
 import uuid
 
@@ -19,13 +20,82 @@ from yardstick.cmd.commands import change_osloobj_to_paras
 GOOD_YAML_NAME = 'fake_name'
 GOOD_TASK_ID = str(uuid.uuid4())
 GOOD_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}]
-GOOD_DB_TASK = [{
+GOOD_DB_METRICS = [{
     'fake_key': 1.234,
     'time': '0000-00-00T12:34:56.789012Z',
 }]
 GOOD_TIMESTAMP = ['12:34:56.789012']
 BAD_YAML_NAME = 'F@KE_NAME'
 BAD_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa'
+MORE_DB_FIELDKEYS = [
+    {'fieldKey': 'fake_key'},
+    {'fieldKey': 'str_str'},
+    {'fieldKey': u'str_unicode'},
+    {u'fieldKey': 'unicode_str'},
+    {u'fieldKey': u'unicode_unicode'},
+    ]
+MORE_DB_METRICS = [{
+    'fake_key': None,
+    'time': '0000-00-00T00:00:00.000000Z',
+    }, {
+    'fake_key': 123,
+    'time': '0000-00-00T00:00:01.000000Z',
+    }, {
+    'fake_key': 4.56,
+    'time': '0000-00-00T00:00:02.000000Z',
+    }, {
+    'fake_key': 9876543210987654321,
+    'time': '0000-00-00T00:00:03.000000Z',
+    }, {
+    'fake_key': 'str_str value',
+    'time': '0000-00-00T00:00:04.000000Z',
+    }, {
+    'fake_key': u'str_unicode value',
+    'time': '0000-00-00T00:00:05.000000Z',
+    }, {
+    u'fake_key': 'unicode_str value',
+    'time': '0000-00-00T00:00:06.000000Z',
+    }, {
+    u'fake_key': u'unicode_unicode value',
+    'time': '0000-00-00T00:00:07.000000Z',
+    }, {
+    'fake_key': '7.89',
+    'time': '0000-00-00T00:00:08.000000Z',
+    }, {
+    'fake_key': '1011',
+    'time': '0000-00-00T00:00:09.000000Z',
+    }, {
+    'fake_key': '9876543210123456789',
+    'time': '0000-00-00T00:00:10.000000Z',
+    }]
+MORE_TIMESTAMP = ['00:00:%02d.000000' % n for n in range(len(MORE_DB_METRICS))]
+MORE_EMPTY_DATA = [None] * len(MORE_DB_METRICS)
+MORE_EXPECTED_TABLE_VALS = {
+    'Timestamp': MORE_TIMESTAMP,
+    'fake_key': [
+        None,
+        123,
+        4.56,
+        9876543210987654321 if six.PY3 else 9.876543210987655e+18,
+        'str_str value',
+        'str_unicode value',
+        'unicode_str value',
+        'unicode_unicode value',
+        7.89,
+        1011,
+        9876543210123456789 if six.PY3 else 9.876543210123457e+18,
+        ],
+    'str_str': MORE_EMPTY_DATA,
+    'str_unicode': MORE_EMPTY_DATA,
+    'unicode_str': MORE_EMPTY_DATA,
+    'unicode_unicode': MORE_EMPTY_DATA,
+    }
+MORE_EXPECTED_DATASETS = [{
+    'label': key,
+    'data': MORE_EXPECTED_TABLE_VALS[key],
+    }
+    for key in map(str, [field['fieldKey'] for field in MORE_DB_FIELDKEYS])
+    ]
 
 
 class JSTreeTestCase(unittest.TestCase):
@@ -47,23 +117,15 @@ class JSTreeTestCase(unittest.TestCase):
 
     def test_format_for_jstree(self):
         data = [
-            {'data': [0, ], 'label': 'tg__0.DropPackets'},
-            {'data': [548, ], 'label': 'tg__0.LatencyAvg.5'},
-            {'data': [1172, ], 'label': 'tg__0.LatencyAvg.6'},
-            {'data': [1001, ], 'label': 'tg__0.LatencyMax.5'},
-            {'data': [1468, ], 'label': 'tg__0.LatencyMax.6'},
-            {'data': [18.11, ], 'label': 'tg__0.RxThroughput'},
-            {'data': [18.11, ], 'label': 'tg__0.TxThroughput'},
-            {'data': [0, ], 'label': 'tg__1.DropPackets'},
-            {'data': [548, ], 'label': 'tg__1.LatencyAvg.5'},
-            {'data': [1172, ], 'label': 'tg__1.LatencyAvg.6'},
-            {'data': [1001, ], 'label': 'tg__1.LatencyMax.5'},
-            {'data': [1468, ], 'label': 'tg__1.LatencyMax.6'},
-            {'data': [18.1132084505, ], 'label': 'tg__1.RxThroughput'},
-            {'data': [18.1157260383, ], 'label': 'tg__1.TxThroughput'},
-            {'data': [9057888, ], 'label': 'vnf__0.curr_packets_in'},
-            {'data': [0, ], 'label': 'vnf__0.packets_dropped'},
-            {'data': [617825443, ], 'label': 'vnf__0.packets_fwd'},
+            'tg__0.DropPackets',
+            'tg__0.LatencyAvg.5', 'tg__0.LatencyAvg.6',
+            'tg__0.LatencyMax.5', 'tg__0.LatencyMax.6',
+            'tg__0.RxThroughput', 'tg__0.TxThroughput',
+            'tg__1.DropPackets',
+            'tg__1.LatencyAvg.5', 'tg__1.LatencyAvg.6',
+            'tg__1.LatencyMax.5', 'tg__1.LatencyMax.6',
+            'tg__1.RxThroughput', 'tg__1.TxThroughput',
+            'vnf__0.curr_packets_in', 'vnf__0.packets_dropped', 'vnf__0.packets_fwd',
         ]
 
         expected_output = [
@@ -117,11 +179,11 @@ class ReportTestCase(unittest.TestCase):
         self.assertEqual(GOOD_TASK_ID, str(self.rep.task_id))
 
     def test__validate_invalid_yaml_name(self):
-        with self.assertRaisesRegexp(ValueError, "yaml*"):
+        with six.assertRaisesRegex(self, ValueError, "yaml*"):
             self.rep._validate(BAD_YAML_NAME, GOOD_TASK_ID)
 
     def test__validate_invalid_task_id(self):
-        with self.assertRaisesRegexp(ValueError, "task*"):
+        with six.assertRaisesRegex(self, ValueError, "task*"):
             self.rep._validate(GOOD_YAML_NAME, BAD_TASK_ID)
 
     @mock.patch.object(influx, 'query')
@@ -141,42 +203,51 @@ class ReportTestCase(unittest.TestCase):
         mock_query.return_value = []
         self.rep.yaml_name = GOOD_YAML_NAME
         self.rep.task_id = GOOD_TASK_ID
-        self.assertRaisesRegexp(KeyError, "Test case", self.rep._get_fieldkeys)
+        six.assertRaisesRegex(self, KeyError, "Test case", self.rep._get_fieldkeys)
 
     @mock.patch.object(influx, 'query')
-    def test__get_tasks(self, mock_query):
-        mock_query.return_value = GOOD_DB_TASK
+    def test__get_metrics(self, mock_query):
+        mock_query.return_value = GOOD_DB_METRICS
         self.rep.yaml_name = GOOD_YAML_NAME
         self.rep.task_id = GOOD_TASK_ID
-        self.assertEqual(GOOD_DB_TASK, self.rep._get_tasks())
+        self.assertEqual(GOOD_DB_METRICS, self.rep._get_metrics())
 
     @mock.patch.object(influx, 'query')
-    def test__get_tasks_task_not_found(self, mock_query):
+    def test__get_metrics_task_not_found(self, mock_query):
         mock_query.return_value = []
         self.rep.yaml_name = GOOD_YAML_NAME
         self.rep.task_id = GOOD_TASK_ID
-        self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks)
+        six.assertRaisesRegex(self, KeyError, "Task ID", self.rep._get_metrics)
 
-    @mock.patch.object(report.Report, '_get_tasks')
+    @mock.patch.object(report.Report, '_get_metrics')
+    @mock.patch.object(report.Report, '_get_fieldkeys')
+    def test__generate_common(self, mock_keys, mock_metrics):
+        mock_metrics.return_value = MORE_DB_METRICS
+        mock_keys.return_value = MORE_DB_FIELDKEYS
+        datasets, table_vals = self.rep._generate_common(self.param)
+        self.assertEqual(MORE_EXPECTED_DATASETS, datasets)
+        self.assertEqual(MORE_EXPECTED_TABLE_VALS, table_vals)
+
+    @mock.patch.object(report.Report, '_get_metrics')
     @mock.patch.object(report.Report, '_get_fieldkeys')
     @mock.patch.object(report.Report, '_validate')
-    def test_generate(self, mock_valid, mock_keys, mock_tasks):
-        mock_tasks.return_value = GOOD_DB_TASK
+    def test_generate(self, mock_valid, mock_keys, mock_metrics):
+        mock_metrics.return_value = GOOD_DB_METRICS
         mock_keys.return_value = GOOD_DB_FIELDKEYS
         self.rep.generate(self.param)
         mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
-        mock_tasks.assert_called_once_with()
+        mock_metrics.assert_called_once_with()
         mock_keys.assert_called_once_with()
         self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)
 
-    @mock.patch.object(report.Report, '_get_tasks')
+    @mock.patch.object(report.Report, '_get_metrics')
     @mock.patch.object(report.Report, '_get_fieldkeys')
     @mock.patch.object(report.Report, '_validate')
-    def test_generate_nsb(self, mock_valid, mock_keys, mock_tasks):
-        mock_tasks.return_value = GOOD_DB_TASK
+    def test_generate_nsb(self, mock_valid, mock_keys, mock_metrics):
+        mock_metrics.return_value = GOOD_DB_METRICS
         mock_keys.return_value = GOOD_DB_FIELDKEYS
         self.rep.generate_nsb(self.param)
         mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
-        mock_tasks.assert_called_once_with()
+        mock_metrics.assert_called_once_with()
         mock_keys.assert_called_once_with()
         self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)
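On the unit-test expectations above: the two big integers are expected as floats under Python 2 because a PY2 long would be rendered with a trailing 'L', which is not valid JavaScript, so _generate_common() converts longs to float. A short Python 3 sketch of where the expected float value comes from:

    # Sketch: derivation of the PY2 expectation for a big integer metric.
    big = 9876543210987654321
    print(float(big))    # 9.876543210987655e+18, the value expected when not six.PY3
    print(repr(big))     # PY3: '9876543210987654321'; PY2 repr of a long ends in 'L'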