author     Patrice Buriez <patrice.buriez@intel.com>    2018-11-09 11:24:21 +0100
committer  Patrice Buriez <patrice.buriez@intel.com>    2018-12-19 21:34:34 +0100
commit     1dd64e56e7c2ce249f6c5ae472dc6fda1cf20e25 (patch)
tree       5733a6386b3983d63cfcac206f374c71f352193c
parent     21351c5c19fd350512639f54d39a05980ec53633 (diff)
Add report generate-nsb command
JIRA: YARDSTICK-1367
Topic: report/html_table (8 of 12)
Change-Id: I8a2f96224a745334b67dd71875bebb3b69b9adc3
Signed-off-by: Emma Foley <emma.l.foley@intel.com>
Signed-off-by: Patrice Buriez <patrice.buriez@intel.com>
-rw-r--r--  docs/testing/user/userguide/10-yardstick-user-interface.rst | 26
-rw-r--r--  yardstick/benchmark/core/report.py                           | 63
-rw-r--r--  yardstick/cmd/commands/report.py                             | 23
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_report.py           | 52
4 files changed, 116 insertions(+), 48 deletions(-)
diff --git a/docs/testing/user/userguide/10-yardstick-user-interface.rst b/docs/testing/user/userguide/10-yardstick-user-interface.rst
index 5f9414974..b3056ec99 100644
--- a/docs/testing/user/userguide/10-yardstick-user-interface.rst
+++ b/docs/testing/user/userguide/10-yardstick-user-interface.rst
@@ -2,16 +2,32 @@
Yardstick User Interface
========================
-This interface provides a user to view the test result
-in table format and also values pinned on to a graph.
+This chapter describes how to generate HTML reports, used to view, store, share
+or publish test results in table and graph formats.
+The following layouts are available:
-Command
-=======
-::
+* The compact HTML report layout is suitable for testcases producing a few
+ metrics over a short period of time. All metrics for all timestamps are
+ displayed in the data table and on the graph.
+
+* The dynamic HTML report layout consists of a wider data table, a graph, and
+ a tree that allows selecting the metrics to be displayed. This layout is
+ suitable for testcases, such as NSB ones, that produce many metrics over
+ a longer period of time.
+
+
+Commands
+========
+
+To generate the compact HTML report, run::
   yardstick report generate <task-ID> <testcase-filename>
+To generate the dynamic HTML report, run::
+
+ yardstick report generate-nsb <task-ID> <testcase-filename>
+
Description
===========
diff --git a/yardstick/benchmark/core/report.py b/yardstick/benchmark/core/report.py
index 530fbf165..0bc392fe5 100644
--- a/yardstick/benchmark/core/report.py
+++ b/yardstick/benchmark/core/report.py
@@ -56,10 +56,10 @@ class JSTree(object):
"""Format the data into the required format for jsTree.
The data format expected is a list of key-value pairs which represent
- the data and name for each metric e.g.:
+ the data and label for each metric e.g.:
- [{'data': [0, ], 'name': 'tg__0.DropPackets'},
- {'data': [548, ], 'name': 'tg__0.LatencyAvg.5'},]
+ [{'data': [0, ], 'label': 'tg__0.DropPackets'},
+ {'data': [548, ], 'label': 'tg__0.LatencyAvg.5'},]
This data is converted into the format required for jsTree to group and
display the metrics in a hierarchical fashion, including creating a
@@ -77,7 +77,7 @@ class JSTree(object):
self.jstree_data = []
for item in data:
- self._create_node(item["name"])
+ self._create_node(item["label"])
return self.jstree_data
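
The grouped node structure that JSTree builds is not visible in this hunk. As a minimal sketch of the conversion, assuming jsTree's conventional id/parent/text node scheme where parent '#' marks a root node (make_jstree_nodes is a hypothetical stand-in, not Yardstick's actual implementation):

    def make_jstree_nodes(labels):
        """Group dotted metric labels into jsTree-style nodes."""
        nodes, seen = [], set()
        for label in labels:                    # e.g. 'tg__0.LatencyAvg.5'
            parts = label.split('.')
            for depth in range(len(parts)):
                node_id = '.'.join(parts[:depth + 1])
                if node_id in seen:
                    continue
                seen.add(node_id)
                parent = '.'.join(parts[:depth]) if depth else '#'
                nodes.append({'id': node_id, 'parent': parent,
                              'text': parts[depth]})
        return nodes

    print(make_jstree_nodes(['tg__0.DropPackets', 'tg__0.LatencyAvg.5']))
    # [{'id': 'tg__0', 'parent': '#', 'text': 'tg__0'},
    #  {'id': 'tg__0.DropPackets', 'parent': 'tg__0', 'text': 'DropPackets'},
    #  {'id': 'tg__0.LatencyAvg', 'parent': 'tg__0', 'text': 'LatencyAvg'},
    #  {'id': 'tg__0.LatencyAvg.5', 'parent': 'tg__0.LatencyAvg', 'text': '5'}]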
@@ -85,7 +85,7 @@ class JSTree(object):
class Report(object):
"""Report commands.
- Set of commands to manage benchmark tasks.
+ Set of commands to manage reports.
"""
def __init__(self):
@@ -124,10 +124,12 @@ class Report(object):
else:
raise KeyError("Task ID or Test case not found.")
- @cliargs("task_id", type=str, help=" task id", nargs=1)
- @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
- def generate(self, args):
- """Start report generation."""
+ def _generate_common(self, args):
+ """Actions that are common to both report formats.
+
+ Create the necessary data structure for rendering
+ the report templates.
+ """
self._validate(args.yaml_name[0], args.task_id[0])
self.db_fieldkeys = self._get_fieldkeys()
@@ -148,6 +150,7 @@ class Report(object):
task_time = encodeutils.to_utf8(task['time'])
if not isinstance(task_time, str):
task_time = str(task_time, 'utf8')
+ if not isinstance(key, str):
key = str(key, 'utf8')
task_time = task_time[11:]
head, _, tail = task_time.partition('.')
@@ -155,7 +158,7 @@ class Report(object):
self.Timestamp.append(task_time)
if task[key] is None:
values.append(None)
- elif isinstance(task[key], (int, float)) is True:
+ elif isinstance(task[key], (int, float)):
values.append(task[key])
else:
values.append(ast.literal_eval(task[key]))
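
A worked example of the time handling above, using the timestamp from the unit test below. The reassembly into head + '.' + tail[:6] is not visible in this hunk and is an assumption, but it matches GOOD_TIMESTAMP:

    task_time = '0000-00-00T12:34:56.789012Z'
    task_time = task_time[11:]                # drop the date: '12:34:56.789012Z'
    head, _, tail = task_time.partition('.')  # head='12:34:56', tail='789012Z'
    print('%s.%s' % (head, tail[:6]))         # -> 12:34:56.789012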
@@ -163,11 +166,18 @@ class Report(object):
table_vals['Timestamp'] = self.Timestamp
table_vals[key] = values
+ return datasets, table_vals
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def generate(self, args):
+ """Start report generation."""
+ datasets, table_vals = self._generate_common(args)
+
template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
template_environment = jinja2.Environment(
autoescape=False,
- loader=jinja2.FileSystemLoader(template_dir),
- trim_blocks=False)
+ loader=jinja2.FileSystemLoader(template_dir))
context = {
"datasets": datasets,
@@ -181,4 +191,31 @@ class Report(object):
with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
file_open.write(template_html.render(context))
- print("Report generated. View /tmp/yardstick.htm")
+ print("Report generated. View %s" % consts.DEFAULT_HTML_FILE)
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def generate_nsb(self, args):
+ """Start NSB report generation."""
+ datasets, table_vals = self._generate_common(args)
+ jstree_data = JSTree().format_for_jstree(datasets)
+
+ template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
+ template_environment = jinja2.Environment(
+ autoescape=False,
+ loader=jinja2.FileSystemLoader(template_dir),
+ lstrip_blocks=True)
+
+ context = {
+ "Timestamps": self.Timestamp,
+ "task_id": self.task_id,
+ "table": table_vals,
+ "jstree_nodes": jstree_data,
+ }
+
+ template_html = template_environment.get_template("nsb_report.html.j2")
+
+ with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
+ file_open.write(template_html.render(context))
+
+ print("Report generated. View %s" % consts.DEFAULT_HTML_FILE)
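
Both generate() and generate_nsb() follow the same render flow: build a Jinja2 environment, render a template against the context dict, and write the result to consts.DEFAULT_HTML_FILE. A minimal runnable sketch of that flow, substituting a throwaway inline template for the real report.html.j2 / nsb_report.html.j2 files:

    import jinja2

    env = jinja2.Environment(
        autoescape=False,
        loader=jinja2.DictLoader({
            'demo.j2': 'Task {{ task_id }}: {{ table["Timestamp"]|length }} sample(s)',
        }),
        lstrip_blocks=True)
    context = {'task_id': 'fake-task-id',  # hypothetical values
               'table': {'Timestamp': ['12:34:56.789012']}}
    print(env.get_template('demo.j2').render(context))
    # -> Task fake-task-id: 1 sample(s)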
diff --git a/yardstick/cmd/commands/report.py b/yardstick/cmd/commands/report.py
index 47bf22a1f..4f057a05d 100644
--- a/yardstick/cmd/commands/report.py
+++ b/yardstick/cmd/commands/report.py
@@ -1,7 +1,7 @@
##############################################################################
-# Copyright (c) 2017 Rajesh Kudaka.
+# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
+# Copyright (c) 2018 Intel Corporation.
#
-# Author: Rajesh Kudaka (4k.rajesh@gmail.com)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -10,11 +10,7 @@
""" Handler for yardstick command 'report' """
-from __future__ import print_function
-
-from __future__ import absolute_import
-
-from yardstick.benchmark.core.report import Report
+from yardstick.benchmark.core import report
from yardstick.cmd.commands import change_osloobj_to_paras
from yardstick.common.utils import cliargs
@@ -22,12 +18,19 @@ from yardstick.common.utils import cliargs
class ReportCommands(object): # pragma: no cover
"""Report commands.
- Set of commands to manage benchmark tasks.
+ Set of commands to manage reports.
"""
@cliargs("task_id", type=str, help=" task id", nargs=1)
@cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
def do_generate(self, args):
- """Start a benchmark scenario."""
+ """Generate a report."""
+ param = change_osloobj_to_paras(args)
+ report.Report().generate(param)
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def do_generate_nsb(self, args):
+ """Generate a report using the NSB template."""
param = change_osloobj_to_paras(args)
- Report().generate(param)
+ report.Report().generate_nsb(param)
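
Why the core methods read args.task_id[0] and args.yaml_name[0]: with nargs=1, each positional argument parses to a one-element list. A sketch assuming @cliargs forwards these keywords to an argparse-style add_argument (the argument values below are hypothetical):

    import argparse

    parser = argparse.ArgumentParser(prog='yardstick report generate')
    parser.add_argument('task_id', type=str, nargs=1, help='task id')
    parser.add_argument('yaml_name', type=str, nargs=1, help='Yaml file Name')
    args = parser.parse_args(['fake-task-id', 'tc_demo'])
    print(args.task_id, args.yaml_name)  # -> ['fake-task-id'] ['tc_demo']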
diff --git a/yardstick/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py
index 3e80dcc45..11d017ff0 100644
--- a/yardstick/tests/unit/benchmark/core/test_report.py
+++ b/yardstick/tests/unit/benchmark/core/test_report.py
@@ -20,10 +20,10 @@ GOOD_YAML_NAME = 'fake_name'
GOOD_TASK_ID = str(uuid.uuid4())
GOOD_DB_FIELDKEYS = [{'fieldKey': 'fake_key'}]
GOOD_DB_TASK = [{
- 'fake_key': 0.000,
- 'time': '0000-00-00T00:00:00.000000Z',
+ 'fake_key': 1.234,
+ 'time': '0000-00-00T12:34:56.789012Z',
}]
-GOOD_TIMESTAMP = ['00:00:00.000000']
+GOOD_TIMESTAMP = ['12:34:56.789012']
BAD_YAML_NAME = 'F@KE_NAME'
BAD_TASK_ID = 'aaaaaa-aaaaaaaa-aaaaaaaaaa-aaaaaa'
@@ -47,23 +47,23 @@ class JSTreeTestCase(unittest.TestCase):
def test_format_for_jstree(self):
data = [
- {'data': [0, ], 'name': 'tg__0.DropPackets'},
- {'data': [548, ], 'name': 'tg__0.LatencyAvg.5'},
- {'data': [1172, ], 'name': 'tg__0.LatencyAvg.6'},
- {'data': [1001, ], 'name': 'tg__0.LatencyMax.5'},
- {'data': [1468, ], 'name': 'tg__0.LatencyMax.6'},
- {'data': [18.11, ], 'name': 'tg__0.RxThroughput'},
- {'data': [18.11, ], 'name': 'tg__0.TxThroughput'},
- {'data': [0, ], 'name': 'tg__1.DropPackets'},
- {'data': [548, ], 'name': 'tg__1.LatencyAvg.5'},
- {'data': [1172, ], 'name': 'tg__1.LatencyAvg.6'},
- {'data': [1001, ], 'name': 'tg__1.LatencyMax.5'},
- {'data': [1468, ], 'name': 'tg__1.LatencyMax.6'},
- {'data': [18.1132084505, ], 'name': 'tg__1.RxThroughput'},
- {'data': [18.1157260383, ], 'name': 'tg__1.TxThroughput'},
- {'data': [9057888, ], 'name': 'vnf__0.curr_packets_in'},
- {'data': [0, ], 'name': 'vnf__0.packets_dropped'},
- {'data': [617825443, ], 'name': 'vnf__0.packets_fwd'},
+ {'data': [0, ], 'label': 'tg__0.DropPackets'},
+ {'data': [548, ], 'label': 'tg__0.LatencyAvg.5'},
+ {'data': [1172, ], 'label': 'tg__0.LatencyAvg.6'},
+ {'data': [1001, ], 'label': 'tg__0.LatencyMax.5'},
+ {'data': [1468, ], 'label': 'tg__0.LatencyMax.6'},
+ {'data': [18.11, ], 'label': 'tg__0.RxThroughput'},
+ {'data': [18.11, ], 'label': 'tg__0.TxThroughput'},
+ {'data': [0, ], 'label': 'tg__1.DropPackets'},
+ {'data': [548, ], 'label': 'tg__1.LatencyAvg.5'},
+ {'data': [1172, ], 'label': 'tg__1.LatencyAvg.6'},
+ {'data': [1001, ], 'label': 'tg__1.LatencyMax.5'},
+ {'data': [1468, ], 'label': 'tg__1.LatencyMax.6'},
+ {'data': [18.1132084505, ], 'label': 'tg__1.RxThroughput'},
+ {'data': [18.1157260383, ], 'label': 'tg__1.TxThroughput'},
+ {'data': [9057888, ], 'label': 'vnf__0.curr_packets_in'},
+ {'data': [0, ], 'label': 'vnf__0.packets_dropped'},
+ {'data': [617825443, ], 'label': 'vnf__0.packets_fwd'},
]
expected_output = [
@@ -168,3 +168,15 @@ class ReportTestCase(unittest.TestCase):
mock_tasks.assert_called_once_with()
mock_keys.assert_called_once_with()
self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)
+
+ @mock.patch.object(report.Report, '_get_tasks')
+ @mock.patch.object(report.Report, '_get_fieldkeys')
+ @mock.patch.object(report.Report, '_validate')
+ def test_generate_nsb(self, mock_valid, mock_keys, mock_tasks):
+ mock_tasks.return_value = GOOD_DB_TASK
+ mock_keys.return_value = GOOD_DB_FIELDKEYS
+ self.rep.generate_nsb(self.param)
+ mock_valid.assert_called_once_with(GOOD_YAML_NAME, GOOD_TASK_ID)
+ mock_tasks.assert_called_once_with()
+ mock_keys.assert_called_once_with()
+ self.assertEqual(GOOD_TIMESTAMP, self.rep.Timestamp)