##############################################################################
# Copyright (c) 2017 Rajesh Kudaka <4k.rajesh@gmail.com>
# Copyright (c) 2018-2019 Intel Corporation.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

""" Handler for yardstick command 'report' """

import re
import six
import uuid

import jinja2
from api.utils import influx
from oslo_utils import uuidutils
from yardstick.common import constants as consts
from yardstick.common.utils import cliargs


class JSTree(object):
    """Data structure to parse data for use with the JS library jsTree"""
    def __init__(self):
        self._created_nodes = ['#']
        self.jstree_data = []

    def _create_node(self, _id):
        """Helper method for format_for_jstree to create each node.

        Creates the node (and any required parents) and keeps track
        of the created nodes.

        :param _id: (string) id of the node to be created
        :return: None
        """
        components = _id.split(".")

        if len(components) == 1:
            text = components[0]
            parent_id = "#"
        else:
            text = components[-1]
            parent_id = ".".join(components[:-1])
            # make sure the parent has been created
            if parent_id not in self._created_nodes:
                self._create_node(parent_id)

        self.jstree_data.append({"id": _id, "text": text, "parent": parent_id})
        self._created_nodes.append(_id)

    def format_for_jstree(self, data):
        """Format the data into the required format for jsTree.

        The data format expected is a list of metric names e.g.:

            ['tg__0.DropPackets', 'tg__0.LatencyAvg.5']

        This data is converted into the format required for jsTree to group and
        display the metrics in a hierarchical fashion, including creating the
        necessary parent nodes, e.g.::

            [{"id": "tg__0", "text": "tg__0", "parent": "#"},
             {"id": "tg__0.DropPackets", "text": "DropPackets", "parent": "tg__0"},
             {"id": "tg__0.LatencyAvg", "text": "LatencyAvg", "parent": "tg__0"},
             {"id": "tg__0.LatencyAvg.5", "text": "5", "parent": "tg__0.LatencyAvg"},]

        :param data: (list) data to be converted
        :return: list
        """
        self._created_nodes = ['#']
        self.jstree_data = []

        for metric in data:
            self._create_node(metric)

        return self.jstree_data


class Report(object):
    """Report commands.

    Set of commands to manage reports.
    """

    def __init__(self):
        self.Timestamp = []
        self.yaml_name = ""
        self.task_id = ""

    def _validate(self, yaml_name, task_id):
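        """Validate and store the report arguments.

        yaml_name must be a plain word (letters, digits, '_' or '-') and
        task_id must look like a UUID; otherwise a ValueError is raised.
        """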
        if re.match(r"^[\w-]+$", yaml_name):
            self.yaml_name = yaml_name
        else:
            raise ValueError("invalid yaml_name", yaml_name)

        if uuidutils.is_uuid_like(task_id):
            task_id = '{' + task_id + '}'
            task_uuid = uuid.UUID(task_id)
            self.task_id = task_uuid
        else:
            raise ValueError("invalid task_id", task_id)

    def _get_fieldkeys(self):
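        """Return the InfluxDB field keys of the test case measurement.

        Raises KeyError if the measurement does not exist.
        """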
        fieldkeys_cmd = "show field keys from \"%s\""
        fieldkeys_query = fieldkeys_cmd % (self.yaml_name)
        query_exec = influx.query(fieldkeys_query)
        if query_exec:
            return query_exec
        else:
            raise KeyError("Test case not found.")

    def _get_metrics(self):
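        """Return all measurement points recorded for this task.

        Raises KeyError if the test case or task ID is not found.
        """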
        metrics_cmd = "select * from \"%s\" where task_id = '%s'"
        metrics_query = metrics_cmd % (self.yaml_name, self.task_id)
        query_exec = influx.query(metrics_query)
        if query_exec:
            return query_exec
        else:
            raise KeyError("Task ID or Test case not found.")

    def _get_task_start_time(self):
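        """Return the timestamp of the earliest point recorded for the task."""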
        # NOTE: the start time should ideally come from the task or the
        # metadata table; the first entry written to InfluxDB for a task
        # is AFTER the first TC iteration
        cmd = "select * from \"%s\" where task_id='%s' ORDER BY time ASC limit 1"
        task_query = cmd % (self.yaml_name, self.task_id)

        query_exec = influx.query(task_query)
        start_time = query_exec[0]['time']
        return start_time

    def _get_task_end_time(self):
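        """Return the timestamp of the latest point recorded for the task."""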
        # NOTE(elfoley): when using select first() and select last() for the
        # DB query, the timestamp returned is 0, so later queries try to
        # return metrics from 1970
        cmd = "select * from \"%s\" where task_id='%s' ORDER BY time DESC limit 1"
        task_query = cmd % (self.yaml_name, self.task_id)
        query_exec = influx.query(task_query)
        end_time = query_exec[0]['time']
        return end_time

    def _get_baro_metrics(self):
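        """Collect barometer (collectd) metrics recorded during the task.

        Queries the 'collectd' database for each metric type between the
        task start and end times, and groups the values per metric name,
        keyed by trimmed timestamp. Returns a dict of
        {metric_name: {time: value}} plus a sorted 'Timestamp' list.
        """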
        start_time = self._get_task_start_time()
        end_time = self._get_task_end_time()
        metric_list = [
            "cpu_value", "cpufreq_value", "intel_pmu_value",
            "virt_value", "memory_value"]
        metrics = {}
        times = []
        query_exec = {}
        for metric in metric_list:
            cmd = "select * from \"%s\" where time >= '%s' and time <= '%s'"
            query = cmd % (metric, start_time, end_time)
            query_exec[metric] = influx.query(query, db='collectd')
            print("query_exec: {}".format(query_exec))

        for metric in query_exec:
            print("metric in query_exec: {}".format(metric))
            met_values = query_exec[metric]
            print("met_values: {}".format(met_values))
            for x in met_values:
                x['name'] = metric
                metric_name = str('.'.join(
                    [x[f] for f in [
                        'host', 'name', 'type', 'type_instance', 'instance'
                        ] if x.get(f)]))

                if not metrics.get(metric_name):
                    metrics[metric_name] = {}
                metric_time = self._get_trimmed_timestamp(x['time'])
                times.append(metric_time)
                metrics[metric_name][metric_time] = x['value']

        times = sorted(set(times))

        metrics['Timestamp'] = times
        print("metrics: {}".format(metrics))
        return metrics

    def _get_trimmed_timestamp(self, metric_time, resolution=4):
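        """Trim an InfluxDB timestamp to time-of-day with limited precision.

        E.g. '2019-01-01T11:22:33.456789012Z' -> '11:22:33.4567'
        (with the default resolution of 4 fractional digits).
        """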
        if not isinstance(metric_time, str):
            metric_time = metric_time.encode('utf8') # PY2: unicode to str
        metric_time = metric_time[11:]               # skip date, keep time
        head, _, tail = metric_time.partition('.')   # split HH:MM:SS & nsZ
        metric_time = head + '.' + tail[:resolution] # join HH:MM:SS & .us
        return metric_time

    def _get_timestamps(self, metrics, resolution=6):
        # Extract the timestamps from a list of metrics
        timestamps = []
        for metric in metrics:
            metric_time = self._get_trimmed_timestamp(
                metric['time'], resolution)
            timestamps.append(metric_time)               # HH:MM:SS.micros
        return timestamps

    def _format_datasets(self, metric_name, metrics):
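        """Extract the values of one metric as a JS-friendly list.

        Numbers are kept as-is (PY2 longs become floats), numeric strings
        are converted to numbers, and non-numeric strings become None so
        they do not end up in the report.
        """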
        values = []
        for metric in metrics:
            val = metric.get(metric_name, None)
            if val is None:
                # keep explicit None or missing entry as is
                pass
            elif isinstance(val, (int, float)):
                # keep plain int or float as is
                pass
            elif six.PY2 and isinstance(val,
                        long):  # pylint: disable=undefined-variable
                # PY2: long value would be rendered with trailing L,
                # which JS does not support, so convert it to float
                val = float(val)
            elif isinstance(val, six.string_types):
                s = val
                if not isinstance(s, str):
                    s = s.encode('utf8')            # PY2: unicode to str
                try:
                    # convert until failure
                    val = s
                    val = float(s)
                    val = int(s)
                    if six.PY2 and isinstance(val,
                                long):  # pylint: disable=undefined-variable
                        val = float(val)            # PY2: long to float
                except ValueError:
                    # conversion stopped at the first failure, so val holds
                    # the last successful result: the original string, or a
                    # float when int() failed
                    pass
                finally:
                    # if val was not converted into a num, then it must be
                    # text, which shouldn't end up in the report
                    if isinstance(val, six.string_types):
                        val = None
            else:
                raise ValueError("Cannot convert %r" % val)
            values.append(val)
        return values

    @cliargs("task_id", type=str, help=" task id", nargs=1)
    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
    def _generate_common(self, args):
        """Actions that are common to both report formats.

        Create the necessary data structure for rendering
        the report templates.
        """
        self._validate(args.yaml_name[0], args.task_id[0])

        db_fieldkeys = self._get_fieldkeys()
        # list of dicts of:
        # - PY2: unicode key and unicode value
        # - PY3: str key and str value

        db_metrics = self._get_metrics()
        # list of dicts of:
        # - PY2: unicode key and { None | unicode | float | long | int } value
        # - PY3: str key and { None | str | float | int } value

        # extract fieldKey entries, and convert them to str where needed
        field_keys = [key if isinstance(key, str)       # PY3: already str
                          else key.encode('utf8')       # PY2: unicode to str
                      for key in
                          [field['fieldKey']
                           for field in db_fieldkeys]]

        # extract timestamps
        self.Timestamp = self._get_timestamps(db_metrics)

        # prepare return values
        datasets = []
        table_vals = {'Timestamp': self.Timestamp}

        # extract and convert field values
        for key in field_keys:
            values = self._format_datasets(key, db_metrics)
            datasets.append({'label': key, 'data': values})
            table_vals[key] = values

        return datasets, table_vals

    @cliargs("task_id", type=str, help=" task id", nargs=1)
    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
    def generate(self, args):
        """Start report generation."""
        datasets, table_vals = self._generate_common(args)

        template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
        template_environment = jinja2.Environment(
            autoescape=False,
            loader=jinja2.FileSystemLoader(template_dir))

        context = {
            "datasets": datasets,
            "Timestamps": self.Timestamp,
            "task_id": self.task_id,
            "table": table_vals,
        }

        template_html = template_environment.get_template("report.html.j2")

        with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
            file_open.write(template_html.render(context))

        print("Report generated. View %s" % consts.DEFAULT_HTML_FILE)

    @cliargs("task_id", type=str, help=" task id", nargs=1)
    @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
    def generate_nsb(self, args):
        """Start NSB report generation."""
        _, report_data = self._generate_common(args)
        report_time = report_data.pop('Timestamp')
        report_keys = sorted(report_data, key=str.lower)
        report_tree = JSTree().format_for_jstree(report_keys)
        report_meta = {
            "testcase": self.yaml_name,
            "task_id": self.task_id,
        }

        template_dir = consts.YARDSTICK_ROOT_PATH + "yardstick/common"
        template_environment = jinja2.Environment(
            autoescape=False,
            loader=jinja2.FileSystemLoader(template_dir),
            lstrip_blocks=True)

        context = {
            "report_meta": report_meta,
            "report_data": report_data,
            "report_time": report_time,
            "report_keys": report_keys,
            "report_tree": report_tree,
        }

        template_html = template_environment.get_template("nsb_report.html.j2")

        with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
            file_open.write(template_html.render(context))

        print("Report generated. View %s" % consts.DEFAULT_HTML_FILE)