path: root/yardstick/benchmark/scenarios/compute/perf.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import logging

import pkg_resources
from oslo_serialization import jsonutils

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class Perf(base.Scenario):
    """Execute perf benchmark in a host

  Parameters
    events - perf tool software, hardware or tracepoint events
        type:       [str]
        unit:       na
        default:    ['task-clock']
    load - simulate load on the host by doing IO operations
        type:       bool
        unit:       na
        default:    false

    For more info about perf and perf events see https://perf.wiki.kernel.org
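
    Sample task snippet (illustrative values only; the 'cycles' event and
    the SLA numbers below are assumptions, not defaults of this module):
        scenario:
          type: Perf
          options:
            events: ['task-clock', 'cycles']
            load: true
          sla:
            metric: task-clock
            expected_value: 1000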
    """

    __scenario_type__ = "Perf"

    TARGET_SCRIPT = 'perf_benchmark.bash'

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False

    def setup(self):
        """scenario setup"""
        self.target_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.compute', Perf.TARGET_SCRIPT)
        host = self.context_cfg['host']

        self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
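        # block until the host accepts SSH connections; wait() gives up
        # after the 600 second timeout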
        self.client.wait(timeout=600)

        # copy script to host
        self.client._put_file_shell(self.target_script, '~/perf_benchmark.sh')
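        # note: the .bash resource lands as ~/perf_benchmark.sh on the
        # target, which is the name run() invokes below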

        self.setup_done = True

    def run(self, result):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg['options']
        events = options.get('events', ['task-clock'])

        events_string = ""
        for event in events:
            events_string += event + " "

        # duration precedence: duration runner, then the scenario options
        # (arithmetic runner), then a 30 second default
        duration_time = self.scenario_cfg.get("runner", {}).get("duration")
        arithmetic_time = options.get("duration")
        duration = duration_time or arithmetic_time or 30

        if options.get('load', False):
            # keep the host busy for the whole measurement interval
            load = "dd if=/dev/urandom of=/dev/null"
        else:
            load = "sleep %d" % duration

        cmd = "sudo bash perf_benchmark.sh '%s' %d %s" \
            % (load, duration, events_string)
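        # with the defaults above this expands to, e.g.:
        #   sudo bash perf_benchmark.sh 'sleep 30' 30 task-clock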

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)

        if status:
            raise RuntimeError(stderr)

        result.update(jsonutils.loads(stdout))
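        # stdout is expected to be a flat JSON object such as
        # {"task-clock": 12345.0}, mapping event names to counter values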

        if "sla" in self.scenario_cfg:
            metric = self.scenario_cfg['sla']['metric']
            exp_val = self.scenario_cfg['sla']['expected_value']
            smaller_than_exp = 'smaller_than_expected' \
                               in self.scenario_cfg['sla']
            self.verify_SLA(metric in result,
                            "Metric (%s) not found." % metric)
            self.verify_SLA(
                not smaller_than_exp,
                "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
            self.verify_SLA(
                result[metric] >= exp_val,
                "%s %d < %d (sla); " % (metric, result[metric], exp_val))


def _test():
    """internal test function"""
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {
        'host': {
            'ip': '10.229.47.137',
            'user': 'root',
            'key_filename': key_filename
        }
    }
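    # ctx stands in for the context_cfg['host'] entry that setup() consumes;
    # the IP and credentials above are lab-specific values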

    logging.basicConfig()
    logger = logging.getLogger('yardstick')
    logger.setLevel(logging.DEBUG)

    options = {'load': True}
    args = {'options': options}
    result = {}

    p = Perf(args, ctx)
    p.run(result)
    print(result)


if __name__ == '__main__':
    _test()