aboutsummaryrefslogtreecommitdiffstats
path: root/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py
blob: 1d039ea59fddf0760818e6deec57078590b58c2e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import subprocess
import time
import os
from distutils.spawn import find_executable

from charmhelpers.core.hookenv import (
    in_relation_hook,
    relation_ids,
    relation_set,
    relation_get,
)


def action_set(key, val):
    """Record an action result via the Juju ``action-set`` hook tool.

    :param str key: result key; for dict values, nested keys are
        flattened into dotted form (``key.subkey``).
    :param val: value to record; a dict is expanded recursively, one
        ``action-set`` invocation per leaf value.
    :return: True if ``action-set`` was available and invoked, False
        when the tool is not on PATH (i.e. not running in an action
        context).
    :raises subprocess.CalledProcessError: if ``action-set`` exits
        non-zero.
    """
    if not find_executable('action-set'):
        # Not inside a Juju action hook; silently skip (best-effort).
        return False

    if isinstance(val, dict):
        # Flatten nested dicts into dotted keys, recursing per entry.
        for subkey, subval in val.items():
            action_set('%s.%s' % (key, subkey), subval)
        return True

    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
    return True


class Benchmark:
    """
    Helper class for the `benchmark` interface.

    :param list actions: Define the actions that are also benchmarks

    From inside the benchmark-relation-changed hook, you would
    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])

    Examples:

        siege = Benchmark(['siege'])
        siege.start()
        [... run siege ...]
        # The higher the score, the better the benchmark
        siege.set_composite_score(16.70, 'trans/sec', 'desc')
        siege.finish()


    """

    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing

    # Relation keys that must all be present before the config file
    # is written; a partial set is treated as "not ready yet".
    required_keys = [
        'hostname',
        'port',
        'graphite_port',
        'graphite_endpoint',
        'api_port'
    ]

    def __init__(self, benchmarks=None):
        """Advertise benchmark actions and persist relation config.

        Only does work when invoked from inside a relation hook.

        :param list benchmarks: action names to advertise on every
            ``benchmark`` relation, or None to advertise nothing.
        """
        if not in_relation_hook():
            return

        if benchmarks is not None:
            # Advertise the benchmark actions on each related unit.
            for rid in sorted(relation_ids('benchmark')):
                relation_set(relation_id=rid, relation_settings={
                    'benchmarks': ",".join(benchmarks)
                })

        # Collect the relation data; require the full key set or none.
        config = {}
        for key in self.required_keys:
            val = relation_get(key)
            if val is None:
                # We don't have all of the required keys
                config = {}
                break
            config[key] = val

        if config:
            # Persist as simple key=value lines for later consumption.
            with open(self.BENCHMARK_CONF, 'w') as f:
                for key, val in config.items():
                    f.write("%s=%s\n" % (key, val))

    @staticmethod
    def start():
        """Record the benchmark start timestamp (UTC-style format).

        If the collectd charm is also installed, tell it to send a
        snapshot of the current profile data.
        """
        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        if os.path.exists(COLLECT_PROFILE_DATA):
            subprocess.check_output([COLLECT_PROFILE_DATA])

    @staticmethod
    def finish():
        """Record the benchmark stop timestamp."""
        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

    @staticmethod
    def set_composite_score(value, units, direction='asc'):
        """
        Set the composite score for a benchmark run. This is a single number
        representative of the benchmark results. This could be the most
        important metric, or an amalgamation of metric scores.
        """
        return action_set(
            "meta.composite",
            {'value': value, 'units': units, 'direction': direction}
        )