##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import pkg_resources
import logging
import json

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class Unixbench(base.Scenario):
    """Execute the UnixBench benchmark suite on a remote host.

    The UnixBench ``Run`` script takes a number of options which you can use
    to customise a test, and you can specify the names of the tests to run.
    The full usage is:

        Run [ -q | -v ] [-i <n>] [-c <n> [-c <n> ...]] [test ...]

        -i <count>  Run <count> iterations for each test -- slower tests
                    use <count> / 3, but at least 1.  Defaults to 10 (3 for
                    slow tests).
        -c <n>      Run <n> copies of each test in parallel.

    Parameters for setting unixbench
        run_mode - Run in quiet mode or verbose mode
            type: string
            unit: None
            default: None
        test_type - The available tests are organised into categories;
            type: string
            unit: None
            default: None
        iterations - Run <iterations> iterations for each test -- slower
            tests use <iterations> / 3, but at least 1.
            Defaults to 10 (3 for slow tests).
            type: int
            unit: None
            default: None
        copies - Run <copies> copies of each test in parallel.
            type: int
            unit: None
            default: None

    more info https://github.com/kdlucas/byte-unixbench/blob/master/UnixBench
    """
    __scenario_type__ = "UnixBench"

    # benchmark driver script shipped alongside this module and copied to
    # the target host during setup()
    TARGET_SCRIPT = "unixbench_benchmark.bash"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False

    def setup(self):
        """Scenario setup: open an SSH session to the target host and copy
        the benchmark driver script onto it."""
        self.target_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Unixbench.TARGET_SCRIPT)

        host = self.context_cfg["host"]
        user = host.get("user", "ubuntu")
        ip = host.get("ip", None)
        key_filename = host.get('key_filename', "~/.ssh/id_rsa")

        LOG.info("user:%s, host:%s", user, ip)
        self.client = ssh.SSH(user, ip, key_filename=key_filename)
        self.client.wait(timeout=600)

        # copy script to the host; use a context manager so the local file
        # handle is closed deterministically (the original open() was never
        # closed and relied on garbage collection)
        with open(self.target_script, 'rb') as script_file:
            self.client.run("cat > ~/unixbench_benchmark.sh",
                            stdin=script_file)

        self.setup_done = True

    def run(self, result):
        """Execute the benchmark on the target host.

        Builds the UnixBench ``Run`` option string from the scenario
        options, runs the driver script remotely, parses its JSON stdout
        into *result*, and finally checks the scores against any configured
        SLA thresholds.

        :param result: dict updated in place with the benchmark scores
        :raises RuntimeError: if the remote command exits non-zero
        :raises AssertionError: if a score falls below its SLA threshold
        """
        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg["options"]

        run_mode = options.get("run_mode", None)
        LOG.debug("Executing run_mode: %s", run_mode)
        cmd_args = ""
        if run_mode == "quiet":
            cmd_args = "-q"
        elif run_mode == "verbose":
            cmd_args = "-v"

        # optional numeric flags: scenario option name -> Run script flag
        option_pair_list = [("iterations", "-i"),
                            ("copies", "-c")]
        for option_pair in option_pair_list:
            if option_pair[0] in options:
                cmd_args += " %s %s " % (option_pair[1],
                                         options[option_pair[0]])

        test_type = options.get("test_type", None)
        if test_type is not None:
            cmd_args += " %s " % (test_type)

        cmd = "sudo bash unixbench_benchmark.sh %s" % (cmd_args)
        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

        result.update(json.loads(stdout))

        if "sla" in self.scenario_cfg:
            sla_error = ""
            for t, score in result.items():
                # only scores with a configured threshold are checked
                if t not in self.scenario_cfg['sla']:
                    continue
                sla_score = float(self.scenario_cfg['sla'][t])
                score = float(score)
                if score < sla_score:
                    sla_error += "%s score %f < sla:%s_score(%f); " % \
                        (t, score, t, sla_score)
            assert sla_error == "", sla_error


def _test():  # pragma: no cover
    """internal test function"""
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {
        'host': {
            'ip': '10.229.47.137',
            'user': 'root',
            'key_filename': key_filename
        }
    }

    options = {
        'test_type': 'dhrystone',
        'run_mode': 'verbose'
    }

    args = {'options': options}
    result = {}

    p = Unixbench(args, ctx)
    p.run(result)
    # parenthesized form prints identically on Python 2 (single argument)
    # and is valid Python 3
    print(result)

if __name__ == '__main__':
    _test()
#!/bin/bash

##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Run the UnixBench suite with the options passed on the command line and
# print the single-copy and parallel-copy index scores as a JSON object on
# stdout (consumed by yardstick/benchmark/scenarios/compute/unixbench.py).

set -e

# Commandline arguments are forwarded verbatim to UnixBench's Run script.
# NOTE: $OPTIONS is expanded unquoted on purpose so the option string is
# word-split back into individual arguments.
OPTIONS="$@"
OUTPUT_FILE=/tmp/unixbench-out.log

# run unixbench test, capturing the report for later parsing
run_unixbench()
{
    cd /opt/tempT/UnixBench/
    ./Run $OPTIONS > "$OUTPUT_FILE"
}

# write the result to stdout in json format
output_json()
{
    # the report prints the single-copy score line first and the
    # parallel-copy score line last; with one copy both are the same line
    single_score=$(awk '/Score/{print $7}' "$OUTPUT_FILE" | head -1)
    parallel_score=$(awk '/Score/{print $7}' "$OUTPUT_FILE" | tail -1)
    # printf emits the object in one piece, without the stray whitespace
    # that echo -e line continuations used to embed
    printf '{"single_score":"%s","parallel_score":"%s"}\n' \
        "$single_score" "$parallel_score"
}

# main entry
main()
{
    # run the test
    run_unixbench

    # output result
    output_json
}

main