diff options
author | Kristian Hunt <kristian.hunt@gmail.com> | 2015-10-13 16:38:56 +0200 |
---|---|---|
committer | Kristian Hunt <kristian.hunt@gmail.com> | 2015-10-28 11:38:48 +0100 |
commit | 05d5ac8d6d5e9bd1e6b69afbd764000aeb4a030e (patch) | |
tree | 2937a8372d4e672f4d8f755950e9e58779cbdc4b /yardstick/benchmark/scenarios/compute | |
parent | f37d291f6397891cd0dc37c6140b114868921b61 (diff) |
Extend lmbench scenario to measure memory bandwidth
The Lmbench scenario now has two scripts and will choose between them
based on whether the intention is to run memory latency or bandwidth
tests. A unit test file for this scenario was also added.
JIRA: YARDSTICK-113
Change-Id: I2ba4dbef31f3cafbdb3c583ece5ed9512a906896
Signed-off-by: Kristian Hunt <kristian.hunt@gmail.com>
Diffstat (limited to 'yardstick/benchmark/scenarios/compute')
-rw-r--r-- | yardstick/benchmark/scenarios/compute/lmbench.py | 98 | ||||
-rw-r--r-- | yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash | 29 | ||||
-rw-r--r-- | yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash (renamed from yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash) | 0 |
3 files changed, 106 insertions, 21 deletions
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py index 03caff525..b9adf5079 100644 --- a/yardstick/benchmark/scenarios/compute/lmbench.py +++ b/yardstick/benchmark/scenarios/compute/lmbench.py @@ -17,9 +17,15 @@ LOG = logging.getLogger(__name__) class Lmbench(base.Scenario): - """Execute lmbench memory read latency benchmark in a host + """Execute lmbench memory read latency or memory bandwidth benchmark in a host Parameters + test_type - specifies whether to measure memory latency or bandwidth + type: string + unit: na + default: "latency" + + Parameters for memory read latency benchmark stride - number of locations in memory between starts of array elements type: int unit: bytes @@ -29,11 +35,28 @@ class Lmbench(base.Scenario): unit: megabytes default: 16 - Results are accurate to the ~2-5 nanosecond range. + Results are accurate to the ~2-5 nanosecond range. + + Parameters for memory bandwidth benchmark + size - the amount of memory to test + type: int + unit: kilobyte + default: 128 + benchmark - the name of the memory bandwidth benchmark test to execute. 
+ Valid test names are rd, wr, rdwr, cp, frd, fwr, fcp, bzero, bcopy + type: string + unit: na + default: "rd" + warmup - the number of repetitons to perform before taking measurements + type: int + unit: na + default: 0 + more info http://manpages.ubuntu.com/manpages/trusty/lmbench.8.html """ __scenario_type__ = "Lmbench" - TARGET_SCRIPT = "lmbench_benchmark.bash" + LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash" + BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg @@ -42,9 +65,12 @@ class Lmbench(base.Scenario): def setup(self): """scenario setup""" - self.target_script = pkg_resources.resource_filename( + self.bandwidth_target_script = pkg_resources.resource_filename( "yardstick.benchmark.scenarios.compute", - Lmbench.TARGET_SCRIPT) + Lmbench.BANDWIDTH_BENCHMARK_SCRIPT) + self.latency_target_script = pkg_resources.resource_filename( + "yardstick.benchmark.scenarios.compute", + Lmbench.LATENCY_BENCHMARK_SCRIPT) host = self.context_cfg["host"] user = host.get("user", "ubuntu") ip = host.get("ip", None) @@ -54,10 +80,11 @@ class Lmbench(base.Scenario): self.client = ssh.SSH(user, ip, key_filename=key_filename) self.client.wait(timeout=600) - # copy script to host - self.client.run("cat > ~/lmbench.sh", - stdin=open(self.target_script, 'rb')) - + # copy scripts to host + self.client.run("cat > ~/lmbench_latency.sh", + stdin=open(self.latency_target_script, 'rb')) + self.client.run("cat > ~/lmbench_bandwidth.sh", + stdin=open(self.bandwidth_target_script, 'rb')) self.setup_done = True def run(self, result): @@ -67,25 +94,48 @@ class Lmbench(base.Scenario): self.setup() options = self.scenario_cfg['options'] - stride = options.get('stride', 128) - stop_size = options.get('stop_size', 16) + test_type = options.get('test_type', 'latency') + + if test_type == 'latency': + stride = options.get('stride', 128) + stop_size = options.get('stop_size', 16) + cmd = "sudo bash 
lmbench_latency.sh %d %d" % (stop_size, stride) + elif test_type == 'bandwidth': + size = options.get('size', 128) + benchmark = options.get('benchmark', 'rd') + warmup_repetitions = options.get('warmup', 0) + cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \ + (size, benchmark, warmup_repetitions) + else: + raise RuntimeError("No such test_type: %s for Lmbench scenario", + test_type) - cmd = "sudo bash lmbench.sh %d %d" % (stop_size, stride) LOG.debug("Executing command: %s", cmd) status, stdout, stderr = self.client.execute(cmd) if status: raise RuntimeError(stderr) - result.update({"latencies": json.loads(stdout)}) + if test_type == 'latency': + result.update({"latencies": json.loads(stdout)}) + else: + result.update(json.loads(stdout)) + if "sla" in self.scenario_cfg: sla_error = "" - sla_max_latency = int(self.scenario_cfg['sla']['max_latency']) - for t_latency in result: - latency = t_latency['latency'] - if latency > sla_max_latency: - sla_error += "latency %f > sla:max_latency(%f); " \ - % (latency, sla_max_latency) + if test_type == 'latency': + sla_max_latency = int(self.scenario_cfg['sla']['max_latency']) + for t_latency in result["latencies"]: + latency = t_latency['latency'] + if latency > sla_max_latency: + sla_error += "latency %f > sla:max_latency(%f); " \ + % (latency, sla_max_latency) + else: + sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth']) + bw = result["bandwidth(MBps)"] + if bw < sla_min_bw: + sla_error += "bandwidth %f < " \ + "sla:min_bandwidth(%f)" % (bw, sla_min_bw) assert sla_error == "", sla_error @@ -104,8 +154,14 @@ def _test(): logger = logging.getLogger('yardstick') logger.setLevel(logging.DEBUG) - options = {'stride': 128, 'stop_size': 16} - args = {'options': options} + options = { + 'test_type': 'latency', + 'stride': 128, + 'stop_size': 16 + } + + sla = {'max_latency': 35, 'action': 'monitor'} + args = {'options': options, 'sla': sla} result = {} p = Lmbench(args, ctx) diff --git 
#!/bin/bash

##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Run a lmbench memory bandwidth benchmark in a host and
# output in json format the memory size in megabytes and
# memory bandwidth in megabytes per second

set -e

SIZE=$1         # amount of memory to test, in kilobytes
TEST_NAME=$2    # bw_mem benchmark name: rd, wr, rdwr, cp, frd, fwr, fcp, bzero, bcopy
WARMUP=$3       # number of warm-up repetitions to perform before measuring

# write the result to stdout in json format
output_json()
{
    read DATA
    # Quote "$DATA" so that unexpected characters in the benchmark output
    # cannot trigger pathname (glob) expansion; awk then splits the
    # whitespace-separated "size bandwidth" pair into $1 and $2.
    echo "$DATA" | awk '/ /{printf "{\"size(MB)\": %s, \"bandwidth(MBps)\": %s}", $1, $2}'
}

# bw_mem reports its result on stderr, hence the 2>&1 redirection
/usr/lib/lmbench/bin/x86_64-linux-gnu/bw_mem -W $WARMUP ${SIZE}k $TEST_NAME 2>&1 | output_json
\ No newline at end of file diff --git a/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash index 04e3c1a9d..04e3c1a9d 100644 --- a/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash +++ b/yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash |