diff options
author | kubi <jean.gaoliang@huawei.com> | 2016-04-01 11:02:11 +0800 |
---|---|---|
committer | qi liang <liangqi1@huawei.com> | 2016-04-08 06:09:56 +0000 |
commit | 6a1a21e2ecfde58ba8f57f1430cf577a6b80ea51 (patch) | |
tree | bd8de23af89109094c3d6eb57f7e40dc322fc297 /yardstick/benchmark/scenarios | |
parent | fe1c1e138bd909a810ef1f4272c7431c7afdc870 (diff) |
add latency for cache read operations (LMBench)
Use LMBench to measure cache latency.
Two parameters can be configured (repetition and warmup).
Change-Id: I5e4ecca0f9dd9c9ce2cecce3623dd8347ab2b5b1
Signed-off-by: kubi <jean.gaoliang@huawei.com>
Diffstat (limited to 'yardstick/benchmark/scenarios')
-rw-r--r-- | yardstick/benchmark/scenarios/compute/lmbench.py | 19 | ||||
-rw-r--r-- | yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash | 29 |
2 files changed, 47 insertions, 1 deletions
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py index e15fe7eb4..d3e802f3b 100644 --- a/yardstick/benchmark/scenarios/compute/lmbench.py +++ b/yardstick/benchmark/scenarios/compute/lmbench.py @@ -57,6 +57,7 @@ class Lmbench(base.Scenario): LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash" BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash" + LATENCY_CACHE_SCRIPT = "lmbench_latency_for_cache.bash" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg @@ -71,6 +72,9 @@ class Lmbench(base.Scenario): self.latency_target_script = pkg_resources.resource_filename( "yardstick.benchmark.scenarios.compute", Lmbench.LATENCY_BENCHMARK_SCRIPT) + self.latency_for_cache_script = pkg_resources.resource_filename( + "yardstick.benchmark.scenarios.compute", + Lmbench.LATENCY_CACHE_SCRIPT) host = self.context_cfg["host"] user = host.get("user", "ubuntu") ip = host.get("ip", None) @@ -85,6 +89,8 @@ class Lmbench(base.Scenario): stdin=open(self.latency_target_script, 'rb')) self.client.run("cat > ~/lmbench_bandwidth.sh", stdin=open(self.bandwidth_target_script, 'rb')) + self.client.run("cat > ~/lmbench_latency_for_cache.sh", + stdin=open(self.latency_for_cache_script, 'rb')) self.setup_done = True def run(self, result): @@ -106,6 +112,11 @@ class Lmbench(base.Scenario): warmup_repetitions = options.get('warmup', 0) cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \ (size, benchmark, warmup_repetitions) + elif test_type == 'latency_for_cache': + repetition = options.get('repetition', 1) + warmup = options.get('warmup', 0) + cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \ + (repetition, warmup) else: raise RuntimeError("No such test_type: %s for Lmbench scenario", test_type) @@ -130,12 +141,18 @@ class Lmbench(base.Scenario): if latency > sla_max_latency: sla_error += "latency %f > sla:max_latency(%f); " \ % (latency, sla_max_latency) - else: + elif test_type == 
'bandwidth': sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth']) bw = result["bandwidth(MBps)"] if bw < sla_min_bw: sla_error += "bandwidth %f < " \ "sla:min_bandwidth(%f)" % (bw, sla_min_bw) + elif test_type == 'latency_for_cache': + sla_latency = float(self.scenario_cfg['sla']['max_latency']) + cache_latency = float(result['L1cache']) + if sla_latency < cache_latency: + sla_error += "latency %f > sla:max_latency(%f); " \ + % (cache_latency, sla_latency) assert sla_error == "", sla_error diff --git a/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash b/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash new file mode 100644 index 000000000..2ed1bbe14 --- /dev/null +++ b/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash @@ -0,0 +1,29 @@ +#!/bin/bash + +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Run a lmbench cache latency benchmark in a host and +# outputs in json format the array sizes in megabytes and +# load latency over all points in that array in nanosecods + +set -e + +REPETITON=$1 +WARMUP=$2 + +# write the result to stdout in json format +output_json() +{ + read DATA + echo $DATA | awk '{printf "{\"L1cache\": %s}", $5}' +} + +/usr/lib/lmbench/bin/x86_64-linux-gnu/cache -W $WARMUP -N $REPETITON 2>&1 | output_json + |