-rw-r--r--  samples/lmbench_cache.yaml                                            | 41
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_lmbench.py                | 19
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py                      | 19
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash  | 29
4 files changed, 107 insertions(+), 1 deletion(-)
diff --git a/samples/lmbench_cache.yaml b/samples/lmbench_cache.yaml
new file mode 100644
index 000000000..7a22cf15f
--- /dev/null
+++ b/samples/lmbench_cache.yaml
@@ -0,0 +1,41 @@
+---
+# Sample benchmark task config file
+# measure memory cache latency using lmbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Lmbench
+  options:
+    test_type: "latency_for_cache"
+    line_size: 128
+    repetition: 1
+    warmup: 0
+
+  host: demeter.demo
+
+  runner:
+    type: Iteration
+    iterations: 2
+    interval: 1
+
+  sla:
+    max_latency: 35
+    action: monitor
+
+context:
+  name: demo
+  image: yardstick-trusty-server
+  flavor: yardstick-flavor
+  user: ubuntu
+
+  servers:
+    demeter:
+      floating_ip: true
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+
+
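A task file like this one is normally launched through the yardstick CLI; as a minimal sketch (assuming yardstick is installed and the OpenStack credentials are sourced):

    yardstick task start samples/lmbench_cache.yaml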
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 1b24258b6..6be116371 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -159,6 +159,25 @@ class LmbenchTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         self.assertRaises(AssertionError, l.run, self.result)
 
+    def test_successful_latency_for_cache_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency_for_cache",
+            "repetition": 1,
+            "warmup": 0
+        }
+        args = {
+            "options": options,
+            "sla": {"max_latency": 35}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = "{\"L1cache\": 1.6}"
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(self.result, expected_result)
+
     def test_unsuccessful_script_error(self, mock_ssh):
 
         options = {"test_type": "bandwidth"}
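The new case mirrors the existing tests in LmbenchTestCase: SSH is mocked to return the script's JSON output and the parsed result is compared against it. A quick way to exercise just this module locally (a sketch assuming the tests/ tree is importable as a package; the project's usual test runner works as well):

    python -m unittest tests.unit.benchmark.scenarios.compute.test_lmbench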
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index e15fe7eb4..d3e802f3b 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -57,6 +57,7 @@ class Lmbench(base.Scenario):
     LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
     BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
+    LATENCY_CACHE_SCRIPT = "lmbench_latency_for_cache.bash"
 
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
@@ -71,6 +72,9 @@ class Lmbench(base.Scenario):
         self.latency_target_script = pkg_resources.resource_filename(
             "yardstick.benchmark.scenarios.compute",
             Lmbench.LATENCY_BENCHMARK_SCRIPT)
+        self.latency_for_cache_script = pkg_resources.resource_filename(
+            "yardstick.benchmark.scenarios.compute",
+            Lmbench.LATENCY_CACHE_SCRIPT)
         host = self.context_cfg["host"]
         user = host.get("user", "ubuntu")
         ip = host.get("ip", None)
@@ -85,6 +89,8 @@ class Lmbench(base.Scenario):
                         stdin=open(self.latency_target_script, 'rb'))
         self.client.run("cat > ~/lmbench_bandwidth.sh",
                         stdin=open(self.bandwidth_target_script, 'rb'))
+        self.client.run("cat > ~/lmbench_latency_for_cache.sh",
+                        stdin=open(self.latency_for_cache_script, 'rb'))
         self.setup_done = True
 
     def run(self, result):
@@ -106,6 +112,11 @@ class Lmbench(base.Scenario):
             warmup_repetitions = options.get('warmup', 0)
             cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
                   (size, benchmark, warmup_repetitions)
+        elif test_type == 'latency_for_cache':
+            repetition = options.get('repetition', 1)
+            warmup = options.get('warmup', 0)
+            cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
+                  (repetition, warmup)
         else:
             raise RuntimeError("No such test_type: %s for Lmbench scenario",
                                test_type)
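With the values from the sample task file above (repetition: 1, warmup: 0), the command assembled by this branch and run on the target host over SSH is:

    sudo bash lmbench_latency_for_cache.sh 1 0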
@@ -130,12 +141,18 @@ class Lmbench(base.Scenario):
                    if latency > sla_max_latency:
                        sla_error += "latency %f > sla:max_latency(%f); " \
                            % (latency, sla_max_latency)
-            else:
+            elif test_type == 'bandwidth':
                 sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
                 bw = result["bandwidth(MBps)"]
                 if bw < sla_min_bw:
                     sla_error += "bandwidth %f < " \
                                  "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
+            elif test_type == 'latency_for_cache':
+                sla_latency = float(self.scenario_cfg['sla']['max_latency'])
+                cache_latency = float(result['L1cache'])
+                if sla_latency < cache_latency:
+                    sla_error += "latency %f > sla:max_latency(%f); " \
+                        % (cache_latency, sla_latency)
             assert sla_error == "", sla_error
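As a worked example of the new SLA branch: with the sample output {"L1cache": 1.6} and the sample SLA max_latency of 35, the comparison 35 < 1.6 is false, so nothing is appended to sla_error and the assert passes; a reported L1 latency above 35 ns would fail the run instead.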
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash b/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash
new file mode 100644
index 000000000..2ed1bbe14
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run the lmbench cache latency benchmark on a host and
+# output, in json format, the measured L1 cache load
+# latency in nanoseconds
+
+set -e
+
+REPETITION=$1
+WARMUP=$2
+
+# write the result to stdout in json format
+output_json()
+{
+    read DATA
+    echo "$DATA" | awk '{printf "{\"L1cache\": %s}", $5}'
+}
+
+/usr/lib/lmbench/bin/x86_64-linux-gnu/cache -W $WARMUP -N $REPETITION 2>&1 | output_json
+
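The awk filter assumes the layout of the line printed by lmbench's cache benchmark; on lmbench3 it typically looks like the illustrative sample below (figures vary by machine), which is why field 5 is taken as the L1 load latency in nanoseconds:

    L1 cache: 32768 bytes 1.41 nanoseconds 64 linesize 6.51 parallelism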