author     Kristian Hunt <kristian.hunt@gmail.com>  2015-10-13 16:38:56 +0200
committer  Kristian Hunt <kristian.hunt@gmail.com>  2015-10-28 11:38:48 +0100
commit     05d5ac8d6d5e9bd1e6b69afbd764000aeb4a030e (patch)
tree       2937a8372d4e672f4d8f755950e9e58779cbdc4b
parent     f37d291f6397891cd0dc37c6140b114868921b61 (diff)
Extend lmbench scenario to measure memory bandwidth
The Lmbench scenario now has two scripts and chooses between them based on
whether the intention is to run memory latency or memory bandwidth tests.
A unit test file for this scenario has also been added.

JIRA: YARDSTICK-113
Change-Id: I2ba4dbef31f3cafbdb3c583ece5ed9512a906896
Signed-off-by: Kristian Hunt <kristian.hunt@gmail.com>
-rw-r--r--   samples/lmbench.yaml                                                        21
-rwxr-xr-x   tests/functional/test_cli_scenario.py                                        3
-rw-r--r--   tests/unit/benchmark/scenarios/compute/test_lmbench.py                     169
-rw-r--r--   yardstick/benchmark/scenarios/compute/lmbench.py                             98
-rw-r--r--   yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash      29
-rw-r--r--   yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash (renamed from yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash)   0
6 files changed, 297 insertions, 23 deletions
diff --git a/samples/lmbench.yaml b/samples/lmbench.yaml
index 256d8c67e..2b8e99084 100644
--- a/samples/lmbench.yaml
+++ b/samples/lmbench.yaml
@@ -1,6 +1,6 @@
---
# Sample benchmark task config file
-# measure memory read latency using lmbench
+# measure memory read latency and memory bandwidth using lmbench
schema: "yardstick:task:0.1"
@@ -8,6 +8,7 @@ scenarios:
-
type: Lmbench
options:
+ test_type: "latency"
stride: 64
stop_size: 32
@@ -22,6 +23,24 @@ scenarios:
sla:
max_latency: 35
action: monitor
+-
+ type: Lmbench
+ options:
+ test_type: "bandwidth"
+ size: 500
+ benchmark: "wr"
+
+ host: demeter.demo
+
+ runner:
+ type: Arithmetic
+ name: size
+ stop: 2000
+ step: 500
+
+ sla:
+ min_bandwidth: 10000
+ action: monitor
context:
name: demo
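
Illustration (not part of the patch): the Arithmetic runner in the bandwidth scenario above sweeps the "size" option. A minimal Python sketch of the values it would be expected to produce, assuming the sweep starts at the configured size and steps inclusively up to "stop":

    # Assumed sweep semantics: start at options["size"], step by "step" up to "stop".
    start, stop, step = 500, 2000, 500
    sizes = list(range(start, stop + 1, step))
    print(sizes)  # [500, 1000, 1500, 2000] -> one bandwidth measurement per size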
diff --git a/tests/functional/test_cli_scenario.py b/tests/functional/test_cli_scenario.py
index aad475970..877973783 100755
--- a/tests/functional/test_cli_scenario.py
+++ b/tests/functional/test_cli_scenario.py
@@ -31,7 +31,8 @@ class ScenarioTestCase(unittest.TestCase):
def test_scenario_show_Lmbench(self):
res = self.yardstick("scenario show Lmbench")
- lmbench = "Execute lmbench memory read latency benchmark in a host" in res
+ lmbench = "Execute lmbench memory read latency"
+ "or memory bandwidth benchmark in a host" in res
self.assertTrue(lmbench)
def test_scenario_show_Perf(self):
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
new file mode 100644
index 000000000..1b24258b6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.compute import lmbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
+class LmbenchTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_successful_setup(self, mock_ssh):
+
+ l = lmbench.Lmbench({}, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+
+ l.setup()
+ self.assertIsNotNone(l.client)
+ self.assertTrue(l.setup_done)
+
+ def test_unsuccessful_unknown_type_run(self, mock_ssh):
+
+ options = {
+ "test_type": "foo"
+ }
+ args = {'options': options}
+
+ l = lmbench.Lmbench(args, self.ctx)
+
+ self.assertRaises(RuntimeError, l.run, self.result)
+
+ def test_successful_latency_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {'options': options}
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads('{"latencies": ' + sample_output + "}")
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {"options": options}
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_latency_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads('{"latencies": ' + sample_output + "}")
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_bandwidth_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"min_bandwidth": 10000}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_unsuccessful_latency_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '[{"latency": 37.5, "size": 0.00049}]'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, l.run, self.result)
+
+ def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"min_bandwidth": 10000}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, l.run, self.result)
+
+ def test_unsuccessful_script_error(self, mock_ssh):
+
+ options = {"test_type": "bandwidth"}
+ args = {"options": options}
+ l = lmbench.Lmbench(args, self.ctx)
+
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, l.run, self.result)
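
A possible way to run just this new test module locally (a usage sketch, not part of the patch; it assumes the repository root is on PYTHONPATH and the mock package is installed):

    import unittest
    # Load and run only the new lmbench unit tests (module path is from the diff above).
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "tests.unit.benchmark.scenarios.compute.test_lmbench")
    unittest.TextTestRunner(verbosity=2).run(suite)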
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 03caff525..b9adf5079 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -17,9 +17,15 @@ LOG = logging.getLogger(__name__)
class Lmbench(base.Scenario):
- """Execute lmbench memory read latency benchmark in a host
+ """Execute lmbench memory read latency or memory bandwidth benchmark in a host
Parameters
+ test_type - specifies whether to measure memory latency or bandwidth
+ type: string
+ unit: na
+ default: "latency"
+
+ Parameters for memory read latency benchmark
stride - number of locations in memory between starts of array elements
type: int
unit: bytes
@@ -29,11 +35,28 @@ class Lmbench(base.Scenario):
unit: megabytes
default: 16
- Results are accurate to the ~2-5 nanosecond range.
+ Results are accurate to the ~2-5 nanosecond range.
+
+ Parameters for memory bandwidth benchmark
+ size - the amount of memory to test
+ type: int
+ unit: kilobyte
+ default: 128
+ benchmark - the name of the memory bandwidth benchmark test to execute.
+ Valid test names are rd, wr, rdwr, cp, frd, fwr, fcp, bzero, bcopy
+ type: string
+ unit: na
+ default: "rd"
+ warmup - the number of repetitions to perform before taking measurements
+ type: int
+ unit: na
+ default: 0
+ more info http://manpages.ubuntu.com/manpages/trusty/lmbench.8.html
"""
__scenario_type__ = "Lmbench"
- TARGET_SCRIPT = "lmbench_benchmark.bash"
+ LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
+ BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
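
Illustration (not part of the patch): option blocks built purely from the documented defaults in the docstring above and the options.get(...) fallbacks used in run() below:

    # Defaults per the docstring and the fallback values in run():
    latency_options = {"test_type": "latency", "stride": 128, "stop_size": 16}
    bandwidth_options = {"test_type": "bandwidth", "size": 128,
                         "benchmark": "rd", "warmup": 0}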
@@ -42,9 +65,12 @@ class Lmbench(base.Scenario):
def setup(self):
"""scenario setup"""
- self.target_script = pkg_resources.resource_filename(
+ self.bandwidth_target_script = pkg_resources.resource_filename(
"yardstick.benchmark.scenarios.compute",
- Lmbench.TARGET_SCRIPT)
+ Lmbench.BANDWIDTH_BENCHMARK_SCRIPT)
+ self.latency_target_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.compute",
+ Lmbench.LATENCY_BENCHMARK_SCRIPT)
host = self.context_cfg["host"]
user = host.get("user", "ubuntu")
ip = host.get("ip", None)
@@ -54,10 +80,11 @@ class Lmbench(base.Scenario):
self.client = ssh.SSH(user, ip, key_filename=key_filename)
self.client.wait(timeout=600)
- # copy script to host
- self.client.run("cat > ~/lmbench.sh",
- stdin=open(self.target_script, 'rb'))
-
+ # copy scripts to host
+ self.client.run("cat > ~/lmbench_latency.sh",
+ stdin=open(self.latency_target_script, 'rb'))
+ self.client.run("cat > ~/lmbench_bandwidth.sh",
+ stdin=open(self.bandwidth_target_script, 'rb'))
self.setup_done = True
def run(self, result):
@@ -67,25 +94,48 @@ class Lmbench(base.Scenario):
self.setup()
options = self.scenario_cfg['options']
- stride = options.get('stride', 128)
- stop_size = options.get('stop_size', 16)
+ test_type = options.get('test_type', 'latency')
+
+ if test_type == 'latency':
+ stride = options.get('stride', 128)
+ stop_size = options.get('stop_size', 16)
+ cmd = "sudo bash lmbench_latency.sh %d %d" % (stop_size, stride)
+ elif test_type == 'bandwidth':
+ size = options.get('size', 128)
+ benchmark = options.get('benchmark', 'rd')
+ warmup_repetitions = options.get('warmup', 0)
+ cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
+ (size, benchmark, warmup_repetitions)
+ else:
+ raise RuntimeError("No such test_type: %s for Lmbench scenario",
+ test_type)
- cmd = "sudo bash lmbench.sh %d %d" % (stop_size, stride)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
- result.update({"latencies": json.loads(stdout)})
+ if test_type == 'latency':
+ result.update({"latencies": json.loads(stdout)})
+ else:
+ result.update(json.loads(stdout))
+
if "sla" in self.scenario_cfg:
sla_error = ""
- sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
- for t_latency in result:
- latency = t_latency['latency']
- if latency > sla_max_latency:
- sla_error += "latency %f > sla:max_latency(%f); " \
- % (latency, sla_max_latency)
+ if test_type == 'latency':
+ sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
+ for t_latency in result["latencies"]:
+ latency = t_latency['latency']
+ if latency > sla_max_latency:
+ sla_error += "latency %f > sla:max_latency(%f); " \
+ % (latency, sla_max_latency)
+ else:
+ sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
+ bw = result["bandwidth(MBps)"]
+ if bw < sla_min_bw:
+ sla_error += "bandwidth %f < " \
+ "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
assert sla_error == "", sla_error
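
Worked example (not part of the patch): the command strings run() assembles for the two scenarios in samples/lmbench.yaml above:

    cmd_latency = "sudo bash lmbench_latency.sh %d %d" % (32, 64)               # stop_size=32, stride=64
    cmd_bandwidth = "sudo bash lmbench_bandwidth.sh %d %s %d" % (500, "wr", 0)  # warmup defaults to 0
    # -> "sudo bash lmbench_latency.sh 32 64"
    # -> "sudo bash lmbench_bandwidth.sh 500 wr 0"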
@@ -104,8 +154,14 @@ def _test():
logger = logging.getLogger('yardstick')
logger.setLevel(logging.DEBUG)
- options = {'stride': 128, 'stop_size': 16}
- args = {'options': options}
+ options = {
+ 'test_type': 'latency',
+ 'stride': 128,
+ 'stop_size': 16
+ }
+
+ sla = {'max_latency': 35, 'action': 'monitor'}
+ args = {'options': options, 'sla': sla}
result = {}
p = Lmbench(args, ctx)
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash b/yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash
new file mode 100644
index 000000000..09993a088
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run a lmbench memory bandwidth benchmark in a host and
+# output in json format the memory size in megabytes and
+# memory bandwidth in megabytes per second
+
+set -e
+
+SIZE=$1
+TEST_NAME=$2
+WARMUP=$3
+
+# write the result to stdout in json format
+output_json()
+{
+ read DATA
+ echo $DATA | awk '/ /{printf "{\"size(MB)\": %s, \"bandwidth(MBps)\": %s}", $1, $2}'
+}
+
+/usr/lib/lmbench/bin/x86_64-linux-gnu/bw_mem -W $WARMUP ${SIZE}k $TEST_NAME 2>&1 | output_json
\ No newline at end of file
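
For clarity, a rough Python equivalent of the output_json helper above (not part of the patch). The single-line "<size in MB> <bandwidth in MB/s>" output format of bw_mem is an assumption consistent with the sample outputs in the unit tests:

    def output_json(line):
        # e.g. line = "0.262144 11025.50"  (assumed bw_mem output format)
        size_mb, mbps = line.split()[:2]
        return '{"size(MB)": %s, "bandwidth(MBps)": %s}' % (size_mb, mbps)

    print(output_json("0.262144 11025.50"))
    # {"size(MB)": 0.262144, "bandwidth(MBps)": 11025.50}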
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash
index 04e3c1a9d..04e3c1a9d 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash
+++ b/yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash