author     Kristian Hunt <kristian.hunt@gmail.com>  2015-10-13 16:38:56 +0200
committer  Kristian Hunt <kristian.hunt@gmail.com>  2015-10-28 11:38:48 +0100
commit     05d5ac8d6d5e9bd1e6b69afbd764000aeb4a030e (patch)
tree       2937a8372d4e672f4d8f755950e9e58779cbdc4b /tests/unit/benchmark/scenarios
parent     f37d291f6397891cd0dc37c6140b114868921b61 (diff)
Extend lmbench scenario to measure memory bandwidth
The Lmbench scenario now has two scripts and chooses between them
based on whether memory latency or memory bandwidth is to be
measured. A unit test file for this scenario is also added.
JIRA: YARDSTICK-113
Change-Id: I2ba4dbef31f3cafbdb3c583ece5ed9512a906896
Signed-off-by: Kristian Hunt <kristian.hunt@gmail.com>
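The lmbench.py scenario module itself is not part of this diff, but the tests below pin down its contract: an unknown test_type raises RuntimeError, latency results are stored under a "latencies" key, bandwidth results are merged directly into the result dict, and SLA violations surface as AssertionError. A minimal sketch of logic that would satisfy that contract (names and structure assumed for illustration, not taken from the actual module):

import json


class LmbenchSketch(object):
    """Hypothetical dispatch/SLA logic matching what the unit tests expect.

    Illustration only; the real implementation lives in
    yardstick/benchmark/scenarios/compute/lmbench.py and may differ.
    """

    def __init__(self, args):
        self.args = args

    def run(self, result, raw_output):
        test_type = self.args["options"].get("test_type", "latency")
        if test_type not in ("latency", "bandwidth"):
            # unknown type -> RuntimeError, as the unknown-type test expects
            raise RuntimeError("Unknown test type: %s" % test_type)

        if test_type == "latency":
            # latency script emits a JSON list of {"latency", "size"} rows
            result["latencies"] = json.loads(raw_output)
        else:
            # bandwidth script emits a single JSON object
            result.update(json.loads(raw_output))

        sla = self.args.get("sla")
        if sla and test_type == "latency":
            for row in result["latencies"]:
                # plain assert -> AssertionError on SLA breach
                assert row["latency"] <= sla["max_latency"], \
                    "latency %s exceeds max_latency" % row["latency"]
        elif sla:
            assert result["bandwidth(MBps)"] >= sla["min_bandwidth"], \
                "bandwidth %s below min_bandwidth" % result["bandwidth(MBps)"]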
Diffstat (limited to 'tests/unit/benchmark/scenarios')
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_lmbench.py  169
1 file changed, 169 insertions, 0 deletions
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
new file mode 100644
index 000000000..1b24258b6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.compute import lmbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
+class LmbenchTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': {
+                'ip': '172.16.0.137',
+                'user': 'cirros',
+                'key_filename': "mykey.key"
+            }
+        }
+
+        self.result = {}
+
+    def test_successful_setup(self, mock_ssh):
+
+        l = lmbench.Lmbench({}, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+
+        l.setup()
+        self.assertIsNotNone(l.client)
+        self.assertTrue(l.setup_done)
+
+    def test_unsuccessful_unknown_type_run(self, mock_ssh):
+
+        options = {
+            "test_type": "foo"
+        }
+        args = {'options': options}
+
+        l = lmbench.Lmbench(args, self.ctx)
+
+        self.assertRaises(RuntimeError, l.run, self.result)
+
+    def test_successful_latency_run_no_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency",
+            "stride": 64,
+            "stop_size": 16
+        }
+        args = {'options': options}
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        self.assertEqual(self.result, expected_result)
+
+    def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "bandwidth",
+            "size": 500,
+            "benchmark": "rd",
+            "warmup": 0
+        }
+        args = {"options": options}
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(self.result, expected_result)
+
+    def test_successful_latency_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency",
+            "stride": 64,
+            "stop_size": 16
+        }
+        args = {
+            "options": options,
+            "sla": {"max_latency": 35}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        self.assertEqual(self.result, expected_result)
+
+    def test_successful_bandwidth_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "bandwidth",
+            "size": 500,
+            "benchmark": "rd",
+            "warmup": 0
+        }
+        args = {
+            "options": options,
+            "sla": {"min_bandwidth": 10000}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(self.result, expected_result)
+
+    def test_unsuccessful_latency_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency",
+            "stride": 64,
+            "stop_size": 16
+        }
+        args = {
+            "options": options,
+            "sla": {"max_latency": 35}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '[{"latency": 37.5, "size": 0.00049}]'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, l.run, self.result)
+
+    def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "bandwidth",
+            "size": 500,
+            "benchmark": "rd",
+            "warmup": 0
+        }
+        args = {
+            "options": options,
+            "sla": {"min_bandwidth": 10000}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, l.run, self.result)
+
+    def test_unsuccessful_script_error(self, mock_ssh):
+
+        options = {"test_type": "bandwidth"}
+        args = {"options": options}
+        l = lmbench.Lmbench(args, self.ctx)
+
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, l.run, self.result)
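A note on the mocking pattern used throughout the file: the class-level @mock.patch replaces the ssh module that lmbench imports, and because calling a MagicMock always returns the same memoized child mock, the SSH client created inside Lmbench.setup() is the very object each test configures. A standalone illustration (not part of the diff):

import mock

# Calling a MagicMock returns its memoized return_value, so SSH() yields
# the same child mock on every call -- this is why setting
# mock_ssh.SSH().execute.return_value in a test also controls the client
# that the scenario constructs internally.
m = mock.MagicMock()
assert m.SSH() is m.SSH()

m.SSH().execute.return_value = (0, 'stdout', '')
status, stdout, stderr = m.SSH().execute('any command')
assert (status, stdout, stderr) == (0, 'stdout', '')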