author     kubi <jean.gaoliang@huawei.com>   2016-04-01 11:02:11 +0800
committer  qi liang <liangqi1@huawei.com>    2016-04-08 06:09:56 +0000
commit     6a1a21e2ecfde58ba8f57f1430cf577a6b80ea51 (patch)
tree       bd8de23af89109094c3d6eb57f7e40dc322fc297 /tests/unit/benchmark/scenarios
parent     fe1c1e138bd909a810ef1f4272c7431c7afdc870 (diff)
add latency for cache read operations (LMBench)
Use LMBench to measure cache latency. Two parameters can be configured (repetition and warmup). Change-Id: I5e4ecca0f9dd9c9ce2cecce3623dd8347ab2b5b1 Signed-off-by: kubi <jean.gaoliang@huawei.com>
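The commit message names the two knobs but not how they are consumed. Below is a minimal sketch of driving the new latency_for_cache test type from Python, assuming the scenario module lives at yardstick.benchmark.scenarios.compute.lmbench (as the test file path suggests). Only the options/sla keys are taken from the new unit test in the diff; the context host entry, the result dict, and the nanosecond reading of max_latency are illustrative assumptions, not part of the patch.

    # Hedged sketch (not from the patch): configure and run the latency_for_cache
    # test type the same way the new unit test does. The module path, the host
    # entry in context_cfg, and the unit of max_latency are assumptions.
    from yardstick.benchmark.scenarios.compute import lmbench

    scenario_cfg = {
        "options": {
            "test_type": "latency_for_cache",
            "repetition": 1,   # number of measurement repetitions (per the commit message)
            "warmup": 0,       # warmup iterations before measuring (per the commit message)
        },
        "sla": {"max_latency": 35},  # run fails if the reported latency exceeds this threshold
    }
    context_cfg = {"host": {"ip": "10.0.0.1", "user": "root"}}  # hypothetical target host

    scenario = lmbench.Lmbench(scenario_cfg, context_cfg)
    result = {}
    scenario.run(result)  # on success, result holds the parsed output, e.g. {"L1cache": 1.6}

In practice these options would come from a Yardstick scenario YAML rather than being built by hand; the unit test below mocks the SSH layer, so it exercises the same code path without a real target host.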
Diffstat (limited to 'tests/unit/benchmark/scenarios')
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_lmbench.py  19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 1b24258b6..6be116371 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -159,6 +159,25 @@ class LmbenchTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
self.assertRaises(AssertionError, l.run, self.result)
+ def test_successful_latency_for_cache_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency_for_cache",
+ "repetition":1,
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = "{\"L1cache\": 1.6}"
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
def test_unsuccessful_script_error(self, mock_ssh):
options = {"test_type": "bandwidth"}