Diffstat (limited to 'tests/unit/benchmark/scenarios')
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_lmbench.py  169
1 file changed, 169 insertions(+), 0 deletions(-)
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
new file mode 100644
index 000000000..1b24258b6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
+
+import json
+import unittest
+
+import mock
+
+from yardstick.benchmark.scenarios.compute import lmbench
+
+
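+# ssh is patched for the whole class: each test method receives the mock
+# as its first argument and no real SSH connection is ever opened.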
+@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
+class LmbenchTestCase(unittest.TestCase):
+
+ def setUp(self):
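+ # Minimal host context; the ip/user/key_filename entries are what the
+ # scenario presumably needs to set up its SSH connection.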
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_successful_setup(self, mock_ssh):
+
+ l = lmbench.Lmbench({}, self.ctx)
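+ # execute() returns (status, stdout, stderr); status 0 simulates success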
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+
+ l.setup()
+ self.assertIsNotNone(l.client)
+ self.assertTrue(l.setup_done)
+
+ def test_unsuccessful_unknown_type_run(self, mock_ssh):
+
+ options = {
+ "test_type": "foo"
+ }
+ args = {'options': options}
+
+ l = lmbench.Lmbench(args, self.ctx)
+
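+ # an unsupported test_type is expected to make run() raise RuntimeError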
+ self.assertRaises(RuntimeError, l.run, self.result)
+
+ def test_successful_latency_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {'options': options}
+ l = lmbench.Lmbench(args, self.ctx)
+
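+ # canned latency output: a JSON list of {latency, size} measurements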
+ sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads('{"latencies": ' + sample_output + "}")
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {"options": options}
+ l = lmbench.Lmbench(args, self.ctx)
+
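+ # canned bandwidth output: a single JSON record of size and bandwidth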
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_latency_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
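+ # 4.944 is well under the max_latency SLA of 35, so run() must pass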
+ sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads('{"latencies": ' + sample_output + "}")
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_bandwidth_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"min_bandwidth": 10000}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
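+ # 11025.5 exceeds the min_bandwidth SLA of 10000, so run() must pass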
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_unsuccessful_latency_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
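+ # 37.5 breaks the max_latency SLA of 35, so run() must raise AssertionError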
+ sample_output = '[{"latency": 37.5, "size": 0.00049}]'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, l.run, self.result)
+
+ def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"min_bandwidth": 10000}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
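+ # 9925.5 is below the min_bandwidth SLA of 10000, so AssertionError is expected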
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, l.run, self.result)
+
+ def test_unsuccessful_script_error(self, mock_ssh):
+
+ options = {"test_type": "bandwidth"}
+ args = {"options": options}
+ l = lmbench.Lmbench(args, self.ctx)
+
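+ # a non-zero exit status from the remote script must surface as RuntimeError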
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, l.run, self.result)