Diffstat (limited to 'yardstick/tests/unit/benchmark')
 yardstick/tests/unit/benchmark/runner/test_arithmetic.py         | 228
 yardstick/tests/unit/benchmark/runner/test_duration.py           |  18
 yardstick/tests/unit/benchmark/runner/test_proxduration.py       |   2
 yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py |  56
 4 files changed, 264 insertions(+), 40 deletions(-)
diff --git a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
index 7b1e1e976..35d935cd5 100644
--- a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
+++ b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
@@ -14,16 +14,26 @@ import os
import time
from yardstick.benchmark.runners import arithmetic
+from yardstick.common import exceptions as y_exc
class ArithmeticRunnerTest(unittest.TestCase):
class MyMethod(object):
- def __init__(self):
+ SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
+ BROAD_EXCEPTION_SIDE_EFFECT = 2
+
+ def __init__(self, side_effect=0):
self.count = 101
+ self.side_effect = side_effect
def __call__(self, data):
self.count += 1
data['my_key'] = self.count
+ if self.side_effect == self.SLA_VALIDATION_ERROR_SIDE_EFFECT:
+ raise y_exc.SLAValidationError(case_name='My Case',
+ error_msg='my error message')
+ elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT:
+ raise y_exc.YardstickException
return self.count
def setUp(self):
@@ -218,3 +228,219 @@ class ArithmeticRunnerTest(unittest.TestCase):
self.assertEqual(result['sequence'], count)
self.assertGreater(result['timestamp'], timestamp)
timestamp = result['timestamp']
+
+ def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_output_on_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ output_queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
+ self.assertEqual(count, 8)
+ self.assertTrue(output_queue.empty())
+
+ def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_output_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ output_queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'],
+ ('My Case SLA validation failed. '
+ 'Error: my error message',))
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
+ self.assertEqual(count, 8)
+ self.assertTrue(output_queue.empty())
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.my_method.assert_called_once()
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_not_called()
+
+ def test__worker_process_output_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ output_queue = multiprocessing.Queue()
+ with self.assertRaisesRegexp(
+ y_exc.SLAValidationError,
+ 'My Case SLA validation failed. Error: my error message'):
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.assertEqual(self.benchmark.my_method.count, 102)
+ self.benchmark.teardown.assert_not_called()
+ self.assertTrue(queue.empty())
+ self.assertTrue(output_queue.empty())
+
+ def test__worker_process_broad_exception_no_sla_cfg_early_exit(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_called_once()
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 64, 'size': 500})
+
+ def test__worker_process_output_on_broad_exception_no_sla_cfg(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ output_queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 102)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 64, 'size': 500})
+ self.assertEqual(queue.qsize(), 1)
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertRegexpMatches(
+ result['errors'],
+ 'YardstickException: An unknown exception occurred.')
+ self.assertEqual(result['sequence'], 1)
+ self.assertTrue(output_queue.empty())
+
+ def test__worker_process_broad_exception_sla_cfg_not_none(self):
+ self.scenario_cfg['sla'] = {'action': 'some action'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_output_on_broad_exception_sla_cfg_not_none(self):
+ self.scenario_cfg['sla'] = {'action': 'some action'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ output_queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+ self.assertTrue(output_queue.empty())
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertRegexpMatches(
+ result['errors'],
+ 'YardstickException: An unknown exception occurred.')
+ self.assertEqual(result['sequence'], count)
+
+ def test__worker_process_benchmark_teardown_on_broad_exception(self):
+ self.benchmark.teardown = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ with self.assertRaises(SystemExit) as raised:
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.assertEqual(raised.exception.code, 1)
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
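The new arithmetic-runner tests above pin down how _worker_process reacts to a SLAValidationError versus a broad YardstickException, keyed off scenario_cfg['sla']['action']. A minimal sketch of the error-handling contract those assertions imply, assuming the real loop in yardstick/benchmark/runners/arithmetic.py follows the usual runner pattern (the function below is illustrative, not the runner source):

    # Illustrative sketch only; the actual loop lives in
    # yardstick/benchmark/runners/arithmetic.py and may differ in detail.
    import traceback

    from yardstick.common import exceptions as y_exc

    def run_one_iteration(method, data, sla_action):
        """Return (result, errors) the way the tests above expect."""
        errors = ''
        result = None
        try:
            result = method(data)
        except y_exc.SLAValidationError as error:
            # 'assert' propagates; 'monitor' records the message and carries
            # on; with no SLA config the error is silently dropped, which is
            # why the no-sla-cfg tests still see 8 calls and errors == ''.
            if sla_action == 'assert':
                raise
            elif sla_action == 'monitor':
                errors = error.args
        except Exception:  # pylint: disable=broad-except
            # Broad failures are recorded as a traceback string; the caller
            # stops iterating early when sla_action is None, matching the
            # early-exit test that sees a single call.
            errors = traceback.format_exc()
        return result, errors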
diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py
index d4801ef2c..fa47e96bf 100644
--- a/yardstick/tests/unit/benchmark/runner/test_duration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_duration.py
@@ -97,9 +97,9 @@ class DurationRunnerTest(unittest.TestCase):
multiprocessing.Event(), mock.Mock())
self._assert_defaults__worker_run_setup_and_teardown()
- self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
- self.assertGreater(self.benchmark.my_method.call_count, 2)
- self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 0)
+ self.assertGreater(self.benchmark.my_method.call_count, 0)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 0)
def test__worker_process_called_without_cfg(self):
scenario_cfg = {'runner': {}}
@@ -140,9 +140,9 @@ class DurationRunnerTest(unittest.TestCase):
time.sleep(0.1)
self._assert_defaults__worker_run_setup_and_teardown()
- self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
- self.assertGreater(self.benchmark.my_method.count, 103)
- self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 0)
+ self.assertGreater(self.benchmark.my_method.count, 1)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 0)
count = 101
while not output_queue.empty():
@@ -181,9 +181,9 @@ class DurationRunnerTest(unittest.TestCase):
time.sleep(0.1)
self._assert_defaults__worker_run_setup_and_teardown()
- self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
- self.assertGreater(self.benchmark.my_method.count, 103)
- self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 0)
+ self.assertGreater(self.benchmark.my_method.count, 1)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 0)
count = 0
while not queue.empty():
diff --git a/yardstick/tests/unit/benchmark/runner/test_proxduration.py b/yardstick/tests/unit/benchmark/runner/test_proxduration.py
index 3299c5b05..056195fd3 100644
--- a/yardstick/tests/unit/benchmark/runner/test_proxduration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_proxduration.py
@@ -97,7 +97,7 @@ class ProxDurationRunnerTest(unittest.TestCase):
{}, multiprocessing.Event(), mock.Mock())
self._assert_defaults__worker_run_setup_and_teardown()
- self.assertGreater(self.benchmark.my_method.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.call_count, 0)
def test__worker_process_called_without_cfg(self):
scenario_cfg = {'runner': {}}
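The test_duration.py and test_proxduration.py hunks above relax call-count assertions that depended on how many iterations the worker happens to complete inside a 0.1-second sleep window, which varies with scheduler load and made the tests flaky on busy CI nodes. Asserting liveness instead of throughput keeps the check meaningful without the timing race:

    # Flaky: assumes the worker loops at least three times in the window.
    # self.assertGreater(self.benchmark.my_method.call_count, 2)
    # Robust: only asserts that the worker actually ran.
    self.assertGreater(self.benchmark.my_method.call_count, 0)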
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index c4ac347f4..ba63e5f9e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -6,11 +6,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
-
-from __future__ import absolute_import
-
import unittest
import mock
@@ -18,13 +13,9 @@ from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import lmbench
from yardstick.common import exceptions as y_exc
+from yardstick import ssh
-# pylint: disable=unused-argument
-# disable this for now because I keep forgetting mock patch arg ordering
-
-
-@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
class LmbenchTestCase(unittest.TestCase):
def setUp(self):
@@ -38,16 +29,23 @@ class LmbenchTestCase(unittest.TestCase):
self.result = {}
- def test_successful_setup(self, mock_ssh):
+ self._mock_ssh = mock.patch.object(ssh, 'SSH')
+ self.mock_ssh = self._mock_ssh.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_ssh.stop()
+
+ def test_successful_setup(self):
l = lmbench.Lmbench({}, self.ctx)
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_ssh.from_node().execute.return_value = (0, '', '')
l.setup()
self.assertIsNotNone(l.client)
self.assertTrue(l.setup_done)
- def test_unsuccessful_unknown_type_run(self, mock_ssh):
+ def test_unsuccessful_unknown_type_run(self):
options = {
"test_type": "foo"
@@ -58,7 +56,7 @@ class LmbenchTestCase(unittest.TestCase):
self.assertRaises(RuntimeError, l.run, self.result)
- def test_successful_latency_run_no_sla(self, mock_ssh):
+ def test_successful_latency_run_no_sla(self):
options = {
"test_type": "latency",
@@ -69,12 +67,12 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = '[{"latency": 4.944, "size": 0.00049}]'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
self.assertEqual(self.result, expected_result)
- def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+ def test_successful_bandwidth_run_no_sla(self):
options = {
"test_type": "bandwidth",
@@ -86,12 +84,12 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
- def test_successful_latency_run_sla(self, mock_ssh):
+ def test_successful_latency_run_sla(self):
options = {
"test_type": "latency",
@@ -105,12 +103,12 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = '[{"latency": 4.944, "size": 0.00049}]'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
self.assertEqual(self.result, expected_result)
- def test_successful_bandwidth_run_sla(self, mock_ssh):
+ def test_successful_bandwidth_run_sla(self):
options = {
"test_type": "bandwidth",
@@ -125,12 +123,12 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
- def test_unsuccessful_latency_run_sla(self, mock_ssh):
+ def test_unsuccessful_latency_run_sla(self):
options = {
"test_type": "latency",
@@ -144,10 +142,10 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = '[{"latency": 37.5, "size": 0.00049}]'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
- def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+ def test_unsuccessful_bandwidth_run_sla(self):
options = {
"test_type": "bandwidth",
@@ -162,10 +160,10 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
- def test_successful_latency_for_cache_run_sla(self, mock_ssh):
+ def test_successful_latency_for_cache_run_sla(self):
options = {
"test_type": "latency_for_cache",
@@ -179,16 +177,16 @@ class LmbenchTestCase(unittest.TestCase):
l = lmbench.Lmbench(args, self.ctx)
sample_output = "{\"L1cache\": 1.6}"
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
- def test_unsuccessful_script_error(self, mock_ssh):
+ def test_unsuccessful_script_error(self):
options = {"test_type": "bandwidth"}
args = {"options": options}
l = lmbench.Lmbench(args, self.ctx)
- mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.mock_ssh.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, l.run, self.result)
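The lmbench changes drop the class-level @mock.patch decorator, which injected a mock_ssh argument into every test method and made patch argument ordering easy to get wrong, in favor of mock.patch.object started in setUp() and undone via addCleanup(). A minimal sketch of the idiom (class and test names below are illustrative, not from the patch):

    import unittest

    import mock

    from yardstick import ssh

    class ExampleTestCase(unittest.TestCase):

        def setUp(self):
            # Patch yardstick.ssh.SSH for the duration of each test.
            self._mock_ssh = mock.patch.object(ssh, 'SSH')
            self.mock_ssh = self._mock_ssh.start()
            # addCleanup runs even when setUp or the test fails, so the
            # patch can never leak into other test cases.
            self.addCleanup(self._mock_ssh.stop)

        def test_execute_is_stubbed(self):
            self.mock_ssh.from_node().execute.return_value = (0, 'out', '')
            # ...exercise the code under test here...
            rc, stdout, _ = self.mock_ssh.from_node().execute('cmd')
            self.assertEqual(rc, 0)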