3 files changed, 200 insertions, 87 deletions
diff --git a/yardstick/tests/unit/benchmark/runner/test_search.py b/yardstick/tests/unit/benchmark/runner/test_search.py
index 10ea48931..d5d1b8ded 100644
--- a/yardstick/tests/unit/benchmark/runner/test_search.py
+++ b/yardstick/tests/unit/benchmark/runner/test_search.py
@@ -25,16 +25,14 @@ from yardstick.common import exceptions as y_exc
 class TestSearchRunnerHelper(unittest.TestCase):
 
     def test___call__(self):
-        cls = mock.MagicMock()
-        aborted = mock.MagicMock()
         scenario_cfg = {
             'runner': {},
         }
 
-        benchmark = cls()
-        method = getattr(benchmark, 'my_method')
+        benchmark = mock.Mock()
+        method = getattr(benchmark(), 'my_method')
         helper = SearchRunnerHelper(
-            cls, 'my_method', scenario_cfg, {}, aborted)
+            benchmark, 'my_method', scenario_cfg, {}, mock.Mock())
 
         with helper.get_benchmark_instance():
             helper()
@@ -42,14 +40,12 @@ class TestSearchRunnerHelper(unittest.TestCase):
         method.assert_called_once()
 
     def test___call___error(self):
-        cls = mock.MagicMock()
-        aborted = mock.MagicMock()
         scenario_cfg = {
             'runner': {},
         }
 
         helper = SearchRunnerHelper(
-            cls, 'my_method', scenario_cfg, {}, aborted)
+            mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
 
         with self.assertRaises(RuntimeError):
             helper()
@@ -57,8 +53,6 @@ class TestSearchRunnerHelper(unittest.TestCase):
     @mock.patch.object(time, 'sleep')
     @mock.patch.object(time, 'time')
     def test_is_not_done(self, mock_time, *args):
-        cls = mock.MagicMock()
-        aborted = mock.MagicMock()
         scenario_cfg = {
             'runner': {},
         }
@@ -66,7 +60,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
         mock_time.side_effect = range(1000)
 
         helper = SearchRunnerHelper(
-            cls, 'my_method', scenario_cfg, {}, aborted)
+            mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
 
         index = -1
         for index in helper.is_not_done():
@@ -77,8 +71,6 @@ class TestSearchRunnerHelper(unittest.TestCase):
 
     @mock.patch.object(time, 'sleep')
     def test_is_not_done_immediate_stop(self, *args):
-        cls = mock.MagicMock()
-        aborted = mock.MagicMock()
         scenario_cfg = {
             'runner': {
                 'run_step': '',
@@ -86,7 +78,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
         }
 
         helper = SearchRunnerHelper(
-            cls, 'my_method', scenario_cfg, {}, aborted)
+            mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
 
         index = -1
         for index in helper.is_not_done():
@@ -113,7 +105,7 @@ class TestSearchRunner(unittest.TestCase):
         }
 
         runner = SearchRunner({})
-        runner.worker_helper = mock.MagicMock(side_effect=update)
+        runner.worker_helper = mock.Mock(side_effect=update)
 
         self.assertFalse(runner._worker_run_once('sequence 1'))
 
@@ -137,14 +129,14 @@ class TestSearchRunner(unittest.TestCase):
         }
 
         runner = SearchRunner({})
-        runner.worker_helper = mock.MagicMock(side_effect=update)
+        runner.worker_helper = mock.Mock(side_effect=update)
 
         self.assertTrue(runner._worker_run_once('sequence 1'))
 
     def test__worker_run_once_assertion_error_assert(self):
         runner = SearchRunner({})
         runner.sla_action = 'assert'
-        runner.worker_helper = mock.MagicMock(side_effect=y_exc.SLAValidationError)
+        runner.worker_helper = mock.Mock(side_effect=y_exc.SLAValidationError)
 
         with self.assertRaises(y_exc.SLAValidationError):
             runner._worker_run_once('sequence 1')
@@ -152,36 +144,34 @@ class TestSearchRunner(unittest.TestCase):
     def test__worker_run_once_assertion_error_monitor(self):
         runner = SearchRunner({})
         runner.sla_action = 'monitor'
-        runner.worker_helper = mock.MagicMock(side_effect=y_exc.SLAValidationError)
+        runner.worker_helper = mock.Mock(side_effect=y_exc.SLAValidationError)
 
         self.assertFalse(runner._worker_run_once('sequence 1'))
 
     def test__worker_run_once_non_assertion_error_none(self):
         runner = SearchRunner({})
-        runner.worker_helper = mock.MagicMock(side_effect=RuntimeError)
+        runner.worker_helper = mock.Mock(side_effect=RuntimeError)
 
         self.assertTrue(runner._worker_run_once('sequence 1'))
 
     def test__worker_run_once_non_assertion_error(self):
         runner = SearchRunner({})
         runner.sla_action = 'monitor'
-        runner.worker_helper = mock.MagicMock(side_effect=RuntimeError)
+        runner.worker_helper = mock.Mock(side_effect=RuntimeError)
 
         self.assertFalse(runner._worker_run_once('sequence 1'))
 
     def test__worker_run(self):
-        cls = mock.MagicMock()
         scenario_cfg = {
             'runner': {'interval': 0, 'timeout': 1},
         }
 
         runner = SearchRunner({})
-        runner._worker_run_once = mock.MagicMock(side_effect=[0, 0, 1])
+        runner._worker_run_once = mock.Mock(side_effect=[0, 0, 1])
 
-        runner._worker_run(cls, 'my_method', scenario_cfg, {})
+        runner._worker_run(mock.Mock(), 'my_method', scenario_cfg, {})
 
     def test__worker_run_immediate_stop(self):
-        cls = mock.MagicMock()
         scenario_cfg = {
             'runner': {
                 'run_step': '',
@@ -189,15 +179,14 @@ class TestSearchRunner(unittest.TestCase):
         }
 
         runner = SearchRunner({})
-        runner._worker_run(cls, 'my_method', scenario_cfg, {})
+        runner._worker_run(mock.Mock(), 'my_method', scenario_cfg, {})
 
     @mock.patch('yardstick.benchmark.runners.search.multiprocessing')
     def test__run_benchmark(self, mock_multi_process):
-        cls = mock.MagicMock()
         scenario_cfg = {
             'runner': {},
         }
 
         runner = SearchRunner({})
-        runner._run_benchmark(cls, 'my_method', scenario_cfg, {})
+        runner._run_benchmark(mock.Mock(), 'my_method', scenario_cfg, {})
         mock_multi_process.Process.assert_called_once()
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index 419605b26..a606543e5 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -12,31 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
-
-from __future__ import absolute_import
-try:
-    from unittest import mock
-except ImportError:
-    import mock
+import mock
 import unittest
+import subprocess
 
+import yardstick.ssh as ssh
 from yardstick.benchmark.scenarios.networking import vsperf
+from yardstick import exceptions as y_exc
 
 
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
 class VsperfTestCase(unittest.TestCase):
 
     def setUp(self):
-        self.ctx = {
+        self.context_cfg = {
             "host": {
                 "ip": "10.229.47.137",
                 "user": "ubuntu",
                 "password": "ubuntu",
             },
         }
-        self.args = {
+        self.scenario_cfg = {
             'options': {
                 'testname': 'p2p_rfc2544_continuous',
                 'traffic_type': 'continuous',
@@ -57,70 +52,154 @@ class VsperfTestCase(unittest.TestCase):
             }
         }
 
-    def test_vsperf_setup(self, mock_ssh, mock_subprocess):
-        p = vsperf.Vsperf(self.args, self.ctx)
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_subprocess.call().execute.return_value = None
+        self._mock_SSH = mock.patch.object(ssh, 'SSH')
+        self.mock_SSH = self._mock_SSH.start()
+        self.mock_SSH.from_node().execute.return_value = (0, '', '')
+
+        self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
+        self.mock_subprocess_call = self._mock_subprocess_call.start()
+        self.mock_subprocess_call.return_value = None
+
+        self.addCleanup(self._stop_mock)
+
+        self.scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+
+    def _stop_mock(self):
+        self._mock_SSH.stop()
+        self._mock_subprocess_call.stop()
+
+    def test_setup(self):
+        self.scenario.setup()
+        self.assertIsNotNone(self.scenario.client)
+        self.assertTrue(self.scenario.setup_done)
+
+    def test_setup_tg_port_not_set(self):
+        del self.scenario_cfg['options']['trafficgen_port1']
+        del self.scenario_cfg['options']['trafficgen_port2']
+        scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+        scenario.setup()
+
+        self.mock_subprocess_call.assert_called_once_with(
+            'setup_yardstick.sh setup', shell=True)
+        self.assertIsNone(scenario.tg_port1)
+        self.assertIsNone(scenario.tg_port2)
+        self.assertIsNotNone(scenario.client)
+        self.assertTrue(scenario.setup_done)
+
+    def test_setup_no_setup_script(self):
+        del self.scenario_cfg['options']['setup_script']
+        scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+        scenario.setup()
+
+        self.mock_subprocess_call.assert_has_calls(
+            (mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth1"',
+                       shell=True),
+             mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth3"',
+                       shell=True)))
+        self.assertEqual(2, self.mock_subprocess_call.call_count)
+        self.assertIsNone(scenario.setup_script)
+        self.assertIsNotNone(scenario.client)
+        self.assertTrue(scenario.setup_done)
+
+    def test_run_ok(self):
+        self.scenario.setup()
+
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
 
-        p.setup()
-        self.assertIsNotNone(p.client)
-        self.assertTrue(p.setup_done)
+        result = {}
+        self.scenario.run(result)
 
-    def test_vsperf_teardown(self, mock_ssh, mock_subprocess):
-        p = vsperf.Vsperf(self.args, self.ctx)
+        self.assertEqual(result['throughput_rx_fps'], '14797660.000')
 
-        # setup() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_subprocess.call().execute.return_value = None
+    def test_run_ok_setup_not_done(self):
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
 
-        p.setup()
-        self.assertIsNotNone(p.client)
-        self.assertTrue(p.setup_done)
+        result = {}
+        self.scenario.run(result)
 
-        p.teardown()
-        self.assertFalse(p.setup_done)
+        self.assertTrue(self.scenario.setup_done)
+        self.assertEqual(result['throughput_rx_fps'], '14797660.000')
 
-    def test_vsperf_run_ok(self, mock_ssh, mock_subprocess):
-        p = vsperf.Vsperf(self.args, self.ctx)
+    def test_run_failed_vsperf_execution(self):
+        self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+                                                         (1, '', ''))
 
-        # setup() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_subprocess.call().execute.return_value = None
+        with self.assertRaises(RuntimeError):
+            self.scenario.run({})
+        self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
 
-        # run() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_ssh.SSH.from_node().execute.return_value = (
-            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+    def test_run_failed_csv_report(self):
+        self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+                                                         (0, '', ''),
+                                                         (1, '', ''))
 
-        result = {}
-        p.run(result)
+        with self.assertRaises(RuntimeError):
+            self.scenario.run({})
+        self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
 
-        self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+    def test_run_sla_fail(self):
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n123456.000\r\n', '')
 
-    def test_vsperf_run_falied_vsperf_execution(self, mock_ssh,
-                                                mock_subprocess):
-        p = vsperf.Vsperf(self.args, self.ctx)
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
 
-        # setup() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_subprocess.call().execute.return_value = None
+        self.assertTrue('VSPERF_throughput_rx_fps(123456.000000) < '
+                        'SLA_throughput_rx_fps(500000.000000)'
+                        in str(raised.exception))
 
-        # run() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+    def test_run_sla_fail_metric_not_collected(self):
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'nonexisting_metric\r\n14797660.000\r\n', '')
 
-        result = {}
-        self.assertRaises(RuntimeError, p.run, result)
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
 
-    def test_vsperf_run_falied_csv_report(self, mock_ssh, mock_subprocess):
-        p = vsperf.Vsperf(self.args, self.ctx)
+        self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+                        in str(raised.exception))
 
-        # setup() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_subprocess.call().execute.return_value = None
+    def test_run_sla_fail_metric_not_defined_in_sla(self):
+        del self.scenario_cfg['sla']['throughput_rx_fps']
+        scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+        scenario.setup()
 
-        # run() specific mocks
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-        mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
 
-        result = {}
-        self.assertRaises(RuntimeError, p.run, result)
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            scenario.run({})
+        self.assertTrue('throughput_rx_fps is not defined in SLA'
+                        in str(raised.exception))
+
+    def test_teardown(self):
+        self.scenario.setup()
+        self.assertIsNotNone(self.scenario.client)
+        self.assertTrue(self.scenario.setup_done)
+
+        self.scenario.teardown()
+        self.assertFalse(self.scenario.setup_done)
+
+    def test_teardown_tg_port_not_set(self):
+        del self.scenario_cfg['options']['trafficgen_port1']
+        del self.scenario_cfg['options']['trafficgen_port2']
+        scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+        scenario.teardown()
+
+        self.mock_subprocess_call.assert_called_once_with(
+            'setup_yardstick.sh teardown', shell=True)
+        self.assertFalse(scenario.setup_done)
+
+    def test_teardown_no_setup_script(self):
+        del self.scenario_cfg['options']['setup_script']
+        scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+        scenario.teardown()
+
+        self.mock_subprocess_call.assert_has_calls(
+            (mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth1"',
+                       shell=True),
+             mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth3"',
+                       shell=True)))
+        self.assertEqual(2, self.mock_subprocess_call.call_count)
+        self.assertFalse(scenario.setup_done)
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index 1d2278e21..c05d2ced2 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -19,6 +19,7 @@ import mock
 import unittest
 
 from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+from yardstick import exceptions as y_exc
 
 
 class VsperfDPDKTestCase(unittest.TestCase):
@@ -211,3 +212,47 @@ class VsperfDPDKTestCase(unittest.TestCase):
 
         result = {}
         self.assertRaises(RuntimeError, self.scenario.run, result)
+
+    @mock.patch.object(time, 'sleep')
+    @mock.patch.object(subprocess, 'check_output')
+    def test_vsperf_run_sla_fail(self, *args):
+        self.scenario.setup()
+
+        self.mock_ssh.SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n123456.000\r\n', '')
+
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
+
+        self.assertIn('VSPERF_throughput_rx_fps(123456.000000) < '
+                      'SLA_throughput_rx_fps(500000.000000)',
+                      str(raised.exception))
+
+    @mock.patch.object(time, 'sleep')
+    @mock.patch.object(subprocess, 'check_output')
+    def test_vsperf_run_sla_fail_metric_not_collected(self, *args):
+        self.scenario.setup()
+
+        self.mock_ssh.SSH.from_node().execute.return_value = (
+            0, 'nonexisting_metric\r\n123456.000\r\n', '')
+
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
+
+        self.assertIn('throughput_rx_fps was not collected by VSPERF',
+                      str(raised.exception))
+
+    @mock.patch.object(time, 'sleep')
+    @mock.patch.object(subprocess, 'check_output')
+    def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
+        del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
+        self.scenario.setup()
+
+        self.mock_ssh.SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
+
+        self.assertIn('throughput_rx_fps is not defined in SLA',
+                      str(raised.exception))
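Note on the refactoring pattern used above: the vsperf test class drops its class-level @mock.patch decorators in favour of patchers that are started in setUp() and stopped through addCleanup(). A minimal, generic sketch of that unittest.mock idiom follows; the ExampleScenario class and the 'setup.sh setup' command are illustrative stand-ins, not part of Yardstick.

import subprocess
import unittest
from unittest import mock


class ExampleScenario(object):
    """Stand-in for a scenario object that shells out during setup."""

    def setup(self):
        subprocess.call('setup.sh setup', shell=True)
        self.setup_done = True


class ExampleScenarioTestCase(unittest.TestCase):

    def setUp(self):
        # Start the patcher here instead of decorating the class, so every
        # test method sees the same mock through self.mock_call.
        self._mock_call = mock.patch.object(subprocess, 'call')
        self.mock_call = self._mock_call.start()
        self.mock_call.return_value = 0

        # addCleanup() guarantees the patcher is stopped even when a test
        # fails, so the mock never leaks into other test cases.
        self.addCleanup(self._mock_call.stop)

        self.scenario = ExampleScenario()

    def test_setup(self):
        self.scenario.setup()
        self.mock_call.assert_called_once_with('setup.sh setup', shell=True)
        self.assertTrue(self.scenario.setup_done)

The same idea appears in the new VsperfTestCase.setUp() above, where ssh.SSH and subprocess.call are each patched once and reused by every test method, and _stop_mock() undoes both patchers via addCleanup().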