Diffstat (limited to 'tests/unit/benchmark/scenarios')
-rw-r--r-- | tests/unit/benchmark/scenarios/availability/test_util.py | 36 |
-rw-r--r-- | tests/unit/benchmark/scenarios/lib/test_create_volume.py | 83 |
2 files changed, 88 insertions, 31 deletions
diff --git a/tests/unit/benchmark/scenarios/availability/test_util.py b/tests/unit/benchmark/scenarios/availability/test_util.py
index 0974f385a..03dc70e2a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_util.py
+++ b/tests/unit/benchmark/scenarios/availability/test_util.py
@@ -16,36 +16,42 @@ import unittest
 
 from yardstick.benchmark.scenarios.availability import util
 
-@mock.patch('yardstick.benchmark.scenarios.availability.util.subprocess')
 class ExecuteShellTestCase(unittest.TestCase):
 
     def setUp(self):
         self.param_config = {'serviceName': '@serviceName', 'value': 1}
         self.intermediate_variables = {'@serviceName': 'nova-api'}
         self.std_output = '| id | 1 |'
-        self.cmd_config = {'cmd':'ls','param':'-a'}
+        self.cmd_config = {'cmd':'ls', 'param':'-a'}
 
-    def test_util_build_command_shell(self,mock_subprocess):
+        self._mock_subprocess = mock.patch.object(util, 'subprocess')
+        self.mock_subprocess = self._mock_subprocess.start()
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_subprocess.stop()
+
+    def test_util_build_command_shell(self):
         result = util.build_shell_command(self.param_config, True,
                                           self.intermediate_variables)
-        self.assertEqual("nova-api" in result, True)
+        self.assertIn("nova-api", result)
 
-    def test_read_stdout_item(self,mock_subprocess):
-        result = util.read_stdout_item(self.std_output,'id')
-        self.assertEquals('1',result)
+    def test_read_stdout_item(self):
+        result = util.read_stdout_item(self.std_output, 'id')
+        self.assertEquals('1', result)
 
-    def test_buildshellparams(self,mock_subprocess):
-        result = util.buildshellparams(self.cmd_config,True)
+    def test_buildshellparams(self):
+        result = util.buildshellparams(self.cmd_config, True)
         self.assertEquals('/bin/bash -s {0} {1}', result)
 
-    def test__fun_execute_shell_command_successful(self, mock_subprocess):
+    def test__fun_execute_shell_command_successful(self):
         cmd = "env"
-        mock_subprocess.check_output.return_value = (0, 'unittest')
-        exitcode, output = util.execute_shell_command(cmd)
+        self.mock_subprocess.check_output.return_value = (0, 'unittest')
+        exitcode, _ = util.execute_shell_command(cmd)
         self.assertEqual(exitcode, 0)
 
-    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+    def test__fun_execute_shell_command_fail_cmd_exception(self):
         cmd = "env"
-        mock_subprocess.check_output.side_effect = RuntimeError
-        exitcode, output = util.execute_shell_command(cmd)
+        self.mock_subprocess.check_output.side_effect = RuntimeError
+        exitcode, _ = util.execute_shell_command(cmd)
         self.assertEqual(exitcode, -1)
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_volume.py b/tests/unit/benchmark/scenarios/lib/test_create_volume.py
index fc633139e..ef2c0ccaf 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_volume.py
+++ b/tests/unit/benchmark/scenarios/lib/test_create_volume.py
@@ -9,28 +9,79 @@ import unittest
 
 import mock
 
-from yardstick.benchmark.scenarios.lib.create_volume import CreateVolume
+import yardstick.benchmark.scenarios.lib.create_volume
 
 
 class CreateVolumeTestCase(unittest.TestCase):
 
+    def setUp(self):
+        self._mock_cinder_client = mock.patch(
+            'yardstick.common.openstack_utils.get_cinder_client')
+        self.mock_cinder_client = self._mock_cinder_client.start()
+        self._mock_glance_client = mock.patch(
+            'yardstick.common.openstack_utils.get_glance_client')
+        self.mock_glance_client = self._mock_glance_client.start()
+        self.addCleanup(self._stop_mock)
+
+        self.scenario_cfg = {
+            "options" :
+            {
+                'volume_name': 'yardstick_test_volume_01',
+                'size': '256',
+                'image': 'cirros-0.3.5'
+            }
+        }
+
+        self.scenario = (
+            yardstick.benchmark.scenarios.lib.create_volume.CreateVolume(
+                scenario_cfg=self.scenario_cfg,
+                context_cfg={}))
+
+    def _stop_mock(self):
+        self._mock_cinder_client.stop()
+        self._mock_glance_client.stop()
+
+    def test_init(self):
+        self.mock_cinder_client.return_value = "All volumes are equal"
+        self.mock_glance_client.return_value = "Images are more equal"
+
+        expected_vol_name = self.scenario_cfg["options"]["volume_name"]
+        expected_vol_size = self.scenario_cfg["options"]["size"]
+        expected_im_name = self.scenario_cfg["options"]["image"]
+        expected_im_id = None
+
+        scenario = (
+            yardstick.benchmark.scenarios.lib.create_volume.CreateVolume(
+                scenario_cfg=self.scenario_cfg,
+                context_cfg={}))
+
+        self.assertEqual(expected_vol_name, scenario.volume_name)
+        self.assertEqual(expected_vol_size, scenario.volume_size)
+        self.assertEqual(expected_im_name, scenario.image_name)
+        self.assertEqual(expected_im_id, scenario.image_id)
+        self.assertEqual("All volumes are equal", scenario.cinder_client)
+        self.assertEqual("Images are more equal", scenario.glance_client)
+
+    def test_setup(self):
+        self.assertFalse(self.scenario.setup_done)
+        self.scenario.setup()
+        self.assertTrue(self.scenario.setup_done)
+
     @mock.patch('yardstick.common.openstack_utils.create_volume')
     @mock.patch('yardstick.common.openstack_utils.get_image_id')
-    @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
-    @mock.patch('yardstick.common.openstack_utils.get_glance_client')
-    def test_create_volume(self, mock_get_glance_client, mock_get_cinder_client, mock_image_id, mock_create_volume):
-        options = {
-            'volume_name': 'yardstick_test_volume_01',
-            'size': '256',
-            'image': 'cirros-0.3.5'
-        }
-        args = {"options": options}
-        obj = CreateVolume(args, {})
-        obj.run({})
-        self.assertTrue(mock_create_volume.called)
-        self.assertTrue(mock_image_id.called)
-        self.assertTrue(mock_get_glance_client.called)
-        self.assertTrue(mock_get_cinder_client.called)
+    def test_run(self, mock_image_id, mock_create_volume):
+        self.scenario.run()
+
+        mock_image_id.assert_called_once()
+        mock_create_volume.assert_called_once()
+
+    @mock.patch.object(
+        yardstick.benchmark.scenarios.lib.create_volume.CreateVolume, 'setup')
+    def test_run_no_setup(self, scenario_setup):
+        self.scenario.setup_done = False
+        self.scenario.run()
+        scenario_setup.assert_called_once()
+
 
 def main():
     unittest.main()
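
Both test modules apply the same refactoring pattern: class-level @mock.patch decorators are replaced by patches started in setUp() and stopped through addCleanup(), so individual test methods keep their normal signatures and no longer receive mock_* arguments. The snippet below is a minimal, self-contained sketch of that pattern, not part of the commit; the Timer class and its time.time dependency are hypothetical stand-ins for the Yardstick code under test, and it uses unittest.mock rather than the standalone mock package imported in the diff.

# Minimal sketch (assumption: not Yardstick code) of the mock-in-setUp pattern.
import time
import unittest
from unittest import mock


class Timer(object):
    """Hypothetical class whose time.time() dependency we want to stub."""

    def elapsed_since(self, start):
        return time.time() - start


class TimerTestCase(unittest.TestCase):

    def setUp(self):
        # Start the patch here instead of decorating the class; every test
        # reaches the mock through self.mock_time.
        self._mock_time = mock.patch.object(time, 'time')
        self.mock_time = self._mock_time.start()
        # addCleanup guarantees the patch is undone even if setUp or a test fails.
        self.addCleanup(self._mock_time.stop)

    def test_elapsed_since(self):
        self.mock_time.return_value = 10.0
        self.assertEqual(7.0, Timer().elapsed_since(3.0))


if __name__ == '__main__':
    unittest.main()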