Diffstat (limited to 'tests/unit/benchmark/scenarios/parser')
-rw-r--r-- | tests/unit/benchmark/scenarios/parser/test_parser.py | 67
1 file changed, 42 insertions, 25 deletions
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/tests/unit/benchmark/scenarios/parser/test_parser.py
index 59b98a092..ee2bbc07d 100644
--- a/tests/unit/benchmark/scenarios/parser/test_parser.py
+++ b/tests/unit/benchmark/scenarios/parser/test_parser.py
@@ -9,50 +9,67 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-# Unittest for yardstick.benchmark.scenarios.parser.Parser
-
-from __future__ import absolute_import
+import subprocess
 import unittest
-
 import mock
+
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.parser import parser
 
 
-@mock.patch('yardstick.benchmark.scenarios.parser.parser.subprocess')
 class ParserTestCase(unittest.TestCase):
 
     def setUp(self):
-        pass
-
-    def test_parser_successful_setup(self, mock_subprocess):
-
-        p = parser.Parser({}, {})
-        mock_subprocess.call().return_value = 0
-        p.setup()
-        self.assertEqual(p.setup_done, True)
-
-    def test_parser_successful(self, mock_subprocess):
         args = {
             'options': {'yangfile': '/root/yardstick/samples/yang.yaml',
                         'toscafile': '/root/yardstick/samples/tosca.yaml'},
         }
-        p = parser.Parser(args, {})
+        self.scenario = parser.Parser(scenario_cfg=args, context_cfg={})
+
+        self._mock_popen = mock.patch.object(subprocess, 'Popen')
+        self.mock_popen = self._mock_popen.start()
+        self._mock_call = mock.patch.object(subprocess, 'call')
+        self.mock_call = self._mock_call.start()
+
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_popen.stop()
+        self._mock_call.stop()
+
+    def test_setup_successful(self):
+
+        self.mock_call.return_value = 0
+        self.scenario.setup()
+        self.assertTrue(self.scenario.setup_done)
+
+    def test_run_successful(self):
         result = {}
-        mock_subprocess.call().return_value = 0
-        sample_output = '{"yangtotosca": "success"}'
 
-        p.run(result)
-        expected_result = jsonutils.loads(sample_output)
+        self.mock_popen().returncode = 0
+
+        expected_result = jsonutils.loads('{"yangtotosca": "success"}')
+
+        self.scenario.run(result)
+
+        self.assertEqual(result, expected_result)
+
+    def test_run_fail(self):
+        result = {}
+
+        self.mock_popen().returncode = 1
+        expected_result = jsonutils.loads('{"yangtotosca": "fail"}')
+
+        self.scenario.run(result)
+        self.assertEqual(result, expected_result)
 
-    def test_parser_teardown_successful(self, mock_subprocess):
+    def test_teardown_successful(self):
 
-        p = parser.Parser({}, {})
-        mock_subprocess.call().return_value = 0
-        p.teardown()
-        self.assertEqual(p.teardown_done, True)
+        self.mock_call.return_value = 0
+        self.scenario.teardown()
+        self.assertTrue(self.scenario.teardown_done)
 
 
 def main():
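The new setUp replaces the class-level @mock.patch decorator with per-test patches: subprocess.Popen and subprocess.call are patched via mock.patch.object, the started mock objects are kept on the test case, and addCleanup guarantees the patches are stopped even when setUp or a test fails. A minimal standalone sketch of that pattern follows; the FakeScenario class is hypothetical and stands in for parser.Parser purely for illustration, and it assumes the third-party mock package that the diff itself imports.

import subprocess
import unittest

import mock


class FakeScenario(object):
    # Hypothetical stand-in for parser.Parser, used only to show the pattern.

    def setup(self):
        # Return the (mocked) exit code of an external command.
        return subprocess.call(['true'])


class FakeScenarioTestCase(unittest.TestCase):

    def setUp(self):
        self.scenario = FakeScenario()

        # Patch subprocess.call for the duration of each test and keep a
        # handle to the started mock, mirroring the setUp in the diff above.
        self._mock_call = mock.patch.object(subprocess, 'call')
        self.mock_call = self._mock_call.start()

        # addCleanup runs even if setUp or the test body raises, so the
        # patch is always undone.
        self.addCleanup(self._mock_call.stop)

    def test_setup_returns_zero(self):
        self.mock_call.return_value = 0
        self.assertEqual(self.scenario.setup(), 0)


if __name__ == '__main__':
    unittest.main()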