author     Cédric Ollivier <cedric.ollivier@orange.com>   2018-01-21 21:11:48 +0100
committer  Cédric Ollivier <cedric.ollivier@orange.com>   2018-01-21 21:13:18 +0100
commit     a01ff48e8c5c64cd179c3f407bf6d4363a8f1557 (patch)
tree       ef31247ed2e4344461e7524d156f74cb27f1f46e /functest
parent     2030e14451a072844e750318de0d5efc47d4500c (diff)
Partially rewrite test_run_tests.py
It mainly fixes pylint issues and leverages decorators.
It should be noted that run_tests.py is not fully covered (see
xtesting requirements).
Change-Id: I114b67c3c5bfe61d72d004829af513e2014ad8b8
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
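The "leverages decorators" above refers to replacing nested `with mock.patch(...)` context managers by stacked `@mock.patch` decorators in the unit tests, which keeps the test bodies flat and avoids several pylint complaints. Below is a minimal, self-contained sketch of that pattern; the patched target `os.path.isfile` and the class name are only illustrations, not part of this change.

import os.path
import unittest

try:
    import mock  # standalone mock library used by functest (Python 2)
except ImportError:
    from unittest import mock  # Python 3 fallback


class DecoratorStyleTesting(unittest.TestCase):
    """Sketch of the decorator-based mocking style adopted by this patch."""

    # A stacked @mock.patch decorator replaces a nested
    # `with mock.patch(...)` block; the mock it creates is injected
    # as a positional argument of the test method.
    @mock.patch('os.path.isfile', return_value=False)
    def test_isfile_mocked(self, *args):
        self.assertFalse(os.path.isfile('/no/such/file'))
        args[0].assert_called_once_with('/no/such/file')


if __name__ == "__main__":
    unittest.main(verbosity=2)

The mock created by each decorator is handed to the test method as a positional argument, which is why the rewritten tests collect them with `*args`.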
Diffstat (limited to 'functest')
-rw-r--r--  functest/ci/run_tests.py                   26
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py  147
2 files changed, 85 insertions, 88 deletions
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 6748484d..7cba0efb 100644
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -14,7 +14,6 @@
 """
 
 import argparse
-import enum
 import importlib
 import logging
 import logging.config
@@ -24,6 +23,7 @@ import sys
 import textwrap
 
 import pkg_resources
+import enum
 import prettytable
 
 import functest.ci.tier_builder as tb
@@ -93,7 +93,7 @@ class Runner(object):
         self.overall_result = Result.EX_OK
         self.clean_flag = True
         self.report_flag = False
-        self._tiers = tb.TierBuilder(
+        self.tiers = tb.TierBuilder(
             CONST.__getattribute__('INSTALLER_TYPE'),
             CONST.__getattribute__('DEPLOY_SCENARIO'),
             pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
@@ -110,7 +110,7 @@ class Runner(object):
 
     @staticmethod
     def get_run_dict(testname):
-        """Obtain the the 'run' block of the testcase from testcases.yaml"""
+        """Obtain the 'run' block of the testcase from testcases.yaml"""
         try:
             dic_testcase = ft_utils.get_dict_by_test(testname)
             if not dic_testcase:
@@ -189,7 +189,7 @@ class Runner(object):
             header_style='upper', padding_width=5,
             field_names=['tiers', 'order', 'CI Loop', 'description',
                          'testcases'])
-        for tier in self._tiers.get_tiers():
+        for tier in self.tiers.get_tiers():
             if (tier.get_tests() and
                     re.search(CONST.__getattribute__('CI_LOOP'),
                               tier.get_ci_loop()) is not None):
@@ -213,11 +213,11 @@ class Runner(object):
             if 'test' in kwargs:
                 self.source_rc_file()
                 LOGGER.debug("Test args: %s", kwargs['test'])
-                if self._tiers.get_tier(kwargs['test']):
-                    self.run_tier(self._tiers.get_tier(kwargs['test']))
-                elif self._tiers.get_test(kwargs['test']):
+                if self.tiers.get_tier(kwargs['test']):
+                    self.run_tier(self.tiers.get_tier(kwargs['test']))
+                elif self.tiers.get_test(kwargs['test']):
                     result = self.run_test(
-                        self._tiers.get_test(kwargs['test']))
+                        self.tiers.get_test(kwargs['test']))
                     if result != testcase.TestCase.EX_OK:
                         LOGGER.error("The test case '%s' failed.",
                                      kwargs['test'])
@@ -230,7 +230,7 @@ class Runner(object):
                                  kwargs['test'],
                                  CONST.__getattribute__('DEPLOY_SCENARIO'))
                    LOGGER.debug("Available tiers are:\n\n%s",
-                                self._tiers)
+                                self.tiers)
                    return Result.EX_ERROR
            else:
                self.run_all()
@@ -239,8 +239,8 @@ class Runner(object):
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Failures when running testcase(s)")
            self.overall_result = Result.EX_ERROR
-        if not self._tiers.get_test(kwargs['test']):
-            self.summary(self._tiers.get_tier(kwargs['test']))
+        if not self.tiers.get_test(kwargs['test']):
+            self.summary(self.tiers.get_tier(kwargs['test']))
        LOGGER.info("Execution exit value: %s", self.overall_result)
        return self.overall_result
 
@@ -257,7 +257,7 @@ class Runner(object):
            header_style='upper', padding_width=5,
            field_names=['test case', 'project', 'tier',
                         'duration', 'result'])
-        tiers = [tier] if tier else self._tiers.get_tiers()
+        tiers = [tier] if tier else self.tiers.get_tiers()
        for each_tier in tiers:
            for test in each_tier.get_tests():
                try:
@@ -270,7 +270,7 @@
                        ) == test_case.EX_OK) else 'FAIL'
                    msg.add_row(
                        [test_case.case_name, test_case.project_name,
-                         self._tiers.get_tier_name(test_case.case_name),
+                         self.tiers.get_tier_name(test_case.case_name),
                         test_case.get_duration(), result])
            for test in each_tier.get_skipped_test():
                msg.add_row([test.get_name(), test.get_project(),
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index 0db5f283..9f48891e 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -5,6 +5,8 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 
+# pylint: disable=missing-docstring
+
 import logging
 import unittest
 
@@ -17,7 +19,7 @@ from functest.core.testcase import TestCase
 
 class FakeModule(TestCase):
 
-    def run(self):
+    def run(self, **kwargs):
         return TestCase.EX_OK
 
 
@@ -52,74 +54,65 @@ class RunTestsTesting(unittest.TestCase):
         self.run_tests_parser = run_tests.RunTestsParser()
 
-    @mock.patch('functest.ci.run_tests.LOGGER.error')
-    def test_source_rc_file_missing_file(self, mock_logger_error):
-        with mock.patch('functest.ci.run_tests.os.path.isfile',
-                        return_value=False), \
-                self.assertRaises(Exception):
+    @mock.patch('functest.ci.run_tests.os.path.isfile', return_value=False)
+    def test_source_rc_file_ko(self, *args):
+        with self.assertRaises(Exception):
             self.runner.source_rc_file()
+        args[0].assert_called_once_with(
+            '/home/opnfv/functest/conf/openstack.creds')
 
-    @mock.patch('functest.ci.run_tests.LOGGER.debug')
     @mock.patch('functest.ci.run_tests.os.path.isfile', return_value=True)
     def test_source_rc_file_default(self, *args):
         with mock.patch('functest.ci.run_tests.os_utils.source_credentials',
                         return_value=self.creds):
             self.runner.source_rc_file()
+        args[0].assert_called_once_with(
+            '/home/opnfv/functest/conf/openstack.creds')
 
-    def test_get_run_dict_if_defined_default(self):
-        mock_obj = mock.Mock()
-        with mock.patch('functest.ci.run_tests.'
-                        'ft_utils.get_dict_by_test',
-                        return_value={'run': mock_obj}):
-            self.assertEqual(self.runner.get_run_dict('test_name'),
-                             mock_obj)
+    @mock.patch('functest.ci.run_tests.ft_utils.get_dict_by_test')
+    def test_get_run_dict(self, *args):
+        retval = {'run': mock.Mock()}
+        args[0].return_value = retval
+        self.assertEqual(self.runner.get_run_dict('test_name'), retval['run'])
+        args[0].assert_called_once_with('test_name')
 
     @mock.patch('functest.ci.run_tests.LOGGER.error')
-    def test_get_run_dict_if_defined_missing_config_option(self,
-                                                           mock_logger_error):
-        with mock.patch('functest.ci.run_tests.'
-                        'ft_utils.get_dict_by_test',
-                        return_value=None):
-            testname = 'test_name'
-            self.assertEqual(self.runner.get_run_dict(testname),
-                             None)
-            mock_logger_error.assert_called_once_with(
-                "Cannot get %s's config options", testname)
-
-        with mock.patch('functest.ci.run_tests.'
-                        'ft_utils.get_dict_by_test',
-                        return_value={}):
-            testname = 'test_name'
-            self.assertEqual(self.runner.get_run_dict(testname),
-                             None)
+    @mock.patch('functest.ci.run_tests.ft_utils.get_dict_by_test',
+                return_value=None)
+    def test_get_run_dict_config_ko(self, *args):
+        testname = 'test_name'
+        self.assertEqual(self.runner.get_run_dict(testname), None)
+        args[0].return_value = {}
+        self.assertEqual(self.runner.get_run_dict(testname), None)
+        calls = [mock.call(testname), mock.call(testname)]
+        args[0].assert_has_calls(calls)
+        calls = [mock.call("Cannot get %s's config options", testname),
+                 mock.call("Cannot get %s's config options", testname)]
+        args[1].assert_has_calls(calls)
 
     @mock.patch('functest.ci.run_tests.LOGGER.exception')
-    def test_get_run_dict_if_defined_exception(self,
-                                               mock_logger_except):
-        with mock.patch('functest.ci.run_tests.'
-                        'ft_utils.get_dict_by_test',
-                        side_effect=Exception):
-            testname = 'test_name'
-            self.assertEqual(self.runner.get_run_dict(testname),
-                             None)
-            mock_logger_except.assert_called_once_with(
-                "Cannot get %s's config options", testname)
-
-    def test_run_tests_import_test_class_exception(self):
+    @mock.patch('functest.ci.run_tests.ft_utils.get_dict_by_test',
+                side_effect=Exception)
+    def test_get_run_dict_exception(self, *args):
+        testname = 'test_name'
+        self.assertEqual(self.runner.get_run_dict(testname), None)
+        args[1].assert_called_once_with(
+            "Cannot get %s's config options", testname)
+
+    @mock.patch('functest.ci.run_tests.Runner.get_run_dict',
+                return_value=None)
+    def test_run_tests_import_exception(self, *args):
         mock_test = mock.Mock()
-        args = {'get_name.return_value': 'test_name',
-                'needs_clean.return_value': False}
-        mock_test.configure_mock(**args)
-        with mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
-                mock.patch('functest.ci.run_tests.Runner.get_run_dict',
-                           return_value=None), \
-                self.assertRaises(Exception) as context:
+        kwargs = {'get_name.return_value': 'test_name',
+                  'needs_clean.return_value': False}
+        mock_test.configure_mock(**kwargs)
+        with self.assertRaises(Exception) as context:
             self.runner.run_test(mock_test)
+        args[0].assert_called_with('test_name')
         msg = "Cannot import the class for the test case."
         self.assertTrue(msg in str(context.exception))
 
-    @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
     @mock.patch('importlib.import_module', name="module",
                 return_value=mock.Mock(test_class=mock.Mock(
                     side_effect=FakeModule)))
@@ -135,6 +128,8 @@ class RunTestsTesting(unittest.TestCase):
                         return_value=test_run_dict):
             self.runner.clean_flag = True
             self.runner.run_test(mock_test)
+        args[0].assert_called_with('test_name')
+        args[1].assert_called_with('test_module')
         self.assertEqual(self.runner.overall_result,
                          run_tests.Result.EX_OK)
 
@@ -175,8 +170,8 @@ class RunTestsTesting(unittest.TestCase):
         kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
         args = {'get_tier.return_value': False,
                 'get_test.return_value': False}
-        self.runner._tiers = mock.Mock()
-        self.runner._tiers.configure_mock(**args)
+        self.runner.tiers = mock.Mock()
+        self.runner.tiers.configure_mock(**args)
         self.assertEqual(self.runner.main(**kwargs),
                          run_tests.Result.EX_ERROR)
         mock_methods[1].assert_called_once_with()
@@ -195,8 +190,8 @@ class RunTestsTesting(unittest.TestCase):
         kwargs = {'test': 'tier_name', 'noclean': True, 'report': True}
         args = {'get_tier.return_value': mock_tier,
                 'get_test.return_value': None}
-        self.runner._tiers = mock.Mock()
-        self.runner._tiers.configure_mock(**args)
+        self.runner.tiers = mock.Mock()
+        self.runner.tiers.configure_mock(**args)
         self.assertEqual(self.runner.main(**kwargs),
                          run_tests.Result.EX_OK)
         mock_methods[1].assert_called()
@@ -208,8 +203,8 @@ class RunTestsTesting(unittest.TestCase):
         kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
         args = {'get_tier.return_value': None,
                 'get_test.return_value': 'test_name'}
-        self.runner._tiers = mock.Mock()
-        self.runner._tiers.configure_mock(**args)
+        self.runner.tiers = mock.Mock()
+        self.runner.tiers.configure_mock(**args)
         self.assertEqual(self.runner.main(**kwargs),
                          run_tests.Result.EX_OK)
         mock_methods[0].assert_called_once_with('test_name')
@@ -217,26 +212,28 @@
     @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
     @mock.patch('functest.ci.run_tests.Runner.run_all')
     @mock.patch('functest.ci.run_tests.Runner.summary')
-    def test_main_all_tier(self, *mock_methods):
-        kwargs = {'test': 'all', 'noclean': True, 'report': True}
-        args = {'get_tier.return_value': None,
-                'get_test.return_value': None}
-        self.runner._tiers = mock.Mock()
-        self.runner._tiers.configure_mock(**args)
-        self.assertEqual(self.runner.main(**kwargs),
-                         run_tests.Result.EX_OK)
-        mock_methods[1].assert_called_once_with()
+    def test_main_all_tier(self, *args):
+        kwargs = {'get_tier.return_value': None,
+                  'get_test.return_value': None}
+        self.runner.tiers = mock.Mock()
+        self.runner.tiers.configure_mock(**kwargs)
+        self.assertEqual(
+            self.runner.main(test='all', noclean=True, report=True),
+            run_tests.Result.EX_OK)
+        args[0].assert_called_once_with(None)
+        args[1].assert_called_once_with()
+        args[2].assert_called_once_with()
 
     @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
-    @mock.patch('functest.ci.run_tests.Runner.summary')
-    def test_main_any_tier_test_ko(self, *mock_methods):
-        kwargs = {'test': 'any', 'noclean': True, 'report': True}
-        args = {'get_tier.return_value': None,
-                'get_test.return_value': None}
-        self.runner._tiers = mock.Mock()
-        self.runner._tiers.configure_mock(**args)
-        self.assertEqual(self.runner.main(**kwargs),
-                         run_tests.Result.EX_ERROR)
+    def test_main_any_tier_test_ko(self, *args):
+        kwargs = {'get_tier.return_value': None,
+                  'get_test.return_value': None}
+        self.runner.tiers = mock.Mock()
+        self.runner.tiers.configure_mock(**kwargs)
+        self.assertEqual(
+            self.runner.main(test='any', noclean=True, report=True),
+            run_tests.Result.EX_ERROR)
+        args[0].assert_called_once_with()
 
 
 if __name__ == "__main__":
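When reading the rewritten tests, note that stacked `@mock.patch` decorators are applied bottom-up: the decorator closest to the method provides `args[0]`, the next one `args[1]`, and so on, which is how `test_main_all_tier` can assert on `args[0]`, `args[1]` and `args[2]` separately. A standalone sketch of that ordering follows; the patched stdlib targets are arbitrary examples, not taken from the patch.

import unittest

try:
    import mock  # standalone mock package (Python 2), as used by functest
except ImportError:
    from unittest import mock  # Python 3 fallback


class PatchOrderTesting(unittest.TestCase):

    @mock.patch('os.path.isdir', return_value=True)    # outermost -> args[1]
    @mock.patch('os.path.isfile', return_value=False)  # innermost -> args[0]
    def test_patch_order(self, *args):
        # The decorator nearest the method is applied first, so its mock
        # is the first positional argument.
        self.assertFalse(args[0].return_value)  # mocked os.path.isfile
        self.assertTrue(args[1].return_value)   # mocked os.path.isdir


if __name__ == "__main__":
    unittest.main(verbosity=2)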