author     Jose Lausuch <jose.lausuch@ericsson.com>  2017-05-19 08:59:27 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>     2017-05-19 08:59:27 +0000
commit     7050fa91fca9a66e29e43a46f266f8eddba0d64f (patch)
tree       e56c83edfe48c9d37cb2cb03708747d0bd6ddd06
parent     f5c8f9dd175e462bf362b5c301205a3376a0d82b (diff)
parent     4c1e2c2b35ef2474d26e0cd6ce3977d24dc93553 (diff)
Merge "Define Runner class"
-rwxr-xr-x  functest/ci/run_tests.py                   404
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py   171
2 files changed, 300 insertions(+), 275 deletions(-)
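
This merge replaces the module-level helpers and the GlobalVariables holder
in functest/ci/run_tests.py with a Runner class that keeps its state per
instance. As a quick orientation before the diff, here is a minimal sketch of
the resulting entry point; it assumes only what is visible in this patch (the
Runner API, the Result enum already used by the module, and kwargs mirroring
the RunTestsParser options):

    from functest.ci import run_tests

    runner = run_tests.Runner()
    # Run all tiers, keep cleanup enabled, do not push results to the DB.
    result = runner.main(test='all', noclean=False, report=False)
    print(result)        # a Result member, e.g. Result.EX_OK
    print(result.value)  # the integer exit code, as used by sys.exit()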
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 76760096b..f973b616c 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -64,210 +64,209 @@ class RunTestsParser(object):
return vars(self.parser.parse_args(argv))
-class GlobalVariables:
- EXECUTED_TEST_CASES = []
- OVERALL_RESULT = Result.EX_OK
- CLEAN_FLAG = True
- REPORT_FLAG = False
-
-
-def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
-
-
-def source_rc_file():
- rc_file = CONST.__getattribute__('openstack_creds')
- if not os.path.isfile(rc_file):
- raise Exception("RC file %s does not exist..." % rc_file)
- logger.debug("Sourcing the OpenStack RC file...")
- os_utils.source_credentials(rc_file)
- for key, value in os.environ.iteritems():
- if re.search("OS_", key):
- if key == 'OS_AUTH_URL':
- CONST.__setattr__('OS_AUTH_URL', value)
- elif key == 'OS_USERNAME':
- CONST.__setattr__('OS_USERNAME', value)
- elif key == 'OS_TENANT_NAME':
- CONST.__setattr__('OS_TENANT_NAME', value)
- elif key == 'OS_PASSWORD':
- CONST.__setattr__('OS_PASSWORD', value)
-
-
-def generate_os_snapshot():
- os_snapshot.main()
-
-
-def cleanup():
- os_clean.main()
-
-
-def get_run_dict(testname):
- try:
- dict = ft_utils.get_dict_by_test(testname)
- if not dict:
- logger.error("Cannot get {}'s config options".format(testname))
- elif 'run' in dict:
- return dict['run']
- return None
- except Exception:
- logger.exception("Cannot get {}'s config options".format(testname))
- return None
-
-
-def run_test(test, tier_name, testcases=None):
- if not test.is_enabled():
- raise TestNotEnabled("The test case {} is not enabled"
- .format(test.get_name()))
- test_name = test.get_name()
- logger.info("\n") # blank line
- print_separator("=")
- logger.info("Running test case '%s'..." % test_name)
- print_separator("=")
- logger.debug("\n%s" % test)
- source_rc_file()
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- generate_os_snapshot()
-
- flags = (" -t %s" % (test_name))
- if GlobalVariables.REPORT_FLAG:
- flags += " -r"
-
- result = testcase.TestCase.EX_RUN_ERROR
- run_dict = get_run_dict(test_name)
- if run_dict:
+class Runner(object):
+
+ def __init__(self):
+ self.executed_test_cases = []
+ self.overall_result = Result.EX_OK
+ self.clean_flag = True
+ self.report_flag = False
+
+    @staticmethod
+    def print_separator(char, count=45):
+        logger.info(char * (count - 1))
+
+ @staticmethod
+ def source_rc_file():
+ rc_file = CONST.__getattribute__('openstack_creds')
+ if not os.path.isfile(rc_file):
+ raise Exception("RC file %s does not exist..." % rc_file)
+ logger.debug("Sourcing the OpenStack RC file...")
+ os_utils.source_credentials(rc_file)
+ for key, value in os.environ.iteritems():
+ if re.search("OS_", key):
+ if key == 'OS_AUTH_URL':
+ CONST.__setattr__('OS_AUTH_URL', value)
+ elif key == 'OS_USERNAME':
+ CONST.__setattr__('OS_USERNAME', value)
+ elif key == 'OS_TENANT_NAME':
+ CONST.__setattr__('OS_TENANT_NAME', value)
+ elif key == 'OS_PASSWORD':
+ CONST.__setattr__('OS_PASSWORD', value)
+
+ @staticmethod
+ def generate_os_snapshot():
+ os_snapshot.main()
+
+ @staticmethod
+ def cleanup():
+ os_clean.main()
+
+ @staticmethod
+ def get_run_dict(testname):
try:
- module = importlib.import_module(run_dict['module'])
- cls = getattr(module, run_dict['class'])
- test_dict = ft_utils.get_dict_by_test(test_name)
- test_case = cls(**test_dict)
- GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
+            test_dict = ft_utils.get_dict_by_test(testname)
+            if not test_dict:
+                logger.error("Cannot get {}'s config options".format(testname))
+            elif 'run' in test_dict:
+                return test_dict['run']
+ return None
+ except Exception:
+ logger.exception("Cannot get {}'s config options".format(testname))
+ return None
+
+ def run_test(self, test, tier_name, testcases=None):
+ if not test.is_enabled():
+ raise TestNotEnabled(
+ "The test case {} is not enabled".format(test.get_name()))
+ test_name = test.get_name()
+ logger.info("\n") # blank line
+ self.print_separator("=")
+ logger.info("Running test case '%s'..." % test_name)
+ self.print_separator("=")
+ logger.debug("\n%s" % test)
+ self.source_rc_file()
+
+ if test.needs_clean() and self.clean_flag:
+ self.generate_os_snapshot()
+
+ flags = (" -t %s" % (test_name))
+ if self.report_flag:
+ flags += " -r"
+
+ result = testcase.TestCase.EX_RUN_ERROR
+ run_dict = self.get_run_dict(test_name)
+ if run_dict:
try:
- kwargs = run_dict['args']
- result = test_case.run(**kwargs)
- except KeyError:
- result = test_case.run()
- if result == testcase.TestCase.EX_OK:
- if GlobalVariables.REPORT_FLAG:
- test_case.push_to_db()
- result = test_case.is_successful()
- logger.info("Test result:\n\n%s\n", test_case)
- except ImportError:
- logger.exception("Cannot import module {}".format(
- run_dict['module']))
- except AttributeError:
- logger.exception("Cannot get class {}".format(
- run_dict['class']))
- else:
- raise Exception("Cannot import the class for the test case.")
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- cleanup()
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed. " % test_name)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed("The test case {} failed and is blocking"
- .format(test.get_name()))
-
-
-def run_tier(tier):
- tier_name = tier.get_name()
- tests = tier.get_tests()
- if tests is None or len(tests) == 0:
- logger.info("There are no supported test cases in this tier "
- "for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- run_test(test, tier_name)
-
-
-def run_all(tiers):
- summary = ""
- tiers_to_run = []
-
- for tier in tiers.get_tiers():
- if (len(tier.get_tests()) != 0 and
- re.search(CONST.__getattribute__('CI_LOOP'),
- tier.get_ci_loop()) is not None):
- tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
- for tier in tiers_to_run:
- run_tier(tier)
-
-
-def main(**kwargs):
-
- file = CONST.functest_testcases_yaml
- _tiers = tb.TierBuilder(CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- file)
-
- if kwargs['noclean']:
- GlobalVariables.CLEAN_FLAG = False
-
- if kwargs['report']:
- GlobalVariables.REPORT_FLAG = True
-
- try:
- if kwargs['test']:
- source_rc_file()
- if _tiers.get_tier(kwargs['test']):
- run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
- run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
- elif kwargs['test'] == "all":
- run_all(_tiers)
- else:
- logger.error("Unknown test case or tier '%s', "
- "or not supported by "
- "the given scenario '%s'."
- % (kwargs['test'],
- CONST.__getattribute__('DEPLOY_SCENARIO')))
- logger.debug("Available tiers are:\n\n%s"
- % _tiers)
- return Result.EX_ERROR
+ module = importlib.import_module(run_dict['module'])
+ cls = getattr(module, run_dict['class'])
+ test_dict = ft_utils.get_dict_by_test(test_name)
+ test_case = cls(**test_dict)
+ self.executed_test_cases.append(test_case)
+ try:
+ kwargs = run_dict['args']
+ result = test_case.run(**kwargs)
+ except KeyError:
+ result = test_case.run()
+ if result == testcase.TestCase.EX_OK:
+ if self.report_flag:
+ test_case.push_to_db()
+ result = test_case.is_successful()
+ logger.info("Test result:\n\n%s\n", test_case)
+ except ImportError:
+ logger.exception("Cannot import module {}".format(
+ run_dict['module']))
+ except AttributeError:
+ logger.exception("Cannot get class {}".format(
+ run_dict['class']))
else:
- run_all(_tiers)
- except Exception as e:
- logger.error(e)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['env var', 'value'])
- for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
- 'CI_LOOP']:
- msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier', 'duration', 'result'])
- for test_case in GlobalVariables.EXECUTED_TEST_CASES:
- result = 'PASS' if(test_case.is_successful(
- ) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
- test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
- return GlobalVariables.OVERALL_RESULT
+ raise Exception("Cannot import the class for the test case.")
+
+ if test.needs_clean() and self.clean_flag:
+ self.cleanup()
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed. " % test_name)
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+
+ def run_tier(self, tier):
+ tier_name = tier.get_name()
+ tests = tier.get_tests()
+ if tests is None or len(tests) == 0:
+ logger.info("There are no supported test cases in this tier "
+ "for the given scenario")
+ return 0
+ logger.info("\n\n") # blank line
+ self.print_separator("#")
+ logger.info("Running tier '%s'" % tier_name)
+ self.print_separator("#")
+ logger.debug("\n%s" % tier)
+ for test in tests:
+ self.run_test(test, tier_name)
+
+ def run_all(self, tiers):
+ summary = ""
+ tiers_to_run = []
+
+ for tier in tiers.get_tiers():
+ if (len(tier.get_tests()) != 0 and
+ re.search(CONST.__getattribute__('CI_LOOP'),
+ tier.get_ci_loop()) is not None):
+ tiers_to_run.append(tier)
+ summary += ("\n - %s:\n\t %s"
+ % (tier.get_name(),
+ tier.get_test_names()))
+
+ logger.info("Tests to be executed:%s" % summary)
+ for tier in tiers_to_run:
+ self.run_tier(tier)
+
+ def main(self, **kwargs):
+ _tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__("functest_testcases_yaml"))
+
+ if kwargs['noclean']:
+ self.clean_flag = False
+
+ if kwargs['report']:
+ self.report_flag = True
+
+ try:
+ if kwargs['test']:
+ self.source_rc_file()
+ if _tiers.get_tier(kwargs['test']):
+ self.run_tier(_tiers.get_tier(kwargs['test']))
+ elif _tiers.get_test(kwargs['test']):
+ self.run_test(_tiers.get_test(kwargs['test']),
+ _tiers.get_tier_name(kwargs['test']),
+ kwargs['test'])
+ elif kwargs['test'] == "all":
+ self.run_all(_tiers)
+ else:
+ logger.error("Unknown test case or tier '%s', "
+ "or not supported by "
+ "the given scenario '%s'."
+ % (kwargs['test'],
+ CONST.__getattribute__('DEPLOY_SCENARIO')))
+ logger.debug("Available tiers are:\n\n%s",
+ _tiers)
+ return Result.EX_ERROR
+ else:
+ self.run_all(_tiers)
+ except Exception:
+ logger.exception("Runner failed")
+ self.overall_result = Result.EX_ERROR
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+ 'CI_LOOP']:
+ msg.add_row([env_var, CONST.__getattribute__(env_var)])
+ logger.info("Deployment description: \n\n%s\n", msg)
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier', 'duration', 'result'])
+ for test_case in self.executed_test_cases:
+            result = ('PASS' if test_case.is_successful() ==
+                      test_case.EX_OK else 'FAIL')
+ msg.add_row([test_case.case_name, test_case.project_name,
+ _tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
+ logger.info("Execution exit value: %s" % self.overall_result)
+ return self.overall_result
if __name__ == '__main__':
@@ -275,4 +274,5 @@ if __name__ == '__main__':
CONST.__getattribute__('dir_functest_logging_cfg'))
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
- sys.exit(main(**args).value)
+ runner = Runner()
+ sys.exit(runner.main(**args).value)
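
The point the new unit tests below lean on: Runner state (clean_flag,
report_flag, executed_test_cases, overall_result) is now per instance, so each
test's setUp() gets fresh defaults, whereas the old GlobalVariables class
attributes persisted across tests. A small illustration of the assumed
behavior, matching the __init__ defaults above:

    from functest.ci import run_tests

    r1 = run_tests.Runner()
    r1.clean_flag = False             # mutate one instance only

    r2 = run_tests.Runner()
    assert r2.clean_flag is True      # fresh defaults, no cross-test leakage
    assert r1.executed_test_cases is not r2.executed_test_cases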
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index d48c79cc8..88e5d2b86 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -5,19 +5,32 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
import logging
+import unittest
import mock
from functest.ci import run_tests
from functest.utils.constants import CONST
+from functest.core.testcase import TestCase
+
+
+class FakeModule(TestCase):
+
+ def run(self):
+ return TestCase.EX_OK
+
+ def push_to_db(self):
+ return TestCase.EX_OK
+
+ def is_successful(self):
+ return TestCase.EX_OK
class RunTestsTesting(unittest.TestCase):
def setUp(self):
+ self.runner = run_tests.Runner()
self.sep = 'test_sep'
self.creds = {'OS_AUTH_URL': 'http://test_ip:test_port/v2.0',
'OS_USERNAME': 'test_os_username',
@@ -36,11 +49,10 @@ class RunTestsTesting(unittest.TestCase):
self.tiers.configure_mock(**attrs)
self.run_tests_parser = run_tests.RunTestsParser()
- self.global_variables = run_tests.GlobalVariables()
@mock.patch('functest.ci.run_tests.logger.info')
def test_print_separator(self, mock_logger_info):
- run_tests.print_separator(self.sep)
+ self.runner.print_separator(self.sep)
mock_logger_info.assert_called_once_with(self.sep * 44)
@mock.patch('functest.ci.run_tests.logger.error')
@@ -48,24 +60,24 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.os.path.isfile',
return_value=False), \
self.assertRaises(Exception):
- run_tests.source_rc_file()
+ self.runner.source_rc_file()
@mock.patch('functest.ci.run_tests.logger.debug')
- def test_source_rc_file_default(self, mock_logger_debug):
- with mock.patch('functest.ci.run_tests.os.path.isfile',
- return_value=True), \
- mock.patch('functest.ci.run_tests.os_utils.source_credentials',
- return_value=self.creds):
- run_tests.source_rc_file()
+ @mock.patch('functest.ci.run_tests.os.path.isfile',
+ return_value=True)
+ def test_source_rc_file_default(self, *args):
+ with mock.patch('functest.ci.run_tests.os_utils.source_credentials',
+ return_value=self.creds):
+ self.runner.source_rc_file()
@mock.patch('functest.ci.run_tests.os_snapshot.main')
def test_generate_os_snapshot(self, mock_os_snap):
- run_tests.generate_os_snapshot()
+ self.runner.generate_os_snapshot()
self.assertTrue(mock_os_snap.called)
@mock.patch('functest.ci.run_tests.os_clean.main')
def test_cleanup(self, mock_os_clean):
- run_tests.cleanup()
+ self.runner.cleanup()
self.assertTrue(mock_os_clean.called)
def test_get_run_dict_if_defined_default(self):
@@ -73,7 +85,7 @@ class RunTestsTesting(unittest.TestCase):
with mock.patch('functest.ci.run_tests.'
'ft_utils.get_dict_by_test',
return_value={'run': mock_obj}):
- self.assertEqual(run_tests.get_run_dict('test_name'),
+ self.assertEqual(self.runner.get_run_dict('test_name'),
mock_obj)
@mock.patch('functest.ci.run_tests.logger.error')
@@ -83,7 +95,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
return_value=None):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
mock_logger_error.assert_called_once_with("Cannot get {}'s config "
"options"
@@ -93,7 +105,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
return_value={}):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
@mock.patch('functest.ci.run_tests.logger.exception')
@@ -103,7 +115,7 @@ class RunTestsTesting(unittest.TestCase):
'ft_utils.get_dict_by_test',
side_effect=Exception):
testname = 'test_name'
- self.assertEqual(run_tests.get_run_dict(testname),
+ self.assertEqual(self.runner.get_run_dict(testname),
None)
mock_logger_except.assert_called_once_with("Cannot get {}'s config"
" options"
@@ -114,63 +126,67 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_name.return_value': 'test_name',
'needs_clean.return_value': False}
mock_test.configure_mock(**args)
- with mock.patch('functest.ci.run_tests.print_separator'),\
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.get_run_dict',
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'),\
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.get_run_dict',
return_value=None), \
self.assertRaises(Exception) as context:
- run_tests.run_test(mock_test, 'tier_name')
+            self.runner.run_test(mock_test, 'tier_name')
msg = "Cannot import the class for the test case."
self.assertTrue(msg in str(context.exception))
- def test_run_tests_default(self):
+ @mock.patch('functest.ci.run_tests.Runner.print_separator')
+ @mock.patch('functest.ci.run_tests.Runner.source_rc_file')
+ @mock.patch('functest.ci.run_tests.Runner.generate_os_snapshot')
+ @mock.patch('functest.ci.run_tests.Runner.cleanup')
+ @mock.patch('importlib.import_module', name="module",
+ return_value=mock.Mock(test_class=mock.Mock(
+ side_effect=FakeModule)))
+ @mock.patch('functest.utils.functest_utils.get_dict_by_test')
+ def test_run_tests_default(self, *args):
mock_test = mock.Mock()
- args = {'get_name.return_value': 'test_name',
- 'needs_clean.return_value': True}
- mock_test.configure_mock(**args)
+ kwargs = {'get_name.return_value': 'test_name',
+ 'needs_clean.return_value': True}
+ mock_test.configure_mock(**kwargs)
test_run_dict = {'module': 'test_module',
- 'class': mock.Mock,
- 'args': 'test_args'}
- with mock.patch('functest.ci.run_tests.print_separator'),\
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.generate_os_snapshot'), \
- mock.patch('functest.ci.run_tests.cleanup'), \
- mock.patch('functest.ci.run_tests.get_run_dict',
- return_value=test_run_dict), \
- self.assertRaises(run_tests.BlockingTestFailed) as context:
- run_tests.GlobalVariables.CLEAN_FLAG = True
- run_tests.run_test(mock_test, 'tier_name')
- msg = 'The test case test_name failed and is blocking'
- self.assertTrue(msg in context)
+ 'class': 'test_class'}
+ with mock.patch('functest.ci.run_tests.Runner.get_run_dict',
+ return_value=test_run_dict):
+ self.runner.clean_flag = True
+ self.runner.run_test(mock_test, 'tier_name')
+ self.assertEqual(self.runner.overall_result,
+ run_tests.Result.EX_OK)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.print_separator'), \
- mock.patch('functest.ci.run_tests.run_test') as mock_method:
- run_tests.run_tier(self.tier)
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'), \
+ mock.patch(
+ 'functest.ci.run_tests.Runner.run_test') as mock_method:
+ self.runner.run_tier(self.tier)
mock_method.assert_any_call('test1', 'test_tier')
mock_method.assert_any_call('test2', 'test_tier')
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_tier_missing_test(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.print_separator'):
+ with mock.patch('functest.ci.run_tests.Runner.print_separator'):
self.tier.get_tests.return_value = None
- self.assertEqual(run_tests.run_tier(self.tier), 0)
+ self.assertEqual(self.runner.run_tier(self.tier), 0)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_default(self, mock_logger_info):
- with mock.patch('functest.ci.run_tests.run_tier') as mock_method:
+ with mock.patch(
+ 'functest.ci.run_tests.Runner.run_tier') as mock_method:
CONST.__setattr__('CI_LOOP', 'test_ci_loop')
- run_tests.run_all(self.tiers)
+ self.runner.run_all(self.tiers)
mock_method.assert_any_call(self.tier)
self.assertTrue(mock_logger_info.called)
@mock.patch('functest.ci.run_tests.logger.info')
def test_run_all_missing_tier(self, mock_logger_info):
CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
- run_tests.run_all(self.tiers)
+ self.runner.run_all(self.tiers)
self.assertTrue(mock_logger_info.called)
def test_main_failed(self):
@@ -179,69 +195,78 @@ class RunTestsTesting(unittest.TestCase):
args = {'get_tier.return_value': False,
'get_test.return_value': False}
mock_obj.configure_mock(**args)
-
with mock.patch('functest.ci.run_tests.tb.TierBuilder'), \
- mock.patch('functest.ci.run_tests.source_rc_file',
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file',
side_effect=Exception):
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
-
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file',
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file',
side_effect=Exception):
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
- def test_main_default(self):
- kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
+ def test_main_tier(self, *args):
+ mock_tier = mock.Mock()
+ args = {'get_name.return_value': 'tier_name'}
+ mock_tier.configure_mock(**args)
+ kwargs = {'test': 'tier_name', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': True,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': mock_tier,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_tier') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_tier') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_test(self, *args):
+ kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
+ mock_test = mock.Mock()
+ args = {'get_name.return_value': 'test_name',
+ 'needs_clean.return_value': True}
+ mock_test.configure_mock(**args)
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': True}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': mock_test}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_test') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_test') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_all_tier(self, *args):
kwargs = {'test': 'all', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
- mock.patch('functest.ci.run_tests.run_all') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.run_all') as m:
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_OK)
self.assertTrue(m.called)
+ def test_main_any_tier_test_ko(self, *args):
kwargs = {'test': 'any', 'noclean': True, 'report': True}
mock_obj = mock.Mock()
- args = {'get_tier.return_value': False,
- 'get_test.return_value': False}
+ args = {'get_tier.return_value': None,
+ 'get_test.return_value': None}
mock_obj.configure_mock(**args)
with mock.patch('functest.ci.run_tests.tb.TierBuilder',
return_value=mock_obj), \
- mock.patch('functest.ci.run_tests.source_rc_file'), \
+ mock.patch('functest.ci.run_tests.Runner.source_rc_file'), \
mock.patch('functest.ci.run_tests.logger.debug') as m:
- self.assertEqual(run_tests.main(**kwargs),
+ self.assertEqual(self.runner.main(**kwargs),
run_tests.Result.EX_ERROR)
self.assertTrue(m.called)
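
For reference, the patching pattern the updated tests use throughout:
mock.patch against Runner's dotted path, so calls made through self.runner hit
the mocked methods. A self-contained sketch under the same assumptions
(functest importable, the mock library installed); the test name and scenario
are illustrative, not taken from the patch:

    import unittest
    import mock

    from functest.ci import run_tests


    class RunnerPatchingExample(unittest.TestCase):

        def setUp(self):
            self.runner = run_tests.Runner()

        @mock.patch('functest.ci.run_tests.Runner.run_tier')
        def test_run_all_without_matching_tiers(self, mock_run_tier):
            tiers = mock.Mock()
            tiers.get_tiers.return_value = []   # nothing to schedule
            self.runner.run_all(tiers)
            mock_run_tier.assert_not_called()


    if __name__ == '__main__':
        unittest.main()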