path: root/functest/ci/run_tests.py
author	Cédric Ollivier <cedric.ollivier@orange.com>	2017-05-15 18:55:26 +0200
committer	Cédric Ollivier <cedric.ollivier@orange.com>	2017-05-16 18:05:23 +0200
commit	4c1e2c2b35ef2474d26e0cd6ce3977d24dc93553 (patch)
tree	c94d5dced1751eca4bc025badcaf6430d77c1c1e /functest/ci/run_tests.py
parent	d180eabc5a2c2453d04bb4a080874c329605c026 (diff)
Define Runner class
It simply converts run_tests.py's functions to Runner's methods. This proposal could be enhanced in a second step to fully leverage object-oriented programming.

It defines the former static variables as instance variables to allow multiple runs (mandatory for unit testing). All false positives in unit tests are fixed, but run_tests.py is still not fully covered.

Change-Id: I8f91c132aa8b2248041f31e46238dd5598344d34
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
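As a hedged illustration of the "multiple runs" property the message mentions (an illustrative sketch, not part of the commit; the test name and assertions are assumptions, and it assumes Result is importable from run_tests.py as the diff suggests), each Runner instance now carries its own state, so a second run no longer sees results leaked through class-level variables:

import unittest

from functest.ci.run_tests import Result, Runner


class RunnerStateTest(unittest.TestCase):

    def test_instances_do_not_share_state(self):
        # Simulate a first run that failed and executed one test case.
        first = Runner()
        first.overall_result = Result.EX_ERROR
        first.executed_test_cases.append(object())
        # A fresh instance starts from a clean slate, unlike the former
        # GlobalVariables class attributes, which were shared globally.
        second = Runner()
        self.assertEqual(second.overall_result, Result.EX_OK)
        self.assertEqual(second.executed_test_cases, [])


if __name__ == '__main__':
    unittest.main()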
Diffstat (limited to 'functest/ci/run_tests.py')
-rwxr-xr-x	functest/ci/run_tests.py	404
1 file changed, 202 insertions(+), 202 deletions(-)
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 76760096b..f973b616c 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -64,210 +64,209 @@ class RunTestsParser(object):
return vars(self.parser.parse_args(argv))
-class GlobalVariables:
- EXECUTED_TEST_CASES = []
- OVERALL_RESULT = Result.EX_OK
- CLEAN_FLAG = True
- REPORT_FLAG = False
-
-
-def print_separator(str, count=45):
- line = ""
- for i in range(0, count - 1):
- line += str
- logger.info("%s" % line)
-
-
-def source_rc_file():
- rc_file = CONST.__getattribute__('openstack_creds')
- if not os.path.isfile(rc_file):
- raise Exception("RC file %s does not exist..." % rc_file)
- logger.debug("Sourcing the OpenStack RC file...")
- os_utils.source_credentials(rc_file)
- for key, value in os.environ.iteritems():
- if re.search("OS_", key):
- if key == 'OS_AUTH_URL':
- CONST.__setattr__('OS_AUTH_URL', value)
- elif key == 'OS_USERNAME':
- CONST.__setattr__('OS_USERNAME', value)
- elif key == 'OS_TENANT_NAME':
- CONST.__setattr__('OS_TENANT_NAME', value)
- elif key == 'OS_PASSWORD':
- CONST.__setattr__('OS_PASSWORD', value)
-
-
-def generate_os_snapshot():
- os_snapshot.main()
-
-
-def cleanup():
- os_clean.main()
-
-
-def get_run_dict(testname):
- try:
- dict = ft_utils.get_dict_by_test(testname)
- if not dict:
- logger.error("Cannot get {}'s config options".format(testname))
- elif 'run' in dict:
- return dict['run']
- return None
- except Exception:
- logger.exception("Cannot get {}'s config options".format(testname))
- return None
-
-
-def run_test(test, tier_name, testcases=None):
- if not test.is_enabled():
- raise TestNotEnabled("The test case {} is not enabled"
- .format(test.get_name()))
- test_name = test.get_name()
- logger.info("\n") # blank line
- print_separator("=")
- logger.info("Running test case '%s'..." % test_name)
- print_separator("=")
- logger.debug("\n%s" % test)
- source_rc_file()
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- generate_os_snapshot()
-
- flags = (" -t %s" % (test_name))
- if GlobalVariables.REPORT_FLAG:
- flags += " -r"
-
- result = testcase.TestCase.EX_RUN_ERROR
- run_dict = get_run_dict(test_name)
- if run_dict:
+class Runner(object):
+
+ def __init__(self):
+ self.executed_test_cases = []
+ self.overall_result = Result.EX_OK
+ self.clean_flag = True
+ self.report_flag = False
+
+ @staticmethod
+ def print_separator(str, count=45):
+ line = ""
+ for i in range(0, count - 1):
+ line += str
+ logger.info("%s" % line)
+
+ @staticmethod
+ def source_rc_file():
+ rc_file = CONST.__getattribute__('openstack_creds')
+ if not os.path.isfile(rc_file):
+ raise Exception("RC file %s does not exist..." % rc_file)
+ logger.debug("Sourcing the OpenStack RC file...")
+ os_utils.source_credentials(rc_file)
+ for key, value in os.environ.iteritems():
+ if re.search("OS_", key):
+ if key == 'OS_AUTH_URL':
+ CONST.__setattr__('OS_AUTH_URL', value)
+ elif key == 'OS_USERNAME':
+ CONST.__setattr__('OS_USERNAME', value)
+ elif key == 'OS_TENANT_NAME':
+ CONST.__setattr__('OS_TENANT_NAME', value)
+ elif key == 'OS_PASSWORD':
+ CONST.__setattr__('OS_PASSWORD', value)
+
+ @staticmethod
+ def generate_os_snapshot():
+ os_snapshot.main()
+
+ @staticmethod
+ def cleanup():
+ os_clean.main()
+
+ @staticmethod
+ def get_run_dict(testname):
try:
- module = importlib.import_module(run_dict['module'])
- cls = getattr(module, run_dict['class'])
- test_dict = ft_utils.get_dict_by_test(test_name)
- test_case = cls(**test_dict)
- GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
+ dict = ft_utils.get_dict_by_test(testname)
+ if not dict:
+ logger.error("Cannot get {}'s config options".format(testname))
+ elif 'run' in dict:
+ return dict['run']
+ return None
+ except Exception:
+ logger.exception("Cannot get {}'s config options".format(testname))
+ return None
+
+ def run_test(self, test, tier_name, testcases=None):
+ if not test.is_enabled():
+ raise TestNotEnabled(
+ "The test case {} is not enabled".format(test.get_name()))
+ test_name = test.get_name()
+ logger.info("\n") # blank line
+ self.print_separator("=")
+ logger.info("Running test case '%s'..." % test_name)
+ self.print_separator("=")
+ logger.debug("\n%s" % test)
+ self.source_rc_file()
+
+ if test.needs_clean() and self.clean_flag:
+ self.generate_os_snapshot()
+
+ flags = (" -t %s" % (test_name))
+ if self.report_flag:
+ flags += " -r"
+
+ result = testcase.TestCase.EX_RUN_ERROR
+ run_dict = self.get_run_dict(test_name)
+ if run_dict:
try:
- kwargs = run_dict['args']
- result = test_case.run(**kwargs)
- except KeyError:
- result = test_case.run()
- if result == testcase.TestCase.EX_OK:
- if GlobalVariables.REPORT_FLAG:
- test_case.push_to_db()
- result = test_case.is_successful()
- logger.info("Test result:\n\n%s\n", test_case)
- except ImportError:
- logger.exception("Cannot import module {}".format(
- run_dict['module']))
- except AttributeError:
- logger.exception("Cannot get class {}".format(
- run_dict['class']))
- else:
- raise Exception("Cannot import the class for the test case.")
-
- if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
- cleanup()
- if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed. " % test_name)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
- if test.is_blocking():
- raise BlockingTestFailed("The test case {} failed and is blocking"
- .format(test.get_name()))
-
-
-def run_tier(tier):
- tier_name = tier.get_name()
- tests = tier.get_tests()
- if tests is None or len(tests) == 0:
- logger.info("There are no supported test cases in this tier "
- "for the given scenario")
- return 0
- logger.info("\n\n") # blank line
- print_separator("#")
- logger.info("Running tier '%s'" % tier_name)
- print_separator("#")
- logger.debug("\n%s" % tier)
- for test in tests:
- run_test(test, tier_name)
-
-
-def run_all(tiers):
- summary = ""
- tiers_to_run = []
-
- for tier in tiers.get_tiers():
- if (len(tier.get_tests()) != 0 and
- re.search(CONST.__getattribute__('CI_LOOP'),
- tier.get_ci_loop()) is not None):
- tiers_to_run.append(tier)
- summary += ("\n - %s:\n\t %s"
- % (tier.get_name(),
- tier.get_test_names()))
-
- logger.info("Tests to be executed:%s" % summary)
- for tier in tiers_to_run:
- run_tier(tier)
-
-
-def main(**kwargs):
-
- file = CONST.functest_testcases_yaml
- _tiers = tb.TierBuilder(CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- file)
-
- if kwargs['noclean']:
- GlobalVariables.CLEAN_FLAG = False
-
- if kwargs['report']:
- GlobalVariables.REPORT_FLAG = True
-
- try:
- if kwargs['test']:
- source_rc_file()
- if _tiers.get_tier(kwargs['test']):
- run_tier(_tiers.get_tier(kwargs['test']))
- elif _tiers.get_test(kwargs['test']):
- run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier_name(kwargs['test']),
- kwargs['test'])
- elif kwargs['test'] == "all":
- run_all(_tiers)
- else:
- logger.error("Unknown test case or tier '%s', "
- "or not supported by "
- "the given scenario '%s'."
- % (kwargs['test'],
- CONST.__getattribute__('DEPLOY_SCENARIO')))
- logger.debug("Available tiers are:\n\n%s"
- % _tiers)
- return Result.EX_ERROR
+ module = importlib.import_module(run_dict['module'])
+ cls = getattr(module, run_dict['class'])
+ test_dict = ft_utils.get_dict_by_test(test_name)
+ test_case = cls(**test_dict)
+ self.executed_test_cases.append(test_case)
+ try:
+ kwargs = run_dict['args']
+ result = test_case.run(**kwargs)
+ except KeyError:
+ result = test_case.run()
+ if result == testcase.TestCase.EX_OK:
+ if self.report_flag:
+ test_case.push_to_db()
+ result = test_case.is_successful()
+ logger.info("Test result:\n\n%s\n", test_case)
+ except ImportError:
+ logger.exception("Cannot import module {}".format(
+ run_dict['module']))
+ except AttributeError:
+ logger.exception("Cannot get class {}".format(
+ run_dict['class']))
else:
- run_all(_tiers)
- except Exception as e:
- logger.error(e)
- GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['env var', 'value'])
- for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
- 'CI_LOOP']:
- msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description: \n\n%s\n", msg)
-
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
- field_names=['test case', 'project', 'tier', 'duration', 'result'])
- for test_case in GlobalVariables.EXECUTED_TEST_CASES:
- result = 'PASS' if(test_case.is_successful(
- ) == test_case.EX_OK) else 'FAIL'
- msg.add_row([test_case.case_name, test_case.project_name,
- _tiers.get_tier_name(test_case.case_name),
- test_case.get_duration(), result])
- logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
-
- logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
- return GlobalVariables.OVERALL_RESULT
+ raise Exception("Cannot import the class for the test case.")
+
+ if test.needs_clean() and self.clean_flag:
+ self.cleanup()
+ if result != testcase.TestCase.EX_OK:
+ logger.error("The test case '%s' failed. " % test_name)
+ self.overall_result = Result.EX_ERROR
+ if test.is_blocking():
+ raise BlockingTestFailed(
+ "The test case {} failed and is blocking".format(
+ test.get_name()))
+
+ def run_tier(self, tier):
+ tier_name = tier.get_name()
+ tests = tier.get_tests()
+ if tests is None or len(tests) == 0:
+ logger.info("There are no supported test cases in this tier "
+ "for the given scenario")
+ return 0
+ logger.info("\n\n") # blank line
+ self.print_separator("#")
+ logger.info("Running tier '%s'" % tier_name)
+ self.print_separator("#")
+ logger.debug("\n%s" % tier)
+ for test in tests:
+ self.run_test(test, tier_name)
+
+ def run_all(self, tiers):
+ summary = ""
+ tiers_to_run = []
+
+ for tier in tiers.get_tiers():
+ if (len(tier.get_tests()) != 0 and
+ re.search(CONST.__getattribute__('CI_LOOP'),
+ tier.get_ci_loop()) is not None):
+ tiers_to_run.append(tier)
+ summary += ("\n - %s:\n\t %s"
+ % (tier.get_name(),
+ tier.get_test_names()))
+
+ logger.info("Tests to be executed:%s" % summary)
+ for tier in tiers_to_run:
+ self.run_tier(tier)
+
+ def main(self, **kwargs):
+ _tiers = tb.TierBuilder(
+ CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ CONST.__getattribute__("functest_testcases_yaml"))
+
+ if kwargs['noclean']:
+ self.clean_flag = False
+
+ if kwargs['report']:
+ self.report_flag = True
+
+ try:
+ if kwargs['test']:
+ self.source_rc_file()
+ logger.error(kwargs['test'])
+ if _tiers.get_tier(kwargs['test']):
+ self.run_tier(_tiers.get_tier(kwargs['test']))
+ elif _tiers.get_test(kwargs['test']):
+ self.run_test(_tiers.get_test(kwargs['test']),
+ _tiers.get_tier_name(kwargs['test']),
+ kwargs['test'])
+ elif kwargs['test'] == "all":
+ self.run_all(_tiers)
+ else:
+ logger.error("Unknown test case or tier '%s', "
+ "or not supported by "
+ "the given scenario '%s'."
+ % (kwargs['test'],
+ CONST.__getattribute__('DEPLOY_SCENARIO')))
+ logger.debug("Available tiers are:\n\n%s",
+ _tiers)
+ return Result.EX_ERROR
+ else:
+ self.run_all(_tiers)
+ except Exception:
+ logger.exception("Runner failed")
+ self.overall_result = Result.EX_ERROR
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+ 'CI_LOOP']:
+ msg.add_row([env_var, CONST.__getattribute__(env_var)])
+ logger.info("Deployment description: \n\n%s\n", msg)
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier', 'duration', 'result'])
+ for test_case in self.executed_test_cases:
+ result = 'PASS' if(test_case.is_successful(
+ ) == test_case.EX_OK) else 'FAIL'
+ msg.add_row([test_case.case_name, test_case.project_name,
+ _tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
+ logger.info("Execution exit value: %s" % self.overall_result)
+ return self.overall_result
if __name__ == '__main__':
@@ -275,4 +274,5 @@ if __name__ == '__main__':
CONST.__getattribute__('dir_functest_logging_cfg'))
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
- sys.exit(main(**args).value)
+ runner = Runner()
+ sys.exit(runner.main(**args).value)
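A minimal sketch of driving the new entry point programmatically (illustrative only, not part of the commit; it assumes main() reads the 'test', 'noclean' and 'report' keys shown in the diff and returns a Result member, whose .value is what the CLI block above passes to sys.exit()):

from functest.ci.run_tests import Runner

runner = Runner()
# Equivalent to the __main__ block, but with kwargs built by hand
# instead of coming from RunTestsParser.
result = runner.main(test="all", noclean=False, report=False)
print(result.value)  # the same value sys.exit() receives in the CLI block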