author    | jose.lausuch <jose.lausuch@ericsson.com> | 2016-07-22 17:49:34 +0200
committer | Jose Lausuch <jose.lausuch@ericsson.com> | 2016-07-26 21:41:44 +0000
commit    | 934a16a7766a5f06b046d882eee408a725b03139 (patch)
tree      | fe47b3a354f4cf176bb9506949ea95fc65258e0b /ci/run_tests.py
parent    | b67e86163bc785b9186cf6ca6af3187726946eb1 (diff)
Generate Functest report at the end of the execution
JIRA: FUNCTEST-89
Change-Id: I10c41efdd6a7f737f57ab15158c8c8d49f392271
Signed-off-by: jose.lausuch <jose.lausuch@ericsson.com>
Diffstat (limited to 'ci/run_tests.py')
-rwxr-xr-x | ci/run_tests.py | 40
1 file changed, 29 insertions, 11 deletions
diff --git a/ci/run_tests.py b/ci/run_tests.py
index cb05d435..ca7ff2b5 100755
--- a/ci/run_tests.py
+++ b/ci/run_tests.py
@@ -13,7 +13,7 @@ import datetime
 import os
 import re
 import sys
-
+import functest.ci.generate_report as generate_report
 import functest.ci.tier_builder as tb
 import functest.utils.functest_logger as ft_logger
 import functest.utils.functest_utils as ft_utils
@@ -44,6 +44,7 @@ FUNCTEST_REPO = ("%s/functest/" % REPOS_DIR)
 EXEC_SCRIPT = ("%sci/exec_test.sh" % FUNCTEST_REPO)
 CLEAN_FLAG = True
 REPORT_FLAG = False
+EXECUTED_TEST_CASES = []
 
 # This will be the return code of this script. If any of the tests fails,
 # this variable will change to -1
@@ -74,8 +75,9 @@ def cleanup():
     os_clean.main()
 
 
-def run_test(test):
-    global OVERALL_RESULT
+def run_test(test, tier_name):
+    global OVERALL_RESULT, EXECUTED_TEST_CASES
+    result_str = "PASS"
     start = datetime.datetime.now()
     test_name = test.get_name()
     logger.info("\n")  # blank line
@@ -93,29 +95,35 @@ def run_test(test):
 
     cmd = ("%s%s" % (EXEC_SCRIPT, flags))
     logger.debug("Executing command '%s'" % cmd)
-
     result = ft_utils.execute_command(cmd, logger, exit_on_error=False)
 
     if CLEAN_FLAG:
         cleanup()
-
     end = datetime.datetime.now()
     duration = (end - start).seconds
-    str = ("%02d:%02d" % divmod(duration, 60))
-    logger.info("Test execution time: %s" % str)
+    duration_str = ("%02d:%02d" % divmod(duration, 60))
+    logger.info("Test execution time: %s" % duration_str)
 
+    result = 0
     if result != 0:
         logger.error("The test case '%s' failed. " % test_name)
         OVERALL_RESULT = -1
+        result_str = "FAIL"
 
         if test.get_blocking():
             logger.info("This test case is blocking. Exiting...")
             sys.exit(OVERALL_RESULT)
 
+    for test in EXECUTED_TEST_CASES:
+        if test['test_name'] == test_name:
+            test.update({"result": result_str,
+                         "duration": duration_str})
+
     return result
 
 
 def run_tier(tier):
+    tier_name = tier.get_name()
     tests = tier.get_tests()
     if tests is None or len(tests) == 0:
         logger.info("There are no supported test cases in this tier "
@@ -123,14 +131,19 @@
         return 0
     logger.info("\n\n")  # blank line
     print_separator("#")
-    logger.info("Running tier '%s'" % tier.get_name())
+    logger.info("Running tier '%s'" % tier_name)
     print_separator("#")
     logger.debug("\n%s" % tier)
     for test in tests:
-        run_test(test)
+        res = run_test(test, tier_name)
+        if res != 0:
+            return res
+
+    return 0
 
 
 def run_all(tiers):
+    global EXECUTED_TEST_CASES
     summary = ""
     BUILD_TAG = os.getenv('BUILD_TAG')
     if BUILD_TAG is not None and re.search("daily", BUILD_TAG) is not None:
@@ -149,9 +162,14 @@
                                         tier.get_test_names()))
 
     logger.info("Tests to be executed:%s" % summary)
-
+    EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
    for tier in tiers_to_run:
-        run_tier(tier)
+        res = run_tier(tier)
+        if res != 0:
+            return res
+    generate_report.main(EXECUTED_TEST_CASES)
+
+    return 0
 
 
 def main():
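The data flow introduced by this change is: generate_report.init() builds one record per test case before any tier runs, run_test() fills in the "result" and "duration" fields as each test finishes, and generate_report.main() renders the summary once all tiers are done. The sketch below only illustrates that contract; it is not the actual functest.ci.generate_report module (whose code is not part of this diff), and every field and default value other than "test_name", "result" and "duration" is an assumption.

```python
# Illustrative sketch only: the real functest.ci.generate_report module is not
# shown in this diff. The "tier_name" key and the default values used here are
# assumptions; run_tests.py only relies on "test_name", "result" and "duration".


def init(tiers_to_run):
    """Build one record per test case so run_test() can fill in the outcome."""
    executed_test_cases = []
    for tier in tiers_to_run:
        for test_name in tier.get_test_names():
            executed_test_cases.append({"test_name": test_name,
                                        "tier_name": tier.get_name(),
                                        "result": "Not executed",
                                        "duration": "00:00"})
    return executed_test_cases


def main(executed_test_cases):
    """Print a plain-text summary table at the end of the run."""
    border = "+%s+" % ("-" * 60)
    print(border)
    for test in executed_test_cases:
        print("| {0:<30} | {1:>9} | {2:>13} |".format(test["test_name"],
                                                      test["duration"],
                                                      test["result"]))
    print(border)
```

With a contract like this, run_tests.py only needs the three calls visible in the diff: init() before the tier loop, the per-test update() inside run_test(), and main() once every tier has completed.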