Diffstat (limited to 'ci/run_tests.py')
-rwxr-xr-x  ci/run_tests.py  40  +++++++++++++++++++++++++++++-----------
1 file changed, 29 insertions, 11 deletions
diff --git a/ci/run_tests.py b/ci/run_tests.py
index cb05d435d..ca7ff2b5a 100755
--- a/ci/run_tests.py
+++ b/ci/run_tests.py
@@ -13,7 +13,7 @@ import datetime
 import os
 import re
 import sys
-
+import functest.ci.generate_report as generate_report
 import functest.ci.tier_builder as tb
 import functest.utils.functest_logger as ft_logger
 import functest.utils.functest_utils as ft_utils
@@ -44,6 +44,7 @@ FUNCTEST_REPO = ("%s/functest/" % REPOS_DIR)
 EXEC_SCRIPT = ("%sci/exec_test.sh" % FUNCTEST_REPO)
 CLEAN_FLAG = True
 REPORT_FLAG = False
+EXECUTED_TEST_CASES = []
 # This will be the return code of this script. If any of the tests fails,
 # this variable will change to -1
@@ -74,8 +75,9 @@ def cleanup():
     os_clean.main()
 
 
-def run_test(test):
-    global OVERALL_RESULT
+def run_test(test, tier_name):
+    global OVERALL_RESULT, EXECUTED_TEST_CASES
+    result_str = "PASS"
     start = datetime.datetime.now()
     test_name = test.get_name()
     logger.info("\n")  # blank line
@@ -93,29 +95,35 @@ def run_test(test):
     cmd = ("%s%s" % (EXEC_SCRIPT, flags))
     logger.debug("Executing command '%s'" % cmd)
-
     result = ft_utils.execute_command(cmd, logger, exit_on_error=False)
 
     if CLEAN_FLAG:
         cleanup()
-
     end = datetime.datetime.now()
     duration = (end - start).seconds
-    str = ("%02d:%02d" % divmod(duration, 60))
-    logger.info("Test execution time: %s" % str)
+    duration_str = ("%02d:%02d" % divmod(duration, 60))
+    logger.info("Test execution time: %s" % duration_str)
 
     if result != 0:
         logger.error("The test case '%s' failed. " % test_name)
         OVERALL_RESULT = -1
+        result_str = "FAIL"
 
         if test.get_blocking():
             logger.info("This test case is blocking. Exiting...")
             sys.exit(OVERALL_RESULT)
 
+    for test_case in EXECUTED_TEST_CASES:
+        if test_case['test_name'] == test_name:
+            test_case.update({"result": result_str,
+                              "duration": duration_str})
+
     return result
 
 
 def run_tier(tier):
+    tier_name = tier.get_name()
     tests = tier.get_tests()
     if tests is None or len(tests) == 0:
         logger.info("There are no supported test cases in this tier "
@@ -123,14 +131,19 @@ def run_tier(tier):
         return 0
 
     logger.info("\n\n")  # blank line
     print_separator("#")
-    logger.info("Running tier '%s'" % tier.get_name())
+    logger.info("Running tier '%s'" % tier_name)
     print_separator("#")
     logger.debug("\n%s" % tier)
 
     for test in tests:
-        run_test(test)
+        res = run_test(test, tier_name)
+        if res != 0:
+            return res
+
+    return 0
 
 
 def run_all(tiers):
+    global EXECUTED_TEST_CASES
     summary = ""
     BUILD_TAG = os.getenv('BUILD_TAG')
     if BUILD_TAG is not None and re.search("daily", BUILD_TAG) is not None:
@@ -149,9 +162,14 @@ def run_all(tiers):
                                                   tier.get_test_names()))
 
     logger.info("Tests to be executed:%s" % summary)
-
+    EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
     for tier in tiers_to_run:
-        run_tier(tier)
+        res = run_tier(tier)
+        if res != 0:
+            return res
+
+    generate_report.main(EXECUTED_TEST_CASES)
+
+    return 0
 
 
 def main():
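
Note: the functest.ci.generate_report module imported above is not part of
this diff, so the shape of EXECUTED_TEST_CASES has to be inferred from how
run_tests.py uses it: init() returns a list of per-test dicts keyed at least
by 'test_name', and run_test() later patches 'result' and 'duration' into
each one. A minimal sketch of that inferred interface follows; everything in
it except the 'test_name', 'result' and 'duration' keys is an assumption,
not the actual functest implementation.

    # Hypothetical stand-in for functest.ci.generate_report, reconstructed
    # from the calls in run_tests.py above. The 'tier_name' key, the initial
    # placeholder values and the report layout are assumptions.

    def init(tiers_to_run):
        # One dict per test case, pre-filled before any test executes.
        test_cases = []
        for tier in tiers_to_run:
            for test_name in tier.get_test_names():
                test_cases.append({"test_name": test_name,
                                   "tier_name": tier.get_name(),
                                   "result": "Not executed",
                                   "duration": "00:00"})
        return test_cases

    def main(executed_test_cases):
        # Print a plain-text summary; run_test() has already updated
        # 'result' and 'duration' in place for every executed case.
        for tc in executed_test_cases:
            print("%-30s %-6s %s" % (tc["test_name"],
                                     tc["result"],
                                     tc["duration"]))

Because run_test() matches entries on 'test_name' alone, init() must emit a
single dict per test case name; duplicate names across tiers would all be
overwritten with the same result.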