about summary refs log tree commit diff stats
path: root/testcases/OpenStack
diff options
context:
space:
mode:
author: Morgan Richomme <morgan.richomme@orange.com> 2016-05-27 07:40:40 +0000
committer: Gerrit Code Review <gerrit@172.30.200.206> 2016-05-27 07:40:40 +0000
commit1b180fae8f1c15467facf3eebb3e9e68e43f3f06 (patch)
treee63ea881c9bd1ec0b4991a8ef22f24c3ccd33dbb /testcases/OpenStack
parentd5ba524bf4702fc586129e9bc902908efc763f13 (diff)
parentd00870df2d5d6c3be03799623372ee6801b380e6 (diff)
Merge "Add simple error logs in result DB"
Diffstat (limited to 'testcases/OpenStack')
-rw-r--r--  testcases/OpenStack/tempest/run_tempest.py | 44
1 file changed, 28 insertions, 16 deletions
diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py
index bf62ce306..93581f0a9 100644
--- a/testcases/OpenStack/tempest/run_tempest.py
+++ b/testcases/OpenStack/tempest/run_tempest.py
@@ -255,6 +255,7 @@ def run_tempest(OPTION):
logger.info("Starting Tempest test suite: '%s'." % OPTION)
cmd_line = "rally verify start " + OPTION + " --system-wide"
CI_DEBUG = os.environ.get("CI_DEBUG")
+
if CI_DEBUG == "true" or CI_DEBUG == "True":
ft_utils.execute_command(cmd_line, logger, exit_on_error=True)
else:
@@ -297,24 +298,35 @@ def run_tempest(OPTION):
dur_sec_int = int(round(dur_sec_float, 0))
dur_sec_int = dur_sec_int + 60 * dur_min
- # Generate json results for DB
- json_results = {"timestart": time_start, "duration": dur_sec_int,
- "tests": int(num_tests), "failures": int(num_failures)}
- logger.info("Results: " + str(json_results))
-
- status = "failed"
- try:
- diff = (int(num_tests) - int(num_failures))
- success_rate = 100 * diff / int(num_tests)
- except:
- success_rate = 0
-
- # For Tempest we assume that teh success rate is above 90%
- if success_rate >= 90:
- status = "passed"
-
# Push results in payload of testcase
if args.report:
+ # Note criteria hardcoded...TODO move to testcase.yaml
+ status = "failed"
+ try:
+ diff = (int(num_tests) - int(num_failures))
+ success_rate = 100 * diff / int(num_tests)
+ except:
+ success_rate = 0
+
+ # For Tempest we assume that the success rate is above 90%
+ if success_rate >= 90:
+ status = "passed"
+
+ # add the test in error in the details sections
+ # should be possible to do it during the test
+ with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
+ output = myfile.read()
+ error_logs = ""
+
+ for match in re.findall('(.*?)[. ]*FAILED', output):
+ error_logs += match
+
+ # Generate json results for DB
+ json_results = {"timestart": time_start, "duration": dur_sec_int,
+ "tests": int(num_tests), "failures": int(num_failures),
+ "errors": error_logs}
+ logger.info("Results: " + str(json_results))
+
logger.debug("Push result into DB")
push_results_to_db("Tempest", json_results, status)