Diffstat (limited to 'testcases/OpenStack/tempest/run_tempest.py')
-rwxr-xr-x  testcases/OpenStack/tempest/run_tempest.py | 59
1 file changed, 41 insertions(+), 18 deletions(-)
diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py
index 5d1648dc..8ca3bdb3 100755
--- a/testcases/OpenStack/tempest/run_tempest.py
+++ b/testcases/OpenStack/tempest/run_tempest.py
@@ -219,7 +219,7 @@ def configure_tempest_feature(deployment_dir, mode):
     if mode == 'feature_multisite':
         config.set('service_available', 'kingbird', 'true')
         cmd = "openstack endpoint show kingbird | grep publicurl |\
-              awk '{print $4}' | awk -F '/' '{print $3}'"
+              awk '{print $4}' | awk -F '/' '{print $4}'"
         kingbird_api_version = os.popen(cmd).read()
         if os.environ.get("INSTALLER_TYPE") == 'fuel':
             # For MOS based setup, the service is accessible
@@ -397,23 +397,41 @@ def run_tempest(OPTION):
     dur_sec_int = int(round(dur_sec_float, 0))
     dur_sec_int = dur_sec_int + 60 * dur_min
     stop_time = time.time()
-    # Push results in payload of testcase
-    if args.report:
-        logger.debug("Pushing tempest results into DB...")
-        # Note criteria hardcoded...TODO move to testcase.yaml
-        status = "FAIL"
-        try:
-            diff = (int(num_tests) - int(num_failures))
-            success_rate = 100 * diff / int(num_tests)
-        except:
-            success_rate = 0
-        # For Tempest we assume that the success rate is above 90%
-        if success_rate >= 90:
+    status = "FAIL"
+    try:
+        diff = (int(num_tests) - int(num_failures))
+        success_rate = 100 * diff / int(num_tests)
+    except:
+        success_rate = 0
+
+    # For Tempest we assume that the success rate is above 90%
+    if "smoke" in args.mode:
+        case_name = "tempest_smoke_serial"
+        # Note criteria hardcoded...TODO read it from testcases.yaml
+        success_criteria = 100
+        if success_rate >= success_criteria:
+            status = "PASS"
+        else:
+            logger.info("Tempest success rate: %s%%. The success criteria to "
+                        "pass this test is %s%%. Marking the test as FAILED." %
+                        (success_rate, success_criteria))
+    else:
+        case_name = "tempest_full_parallel"
+        # Note criteria hardcoded...TODO read it from testcases.yaml
+        success_criteria = 80
+        if success_rate >= success_criteria:
             status = "PASS"
+        else:
+            logger.info("Tempest success rate: %s%%. The success criteria to "
+                        "pass this test is %s%%. Marking the test as FAILED." %
+                        (success_rate, success_criteria))
 
+    # Push results in payload of testcase
+    if args.report:
         # add the test in error in the details sections
         # should be possible to do it during the test
+        logger.debug("Pushing tempest results into DB...")
         with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
             output = myfile.read()
         error_logs = ""
@@ -427,10 +445,6 @@ def run_tempest(OPTION):
                         "errors": error_logs}
         logger.info("Results: " + str(json_results))
         # split Tempest smoke and full
-        if "smoke" in args.mode:
-            case_name = "tempest_smoke_serial"
-        else:
-            case_name = "tempest_full_parallel"
 
         try:
             ft_utils.push_results_to_db("functest",
@@ -444,6 +458,11 @@ def run_tempest(OPTION):
             logger.error("Error pushing results into Database '%s'" %
                          sys.exc_info()[0])
 
+    if status == "PASS":
+        return 0
+    else:
+        return -1
+
 
 def main():
     global MODE
@@ -467,7 +486,11 @@ def main():
     if args.serial:
         MODE += " --concur 1"
 
-    run_tempest(MODE)
+    ret_val = run_tempest(MODE)
+    if ret_val != 0:
+        sys.exit(-1)
+
+    sys.exit(0)
 
 
 if __name__ == '__main__':
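
The net effect of the patch: run_tempest() now grades the run against a per-mode criterion (100% for the smoke suite, 80% for the full suite) and returns a verdict that main() converts into the process exit code, so a CI job can gate on the result; previously main() always exited 0 regardless of the Tempest outcome. The following is a minimal standalone sketch of that pattern, not functest code: evaluate_criteria() and the sample numbers are illustrative.

#!/usr/bin/env python
# Illustrative sketch only: mirrors the criteria/exit-code pattern from the
# patch above. evaluate_criteria() is a hypothetical helper, not functest API.
import sys


def evaluate_criteria(num_tests, num_failures, mode):
    """Map raw Tempest counts to (case_name, success_rate, status)."""
    try:
        diff = int(num_tests) - int(num_failures)
        success_rate = 100 * diff / int(num_tests)
    except (TypeError, ValueError, ZeroDivisionError):
        success_rate = 0

    # Same hardcoded thresholds as the patch: the smoke run must be clean
    # (100%), the full run tolerates known failures down to 80%.
    if "smoke" in mode:
        case_name, success_criteria = "tempest_smoke_serial", 100
    else:
        case_name, success_criteria = "tempest_full_parallel", 80

    status = "PASS" if success_rate >= success_criteria else "FAIL"
    return case_name, success_rate, status


if __name__ == '__main__':
    # Example: 95 of 100 full-run tests passed -> 95% >= 80% -> PASS.
    case_name, rate, status = evaluate_criteria(100, 5, "full")
    print("%s: %s%% (%s)" % (case_name, rate, status))
    # Propagate the verdict like the patched main(): nonzero on failure.
    sys.exit(0 if status == "PASS" else -1)

Returning 0/-1 from run_tempest() and calling sys.exit() only in main() keeps the grading logic testable while still surfacing the verdict to shell callers such as a Jenkins job.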