diff options
author | Morgan Richomme <morgan.richomme@orange.com> | 2016-08-25 14:32:08 +0200 |
---|---|---|
committer | Morgan Richomme <morgan.richomme@orange.com> | 2016-08-25 15:07:38 +0200 |
commit | b365cf4c1ce1bc988f136dddf2904cc6bd310c64 (patch) | |
tree | 0bb6da943dddfea21a405e8296ea8347dc497c0f /utils/test/reporting/functest/reporting-tempest.py | |
parent | 0b66e0d6ba10996f124926d3506b78eaa9bcd8b8 (diff) |
Add colorado reporting for tempest and vims
Change-Id: I978799cd1cd777b5033e55a0146c41865e9c2bf5
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
Diffstat (limited to 'utils/test/reporting/functest/reporting-tempest.py')
-rwxr-xr-x | utils/test/reporting/functest/reporting-tempest.py | 204 |
1 file changed, 104 insertions, 100 deletions
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py index e3f4e3306..0dc1dd343 100755 --- a/utils/test/reporting/functest/reporting-tempest.py +++ b/utils/test/reporting/functest/reporting-tempest.py @@ -24,104 +24,108 @@ logger.info("nb tests executed > %s s " % criteria_nb_test) logger.info("test duration < %s s " % criteria_duration) logger.info("success rate > %s " % criteria_success_rate) -for installer in installers: - # we consider the Tempest results of the last PERIOD days - url = conf.URL_BASE + "?case=tempest_smoke_serial" - request = Request(url + '&period=' + str(PERIOD) + - '&installer=' + installer + '&version=master') - logger.info("Search tempest_smoke_serial results for installer %s" - % installer) - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - logger.error("Error code: %s" % e) - - test_results = results['results'] - - scenario_results = {} - criteria = {} - errors = {} - - for r in test_results: - # Retrieve all the scenarios per installer - # In Brahmaputra use version - # Since Colorado use scenario - if not r['scenario'] in scenario_results.keys(): - scenario_results[r['scenario']] = [] - scenario_results[r['scenario']].append(r) - - for s, s_result in scenario_results.items(): - scenario_results[s] = s_result[0:5] - # For each scenario, we build a result object to deal with - # results, criteria and error handling - for result in scenario_results[s]: - result["start_date"] = result["start_date"].split(".")[0] - - # retrieve results - # **************** - nb_tests_run = result['details']['tests'] - nb_tests_failed = result['details']['failures'] - if nb_tests_run != 0: - success_rate = 100*(int(nb_tests_run) - - int(nb_tests_failed)) / int(nb_tests_run) - else: - success_rate = 0 - - result['details']["tests"] = nb_tests_run - result['details']["Success rate"] = str(success_rate) + "%" - - # Criteria management - # 
******************* - crit_tests = False - crit_rate = False - crit_time = False - - # Expect that at least 165 tests are run - if nb_tests_run >= criteria_nb_test: - crit_tests = True - - # Expect that at least 90% of success - if success_rate >= criteria_success_rate: - crit_rate = True - - # Expect that the suite duration is inferior to 30m - if result['details']['duration'] < criteria_duration: - crit_time = True - - result['criteria'] = {'tests': crit_tests, - 'Success rate': crit_rate, - 'duration': crit_time} - try: - logger.debug("Scenario %s, Installer %s" - % (s_result[1]['scenario'], installer)) - logger.debug("Nb Test run: %s" % nb_tests_run) - logger.debug("Test duration: %s" - % result['details']['duration']) - logger.debug("Success rate: %s" % success_rate) - except: - logger.error("Data format error") - - # Error management - # **************** - try: - errors = result['details']['errors'] - result['errors'] = errors.replace('{0}', '') - except: - logger.error("Error field not present (Brahamputra runs?)") - - templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH) - templateEnv = jinja2.Environment(loader=templateLoader) - - TEMPLATE_FILE = "/template/index-tempest-tmpl.html" - template = templateEnv.get_template(TEMPLATE_FILE) - - outputText = template.render(scenario_results=scenario_results, - items=items, - installer=installer) - - with open(conf.REPORTING_PATH + "/release/master/index-tempest-" + - installer + ".html", "wb") as fh: - fh.write(outputText) +# For all the versions +for version in conf.versions: + for installer in conf.installers: + # we consider the Tempest results of the last PERIOD days + url = conf.URL_BASE + "?case=tempest_smoke_serial" + request = Request(url + '&period=' + str(PERIOD) + + '&installer=' + installer + + '&version=' + version) + logger.info("Search tempest_smoke_serial results for installer %s" + " for version %s" + % (installer, version)) + try: + response = urlopen(request) + k = response.read() + 
results = json.loads(k) + except URLError, e: + logger.error("Error code: %s" % e) + + test_results = results['results'] + + scenario_results = {} + criteria = {} + errors = {} + + for r in test_results: + # Retrieve all the scenarios per installer + # In Brahmaputra use version + # Since Colorado use scenario + if not r['scenario'] in scenario_results.keys(): + scenario_results[r['scenario']] = [] + scenario_results[r['scenario']].append(r) + + for s, s_result in scenario_results.items(): + scenario_results[s] = s_result[0:5] + # For each scenario, we build a result object to deal with + # results, criteria and error handling + for result in scenario_results[s]: + result["start_date"] = result["start_date"].split(".")[0] + + # retrieve results + # **************** + nb_tests_run = result['details']['tests'] + nb_tests_failed = result['details']['failures'] + if nb_tests_run != 0: + success_rate = 100*(int(nb_tests_run) - + int(nb_tests_failed)) / int(nb_tests_run) + else: + success_rate = 0 + + result['details']["tests"] = nb_tests_run + result['details']["Success rate"] = str(success_rate) + "%" + + # Criteria management + # ******************* + crit_tests = False + crit_rate = False + crit_time = False + + # Expect that at least 165 tests are run + if nb_tests_run >= criteria_nb_test: + crit_tests = True + + # Expect that at least 90% of success + if success_rate >= criteria_success_rate: + crit_rate = True + + # Expect that the suite duration is inferior to 30m + if result['details']['duration'] < criteria_duration: + crit_time = True + + result['criteria'] = {'tests': crit_tests, + 'Success rate': crit_rate, + 'duration': crit_time} + try: + logger.debug("Scenario %s, Installer %s" + % (s_result[1]['scenario'], installer)) + logger.debug("Nb Test run: %s" % nb_tests_run) + logger.debug("Test duration: %s" + % result['details']['duration']) + logger.debug("Success rate: %s" % success_rate) + except: + logger.error("Data format error") + + # Error management + 
# **************** + try: + errors = result['details']['errors'] + result['errors'] = errors.replace('{0}', '') + except: + logger.error("Error field not present (Brahamputra runs?)") + + templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH) + templateEnv = jinja2.Environment(loader=templateLoader) + + TEMPLATE_FILE = "/template/index-tempest-tmpl.html" + template = templateEnv.get_template(TEMPLATE_FILE) + + outputText = template.render(scenario_results=scenario_results, + items=items, + installer=installer) + + with open(conf.REPORTING_PATH + "/release/" + version + + "/index-tempest-" + installer + ".html", "wb") as fh: + fh.write(outputText) logger.info("Tempest automatic reporting succesfully generated.") |