From b365cf4c1ce1bc988f136dddf2904cc6bd310c64 Mon Sep 17 00:00:00 2001
From: Morgan Richomme
Date: Thu, 25 Aug 2016 14:32:08 +0200
Subject: Add colorado reporting for tempest and vims

Change-Id: I978799cd1cd777b5033e55a0146c41865e9c2bf5
Signed-off-by: Morgan Richomme
---
 utils/test/reporting/functest/reporting-tempest.py | 204 +++++++++++----------
 utils/test/reporting/functest/reporting-vims.py    | 158 ++++++++--------
 utils/test/reporting/functest/reportingConf.py     |   2 +-
 .../functest/template/index-status-tmpl.html       |   2 +-
 .../functest/template/index-tempest-tmpl.html      |   2 +-
 .../functest/template/index-vims-tmpl.html         |   2 +-
 6 files changed, 190 insertions(+), 180 deletions(-)

diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index e3f4e3306..0dc1dd343 100755
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -24,104 +24,108 @@ logger.info("nb tests executed > %s s " % criteria_nb_test)
 logger.info("test duration < %s s " % criteria_duration)
 logger.info("success rate > %s " % criteria_success_rate)
-for installer in installers:
-    # we consider the Tempest results of the last PERIOD days
-    url = conf.URL_BASE + "?case=tempest_smoke_serial"
-    request = Request(url + '&period=' + str(PERIOD) +
-                      '&installer=' + installer + '&version=master')
-    logger.info("Search tempest_smoke_serial results for installer %s"
-                % installer)
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        logger.error("Error code: %s" % e)
-
-    test_results = results['results']
-
-    scenario_results = {}
-    criteria = {}
-    errors = {}
-
-    for r in test_results:
-        # Retrieve all the scenarios per installer
-        # In Brahmaputra use version
-        # Since Colorado use scenario
-        if not r['scenario'] in scenario_results.keys():
-            scenario_results[r['scenario']] = []
-        scenario_results[r['scenario']].append(r)
-
-    for s, s_result in scenario_results.items():
-        scenario_results[s] = s_result[0:5]
-        # For each scenario, we build a result object to deal with
-        # results, criteria and error handling
-        for result in scenario_results[s]:
-            result["start_date"] = result["start_date"].split(".")[0]
-
-            # retrieve results
-            # ****************
-            nb_tests_run = result['details']['tests']
-            nb_tests_failed = result['details']['failures']
-            if nb_tests_run != 0:
-                success_rate = 100*(int(nb_tests_run) -
-                                    int(nb_tests_failed)) / int(nb_tests_run)
-            else:
-                success_rate = 0
-
-            result['details']["tests"] = nb_tests_run
-            result['details']["Success rate"] = str(success_rate) + "%"
-
-            # Criteria management
-            # *******************
-            crit_tests = False
-            crit_rate = False
-            crit_time = False
-
-            # Expect that at least 165 tests are run
-            if nb_tests_run >= criteria_nb_test:
-                crit_tests = True
-
-            # Expect that at least 90% of success
-            if success_rate >= criteria_success_rate:
-                crit_rate = True
-
-            # Expect that the suite duration is inferior to 30m
-            if result['details']['duration'] < criteria_duration:
-                crit_time = True
-
-            result['criteria'] = {'tests': crit_tests,
-                                  'Success rate': crit_rate,
-                                  'duration': crit_time}
-            try:
-                logger.debug("Scenario %s, Installer %s"
-                             % (s_result[1]['scenario'], installer))
-                logger.debug("Nb Test run: %s" % nb_tests_run)
-                logger.debug("Test duration: %s"
-                             % result['details']['duration'])
-                logger.debug("Success rate: %s" % success_rate)
-            except:
-                logger.error("Data format error")
-
-            # Error management
-            # ****************
-            try:
-                errors = result['details']['errors']
-                result['errors'] = errors.replace('{0}', '')
-            except:
-                logger.error("Error field not present (Brahamputra runs?)")
-
-    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
-    templateEnv = jinja2.Environment(loader=templateLoader)
-
-    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
-    template = templateEnv.get_template(TEMPLATE_FILE)
-
-    outputText = template.render(scenario_results=scenario_results,
-                                 items=items,
-                                 installer=installer)
-
-    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
-              installer + ".html", "wb") as fh:
-        fh.write(outputText)
+# For all the versions
+for version in conf.versions:
+    for installer in conf.installers:
+        # we consider the Tempest results of the last PERIOD days
+        url = conf.URL_BASE + "?case=tempest_smoke_serial"
+        request = Request(url + '&period=' + str(PERIOD) +
+                          '&installer=' + installer +
+                          '&version=' + version)
+        logger.info("Search tempest_smoke_serial results for installer %s"
+                    " for version %s"
+                    % (installer, version))
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError, e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        scenario_results = {}
+        criteria = {}
+        errors = {}
+
+        for r in test_results:
+            # Retrieve all the scenarios per installer
+            # In Brahmaputra use version
+            # Since Colorado use scenario
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            # For each scenario, we build a result object to deal with
+            # results, criteria and error handling
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+
+                # retrieve results
+                # ****************
+                nb_tests_run = result['details']['tests']
+                nb_tests_failed = result['details']['failures']
+                if nb_tests_run != 0:
+                    success_rate = 100*(int(nb_tests_run) -
+                                        int(nb_tests_failed)) / int(nb_tests_run)
+                else:
+                    success_rate = 0
+
+                result['details']["tests"] = nb_tests_run
+                result['details']["Success rate"] = str(success_rate) + "%"
+
+                # Criteria management
+                # *******************
+                crit_tests = False
+                crit_rate = False
+                crit_time = False
+
+                # Expect that at least 165 tests are run
+                if nb_tests_run >= criteria_nb_test:
+                    crit_tests = True
+
+                # Expect that at least 90% of success
+                if success_rate >= criteria_success_rate:
+                    crit_rate = True
+
+                # Expect that the suite duration is inferior to 30m
+                if result['details']['duration'] < criteria_duration:
+                    crit_time = True
+
+                result['criteria'] = {'tests': crit_tests,
+                                      'Success rate': crit_rate,
+                                      'duration': crit_time}
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    logger.debug("Nb Test run: %s" % nb_tests_run)
+                    logger.debug("Test duration: %s"
+                                 % result['details']['duration'])
+                    logger.debug("Success rate: %s" % success_rate)
+                except:
+                    logger.error("Data format error")
+
+                # Error management
+                # ****************
+                try:
+                    errors = result['details']['errors']
+                    result['errors'] = errors.replace('{0}', '')
+                except:
+                    logger.error("Error field not present (Brahamputra runs?)")
+
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+        templateEnv = jinja2.Environment(loader=templateLoader)
+
+        TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     items=items,
+                                     installer=installer)
+
+        with open(conf.REPORTING_PATH + "/release/" + version +
+                  "/index-tempest-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
 logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index d0436ed14..a83d92f0a 100755
--- a/utils/test/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -33,81 +33,87 @@ logger.info("****************************************")
 installers = conf.installers
 step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
 logger.info("Start processing....")
-for installer in installers:
-    logger.info("Search vIMS results for installer %s" % installer)
-    request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        logger.error("Error code: %s" % e)
-
-    test_results = results['results']
-
-    logger.debug("Results found: %s" % test_results)
-
-    scenario_results = {}
-    for r in test_results:
-        if not r['scenario'] in scenario_results.keys():
-            scenario_results[r['scenario']] = []
-        scenario_results[r['scenario']].append(r)
-
-    for s, s_result in scenario_results.items():
-        scenario_results[s] = s_result[0:5]
-        logger.debug("Search for success criteria")
-        for result in scenario_results[s]:
-            result["start_date"] = result["start_date"].split(".")[0]
-            sig_test = result['details']['sig_test']['result']
-            if not sig_test == "" and isinstance(sig_test, list):
-                format_result = sig_test_format(sig_test)
-                if format_result['failures'] > format_result['passed']:
-                    result['details']['sig_test']['duration'] = 0
-                result['details']['sig_test']['result'] = format_result
-            nb_step_ok = 0
-            nb_step = len(result['details'])
-
-            for step_name, step_result in result['details'].items():
-                if step_result['duration'] != 0:
-                    nb_step_ok += 1
-                m, s = divmod(step_result['duration'], 60)
-                m_display = ""
-                if int(m) != 0:
-                    m_display += str(int(m)) + "m "
-                step_result['duration_display'] = m_display + str(int(s)) + "s"
-
-            result['pr_step_ok'] = 0
-            if nb_step != 0:
-                result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
-            try:
-                logger.debug("Scenario %s, Installer %s"
-                             % (s_result[1]['scenario'], installer))
-                logger.debug("Orchestrator deployment: %s s"
-                             % result['details']['orchestrator']['duration'])
-                logger.debug("vIMS deployment: %s s"
-                             % result['details']['vIMS']['duration'])
-                logger.debug("Signaling testing: %s s"
-                             % result['details']['sig_test']['duration'])
-                logger.debug("Signaling testing results: %s"
-                             % format_result)
-            except:
-                logger.error("Data badly formatted")
-            logger.debug("------------------------------------------------")
-
-    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
-    templateEnv = jinja2.Environment(loader=templateLoader)
-
-    TEMPLATE_FILE = "/template/index-vims-tmpl.html"
-    template = templateEnv.get_template(TEMPLATE_FILE)
-
-    outputText = template.render(scenario_results=scenario_results,
-                                 step_order=step_order,
-                                 installer=installer)
-
-    with open(conf.REPORTING_PATH +
-              "/release/master/index-vims-" +
-              installer + ".html", "wb") as fh:
-        fh.write(outputText)
+
+# For all the versions
+for version in conf.versions:
+    for installer in installers:
+        logger.info("Search vIMS results for installer: %s, version: %s"
+                    % (installer, version))
+        request = Request(conf.URL_BASE + '?case=vims&installer=' +
+                          installer + '&version=' + version)
+
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError, e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        logger.debug("Results found: %s" % test_results)
+
+        scenario_results = {}
+        for r in test_results:
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            logger.debug("Search for success criteria")
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+                sig_test = result['details']['sig_test']['result']
+                if not sig_test == "" and isinstance(sig_test, list):
+                    format_result = sig_test_format(sig_test)
+                    if format_result['failures'] > format_result['passed']:
+                        result['details']['sig_test']['duration'] = 0
+                    result['details']['sig_test']['result'] = format_result
+                nb_step_ok = 0
+                nb_step = len(result['details'])
+
+                for step_name, step_result in result['details'].items():
+                    if step_result['duration'] != 0:
+                        nb_step_ok += 1
+                    m, s = divmod(step_result['duration'], 60)
+                    m_display = ""
+                    if int(m) != 0:
+                        m_display += str(int(m)) + "m "
+
+                    step_result['duration_display'] = m_display + str(int(s)) + "s"
+
+                result['pr_step_ok'] = 0
+                if nb_step != 0:
+                    result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    logger.debug("Orchestrator deployment: %s s"
+                                 % result['details']['orchestrator']['duration'])
+                    logger.debug("vIMS deployment: %s s"
+                                 % result['details']['vIMS']['duration'])
+                    logger.debug("Signaling testing: %s s"
+                                 % result['details']['sig_test']['duration'])
+                    logger.debug("Signaling testing results: %s"
+                                 % format_result)
+                except:
+                    logger.error("Data badly formatted")
+                logger.debug("----------------------------------------")
+
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+        templateEnv = jinja2.Environment(loader=templateLoader)
+
+        TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     step_order=step_order,
+                                     installer=installer)
+
+        with open(conf.REPORTING_PATH +
+                  "/release/" + version + "/index-vims-" +
+                  installer + ".html", "wb") as fh:
+            fh.write(outputText)
 logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index c60ba5483..9230cb286 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -14,7 +14,7 @@ installers = ["apex", "compass", "fuel", "joid"]
 # taken into account for the scoring
 blacklist = ["ovno", "security_scan", 'odl-sfc']
 # versions = ["brahmaputra", "master"]
-versions = ["master"]
+versions = ["master", "colorado"]
 PERIOD = 50
 MAX_SCENARIO_CRITERIA = 50
 # get the last 5 test results to determinate the success criteria
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 78eae1f16..da2213bc0 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -21,7 +21,7 @@

Functest status page ({{version}})
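
The net effect of the patch, for readers skimming the diff: both reporting
scripts replace a single hard-coded query against version=master with a
nested loop over conf.versions and the installers, and each generated page
now lands under release/<version>/ instead of release/master/. Below is a
minimal standalone sketch of that control flow, not part of the patch
itself (Python 2, like the scripts above; it assumes it runs next to
reportingConf.py, and the scenario grouping and pass/fail criteria shown
in the diff are elided):

    # Sketch only; template context uses empty placeholders.
    import json
    import urllib2

    import jinja2

    import reportingConf as conf

    for version in conf.versions:          # ["master", "colorado"]
        for installer in conf.installers:  # ["apex", "compass", "fuel", "joid"]
            # one query per (installer, version), over the last PERIOD days
            url = (conf.URL_BASE + "?case=tempest_smoke_serial" +
                   "&period=" + str(conf.PERIOD) +
                   "&installer=" + installer +
                   "&version=" + version)
            results = json.loads(urllib2.urlopen(url).read())['results']
            # ...group results by scenario and apply criteria as in the diff...
            env = jinja2.Environment(
                loader=jinja2.FileSystemLoader(conf.REPORTING_PATH))
            page = env.get_template("/template/index-tempest-tmpl.html")
            html = page.render(scenario_results={}, items={},
                               installer=installer)
            # reports are now written once per version, under
            # release/<version>/ rather than release/master/
            out = (conf.REPORTING_PATH + "/release/" + version +
                   "/index-tempest-" + installer + ".html")
            with open(out, "wb") as fh:
                fh.write(html)

One operational consequence worth noting: open(..., "wb") does not create
directories, so a release/colorado/ tree has to exist wherever the reports
are published before these scripts run.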