Diffstat (limited to 'utils/test/reporting/functest')
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py              | 405
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html  |  30
-rw-r--r--  utils/test/reporting/functest/testCase.py                      |  59
3 files changed, 235 insertions, 259 deletions
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index af1d1d8a5..e700e047f 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -9,10 +9,8 @@ import datetime
 import jinja2
 import os
-import requests
 import sys
 import time
-import yaml
 
 import testCase as tc
 import scenarioResult as sr
 
@@ -43,9 +41,7 @@ log_level = rp_utils.get_config('general.log.log_level')
 exclude_noha = rp_utils.get_config('functest.exclude_noha')
 exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
 
-response = requests.get(cf)
-
-functest_yaml_config = yaml.safe_load(response.text)
+functest_yaml_config = rp_utils.getFunctestConfig()
 
 logger.info("*******************************************")
 logger.info("*                                         *")
@@ -69,128 +65,116 @@ config_tiers = functest_yaml_config.get("tiers")
 for tier in config_tiers:
     if tier['order'] >= 0 and tier['order'] < 2:
         for case in tier['testcases']:
-            if case['name'] not in blacklist:
-                testValid.append(tc.TestCase(case['name'],
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
                                              "functest",
                                              case['dependencies']))
     elif tier['order'] == 2:
         for case in tier['testcases']:
-            if case['name'] not in blacklist:
-                testValid.append(tc.TestCase(case['name'],
-                                             case['name'],
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
+                                             case['case_name'],
                                              case['dependencies']))
     elif tier['order'] > 2:
         for case in tier['testcases']:
-            if case['name'] not in blacklist:
-                otherTestCases.append(tc.TestCase(case['name'],
+            if case['case_name'] not in blacklist:
+                otherTestCases.append(tc.TestCase(case['case_name'],
                                                   "functest",
                                                   case['dependencies']))
 
 logger.debug("Functest reporting start")
+
 # For all the versions
 for version in versions:
     # For all the installers
+    scenario_directory = "./display/" + version + "/functest/"
+    scenario_file_name = scenario_directory + "scenario_history.txt"
+
+    # check that the directory exists, if not create it
+    # (first run on new version)
+    if not os.path.exists(scenario_directory):
+        os.makedirs(scenario_directory)
+
+    # initiate scenario file if it does not exist
+    if not os.path.isfile(scenario_file_name):
+        with open(scenario_file_name, "a") as my_file:
+            logger.debug("Create scenario file: %s" % scenario_file_name)
+            my_file.write("date,scenario,installer,detail,score\n")
+
     for installer in installers:
+
+        # get scenarios
         scenario_results = rp_utils.getScenarios(healthcheck,
                                                  installer,
                                                  version)
-        scenario_stats = rp_utils.getScenarioStats(scenario_results)
-        items = {}
-        scenario_result_criteria = {}
-        scenario_directory = "./display/" + version + "/functest/"
-        scenario_file_name = scenario_directory + "scenario_history.txt"
-
-        # check that the directory exists, if not create it
-        # (first run on new version)
-        if not os.path.exists(scenario_directory):
-            os.makedirs(scenario_directory)
-
-        # initiate scenario file if it does not exist
-        if not os.path.isfile(scenario_file_name):
-            with open(scenario_file_name, "a") as my_file:
-                logger.debug("Create scenario file: %s" % scenario_file_name)
-                my_file.write("date,scenario,installer,detail,score\n")
-
-        # For all the scenarios get results
-        for s, s_result in scenario_results.items():
-            logger.info("---------------------------------")
-            logger.info("installer %s, version %s, scenario %s:" %
-                        (installer, version, s))
-            logger.debug("Scenario results: %s" % s_result)
-
-            # Green or Red light for a given scenario
-            nb_test_runnable_for_this_scenario = 0
-            scenario_score = 0
-            # url of the last jenkins log corresponding to a given
-            # scenario
-            s_url = ""
-            if len(s_result) > 0:
-                build_tag = s_result[len(s_result)-1]['build_tag']
-                logger.debug("Build tag: %s" % build_tag)
-                s_url = rp_utils.getJenkinsUrl(build_tag)
-                if s_url is None:
-                    s_url = "http://testresultS.opnfv.org/reporting"
-                logger.info("last jenkins url: %s" % s_url)
-            testCases2BeDisplayed = []
-            # Check if test case is runnable / installer, scenario
-            # for the test case used for Scenario validation
-            try:
-                # 1) Manage the test cases for the scenario validation
-                # concretely Tiers 0-3
-                for test_case in testValid:
-                    test_case.checkRunnable(installer, s,
-                                            test_case.getConstraints())
-                    logger.debug("testcase %s (%s) is %s" %
-                                 (test_case.getDisplayName(),
-                                  test_case.getName(),
-                                  test_case.isRunnable))
-                    time.sleep(1)
-                    if test_case.isRunnable:
-                        dbName = test_case.getDbName()
-                        name = test_case.getName()
-                        displayName = test_case.getDisplayName()
-                        project = test_case.getProject()
-                        nb_test_runnable_for_this_scenario += 1
-                        logger.info(" Searching results for case %s " %
-                                    (displayName))
-                        result = rp_utils.getResult(dbName, installer,
-                                                    s, version)
-                        # if no result set the value to 0
-                        if result < 0:
-                            result = 0
-                        logger.info(" >>>> Test score = " + str(result))
-                        test_case.setCriteria(result)
-                        test_case.setIsRunnable(True)
-                        testCases2BeDisplayed.append(tc.TestCase(name,
-                                                                 project,
-                                                                 "",
-                                                                 result,
-                                                                 True,
-                                                                 1))
-                        scenario_score = scenario_score + result
-
-                # 2) Manage the test cases for the scenario qualification
-                # concretely Tiers > 3
-                for test_case in otherTestCases:
-                    test_case.checkRunnable(installer, s,
-                                            test_case.getConstraints())
-                    logger.debug("testcase %s (%s) is %s" %
-                                 (test_case.getDisplayName(),
-                                  test_case.getName(),
-                                  test_case.isRunnable))
-                    time.sleep(1)
-                    if test_case.isRunnable:
-                        dbName = test_case.getDbName()
-                        name = test_case.getName()
-                        displayName = test_case.getDisplayName()
-                        project = test_case.getProject()
-                        logger.info(" Searching results for case %s " %
-                                    (displayName))
-                        result = rp_utils.getResult(dbName, installer,
-                                                    s, version)
-                        # at least 1 result for the test
-                        if result > -1:
+
+        # get nb of supported architecture (x86, aarch64)
+        architectures = rp_utils.getArchitectures(scenario_results)
+        logger.info("Supported architectures: {}".format(architectures))
+
+        for architecture in architectures:
+            logger.info("architecture: {}".format(architecture))
+            # Consider only the results for the selected architecture
+            # i.e drop x86 for aarch64 and vice versa
+            filter_results = rp_utils.filterArchitecture(scenario_results,
+                                                         architecture)
+            scenario_stats = rp_utils.getScenarioStats(filter_results)
+            items = {}
+            scenario_result_criteria = {}
+
+            # in case of more than 1 architecture supported
+            # precise the architecture
+            installer_display = installer
+            if (len(architectures) > 1):
+                installer_display = installer + "@" + architecture
+
+            # For all the scenarios get results
+            for s, s_result in filter_results.items():
+                logger.info("---------------------------------")
+                logger.info("installer %s, version %s, scenario %s:" %
+                            (installer, version, s))
+                logger.debug("Scenario results: %s" % s_result)
+
+                # Green or Red light for a given scenario
+                nb_test_runnable_for_this_scenario = 0
+                scenario_score = 0
+                # url of the last jenkins log corresponding to a given
+                # scenario
+                s_url = ""
+                if len(s_result) > 0:
+                    build_tag = s_result[len(s_result)-1]['build_tag']
+                    logger.debug("Build tag: %s" % build_tag)
+                    s_url = rp_utils.getJenkinsUrl(build_tag)
+                    if s_url is None:
+                        s_url = "http://testresultS.opnfv.org/reporting"
+                    logger.info("last jenkins url: %s" % s_url)
+                testCases2BeDisplayed = []
+                # Check if test case is runnable / installer, scenario
+                # for the test case used for Scenario validation
+                try:
+                    # 1) Manage the test cases for the scenario validation
+                    # concretely Tiers 0-3
+                    for test_case in testValid:
+                        test_case.checkRunnable(installer, s,
+                                                test_case.getConstraints())
+                        logger.debug("testcase %s (%s) is %s" %
+                                     (test_case.getDisplayName(),
+                                      test_case.getName(),
+                                      test_case.isRunnable))
+                        time.sleep(1)
+                        if test_case.isRunnable:
+                            name = test_case.getName()
+                            displayName = test_case.getDisplayName()
+                            project = test_case.getProject()
+                            nb_test_runnable_for_this_scenario += 1
+                            logger.info(" Searching results for case %s " %
+                                        (displayName))
+                            result = rp_utils.getResult(name, installer,
+                                                        s, version)
+                            # if no result set the value to 0
+                            if result < 0:
+                                result = 0
+                            logger.info(" >>>> Test score = " + str(result))
                             test_case.setCriteria(result)
                             test_case.setIsRunnable(True)
                             testCases2BeDisplayed.append(tc.TestCase(name,
@@ -198,91 +182,126 @@ for version in versions:
                                                                      "",
                                                                      result,
                                                                      True,
-                                                                 4))
-                        else:
-                            logger.debug("No results found")
-
-                items[s] = testCases2BeDisplayed
-            except:
-                logger.error("Error: installer %s, version %s, scenario %s" %
-                             (installer, version, s))
-                logger.error("No data available: %s " % (sys.exc_info()[0]))
-
-            # **********************************************
-            # Evaluate the results for scenario validation
-            # **********************************************
-            # the validation criteria = nb runnable tests x 3
-            # because each test case = 0,1,2 or 3
-            scenario_criteria = nb_test_runnable_for_this_scenario * 3
-            # if 0 runnable tests set criteria at a high value
-            if scenario_criteria < 1:
-                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
-
-            s_score = str(scenario_score) + "/" + str(scenario_criteria)
-            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
-                                                          scenario_criteria)
-
-            s_status = "KO"
-            if scenario_score < scenario_criteria:
-                logger.info(">>>> scenario not OK, score = %s/%s" %
-                            (scenario_score, scenario_criteria))
+                                                                     1))
+                            scenario_score = scenario_score + result
+
+                    # 2) Manage the test cases for the scenario qualification
+                    # concretely Tiers > 3
+                    for test_case in otherTestCases:
+                        test_case.checkRunnable(installer, s,
+                                                test_case.getConstraints())
+                        logger.debug("testcase %s (%s) is %s" %
+                                     (test_case.getDisplayName(),
+                                      test_case.getName(),
+                                      test_case.isRunnable))
+                        time.sleep(1)
+                        if test_case.isRunnable:
+                            name = test_case.getName()
+                            displayName = test_case.getDisplayName()
+                            project = test_case.getProject()
+                            logger.info(" Searching results for case %s " %
+                                        (displayName))
+                            result = rp_utils.getResult(name, installer,
+                                                        s, version)
+                            # at least 1 result for the test
+                            if result > -1:
+                                test_case.setCriteria(result)
+                                test_case.setIsRunnable(True)
+                                testCases2BeDisplayed.append(tc.TestCase(
+                                    name,
+                                    project,
+                                    "",
+                                    result,
+                                    True,
+                                    4))
+                            else:
+                                logger.debug("No results found")
+
+                    items[s] = testCases2BeDisplayed
+                except:
+                    logger.error("Error: installer %s, version %s, scenario %s"
+                                 % (installer, version, s))
+                    logger.error("No data available: %s" % (sys.exc_info()[0]))
+
+                # **********************************************
+                # Evaluate the results for scenario validation
+                # **********************************************
+                # the validation criteria = nb runnable tests x 3
+                # because each test case = 0,1,2 or 3
+                scenario_criteria = nb_test_runnable_for_this_scenario * 3
+                # if 0 runnable tests set criteria at a high value
+                if scenario_criteria < 1:
+                    scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
+
+                s_score = str(scenario_score) + "/" + str(scenario_criteria)
+                s_score_percent = rp_utils.getScenarioPercent(
+                    scenario_score,
+                    scenario_criteria)
+
                 s_status = "KO"
-            else:
-                logger.info(">>>>> scenario OK, save the information")
-                s_status = "OK"
-                path_validation_file = ("./display/" + version +
-                                        "/functest/" +
-                                        "validated_scenario_history.txt")
-                with open(path_validation_file, "a") as f:
-                    time_format = "%Y-%m-%d %H:%M"
-                    info = (datetime.datetime.now().strftime(time_format) +
-                            ";" + installer + ";" + s + "\n")
+                if scenario_score < scenario_criteria:
+                    logger.info(">>>> scenario not OK, score = %s/%s" %
+                                (scenario_score, scenario_criteria))
+                    s_status = "KO"
+                else:
+                    logger.info(">>>>> scenario OK, save the information")
+                    s_status = "OK"
+                    path_validation_file = ("./display/" + version +
+                                            "/functest/" +
+                                            "validated_scenario_history.txt")
+                    with open(path_validation_file, "a") as f:
+                        time_format = "%Y-%m-%d %H:%M"
+                        info = (datetime.datetime.now().strftime(time_format) +
+                                ";" + installer_display + ";" + s + "\n")
+                        f.write(info)
+
+                # Save daily results in a file
+                with open(scenario_file_name, "a") as f:
+                    info = (reportingDate + "," + s + "," + installer_display +
+                            "," + s_score + "," +
+                            str(round(s_score_percent)) + "\n")
                     f.write(info)
 
-            # Save daily results in a file
-            with open(scenario_file_name, "a") as f:
-                info = (reportingDate + "," + s + "," + installer +
-                        "," + s_score + "," +
-                        str(round(s_score_percent)) + "\n")
-                f.write(info)
-
-            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
-                                                            s_score,
-                                                            s_score_percent,
-                                                            s_url)
-            logger.info("--------------------------")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(
-            loader=templateLoader, autoescape=True)
-
-        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_stats=scenario_stats,
-                                     scenario_results=scenario_result_criteria,
-                                     items=items,
-                                     installer=installer,
-                                     period=period,
-                                     version=version,
-                                     date=reportingDate)
-
-        with open("./display/" + version +
-                  "/functest/status-" + installer + ".html", "wb") as fh:
-            fh.write(outputText)
-
-        logger.info("Manage export CSV & PDF")
-        rp_utils.export_csv(scenario_file_name, installer, version)
-        logger.error("CSV generated...")
-
-        # Generate outputs for export
-        # pdf
-        # TODO Change once web site updated...use the current one
-        # to test pdf production
-        url_pdf = rp_utils.get_config('general.url')
-        pdf_path = ("./display/" + version +
-                    "/functest/status-" + installer + ".html")
-        pdf_doc_name = ("./display/" + version +
-                        "/functest/status-" + installer + ".pdf")
-        rp_utils.export_pdf(pdf_path, pdf_doc_name)
-        logger.info("PDF generated...")
+                scenario_result_criteria[s] = sr.ScenarioResult(
+                    s_status,
+                    s_score,
+                    s_score_percent,
+                    s_url)
+                logger.info("--------------------------")
+
+            templateLoader = jinja2.FileSystemLoader(".")
+            templateEnv = jinja2.Environment(
+                loader=templateLoader, autoescape=True)
+
+            TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
+            template = templateEnv.get_template(TEMPLATE_FILE)
+
+            outputText = template.render(
+                scenario_stats=scenario_stats,
+                scenario_results=scenario_result_criteria,
+                items=items,
+                installer=installer_display,
+                period=period,
+                version=version,
+                date=reportingDate)
+
+            with open("./display/" + version +
+                      "/functest/status-" +
+                      installer_display + ".html", "wb") as fh:
+                fh.write(outputText)
+
+            logger.info("Manage export CSV & PDF")
+            rp_utils.export_csv(scenario_file_name, installer_display, version)
+            logger.error("CSV generated...")
+
+            # Generate outputs for export
+            # pdf
+            # TODO Change once web site updated...use the current one
+            # to test pdf production
+            url_pdf = rp_utils.get_config('general.url')
+            pdf_path = ("./display/" + version +
+                        "/functest/status-" + installer_display + ".html")
+            pdf_doc_name = ("./display/" + version +
+                            "/functest/status-" + installer_display + ".pdf")
+            rp_utils.export_pdf(pdf_path, pdf_doc_name)
+            logger.info("PDF generated...")
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 52046c37f..cc4edaac5 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -15,27 +15,27 @@
 {% for scenario in scenario_stats.iteritems() -%}
     var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
 {%- endfor %}
-
+
     // assign success rate to the gauge
     function updateReadings() {
 {% for scenario,iteration in scenario_stats.iteritems() -%}
         gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
 {%- endfor %}
     }
-    updateReadings();
+    updateReadings();
 }
-
+
 // trend line management
-d3.csv("./scenario_history.csv", function(data) {
+d3.csv("./scenario_history.txt", function(data) {
     // ***************************************
     // Create the trend line
 {% for scenario,iteration in scenario_stats.iteritems() -%}
-    // for scenario {{scenario}}
+    // for scenario {{scenario}}
     // Filter results
-    var trend{{loop.index}} = data.filter(function(row) {
+    var trend{{loop.index}} = data.filter(function(row) {
         return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
     })
-    // Parse the date
+    // Parse the date
     trend{{loop.index}}.forEach(function(d) {
         d.date = parseDate(d.date);
         d.score = +d.score
@@ -44,7 +44,7 @@
         var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
     // ****************************************
 {%- endfor %}
-    });
+    });
     if ( !window.isLoaded ) {
         window.addEventListener("load", function() {
             onDocumentReady();
@@ -61,7 +61,7 @@ $(document).ready(function (){
         });
     })
 </script>
-
+
 </head>
 <body>
     <div class="container">
@@ -72,8 +72,8 @@ $(document).ready(function (){
                     <li class="active"><a href="../../index.html">Home</a></li>
                     <li><a href="status-apex.html">Apex</a></li>
                     <li><a href="status-compass.html">Compass</a></li>
-                    <li><a href="status-daisy.html">Daisy</a></li>
-                    <li><a href="status-fuel.html">Fuel</a></li>
+                    <li><a href="status-fuel@x86.html">fuel@x86</a></li>
+                    <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
                    <li><a href="status-joid.html">Joid</a></li>
                 </ul>
             </nav>
@@ -134,13 +134,13 @@ $(document).ready(function (){
                         <tr class="tr-weather-weather">
                         {% for test in items[scenario] -%}
                             {% if test.getCriteria() > 2 -%}
-                            <td><img src="../../../img/weather-clear.png"></td>
+                            <td><img src="../../img/weather-clear.png"></td>
                             {%- elif test.getCriteria() > 1 -%}
-                            <td><img src="../../../img/weather-few-clouds.png"></td>
+                            <td><img src="../../img/weather-few-clouds.png"></td>
                             {%- elif test.getCriteria() > 0 -%}
-                            <td><img src="../../../img/weather-overcast.png"></td>
+                            <td><img src="../../img/weather-overcast.png"></td>
                             {%- elif test.getCriteria() > -1 -%}
-                            <td><img src="../../../img/weather-storm.png"></td>
+                            <td><img src="../../img/weather-storm.png"></td>
                             {%- endif %}
                             {%- endfor %}
                         </tr>
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index c89e619c0..9834f0753 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -33,27 +33,29 @@ class TestCase(object):
                               'bgpvpn': 'bgpvpn',
                               'rally_full': 'Rally (full)',
                               'vims': 'vIMS',
-                              'doctor': 'Doctor',
+                              'doctor-notification': 'Doctor',
                               'promise': 'Promise',
                               'moon': 'Moon',
                               'copper': 'Copper',
                               'security_scan': 'Security',
                               'multisite': 'Multisite',
-                              'domino': 'Domino',
-                              'odl-sfc': 'SFC',
+                              'domino-multinode': 'Domino',
+                              'functest-odl-sfc': 'SFC',
                               'onos_sfc': 'SFC',
-                              'parser': 'Parser',
+                              'parser-basics': 'Parser',
                               'connection_check': 'Health (connection)',
                               'api_check': 'Health (api)',
                               'snaps_smoke': 'SNAPS',
                               'snaps_health_check': 'Health (dhcp)',
-                              'netready': 'Netready',
+                              'gluon_vping': 'Netready',
                               'fds': 'FDS',
                               'cloudify_ims': 'vIMS (Cloudify)',
                               'orchestra_ims': 'OpenIMS (OpenBaton)',
                               'opera_ims': 'vIMS (Open-O)',
                               'vyos_vrouter': 'vyos',
-                              'barometer': 'Barometer'}
+                              'barometercollectd': 'Barometer',
+                              'odl_netvirt': 'Netvirt',
+                              'security_scan': 'Security'}
         try:
             self.displayName = display_name_matrix[self.name]
         except:
@@ -119,50 +121,5 @@ class TestCase(object):
                     ";IsRunnable" + str(self.isRunnable))
         return testcase
 
-    def getDbName(self):
-        # Correspondance name of the test case / name in the DB
-        # ideally we should modify the DB to avoid such interface....
-        # '<name in the config>':'<name in the DB>'
-        # I know it is uggly...
-        test_match_matrix = {'healthcheck': 'healthcheck',
-                             'vping_ssh': 'vping_ssh',
-                             'vping_userdata': 'vping_userdata',
-                             'odl': 'odl',
-                             'onos': 'onos',
-                             'ocl': 'ocl',
-                             'tempest_smoke_serial': 'tempest_smoke_serial',
-                             'tempest_full_parallel': 'tempest_full_parallel',
-                             'tempest_defcore': 'tempest_defcore',
-                             'refstack_defcore': 'refstack_defcore',
-                             'rally_sanity': 'rally_sanity',
-                             'bgpvpn': 'bgpvpn',
-                             'rally_full': 'rally_full',
-                             'vims': 'vims',
-                             'doctor': 'doctor-notification',
-                             'promise': 'promise',
-                             'moon': 'moon_authentication',
-                             'copper': 'copper-notification',
-                             'security_scan': 'security',
-                             'multisite': 'multisite',
-                             'domino': 'domino-multinode',
-                             'odl-sfc': 'functest-odl-sfc',
-                             'onos_sfc': 'onos_sfc',
-                             'parser': 'parser-basics',
-                             'connection_check': 'connection_check',
-                             'api_check': 'api_check',
-                             'snaps_smoke': 'snaps_smoke',
-                             'snaps_health_check': 'snaps_health_check',
-                             'netready': 'gluon_vping',
-                             'fds': 'fds',
-                             'cloudify_ims': 'cloudify_ims',
-                             'orchestra_ims': 'orchestra_ims',
-                             'opera_ims': 'opera_ims',
-                             'vyos_vrouter': 'vyos_vrouter',
-                             'barometer': 'barometercollectd'}
-        try:
-            return test_match_matrix[self.name]
-        except:
-            return "unknown"
-
     def getDisplayName(self):
         return self.displayName