From 42c0c75a9e3564758aaaccadec9e1bad42b283eb Mon Sep 17 00:00:00 2001 From: Morgan Richomme Date: Tue, 26 Apr 2016 14:24:56 +0200 Subject: Functest reporting refactoring integrate the notion of version (brahmaputra, master, ..) change dir structure Change-Id: Ieb8aed811ed4e1ab8738fb02e4db411da3d07ca2 Signed-off-by: Morgan Richomme --- utils/test/reporting/default.css | 56 ---- utils/test/reporting/functest/default.css | 56 ++++ .../test/reporting/functest/img/weather-clear.png | Bin 0 -> 1560 bytes .../reporting/functest/img/weather-few-clouds.png | Bin 0 -> 1927 bytes .../reporting/functest/img/weather-overcast.png | Bin 0 -> 1588 bytes .../test/reporting/functest/img/weather-storm.png | Bin 0 -> 2137 bytes utils/test/reporting/functest/index.html | 52 ++++ utils/test/reporting/functest/reporting-status.py | 307 +++++++++++++++++++++ utils/test/reporting/functest/reporting-tempest.py | 99 +++++++ utils/test/reporting/functest/reporting-vims.py | 83 ++++++ .../functest/template/index-status-tmpl.html | 94 +++++++ .../functest/template/index-tempest-tmpl.html | 90 ++++++ .../functest/template/index-vims-tmpl.html | 91 ++++++ utils/test/reporting/img/weather-clear.png | Bin 1560 -> 0 bytes utils/test/reporting/img/weather-few-clouds.png | Bin 1927 -> 0 bytes utils/test/reporting/img/weather-overcast.png | Bin 1588 -> 0 bytes utils/test/reporting/img/weather-storm.png | Bin 2137 -> 0 bytes utils/test/reporting/index-status-tmpl.html | 94 ------- utils/test/reporting/index-tempest-tmpl.html | 90 ------ utils/test/reporting/index-vims-tmpl.html | 91 ------ utils/test/reporting/index.html | 52 ---- utils/test/reporting/reporting-status.py | 306 -------------------- utils/test/reporting/reporting-tempest.py | 99 ------- utils/test/reporting/reporting-vims.py | 83 ------ 24 files changed, 872 insertions(+), 871 deletions(-) delete mode 100644 utils/test/reporting/default.css create mode 100644 utils/test/reporting/functest/default.css create mode 100644 utils/test/reporting/functest/img/weather-clear.png create mode 100644 utils/test/reporting/functest/img/weather-few-clouds.png create mode 100644 utils/test/reporting/functest/img/weather-overcast.png create mode 100644 utils/test/reporting/functest/img/weather-storm.png create mode 100644 utils/test/reporting/functest/index.html create mode 100644 utils/test/reporting/functest/reporting-status.py create mode 100644 utils/test/reporting/functest/reporting-tempest.py create mode 100644 utils/test/reporting/functest/reporting-vims.py create mode 100644 utils/test/reporting/functest/template/index-status-tmpl.html create mode 100644 utils/test/reporting/functest/template/index-tempest-tmpl.html create mode 100644 utils/test/reporting/functest/template/index-vims-tmpl.html delete mode 100644 utils/test/reporting/img/weather-clear.png delete mode 100644 utils/test/reporting/img/weather-few-clouds.png delete mode 100644 utils/test/reporting/img/weather-overcast.png delete mode 100644 utils/test/reporting/img/weather-storm.png delete mode 100644 utils/test/reporting/index-status-tmpl.html delete mode 100644 utils/test/reporting/index-tempest-tmpl.html delete mode 100644 utils/test/reporting/index-vims-tmpl.html delete mode 100644 utils/test/reporting/index.html delete mode 100644 utils/test/reporting/reporting-status.py delete mode 100644 utils/test/reporting/reporting-tempest.py delete mode 100644 utils/test/reporting/reporting-vims.py (limited to 'utils/test/reporting') diff --git a/utils/test/reporting/default.css b/utils/test/reporting/default.css 
deleted file mode 100644 index 0e330e965..000000000 --- a/utils/test/reporting/default.css +++ /dev/null @@ -1,56 +0,0 @@ -.panel-header-item { - position: relative; - display: inline-block; - padding-left: 17px; - padding-right: 17px; -} - -.panel-pod-name { - margin-top: 10px; - margin-right: 27px; - float:right; - padding: 6px; -} - -.panel-default > .panel-heading .badge { - background-color: #007e88; - position: relative; - display: inline-block; -} - -.panel-default > .panel-heading .progress-bar { - height: 100%; - position: absolute; - left: 0; - top: 0; - width: 100%; - background-color: #0095a2 -} -.panel-default > .panel-heading h4 { - color: white; -} - -.panel-default > .panel-heading { - background-color: #00ADBB; - overflow: hidden; - position: relative; - width: 100%; -} - -th{ - text-align: center; -} - -td{ - text-align: center; -} - -.tr-danger { - background-color: #177870; - color: white; -} - -.btn-more { - color: white; - background-color: #0095a2; -} \ No newline at end of file diff --git a/utils/test/reporting/functest/default.css b/utils/test/reporting/functest/default.css new file mode 100644 index 000000000..0e330e965 --- /dev/null +++ b/utils/test/reporting/functest/default.css @@ -0,0 +1,56 @@ +.panel-header-item { + position: relative; + display: inline-block; + padding-left: 17px; + padding-right: 17px; +} + +.panel-pod-name { + margin-top: 10px; + margin-right: 27px; + float:right; + padding: 6px; +} + +.panel-default > .panel-heading .badge { + background-color: #007e88; + position: relative; + display: inline-block; +} + +.panel-default > .panel-heading .progress-bar { + height: 100%; + position: absolute; + left: 0; + top: 0; + width: 100%; + background-color: #0095a2 +} +.panel-default > .panel-heading h4 { + color: white; +} + +.panel-default > .panel-heading { + background-color: #00ADBB; + overflow: hidden; + position: relative; + width: 100%; +} + +th{ + text-align: center; +} + +td{ + text-align: center; +} + +.tr-danger { + background-color: #177870; + color: white; +} + +.btn-more { + color: white; + background-color: #0095a2; +} \ No newline at end of file diff --git a/utils/test/reporting/functest/img/weather-clear.png b/utils/test/reporting/functest/img/weather-clear.png new file mode 100644 index 000000000..a0d967750 Binary files /dev/null and b/utils/test/reporting/functest/img/weather-clear.png differ diff --git a/utils/test/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/functest/img/weather-few-clouds.png new file mode 100644 index 000000000..acfa78398 Binary files /dev/null and b/utils/test/reporting/functest/img/weather-few-clouds.png differ diff --git a/utils/test/reporting/functest/img/weather-overcast.png b/utils/test/reporting/functest/img/weather-overcast.png new file mode 100644 index 000000000..4296246d0 Binary files /dev/null and b/utils/test/reporting/functest/img/weather-overcast.png differ diff --git a/utils/test/reporting/functest/img/weather-storm.png b/utils/test/reporting/functest/img/weather-storm.png new file mode 100644 index 000000000..956f0e20f Binary files /dev/null and b/utils/test/reporting/functest/img/weather-storm.png differ diff --git a/utils/test/reporting/functest/index.html b/utils/test/reporting/functest/index.html new file mode 100644 index 000000000..af4033567 --- /dev/null +++ b/utils/test/reporting/functest/index.html @@ -0,0 +1,52 @@ + + + + + + + + + + + +
+
+

Functest reporting page

+ +
+
+
+
+
+

Functest

+ This project develops test suites that cover functional test cases in OPNFV. +
The test suites are integrated into the continuous integration (CI) framework and used to evaluate/validate scenarios. +
Weekly meeting: every Tuesday 8 AM UTC +
IRC channel: #opnfv-testperf + +
+

Useful Links

+
  • Functest in Depth
  • +
  • Functest Repo
  • +
  • Functest Project
  • +
  • Functest Jenkins page
  • +
  • JIRA
  • + +
    +
    +
    +
    diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py new file mode 100644 index 000000000..9e6aeb1ad --- /dev/null +++ b/utils/test/reporting/functest/reporting-status.py @@ -0,0 +1,307 @@ +from urllib2 import Request, urlopen, URLError +import json +import jinja2 +import os +import re +import requests +import sys +import time +import yaml + +# Declaration of the variables +functest_test_list = ['vPing', 'vPing_userdata', + 'Tempest', 'Rally', + 'ODL', 'ONOS', 'vIMS'] +# functest_test_list = ['vPing'] +companion_test_list = ['doctor/doctor-notification', 'promise/promise'] +# companion_test_list = [] +installers = ["apex", "compass", "fuel", "joid"] +# installers = ["fuel"] +versions = ["brahmaputra", "master"] +# versions = ["master"] +PERIOD = 10 + +# Correspondance between the name of the test case and the name in the DB +# ideally we should modify the DB to avoid such interface.... +# ' +# I know it is uggly... +test_match_matrix = {'vPing': 'vping_ssh', + 'vPing_userdata': 'vping_userdata', + 'ODL': 'odl', + 'ONOS': 'onos', + 'Tempest': 'tempest', + 'Rally': 'rally', + 'vIMS': 'vims', + 'doctor-notification': 'doctor', + 'promise': 'promise'} + + +class TestCase(object): + def __init__(self, name, project, criteria=-1, isRunnable=True): + self.name = name + self.project = project + self.criteria = criteria + self.isRunnable = isRunnable + + def getName(self): + return self.name + + def getProject(self): + return self.project + + def getCriteria(self): + return self.criteria + + def setCriteria(self, criteria): + self.criteria = criteria + + def setIsRunnable(self, isRunnable): + self.isRunnable = isRunnable + + def checkRunnable(self, installer, scenario, config): + # Re-use Functest declaration + # Retrieve Functest configuration file functest_config.yaml + is_runnable = True + config_test = "" + TEST_ENV = functest_yaml_config.get("test-dependencies") + + # print " *********************** " + # print TEST_ENV + # print " ---------------------- " + # print "case = " + self.name + # print "installer = " + installer + # print "scenario = " + scenario + # print "project = " + self.project + + # Retrieve test constraints + case_name_formated = test_match_matrix[self.name] + + try: + config_test = TEST_ENV[self.project][case_name_formated] + except KeyError: + # if not defined in dependencies => no dependencies + config_test = TEST_ENV[case_name_formated] + except Exception, e: + print "Error [getTestEnv]:", e + + # Retrieve test execution param + test_execution_context = {"installer": installer, + "scenario": scenario} + # By default we assume that all the tests are always runnable... 
+ # if test_env not empty => dependencies to be checked + if config_test is not None and len(config_test) > 0: + # possible criteria = ["installer", "scenario"] + # consider test criteria from config file + # compare towards CI env through CI en variable + for criteria in config_test: + if re.search(config_test[criteria], + test_execution_context[criteria]) is None: + # print "Test "+ test + " cannot be run on the environment" + is_runnable = False + # print is_runnable + self.isRunnable = is_runnable + + +def getApiResults(case, installer, scenario, version): + case = case.getName() + results = json.dumps([]) + # to remove proxy (to be removed at the end for local test only) + # proxy_handler = urllib2.ProxyHandler({}) + # opener = urllib2.build_opener(proxy_handler) + # urllib2.install_opener(opener) + # url = "http://127.0.0.1:8000/results?case=" + case + \ + # "&period=30&installer=" + installer + url = "http://testresults.opnfv.org/testapi/results?case=" + case + \ + "&period=" + str(PERIOD) + "&installer=" + installer + \ + "&scenario=" + scenario + "&version=" + version + request = Request(url) + + try: + response = urlopen(request) + k = response.read() + results = json.loads(k) + except URLError, e: + print 'No kittez. Got an error code:', e + + return results + + +def getScenarios(case, installer, version): + + case = case.getName() + url = "http://testresults.opnfv.org/testapi/results?case=" + case + \ + "&period=" + str(PERIOD) + "&installer=" + installer + \ + "&version=" + version + request = Request(url) + + try: + response = urlopen(request) + k = response.read() + results = json.loads(k) + except URLError, e: + print 'Got an error code:', e + + test_results = results['test_results'] + + if test_results is not None: + test_results.reverse() + + scenario_results = {} + + for r in test_results: + # Retrieve all the scenarios per installer + if not r['scenario'] in scenario_results.keys(): + scenario_results[r['scenario']] = [] + scenario_results[r['scenario']].append(r) + + return scenario_results + + +def getScenarioStats(scenario_results): + scenario_stats = {} + for k, v in scenario_results.iteritems(): + scenario_stats[k] = len(v) + + return scenario_stats + + +def getNbtestOk(results): + nb_test_ok = 0 + for r in results: + for k, v in r.iteritems(): + try: + if "passed" in v: + nb_test_ok += 1 + except: + print "Cannot retrieve test status" + return nb_test_ok + + +def getResult(testCase, installer, scenario, version): + + # retrieve raw results + results = getApiResults(testCase, installer, scenario, version) + # let's concentrate on test results only + test_results = results['test_results'] + + # if results found, analyze them + if test_results is not None: + test_results.reverse() + + scenario_results = [] + + # print " ---------------- " + # print test_results + # print " ---------------- " + # print "nb of results:" + str(len(test_results)) + + for r in test_results: + # print r["creation_date"] + # print r["criteria"] + scenario_results.append({r["creation_date"]: r["criteria"]}) + # sort results + scenario_results.sort() + # 4 levels for the results + # 3: 4+ consecutive runs passing the success criteria + # 2: <4 successful consecutive runs but passing the criteria + # 1: close to pass the success criteria + # 0: 0% success, not passing + test_result_indicator = 0 + nbTestOk = getNbtestOk(scenario_results) + # print "Nb test OK:"+ str(nbTestOk) + # check that we have at least 4 runs + if nbTestOk < 1: + test_result_indicator = 0 + elif nbTestOk < 2: + 
test_result_indicator = 1 + else: + # Test the last 4 run + if (len(scenario_results) > 3): + last4runResults = scenario_results[-4:] + if getNbtestOk(last4runResults): + test_result_indicator = 3 + else: + test_result_indicator = 2 + else: + test_result_indicator = 2 + print " >>>> Test indicator:" + str(test_result_indicator) + return test_result_indicator + +# ****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** + +# init just tempest to get the list of scenarios +# as all the scenarios run Tempest +tempest = TestCase("Tempest", "functest", -1) + +# Retrieve the Functest configuration to detect which tests are relevant +# according to the installer, scenario +response = requests.get('https://git.opnfv.org/cgit/functest/plain/testcases/config_functest.yaml') +functest_yaml_config = yaml.load(response.text) + +print "****************************************" +print "* Generating reporting..... *" +print "****************************************" +# For all the versions +for version in versions: + # For all the installers + for installer in installers: + # get scenarios + scenario_results = getScenarios(tempest, installer, version) + scenario_stats = getScenarioStats(scenario_results) + + items = {} + # For all the scenarios get results + for s, s_result in scenario_results.items(): + testCases = [] + # For each scenario declare the test cases + # Functest cases + for test_case in functest_test_list: + testCases.append(TestCase(test_case, "functest")) + + # project/case + for test_case in companion_test_list: + test_split = test_case.split("/") + test_project = test_split[0] + test_case = test_split[1] + testCases.append(TestCase(test_case, test_project)) + + # Check if test case is runnable / installer, scenario + try: + for test_case in testCases: + test_case.checkRunnable(installer, s, functest_yaml_config) + # print "testcase %s is %s" % (test_case.getName(), + # test_case.isRunnable) + print "--------------------------" + print "installer %s, version %s, scenario %s:" % (installer, version, s) + for testCase in testCases: + time.sleep(1) + if testCase.isRunnable: + print " Searching results for case %s " % (testCase.getName()) + result = getResult(testCase, installer, s, version) + testCase.setCriteria(result) + items[s] = testCases + print "--------------------------" + except: + print "installer %s, version %s, scenario %s" % (installer, version, s) + print "No data available , error %s " % (sys.exc_info()[0]) + + print "****************************************" + templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) + templateEnv = jinja2.Environment(loader=templateLoader) + + TEMPLATE_FILE = "./template/index-status-tmpl.html" + template = templateEnv.get_template(TEMPLATE_FILE) + + outputText = template.render(scenario_stats=scenario_stats, + items=items, + installer=installer, + period=PERIOD, + version=version) + + with open("./release/" + version + + "/index-status-" + installer + ".html", "wb") as fh: + fh.write(outputText) diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py new file mode 100644 index 000000000..563e53010 --- 
/dev/null +++ b/utils/test/reporting/functest/reporting-tempest.py @@ -0,0 +1,99 @@ +from urllib2 import Request, urlopen, URLError +import json +import jinja2 +import os + +installers = ["apex", "compass", "fuel", "joid"] +items = ["tests", "Success rate", "duration"] + +for installer in installers: + # we consider the Tempest results of the last 7 days + url = "http://testresults.opnfv.org/testapi/results?case=Tempest" + request = Request(url + '&period=7&installer=' + installer) + + try: + response = urlopen(request) + k = response.read() + results = json.loads(k) + except URLError, e: + print 'No kittez. Got an error code:', e + + test_results = results['test_results'] + test_results.reverse() + + scenario_results = {} + criteria = {} + errors = {} + + for r in test_results: + # Retrieve all the scenarios per installer + if not r['version'] in scenario_results.keys(): + scenario_results[r['version']] = [] + scenario_results[r['version']].append(r) + + for s, s_result in scenario_results.items(): + scenario_results[s] = s_result[0:5] + # For each scenario, we build a result object to deal with + # results, criteria and error handling + for result in scenario_results[s]: + result["creation_date"] = result["creation_date"].split(".")[0] + + # retrieve results + # **************** + nb_tests_run = result['details']['tests'] + if nb_tests_run != 0: + success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests']) + else: + success_rate = 0 + + result['details']["tests"] = nb_tests_run + result['details']["Success rate"] = str(success_rate) + "%" + + # Criteria management + # ******************* + crit_tests = False + crit_rate = False + crit_time = False + + # Expect that at least 200 tests are run + if nb_tests_run >= 200: + crit_tests = True + + # Expect that at least 90% of success + if success_rate >= 90: + crit_rate = True + + # Expect that the suite duration is inferior to 45m + if result['details']['duration'] < 2700: + crit_time = True + + result['criteria'] = {'tests': crit_tests, + 'Success rate': crit_rate, + 'duration': crit_time} + + # error management + # **************** + + # TODO get information from artefact based on build tag + # to identify errors of the associated run + # build tag needed to wget errors on the artifacts + # the idea is to list the tests in errors and provide the link + # towards complete artifact + # another option will be to put the errors in the DB + # (in the detail section)... 
+ result['errors'] = {'tests': "", + 'Success rate': "", + 'duration': ""} + + templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) + templateEnv = jinja2.Environment(loader=templateLoader) + + TEMPLATE_FILE = "./template/index-tempest-tmpl.html" + template = templateEnv.get_template(TEMPLATE_FILE) + + outputText = template.render(scenario_results=scenario_results, + items=items, + installer=installer) + + with open("./release/index-tempest-" + installer + ".html", "wb") as fh: + fh.write(outputText) diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py new file mode 100644 index 000000000..78ca9f5b3 --- /dev/null +++ b/utils/test/reporting/functest/reporting-vims.py @@ -0,0 +1,83 @@ +from urllib2 import Request, urlopen, URLError +import json +import jinja2 +import os + +def sig_test_format(sig_test): + nbPassed = 0 + nbFailures = 0 + nbSkipped = 0 + for data_test in sig_test: + if data_test['result'] == "Passed": + nbPassed+= 1 + elif data_test['result'] == "Failed": + nbFailures += 1 + elif data_test['result'] == "Skipped": + nbSkipped += 1 + total_sig_test_result = {} + total_sig_test_result['passed'] = nbPassed + total_sig_test_result['failures'] = nbFailures + total_sig_test_result['skipped'] = nbSkipped + return total_sig_test_result + +installers = ["fuel", "compass", "joid", "apex"] +step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"] + +for installer in installers: + request = Request('http://testresults.opnfv.org/testapi/results?case=vIMS&installer=' + installer) + + try: + response = urlopen(request) + k = response.read() + results = json.loads(k) + except URLError, e: + print 'No kittez. Got an error code:', e + + test_results = results['test_results'] + test_results.reverse() + + scenario_results = {} + for r in test_results: + if not r['version'] in scenario_results.keys(): + scenario_results[r['version']] = [] + scenario_results[r['version']].append(r) + + for s, s_result in scenario_results.items(): + scenario_results[s] = s_result[0:5] + for result in scenario_results[s]: + result["creation_date"] = result["creation_date"].split(".")[0] + sig_test = result['details']['sig_test']['result'] + if not sig_test == "" and isinstance(sig_test, list): + format_result = sig_test_format(sig_test) + if format_result['failures'] > format_result['passed']: + result['details']['sig_test']['duration'] = 0 + result['details']['sig_test']['result'] = format_result + nb_step_ok = 0 + nb_step = len(result['details']) + + for step_name, step_result in result['details'].items(): + if step_result['duration'] != 0: + nb_step_ok += 1 + m, s = divmod(step_result['duration'], 60) + m_display = "" + if int(m) != 0: + m_display += str(int(m)) + "m " + step_result['duration_display'] = m_display + str(int(s)) + "s" + + result['pr_step_ok'] = 0 + if nb_step != 0: + result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100 + + + templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) + templateEnv = jinja2.Environment( loader=templateLoader ) + + TEMPLATE_FILE = "./template/index-vims-tmpl.html" + template = templateEnv.get_template( TEMPLATE_FILE ) + + outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer) + + with open("./release/index-vims" + installer + ".html", "wb") as fh: + fh.write(outputText) + + diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html 
b/utils/test/reporting/functest/template/index-status-tmpl.html new file mode 100644 index 000000000..604f2c8e4 --- /dev/null +++ b/utils/test/reporting/functest/template/index-status-tmpl.html @@ -0,0 +1,94 @@ + + + + + + + + + + + +
    +
    +

    Functest status page ({{version}})

    + +
    +
    +
    +
    + + +
    +

    List of last scenarios ({{version}}) run over the last {{period}} days

    + + + + + + {% for scenario,iteration in scenario_stats.iteritems() -%} + + + + + {%- endfor %} +
    ScenarioIteration
    {{scenario}}{{iteration}}
    +
    + + + + {% for scenario, iteration in scenario_stats.iteritems() -%} +
    + +
    +
    + + +
    + + + {% for test in items[scenario] -%} + + {%- endfor %} + + + {% for test in items[scenario] -%} + {% if test.isRunnable is sameas false -%} + + {% elif test.getCriteria() > 2 -%} + + {%- elif test.getCriteria() > 1 -%} + + {%- elif test.getCriteria() > 0 -%} + + {%- else -%} + + {%- endif %} + {%- endfor %} + +
    {{test.getName() }}
    N.R
    +
    +
    + {%- endfor %} +
    +
    +
    diff --git a/utils/test/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html new file mode 100644 index 000000000..be0b79734 --- /dev/null +++ b/utils/test/reporting/functest/template/index-tempest-tmpl.html @@ -0,0 +1,90 @@ + + + + + + + + + + + +
    +
    +

    Tempest status page

    + +
    +
    +
    +
    + + {% for scenario_name, results in scenario_results.iteritems() -%} +
    + + {% for result in results -%} + {% if loop.index > 2 -%} + + {%- endfor %} +
    +
    +
    diff --git a/utils/test/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html new file mode 100644 index 000000000..8858182c1 --- /dev/null +++ b/utils/test/reporting/functest/template/index-vims-tmpl.html @@ -0,0 +1,91 @@ + + + + + + + + + + + +
    +
    +

    vIMS status page

    + +
    +
    +
    +
    + + {% for scenario_name, results in scenario_results.iteritems() -%} +
    + + {% for result in results -%} + {% if loop.index > 2 -%} + + {%- endfor %} +
    +
    +
    diff --git a/utils/test/reporting/img/weather-clear.png b/utils/test/reporting/img/weather-clear.png deleted file mode 100644 index a0d967750..000000000 Binary files a/utils/test/reporting/img/weather-clear.png and /dev/null differ diff --git a/utils/test/reporting/img/weather-few-clouds.png b/utils/test/reporting/img/weather-few-clouds.png deleted file mode 100644 index acfa78398..000000000 Binary files a/utils/test/reporting/img/weather-few-clouds.png and /dev/null differ diff --git a/utils/test/reporting/img/weather-overcast.png b/utils/test/reporting/img/weather-overcast.png deleted file mode 100644 index 4296246d0..000000000 Binary files a/utils/test/reporting/img/weather-overcast.png and /dev/null differ diff --git a/utils/test/reporting/img/weather-storm.png b/utils/test/reporting/img/weather-storm.png deleted file mode 100644 index 956f0e20f..000000000 Binary files a/utils/test/reporting/img/weather-storm.png and /dev/null differ diff --git a/utils/test/reporting/index-status-tmpl.html b/utils/test/reporting/index-status-tmpl.html deleted file mode 100644 index 0fd470d6f..000000000 --- a/utils/test/reporting/index-status-tmpl.html +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - - - - - -
    -
    -

    Functest status page

    - -
    -
    -
    -
    - - -
    -

    List of last scenarios run over the last {{period}} days

    - - - - - - {% for scenario,iteration in scenario_stats.iteritems() -%} - - - - - {%- endfor %} -
    ScenarioIteration
    {{scenario}}{{iteration}}
    -
    - - - - {% for scenario, iteration in scenario_stats.iteritems() -%} -
    - -
    -
    - - -
    - - - {% for test in items[scenario] -%} - - {%- endfor %} - - - {% for test in items[scenario] -%} - {% if test.isRunnable is sameas false -%} - - {% elif test.getCriteria() > 2 -%} - - {%- elif test.getCriteria() > 1 -%} - - {%- elif test.getCriteria() > 0 -%} - - {%- else -%} - - {%- endif %} - {%- endfor %} - -
    {{test.getName() }}
    N.R
    -
    -
    - {%- endfor %} -
    -
    -
    diff --git a/utils/test/reporting/index-tempest-tmpl.html b/utils/test/reporting/index-tempest-tmpl.html deleted file mode 100644 index be0b79734..000000000 --- a/utils/test/reporting/index-tempest-tmpl.html +++ /dev/null @@ -1,90 +0,0 @@ - - - - - - - - - - - -
    -
    -

    Tempest status page

    - -
    -
    -
    -
    - - {% for scenario_name, results in scenario_results.iteritems() -%} -
    - - {% for result in results -%} - {% if loop.index > 2 -%} - - {%- endfor %} -
    -
    -
    diff --git a/utils/test/reporting/index-vims-tmpl.html b/utils/test/reporting/index-vims-tmpl.html deleted file mode 100644 index 8858182c1..000000000 --- a/utils/test/reporting/index-vims-tmpl.html +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - -
    -
    -

    vIMS status page

    - -
    -
    -
    -
    - - {% for scenario_name, results in scenario_results.iteritems() -%} -
    - - {% for result in results -%} - {% if loop.index > 2 -%} - - {%- endfor %} -
    -
    -
    diff --git a/utils/test/reporting/index.html b/utils/test/reporting/index.html deleted file mode 100644 index af4033567..000000000 --- a/utils/test/reporting/index.html +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - -
    -
    -

    Functest reporting page

    - -
    -
    -
    -
    -
    -

    Functest

    - This project develops test suites that cover functionaling test cases in OPNFV. -
    The test suites are integrated in the continuation integration (CI) framework and used to evaluate/validate scenario. -
    Weekly meeting: every Tuesday 8 AM UTC -
    IRC chan #opnfv-testperf - -
    -

    Useful Links

    -
  • Functest in Depth
  • -
  • Functest Repo
  • -
  • Functest Project
  • -
  • Functest Jenkins page
  • -
  • JIRA
  • - -
    -
    -
    -
    diff --git a/utils/test/reporting/reporting-status.py b/utils/test/reporting/reporting-status.py deleted file mode 100644 index e15bac9f7..000000000 --- a/utils/test/reporting/reporting-status.py +++ /dev/null @@ -1,306 +0,0 @@ -from urllib2 import Request, urlopen, URLError -import json -import jinja2 -import os -import re -import requests -import time -import yaml - -# Declaration of the variables -functest_test_list = ['vPing', 'vPing_userdata', - 'Tempest', 'Rally', - 'ODL', 'ONOS', 'vIMS'] -# functest_test_list = ['vPing'] -# functest_test_list = [] -companion_test_list = ['doctor/doctor-notification', 'promise/promise'] -# companion_test_list = [] -installers = ["apex", "compass", "fuel", "joid"] -# installers = ["apex"] -PERIOD = 10 - -# Correspondance between the name of the test case and the name in the DB -# ideally we should modify the DB to avoid such interface.... -# ' -# I know it is uggly... -test_match_matrix = {'vPing': 'vping_ssh', - 'vPing_userdata': 'vping_userdata', - 'ODL': 'odl', - 'ONOS': 'onos', - 'Tempest': 'tempest', - 'Rally': 'rally', - 'vIMS': 'vims', - 'doctor-notification': 'doctor', - 'promise': 'promise'} - - -class TestCase(object): - def __init__(self, name, project, criteria=-1, isRunnable=True): - self.name = name - self.project = project - self.criteria = criteria - self.isRunnable = isRunnable - - def getName(self): - return self.name - - def getProject(self): - return self.project - - def getCriteria(self): - return self.criteria - - def setCriteria(self, criteria): - self.criteria = criteria - - def setIsRunnable(self, isRunnable): - self.isRunnable = isRunnable - - def checkRunnable(self, installer, scenario, config): - # Re-use Functest declaration - # Retrieve Functest configuration file functest_config.yaml - is_runnable = True - config_test = "" - TEST_ENV = functest_yaml_config.get("test-dependencies") - - # print " *********************** " - # print TEST_ENV - # print " ---------------------- " - # print "case = " + self.name - # print "installer = " + installer - # print "scenario = " + scenario - # print "project = " + self.project - - # Retrieve test constraints - case_name_formated = test_match_matrix[self.name] - - try: - config_test = TEST_ENV[self.project][case_name_formated] - except KeyError: - # if not defined in dependencies => no dependencies - config_test = TEST_ENV[case_name_formated] - except Exception, e: - print "Error [getTestEnv]:", e - - # Retrieve test execution param - test_execution_context = {"installer": installer, - "scenario": scenario} - # By default we assume that all the tests are always runnable... 
- # if test_env not empty => dependencies to be checked - if config_test is not None and len(config_test) > 0: - # possible criteria = ["installer", "scenario"] - # consider test criteria from config file - # compare towards CI env through CI en variable - for criteria in config_test: - if re.search(config_test[criteria], - test_execution_context[criteria]) is None: - # print "Test "+ test + " cannot be run on the environment" - is_runnable = False - # print is_runnable - self.isRunnable = is_runnable - - -def getApiResults(case, installer, scenario): - case = case.getName() - results = json.dumps([]) - # to remove proxy (to be removed at the end for local test only) - # proxy_handler = urllib2.ProxyHandler({}) - # opener = urllib2.build_opener(proxy_handler) - # urllib2.install_opener(opener) - # url = "http://127.0.0.1:8000/results?case=" + case + \ - # "&period=30&installer=" + installer - url = "http://testresults.opnfv.org/testapi/results?case=" + case + \ - "&period=" + str(PERIOD) + "&installer=" + installer + \ - "&scenario=" + scenario - request = Request(url) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - print 'No kittez. Got an error code:', e - - return results - - -def getScenarios(case, installer): - - case = case.getName() - url = "http://testresults.opnfv.org/testapi/results?case=" + case + \ - "&period=" + str(PERIOD) + "&installer=" + installer - request = Request(url) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - print 'Got an error code:', e - - test_results = results['test_results'] - - if test_results is not None: - test_results.reverse() - - scenario_results = {} - - for r in test_results: - # Retrieve all the scenarios per installer - if not r['version'] in scenario_results.keys(): - scenario_results[r['version']] = [] - scenario_results[r['version']].append(r) - - return scenario_results - - -def getScenarioStats(scenario_results): - scenario_stats = {} - for k, v in scenario_results.iteritems(): - scenario_stats[k] = len(v) - - return scenario_stats - - -def getNbtestOk(results): - nb_test_ok = 0 - for r in results: - for k, v in r.iteritems(): - try: - if "passed" in v: - nb_test_ok += 1 - except: - print "Cannot retrieve test status" - return nb_test_ok - - -def getResult(testCase, installer, scenario): - - # retrieve raw results - results = getApiResults(testCase, installer, scenario) - # let's concentrate on test results only - test_results = results['test_results'] - - # if results found, analyze them - if test_results is not None: - test_results.reverse() - - scenario_results = [] - - # print " ---------------- " - # print test_results - # print " ---------------- " - # print "nb of results:" + str(len(test_results)) - - for r in test_results: - # print r["creation_date"] - # print r["criteria"] - scenario_results.append({r["creation_date"]: r["criteria"]}) - # sort results - scenario_results.sort() - # 4 levels for the results - # 3: 4+ consecutive runs passing the success criteria - # 2: <4 successful consecutive runs but passing the criteria - # 1: close to pass the success criteria - # 0: 0% success, not passing - test_result_indicator = 0 - nbTestOk = getNbtestOk(scenario_results) - # print "Nb test OK:"+ str(nbTestOk) - # check that we have at least 4 runs - if nbTestOk < 1: - test_result_indicator = 0 - elif nbTestOk < 2: - test_result_indicator = 1 - else: - # Test the last 4 run - if (len(scenario_results) > 3): - 
last4runResults = scenario_results[-4:] - if getNbtestOk(last4runResults): - test_result_indicator = 3 - else: - test_result_indicator = 2 - else: - test_result_indicator = 2 - print " >>>> Test indicator:" + str(test_result_indicator) - return test_result_indicator - -# ****************************************************************************** -# ****************************************************************************** -# ****************************************************************************** -# ****************************************************************************** -# ****************************************************************************** - -# as the criteria are all difference, we shall use a common way to indicate -# the criteria -# 100 = 100% = all the test must be OK -# 90 = 90% = all the test must be above 90% of success rate -# TODO harmonize success criteria -# some criteria could be the duration, the success rate, the packet loss,... -# to be done case by case -# TODo create TestCriteria Object - - -# init just tempest to get the list of scenarios -# as all the scenarios run Tempest -tempest = TestCase("Tempest", "functest", -1) - -# Retrieve the Functest configuration to detect which tests are relevant -# according to the installer, scenario -response = requests.get('https://git.opnfv.org/cgit/functest/plain/testcases/config_functest.yaml') -functest_yaml_config = yaml.load(response.text) - -print "****************************************" -print "* Generating reporting..... *" -print "****************************************" -# For all the installers -for installer in installers: - # get scenarios - scenario_results = getScenarios(tempest, installer) - scenario_stats = getScenarioStats(scenario_results) - - items = {} - # For all the scenarios get results - for s, s_result in scenario_results.items(): - testCases = [] - # For each scenario declare the test cases - # Functest cases - for test_case in functest_test_list: - testCases.append(TestCase(test_case, "functest")) - - # project/case - for test_case in companion_test_list: - test_split = test_case.split("/") - test_project = test_split[0] - test_case = test_split[1] - testCases.append(TestCase(test_case, test_project)) - - # Check if test case is runnable according to the installer, scenario - for test_case in testCases: - test_case.checkRunnable(installer, s, functest_yaml_config) - # print "testcase %s is %s" % (test_case.getName(), - # test_case.isRunnable) - - print "--------------------------" - print "%s / %s:" % (installer, s) - for testCase in testCases: - time.sleep(1) - if testCase.isRunnable: - print " Searching results for case %s " % testCase.getName() - result = getResult(testCase, installer, s) - testCase.setCriteria(result) - items[s] = testCases - print "--------------------------" - print "****************************************" - templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) - templateEnv = jinja2.Environment(loader=templateLoader) - - TEMPLATE_FILE = "index-status-tmpl.html" - template = templateEnv.get_template(TEMPLATE_FILE) - - outputText = template.render(scenario_stats=scenario_stats, - items=items, - installer=installer, - period=PERIOD) - - with open("index-status-" + installer + ".html", "wb") as fh: - fh.write(outputText) diff --git a/utils/test/reporting/reporting-tempest.py b/utils/test/reporting/reporting-tempest.py deleted file mode 100644 index 944b42809..000000000 --- a/utils/test/reporting/reporting-tempest.py +++ 
/dev/null @@ -1,99 +0,0 @@ -from urllib2 import Request, urlopen, URLError -import json -import jinja2 -import os - -installers = ["apex", "compass", "fuel", "joid"] -items = ["tests", "Success rate", "duration"] - -for installer in installers: - # we consider the Tempest results of the last 7 days - url = "http://testresults.opnfv.org/testapi/results?case=Tempest" - request = Request(url + '&period=7&installer=' + installer) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - print 'No kittez. Got an error code:', e - - test_results = results['test_results'] - test_results.reverse() - - scenario_results = {} - criteria = {} - errors = {} - - for r in test_results: - # Retrieve all the scenarios per installer - if not r['version'] in scenario_results.keys(): - scenario_results[r['version']] = [] - scenario_results[r['version']].append(r) - - for s, s_result in scenario_results.items(): - scenario_results[s] = s_result[0:5] - # For each scenario, we build a result object to deal with - # results, criteria and error handling - for result in scenario_results[s]: - result["creation_date"] = result["creation_date"].split(".")[0] - - # retrieve results - # **************** - nb_tests_run = result['details']['tests'] - if nb_tests_run != 0: - success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests']) - else: - success_rate = 0 - - result['details']["tests"] = nb_tests_run - result['details']["Success rate"] = str(success_rate) + "%" - - # Criteria management - # ******************* - crit_tests = False - crit_rate = False - crit_time = False - - # Expect that at least 200 tests are run - if nb_tests_run >= 200: - crit_tests = True - - # Expect that at least 90% of success - if success_rate >= 90: - crit_rate = True - - # Expect that the suite duration is inferior to 45m - if result['details']['duration'] < 2700: - crit_time = True - - result['criteria'] = {'tests': crit_tests, - 'Success rate': crit_rate, - 'duration': crit_time} - - # error management - # **************** - - # TODO get information from artefact based on build tag - # to identify errors of the associated run - # build tag needed to wget errors on the artifacts - # the idea is to list the tests in errors and provide the link - # towards complete artifact - # another option will be to put the errors in the DB - # (in the detail section)... 
- result['errors'] = {'tests': "", - 'Success rate': "", - 'duration': ""} - - templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) - templateEnv = jinja2.Environment(loader=templateLoader) - - TEMPLATE_FILE = "index-tempest-tmpl.html" - template = templateEnv.get_template(TEMPLATE_FILE) - - outputText = template.render(scenario_results=scenario_results, - items=items, - installer=installer) - - with open("index-tempest-" + installer + ".html", "wb") as fh: - fh.write(outputText) diff --git a/utils/test/reporting/reporting-vims.py b/utils/test/reporting/reporting-vims.py deleted file mode 100644 index cf43f3ebc..000000000 --- a/utils/test/reporting/reporting-vims.py +++ /dev/null @@ -1,83 +0,0 @@ -from urllib2 import Request, urlopen, URLError -import json -import jinja2 -import os - -def sig_test_format(sig_test): - nbPassed = 0 - nbFailures = 0 - nbSkipped = 0 - for data_test in sig_test: - if data_test['result'] == "Passed": - nbPassed+= 1 - elif data_test['result'] == "Failed": - nbFailures += 1 - elif data_test['result'] == "Skipped": - nbSkipped += 1 - total_sig_test_result = {} - total_sig_test_result['passed'] = nbPassed - total_sig_test_result['failures'] = nbFailures - total_sig_test_result['skipped'] = nbSkipped - return total_sig_test_result - -installers = ["fuel", "compass", "joid", "apex"] -step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"] - -for installer in installers: - request = Request('http://testresults.opnfv.org/testapi/results?case=vIMS&installer=' + installer) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError, e: - print 'No kittez. Got an error code:', e - - test_results = results['test_results'] - test_results.reverse() - - scenario_results = {} - for r in test_results: - if not r['version'] in scenario_results.keys(): - scenario_results[r['version']] = [] - scenario_results[r['version']].append(r) - - for s, s_result in scenario_results.items(): - scenario_results[s] = s_result[0:5] - for result in scenario_results[s]: - result["creation_date"] = result["creation_date"].split(".")[0] - sig_test = result['details']['sig_test']['result'] - if not sig_test == "" and isinstance(sig_test, list): - format_result = sig_test_format(sig_test) - if format_result['failures'] > format_result['passed']: - result['details']['sig_test']['duration'] = 0 - result['details']['sig_test']['result'] = format_result - nb_step_ok = 0 - nb_step = len(result['details']) - - for step_name, step_result in result['details'].items(): - if step_result['duration'] != 0: - nb_step_ok += 1 - m, s = divmod(step_result['duration'], 60) - m_display = "" - if int(m) != 0: - m_display += str(int(m)) + "m " - step_result['duration_display'] = m_display + str(int(s)) + "s" - - result['pr_step_ok'] = 0 - if nb_step != 0: - result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100 - - - templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) - templateEnv = jinja2.Environment( loader=templateLoader ) - - TEMPLATE_FILE = "index-vims-tmpl.html" - template = templateEnv.get_template( TEMPLATE_FILE ) - - outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer) - - with open("index-vims" + installer + ".html", "wb") as fh: - fh.write(outputText) - - -- cgit 1.2.3-korg