Diffstat (limited to 'utils')
-rwxr-xr-x  utils/calculate_version.sh | 15
-rw-r--r--  utils/test/dashboard/js/opnfv_dashboard_tests_conf.js | 4
-rw-r--r--  utils/test/reporting/functest/default.css (renamed from utils/test/reporting/default.css) | 0
-rw-r--r--  utils/test/reporting/functest/img/icon-nok.png | bin 0 -> 2317 bytes
-rw-r--r--  utils/test/reporting/functest/img/icon-ok.png | bin 0 -> 4063 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-clear.png (renamed from utils/test/reporting/img/weather-clear.png) | bin 1560 -> 1560 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-few-clouds.png (renamed from utils/test/reporting/img/weather-few-clouds.png) | bin 1927 -> 1927 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-overcast.png (renamed from utils/test/reporting/img/weather-overcast.png) | bin 1588 -> 1588 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-storm.png (renamed from utils/test/reporting/img/weather-storm.png) | bin 2137 -> 2137 bytes
-rw-r--r--  utils/test/reporting/functest/index.html (renamed from utils/test/reporting/index.html) | 0
-rw-r--r--  utils/test/reporting/functest/reporting-status.py (renamed from utils/test/reporting/reporting-status.py) | 211
-rw-r--r--  utils/test/reporting/functest/reporting-tempest.py (renamed from utils/test/reporting/reporting-tempest.py) | 4
-rw-r--r--  utils/test/reporting/functest/reporting-vims.py (renamed from utils/test/reporting/reporting-vims.py) | 4
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html (renamed from utils/test/reporting/index-status-tmpl.html) | 25
-rw-r--r--  utils/test/reporting/functest/template/index-tempest-tmpl.html (renamed from utils/test/reporting/index-tempest-tmpl.html) | 0
-rw-r--r--  utils/test/reporting/functest/template/index-vims-tmpl.html (renamed from utils/test/reporting/index-vims-tmpl.html) | 0
-rw-r--r--  utils/test/result_collection_api/dashboard/functest2Dashboard.py | 28
-rw-r--r--  utils/test/result_collection_api/resources/handlers.py | 6
-rw-r--r--  utils/test/result_collection_api/resources/models.py | 24
19 files changed, 207 insertions, 114 deletions
diff --git a/utils/calculate_version.sh b/utils/calculate_version.sh
index 4bfd13f7a..608a3bd73 100755
--- a/utils/calculate_version.sh
+++ b/utils/calculate_version.sh
@@ -36,14 +36,15 @@ function docker_version() {
    tag_json=$(curl $url_tag 2>/dev/null | python -mjson.tool | grep ${BASE_VERSION} | head -1)
    #e.g. tag_json= "name": "brahmaputra.0.2",
    if [ "${tag_json}" == "" ]; then
-       error "The Docker Image ${docker_image} does not have a TAG with base version ${BASE_VERSION}"
+       echo ${BASE_VERSION}.0
+   else
+       tag=$(echo $tag_json | awk '{print $2}' | sed 's/\,//' | sed 's/\"//g')
+       #e.g.: tag=brahmaputra.0.2
+       tag_current_version=$(echo $tag | sed 's/.*\.//')
+       tag_new_version=$(($tag_current_version+1))
+       #e.g.: tag=brahmaputra.0.3
+       echo ${BASE_VERSION}.${tag_new_version}
    fi
-   tag=$(echo $tag_json | awk '{print $2}' | sed 's/\,//' | sed 's/\"//g')
-   #e.g.: tag=brahmaputra.0.2
-   tag_current_version=$(echo $tag | sed 's/.*\.//')
-   tag_new_version=$(($tag_current_version+1))
-   #e.g.: tag=brahmaputra.0.3
-   echo ${BASE_VERSION}.${tag_new_version}
}
diff --git a/utils/test/dashboard/js/opnfv_dashboard_tests_conf.js b/utils/test/dashboard/js/opnfv_dashboard_tests_conf.js
index 6778332eb..4d909c883 100644
--- a/utils/test/dashboard/js/opnfv_dashboard_tests_conf.js
+++ b/utils/test/dashboard/js/opnfv_dashboard_tests_conf.js
@@ -56,13 +56,13 @@ var opnfv_dashboard_testcases = {
var opnfv_dashboard_installers_pods = {};
opnfv_dashboard_installers_pods['apex'] = ['all','intel-pod7','opnfv-jump-1'];
opnfv_dashboard_installers_pods['compass'] = ['all','huawei-us-deploy-bare-1','huawei-us-deploy-vm-1','huawei-us-deploy-vm2','intel-pod8'];
-opnfv_dashboard_installers_pods['fuel'] = ['all','ericsson-pod2','opnfv-jump-2'];
+opnfv_dashboard_installers_pods['fuel'] = ['all','ericsson-pod2','opnfv-jump-2','arm-pod1'];
opnfv_dashboard_installers_pods['joid'] = ['all','intel-pod5','intel-pod6','orange-fr-pod2'];
var opnfv_dashboard_installers_pods_print = {};
opnfv_dashboard_installers_pods_print['apex'] = ['all','intelpod7','opnfvjump1'];
opnfv_dashboard_installers_pods_print['compass'] = ['all','hwusbare1','hwusvm1','hwusvm2','intelpod8'];
-opnfv_dashboard_installers_pods_print['fuel'] = ['all','ericssonpod2','opnfvjump2'];
+opnfv_dashboard_installers_pods_print['fuel'] = ['all','ericssonpod2','opnfvjump2','armpod1'];
opnfv_dashboard_installers_pods_print['joid'] = ['all','intelpod5','intelpod6','orangefrpod2'];
var opnfv_dashboard_file_directory = 'res-test';
diff --git a/utils/test/reporting/default.css b/utils/test/reporting/functest/default.css
index 0e330e965..0e330e965 100644
--- a/utils/test/reporting/default.css
+++ b/utils/test/reporting/functest/default.css
diff --git a/utils/test/reporting/functest/img/icon-nok.png b/utils/test/reporting/functest/img/icon-nok.png
new file mode 100644
index 000000000..526b5294b
--- /dev/null
+++ b/utils/test/reporting/functest/img/icon-nok.png
Binary files differ
diff --git a/utils/test/reporting/functest/img/icon-ok.png b/utils/test/reporting/functest/img/icon-ok.png
new file mode 100644
index 000000000..3a9de2e89
--- /dev/null
+++ b/utils/test/reporting/functest/img/icon-ok.png
Binary files differ
diff --git a/utils/test/reporting/img/weather-clear.png b/utils/test/reporting/functest/img/weather-clear.png
index a0d967750..a0d967750 100644
--- a/utils/test/reporting/img/weather-clear.png
+++ b/utils/test/reporting/functest/img/weather-clear.png
Binary files differ
diff --git a/utils/test/reporting/img/weather-few-clouds.png b/utils/test/reporting/functest/img/weather-few-clouds.png
index acfa78398..acfa78398 100644
--- a/utils/test/reporting/img/weather-few-clouds.png
+++ b/utils/test/reporting/functest/img/weather-few-clouds.png
Binary files differ
diff --git a/utils/test/reporting/img/weather-overcast.png b/utils/test/reporting/functest/img/weather-overcast.png
index 4296246d0..4296246d0 100644
--- a/utils/test/reporting/img/weather-overcast.png
+++ b/utils/test/reporting/functest/img/weather-overcast.png
Binary files differ
diff --git a/utils/test/reporting/img/weather-storm.png b/utils/test/reporting/functest/img/weather-storm.png
index 956f0e20f..956f0e20f 100644
--- a/utils/test/reporting/img/weather-storm.png
+++ b/utils/test/reporting/functest/img/weather-storm.png
Binary files differ
diff --git a/utils/test/reporting/index.html b/utils/test/reporting/functest/index.html
index af4033567..af4033567 100644
--- a/utils/test/reporting/index.html
+++ b/utils/test/reporting/functest/index.html
diff --git a/utils/test/reporting/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index e15bac9f7..9271717bb 100644
--- a/utils/test/reporting/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -1,9 +1,11 @@
from urllib2 import Request, urlopen, URLError
+import datetime
import json
import jinja2
import os
import re
import requests
+import sys
import time
import yaml
@@ -12,18 +14,21 @@
functest_test_list = ['vPing', 'vPing_userdata', 'Tempest', 'Rally', 'ODL',
                      'ONOS', 'vIMS']
# functest_test_list = ['vPing']
-# functest_test_list = []
companion_test_list = ['doctor/doctor-notification', 'promise/promise']
# companion_test_list = []
installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["apex"]
+# installers = ["fuel"]
+versions = ["brahmaputra", "master"]
+# versions = ["master"]
PERIOD = 10
+MAX_SCENARIO_CRITERIA = 18

# Correspondance between the name of the test case and the name in the DB
# ideally we should modify the DB to avoid such interface....
# '<name in the DB':'<name in the config'>
# I know it is uggly...
-test_match_matrix = {'vPing': 'vping_ssh',
+test_match_matrix = {'healthcheck': 'healthcheck',
+                     'vPing': 'vping_ssh',
                     'vPing_userdata': 'vping_userdata',
                     'ODL': 'odl',
                     'ONOS': 'onos',
@@ -100,7 +105,21 @@ class TestCase(object):
        self.isRunnable = is_runnable


-def getApiResults(case, installer, scenario):
+class ScenarioResult(object):
+    def __init__(self, status, score=0):
+        self.status = status
+        self.score = score
+
+    def getStatus(self):
+        return self.status
+
+    def getScore(self):
+        return self.score
+
+# *****************************************************************************
+
+
+def getApiResults(case, installer, scenario, version):
    case = case.getName()
    results = json.dumps([])
    # to remove proxy (to be removed at the end for local test only)
@@ -109,9 +128,9 @@ def getApiResults(case, installer, scenario):
    # urllib2.install_opener(opener)
    # url = "http://127.0.0.1:8000/results?case=" + case + \
    #       "&period=30&installer=" + installer
-   url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
-         "&period=" + str(PERIOD) + "&installer=" + installer + \
-         "&scenario=" + scenario
+   url = ("http://testresults.opnfv.org/testapi/results?case=" + case +
+          "&period=" + str(PERIOD) + "&installer=" + installer +
+          "&scenario=" + scenario + "&version=" + version)

    request = Request(url)
    try:
@@ -124,11 +143,12 @@ def getApiResults(case, installer, scenario):
    return results


-def getScenarios(case, installer):
+def getScenarios(case, installer, version):
    case = case.getName()
    url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
-         "&period=" + str(PERIOD) + "&installer=" + installer
+         "&period=" + str(PERIOD) + "&installer=" + installer + \
+         "&version=" + version

    request = Request(url)
    try:
@@ -147,9 +167,9 @@ def getScenarios(case, installer):
    for r in test_results:
        # Retrieve all the scenarios per installer
-       if not r['version'] in scenario_results.keys():
-           scenario_results[r['version']] = []
-       scenario_results[r['version']].append(r)
+       if not r['scenario'] in scenario_results.keys():
+           scenario_results[r['scenario']] = []
+       scenario_results[r['scenario']].append(r)

    return scenario_results
@@ -174,10 +194,10 @@ def getNbtestOk(results):
    return nb_test_ok


-def getResult(testCase, installer, scenario):
+def getResult(testCase, installer, scenario, version):

    # retrieve raw results
-   results = getApiResults(testCase, installer, scenario)
+   results = getApiResults(testCase, installer, scenario, version)

    # let's concentrate on test results only
    test_results = results['test_results']
@@ -230,77 +250,114 @@
# ******************************************************************************
# ******************************************************************************
-# as the criteria are all difference, we shall use a common way to indicate
-# the criteria
-# 100 = 100% = all the test must be OK
-# 90 = 90% = all the test must be above 90% of success rate
-# TODO harmonize success criteria
-# some criteria could be the duration, the success rate, the packet loss,...
-# to be done case by case
-# TODo create TestCriteria Object
-
-
# init just tempest to get the list of scenarios
# as all the scenarios run Tempest
tempest = TestCase("Tempest", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
-response = requests.get('https://git.opnfv.org/cgit/functest/plain/testcases/config_functest.yaml')
+cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
+response = requests.get(cf)
functest_yaml_config = yaml.load(response.text)

print "****************************************"
print "* Generating reporting..... *"
+print ("* Data retention = %s days *" % PERIOD)
+print "* *"
print "****************************************"

-# For all the installers
-for installer in installers:
-    # get scenarios
-    scenario_results = getScenarios(tempest, installer)
-    scenario_stats = getScenarioStats(scenario_results)
-
-    items = {}
-    # For all the scenarios get results
-    for s, s_result in scenario_results.items():
-        testCases = []
-        # For each scenario declare the test cases
-        # Functest cases
-        for test_case in functest_test_list:
-            testCases.append(TestCase(test_case, "functest"))
-
-        # project/case
-        for test_case in companion_test_list:
-            test_split = test_case.split("/")
-            test_project = test_split[0]
-            test_case = test_split[1]
-            testCases.append(TestCase(test_case, test_project))
-
-        # Check if test case is runnable according to the installer, scenario
-        for test_case in testCases:
-            test_case.checkRunnable(installer, s, functest_yaml_config)
-            # print "testcase %s is %s" % (test_case.getName(),
-            # test_case.isRunnable)
-
-        print "--------------------------"
-        print "%s / %s:" % (installer, s)
-        for testCase in testCases:
-            time.sleep(1)
-            if testCase.isRunnable:
-                print " Searching results for case %s " % testCase.getName()
-                result = getResult(testCase, installer, s)
-                testCase.setCriteria(result)
-        items[s] = testCases
-        print "--------------------------"
-    print "****************************************"
-    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
-    templateEnv = jinja2.Environment(loader=templateLoader)
-
-    TEMPLATE_FILE = "index-status-tmpl.html"
-    template = templateEnv.get_template(TEMPLATE_FILE)
-
-    outputText = template.render(scenario_stats=scenario_stats,
-                                 items=items,
-                                 installer=installer,
-                                 period=PERIOD)
-
-    with open("index-status-" + installer + ".html", "wb") as fh:
-        fh.write(outputText)
+
+# For all the versions
+for version in versions:
+    # For all the installers
+    for installer in installers:
+        # get scenarios
+        scenario_results = getScenarios(tempest, installer, version)
+        scenario_stats = getScenarioStats(scenario_results)
+        items = {}
+        scenario_result_criteria = {}
+
+        # For all the scenarios get results
+        for s, s_result in scenario_results.items():
+            testCases = []
+            # Green or Red light for a given scenario
+            nb_test_runnable_for_this_scenario = 0
+            scenario_score = 0
+
+            # For each scenario declare the test cases
+            # Functest cases
+            for test_case in functest_test_list:
+                testCases.append(TestCase(test_case, "functest"))
+
+            # project/case
+            for test_case in companion_test_list:
+                test_split = test_case.split("/")
+                test_project = test_split[0]
+                test_case = test_split[1]
+                testCases.append(TestCase(test_case, test_project))
+
+            # Check if test case is runnable / installer, scenario
+            try:
+                for test_case in testCases:
+                    test_case.checkRunnable(installer, s, functest_yaml_config)
+                    # print "testcase %s is %s" % (test_case.getName(),
+                    #                              test_case.isRunnable)
+                print ("installer %s, version %s, scenario %s:" %
+                       (installer, version, s))
+                for testCase in testCases:
+                    time.sleep(1)
+                    if testCase.isRunnable:
+                        nb_test_runnable_for_this_scenario += 1
+                        print (" Searching results for case %s " %
+                               (testCase.getName()))
+                        result = getResult(testCase, installer, s, version)
+                        testCase.setCriteria(result)
+                        items[s] = testCases
+                        scenario_score = scenario_score + result
+            except:
+                print ("installer %s, version %s, scenario %s" %
+                       (installer, version, s))
+                print "No data available , error %s " % (sys.exc_info()[0])
+
+            # the validation criteria = nb runnable tests x 3
+            scenario_criteria = nb_test_runnable_for_this_scenario * 3
+            # if 0 runnable tests set criteria at a high value
+            if scenario_criteria < 1:
+                scenario_criteria = MAX_SCENARIO_CRITERIA
+
+            s_score = str(scenario_score) + "/" + str(scenario_criteria)
+            s_status = "KO"
+            if scenario_score < scenario_criteria:
+                print (">>>> scenario not OK, score = %s/%s" %
+                       (scenario_score, scenario_criteria))
+                s_status = "KO"
+            else:
+                print ">>>>> scenario OK, save the information"
+                s_status = "OK"
+                with open("./release/" + version +
+                          "/validated_scenario_history.txt", "a") as f:
+                    time_format = "%Y-%m-%d %H:%M"
+                    info = (datetime.datetime.now().strftime(time_format) +
+                            ";" + installer + ";" + s + "\n")
+                    f.write(info)
+
+            scenario_result_criteria[s] = ScenarioResult(s_status, s_score)
+            print "--------------------------"
+
+        templateLoader = jinja2.FileSystemLoader(os.path.dirname
+                                                 (os.path.abspath
+                                                  (__file__)))
+        templateEnv = jinja2.Environment(loader=templateLoader)
+
+        TEMPLATE_FILE = "./template/index-status-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_stats=scenario_stats,
+                                     scenario_results=scenario_result_criteria,
+                                     items=items,
+                                     installer=installer,
+                                     period=PERIOD,
+                                     version=version)
+
+        with open("./release/" + version +
+                  "/index-status-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
diff --git a/utils/test/reporting/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index 944b42809..563e53010 100644
--- a/utils/test/reporting/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -88,12 +88,12 @@ for installer in installers:
    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
    templateEnv = jinja2.Environment(loader=templateLoader)
-   TEMPLATE_FILE = "index-tempest-tmpl.html"
+   TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
    template = templateEnv.get_template(TEMPLATE_FILE)

    outputText = template.render(scenario_results=scenario_results,
                                 items=items,
                                 installer=installer)

-   with open("index-tempest-" + installer + ".html", "wb") as fh:
+   with open("./release/index-tempest-" + installer + ".html", "wb") as fh:
        fh.write(outputText)
diff --git a/utils/test/reporting/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index cf43f3ebc..78ca9f5b3 100644
--- a/utils/test/reporting/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -72,12 +72,12 @@ for installer in installers:
    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
    templateEnv = jinja2.Environment( loader=templateLoader )
-   TEMPLATE_FILE = "index-vims-tmpl.html"
+   TEMPLATE_FILE = "./template/index-vims-tmpl.html"
    template = templateEnv.get_template( TEMPLATE_FILE )

    outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer)

-   with open("index-vims" + installer + ".html", "wb") as fh:
+   with open("./release/index-vims" + installer + ".html", "wb") as fh:
        fh.write(outputText)
diff --git a/utils/test/reporting/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 0fd470d6f..7a0656b74 100644
--- a/utils/test/reporting/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -18,7 +18,7 @@
<body>
<div class="container">
<div class="masthead">
-   <h3 class="text-muted">Functest status page</h3>
+   <h3 class="text-muted">Functest status page ({{version}})</h3>
<nav>
<ul class="nav nav-justified">
<li class="active"><a href="index.html">Home</a></li>
@@ -37,15 +37,23 @@
</div>

<div class="scenario-overview">
-   <div class="panel-heading"><h4><b>List of last scenarios run over the last {{period}} days </b></h4></div>
+   <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
<table class="table">
<tr>
-   <th width="80%">Scenario</th>
-   <th width="20%">Iteration</th>
+   <th width="60%">Scenario</th>
+   <th width="20%">Status</th>
+   <th width="10%">Score</th>
+   <th width="10%">Iteration</th>
</tr>
{% for scenario,iteration in scenario_stats.iteritems() -%}
<tr class="tr-ok">
<td>{{scenario}}</td>
+   <td>{%if scenario_results[scenario].getStatus() is sameas "OK" -%}
+       <img src="../../img/icon-ok.png">
+       {%- else -%}
+       <img src="../../img/icon-nok.png">
+       {%- endif %}</td>
+   <td>{{scenario_results[scenario].getScore()}}</td>
<td>{{iteration}}</td>
</tr>
{%- endfor %}
@@ -53,7 +61,6 @@
</div>

-
{% for scenario, iteration in scenario_stats.iteritems() -%}
<div class="scenario-part">
<div class="page-header">
@@ -75,13 +82,13 @@
{% if test.isRunnable is sameas false -%}
<td>N.R</td>
{% elif test.getCriteria() > 2 -%}
-   <td><img src="./img/weather-clear.png"></td>
+   <td><img src="../../img/weather-clear.png"></td>
{%- elif test.getCriteria() > 1 -%}
-   <td><img src="./img/weather-few-clouds.png"></td>
+   <td><img src="../../img/weather-few-clouds.png"></td>
{%- elif test.getCriteria() > 0 -%}
-   <td><img src="./img/weather-overcast.png"></td>
+   <td><img src="../../img/weather-overcast.png"></td>
{%- else -%}
-   <td><img src="./img/weather-storm.png"></td>
+   <td><img src="../../img/weather-storm.png"></td>
{%- endif %}
{%- endfor %}
</tr>
diff --git a/utils/test/reporting/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html
index be0b79734..be0b79734 100644
--- a/utils/test/reporting/index-tempest-tmpl.html
+++ b/utils/test/reporting/functest/template/index-tempest-tmpl.html
diff --git a/utils/test/reporting/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html
index 8858182c1..8858182c1 100644
--- a/utils/test/reporting/index-vims-tmpl.html
+++ b/utils/test/reporting/functest/template/index-vims-tmpl.html
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
index a2ed3085c..379b93279 100644
--- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py
+++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
@@ -117,12 +117,15 @@ def format_vIMS_for_dashboard(results):
            # Calculate nb of tests run and nb of tests failed
            # vIMS_results = get_vIMSresults(vIMS_test)
            # print vIMS_results
-           if data_test['result'] == "Passed":
-               nbTests += 1
-           elif data_test['result'] == "Failed":
-               nbFailures += 1
-           elif data_test['result'] == "Skipped":
-               nbSkipped += 1
+           try:
+               if data_test['result'] == "Passed":
+                   nbTests += 1
+               elif data_test['result'] == "Failed":
+                   nbFailures += 1
+               elif data_test['result'] == "Skipped":
+                   nbSkipped += 1
+           except:
+               nbTests = 0

        new_element.append({'x': data['creation_date'],
                            'y1': nbTests,
@@ -149,10 +152,13 @@ def format_vIMS_for_dashboard(results):
            nbTestsOK = 0
            nbTestsKO = 0
-           if data_test['result'] == "Passed":
-               nbTestsOK += 1
-           elif data_test['result'] == "Failed":
-               nbTestsKO += 1
+           try:
+               if data_test['result'] == "Passed":
+                   nbTestsOK += 1
+               elif data_test['result'] == "Failed":
+                   nbTestsKO += 1
+           except:
+               nbTestsOK = 0

            nbTests += nbTestsOK + nbTestsKO
            nbFailures += nbTestsKO
@@ -366,7 +372,7 @@ def format_Rally_for_dashboard(results):
    # ********************************
    new_element = []
    for data in results:
-       summary_cursor = len(data)
+       summary_cursor = len(data['details']) - 1
        new_element.append({'x': data['creation_date'],
                            'y': int(data['details'][summary_cursor]['summary']['duration'])})
diff --git a/utils/test/result_collection_api/resources/handlers.py b/utils/test/result_collection_api/resources/handlers.py
index 1eda3b067..c1e8eb182 100644
--- a/utils/test/result_collection_api/resources/handlers.py
+++ b/utils/test/result_collection_api/resources/handlers.py
@@ -512,6 +512,8 @@ class TestResultsHandler(GenericApiHandler):
            - period : x (x last days)
            - scenario : the test scenario (previously version)
            - criteria : the global criteria status passed or failed
+           - trust_indicator : evaluate the stability of the test case to avoid
+             running systematically long and stable test case

        :param result_id: Get a result by ID
@@ -531,6 +533,7 @@
        scenario_arg = self.get_query_argument("scenario", None)
        criteria_arg = self.get_query_argument("criteria", None)
        period_arg = self.get_query_argument("period", None)
+       trust_indicator_arg = self.get_query_argument("trust_indicator", None)

        # prepare request
        get_request = dict()
@@ -559,6 +562,9 @@
        if criteria_arg is not None:
            get_request["criteria_tag"] = criteria_arg

+       if trust_indicator_arg is not None:
+           get_request["trust_indicator_arg"] = trust_indicator_arg
+
        if period_arg is not None:
            try:
                period_arg = int(period_arg)
diff --git a/utils/test/result_collection_api/resources/models.py b/utils/test/result_collection_api/resources/models.py
index 35b6af11f..06e95f94f 100644
--- a/utils/test/result_collection_api/resources/models.py
+++ b/utils/test/result_collection_api/resources/models.py
@@ -153,6 +153,7 @@ class TestResult:
self.build_tag = None
self.scenario = None
self.criteria = None
+ self.trust_indicator = None
@staticmethod
def test_result_from_dict(test_result_dict):
@@ -173,7 +174,21 @@ class TestResult:
t.build_tag = test_result_dict.get('build_tag')
t.scenario = test_result_dict.get('scenario')
t.criteria = test_result_dict.get('criteria')
-
+ # 0 < trust indicator < 1
+ # if bad value => set this indicator to 0
+ if test_result_dict.get('trust_indicator') is not None:
+ if isinstance(test_result_dict.get('trust_indicator'),
+ (int, long, float)):
+ if test_result_dict.get('trust_indicator') < 0:
+ t.trust_indicator = 0
+ elif test_result_dict.get('trust_indicator') > 1:
+ t.trust_indicator = 1
+ else:
+ t.trust_indicator = test_result_dict.get('trust_indicator')
+ else:
+ t.trust_indicator = 0
+ else:
+ t.trust_indicator = 0
return t
def format(self):
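The block above clamps trust_indicator into the [0, 1] range and falls back to 0 when the field is missing or not numeric. The same rule as a standalone sketch (the helper name is illustrative; the real check is inlined in test_result_from_dict and, being Python 2 code, also accepts long):

def normalize_trust_indicator(value):
    # non-numeric or missing values become 0, numeric values are clamped to [0, 1]
    if not isinstance(value, (int, float)):
        return 0
    if value < 0:
        return 0
    if value > 1:
        return 1
    return value

assert normalize_trust_indicator(None) == 0
assert normalize_trust_indicator(-3) == 0
assert normalize_trust_indicator(1.7) == 1
assert normalize_trust_indicator(0.8) == 0.8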
@@ -188,7 +203,8 @@ class TestResult:
"details": self.details,
"build_tag": self.build_tag,
"scenario": self.scenario,
- "criteria": self.criteria
+ "criteria": self.criteria,
+ "trust_indicator": self.trust_indicator
}
def format_http(self):
@@ -204,6 +220,6 @@ class TestResult:
"details": self.details,
"build_tag": self.build_tag,
"scenario": self.scenario,
- "criteria": self.criteria
+ "criteria": self.criteria,
+ "trust_indicator": self.trust_indicator
}
-
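Combined with the handler change, a client can pass trust_indicator alongside the existing case, installer, scenario, version and period query arguments. A sketch of such a query against the endpoint already used by the reporting scripts (the scenario name and trust_indicator value are only examples, and the server-side filtering is only as strict as the get_request mapping above):

import json
from urllib2 import Request, urlopen  # Python 2, as in the reporting scripts

# example values: vping_ssh, fuel, master and period=10 appear in this change,
# the scenario string and trust_indicator threshold are illustrative
url = ("http://testresults.opnfv.org/testapi/results?case=vping_ssh"
       "&period=10&installer=fuel&version=master"
       "&scenario=os-odl_l2-nofeature-ha&trust_indicator=0.5")
results = json.loads(urlopen(Request(url)).read())
print len(results['test_results'])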