summary refs log tree commit diff stats
path: root/utils/test
diff options
context:
space:
mode:
Diffstat (limited to 'utils/test')
-rw-r--r--utils/test/reporting/functest/img/icon-nok.pngbin0 -> 2317 bytes
-rw-r--r--utils/test/reporting/functest/img/icon-ok.pngbin0 -> 4063 bytes
-rw-r--r--utils/test/reporting/functest/reporting-status.py66
-rw-r--r--utils/test/reporting/functest/template/index-status-tmpl.html13
-rw-r--r--utils/test/result_collection_api/resources/handlers.py6
-rw-r--r--utils/test/result_collection_api/resources/models.py24
6 files changed, 95 insertions, 14 deletions
diff --git a/utils/test/reporting/functest/img/icon-nok.png b/utils/test/reporting/functest/img/icon-nok.png
new file mode 100644
index 000000000..526b5294b
--- /dev/null
+++ b/utils/test/reporting/functest/img/icon-nok.png
Binary files differ
diff --git a/utils/test/reporting/functest/img/icon-ok.png b/utils/test/reporting/functest/img/icon-ok.png
new file mode 100644
index 000000000..3a9de2e89
--- /dev/null
+++ b/utils/test/reporting/functest/img/icon-ok.png
Binary files differ
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 2eb0f50d5..9271717bb 100644
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -1,4 +1,5 @@
from urllib2 import Request, urlopen, URLError
+import datetime
import json
import jinja2
import os
@@ -20,12 +21,14 @@ installers = ["apex", "compass", "fuel", "joid"]
versions = ["brahmaputra", "master"]
# versions = ["master"]
PERIOD = 10
+MAX_SCENARIO_CRITERIA = 18
# Correspondence between the name of the test case and the name in the DB
# ideally we should modify the DB to avoid such interface....
# '<name in the DB':'<name in the config'>
# I know it is ugly...
-test_match_matrix = {'vPing': 'vping_ssh',
+test_match_matrix = {'healthcheck': 'healthcheck',
+ 'vPing': 'vping_ssh',
'vPing_userdata': 'vping_userdata',
'ODL': 'odl',
'ONOS': 'onos',
@@ -102,6 +105,20 @@ class TestCase(object):
self.isRunnable = is_runnable
+class ScenarioResult(object):
+ def __init__(self, status, score=0):
+ self.status = status
+ self.score = score
+
+ def getStatus(self):
+ return self.status
+
+ def getScore(self):
+ return self.score
+
+# *****************************************************************************
+
+
def getApiResults(case, installer, scenario, version):
case = case.getName()
results = json.dumps([])
@@ -239,13 +256,16 @@ tempest = TestCase("Tempest", "functest", -1)
# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
-cf = "https://git.opnfv.org/cgit/functest/plain/testcases/config_functest.yaml"
+cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
response = requests.get(cf)
functest_yaml_config = yaml.load(response.text)
print "****************************************"
print "* Generating reporting..... *"
+print ("* Data retention = %s days *" % PERIOD)
+print "* *"
print "****************************************"
+
# For all the versions
for version in versions:
# For all the installers
@@ -253,11 +273,16 @@ for version in versions:
# get scenarios
scenario_results = getScenarios(tempest, installer, version)
scenario_stats = getScenarioStats(scenario_results)
-
items = {}
+ scenario_result_criteria = {}
+
# For all the scenarios get results
for s, s_result in scenario_results.items():
testCases = []
+ # Green or Red light for a given scenario
+ nb_test_runnable_for_this_scenario = 0
+ scenario_score = 0
+
# For each scenario declare the test cases
# Functest cases
for test_case in functest_test_list:
@@ -276,31 +301,58 @@ for version in versions:
test_case.checkRunnable(installer, s, functest_yaml_config)
# print "testcase %s is %s" % (test_case.getName(),
# test_case.isRunnable)
- print "--------------------------"
print ("installer %s, version %s, scenario %s:" %
(installer, version, s))
for testCase in testCases:
time.sleep(1)
if testCase.isRunnable:
+ nb_test_runnable_for_this_scenario += 1
print (" Searching results for case %s " %
(testCase.getName()))
result = getResult(testCase, installer, s, version)
testCase.setCriteria(result)
items[s] = testCases
- print "--------------------------"
+ scenario_score = scenario_score + result
except:
print ("installer %s, version %s, scenario %s" %
(installer, version, s))
print "No data available , error %s " % (sys.exc_info()[0])
- print "****************************************"
- templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+ # the validation criteria = nb runnable tests x 3
+ scenario_criteria = nb_test_runnable_for_this_scenario * 3
+ # if 0 runnable tests set criteria at a high value
+ if scenario_criteria < 1:
+ scenario_criteria = MAX_SCENARIO_CRITERIA
+
+ s_score = str(scenario_score) + "/" + str(scenario_criteria)
+ s_status = "KO"
+ if scenario_score < scenario_criteria:
+ print (">>>> scenario not OK, score = %s/%s" %
+ (scenario_score, scenario_criteria))
+ s_status = "KO"
+ else:
+ print ">>>>> scenario OK, save the information"
+ s_status = "OK"
+ with open("./release/" + version +
+ "/validated_scenario_history.txt", "a") as f:
+ time_format = "%Y-%m-%d %H:%M"
+ info = (datetime.datetime.now().strftime(time_format) +
+ ";" + installer + ";" + s + "\n")
+ f.write(info)
+
+ scenario_result_criteria[s] = ScenarioResult(s_status, s_score)
+ print "--------------------------"
+
+ templateLoader = jinja2.FileSystemLoader(os.path.dirname
+ (os.path.abspath
+ (__file__)))
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "./template/index-status-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_stats=scenario_stats,
+ scenario_results=scenario_result_criteria,
items=items,
installer=installer,
period=PERIOD,
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 604f2c8e4..7a0656b74 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -40,12 +40,20 @@
<div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
<table class="table">
<tr>
- <th width="80%">Scenario</th>
- <th width="20%">Iteration</th>
+ <th width="60%">Scenario</th>
+ <th width="20%">Status</th>
+ <th width="10%">Score</th>
+ <th width="10%">Iteration</th>
</tr>
{% for scenario,iteration in scenario_stats.iteritems() -%}
<tr class="tr-ok">
<td>{{scenario}}</td>
+ <td>{%if scenario_results[scenario].getStatus() is sameas "OK" -%}
+ <img src="../../img/icon-ok.png">
+ {%- else -%}
+ <img src="../../img/icon-nok.png">
+ {%- endif %}</td>
+ <td>{{scenario_results[scenario].getScore()}}</td>
<td>{{iteration}}</td>
</tr>
{%- endfor %}
@@ -53,7 +61,6 @@
</div>
-
{% for scenario, iteration in scenario_stats.iteritems() -%}
<div class="scenario-part">
<div class="page-header">
diff --git a/utils/test/result_collection_api/resources/handlers.py b/utils/test/result_collection_api/resources/handlers.py
index 1eda3b067..c1e8eb182 100644
--- a/utils/test/result_collection_api/resources/handlers.py
+++ b/utils/test/result_collection_api/resources/handlers.py
@@ -512,6 +512,8 @@ class TestResultsHandler(GenericApiHandler):
- period : x (x last days)
- scenario : the test scenario (previously version)
- criteria : the global criteria status passed or failed
+ - trust_indicator : evaluate the stability of the test case to avoid
+ running systematically long and stable test case
:param result_id: Get a result by ID
@@ -531,6 +533,7 @@ class TestResultsHandler(GenericApiHandler):
scenario_arg = self.get_query_argument("scenario", None)
criteria_arg = self.get_query_argument("criteria", None)
period_arg = self.get_query_argument("period", None)
+ trust_indicator_arg = self.get_query_argument("trust_indicator", None)
# prepare request
get_request = dict()
@@ -559,6 +562,9 @@ class TestResultsHandler(GenericApiHandler):
if criteria_arg is not None:
get_request["criteria_tag"] = criteria_arg
+ if trust_indicator_arg is not None:
+ get_request["trust_indicator_arg"] = trust_indicator_arg
+
if period_arg is not None:
try:
period_arg = int(period_arg)
diff --git a/utils/test/result_collection_api/resources/models.py b/utils/test/result_collection_api/resources/models.py
index 35b6af11f..06e95f94f 100644
--- a/utils/test/result_collection_api/resources/models.py
+++ b/utils/test/result_collection_api/resources/models.py
@@ -153,6 +153,7 @@ class TestResult:
self.build_tag = None
self.scenario = None
self.criteria = None
+ self.trust_indicator = None
@staticmethod
def test_result_from_dict(test_result_dict):
@@ -173,7 +174,21 @@ class TestResult:
t.build_tag = test_result_dict.get('build_tag')
t.scenario = test_result_dict.get('scenario')
t.criteria = test_result_dict.get('criteria')
-
+ # 0 < trust indicator < 1
+ # if bad value => set this indicator to 0
+ if test_result_dict.get('trust_indicator') is not None:
+ if isinstance(test_result_dict.get('trust_indicator'),
+ (int, long, float)):
+ if test_result_dict.get('trust_indicator') < 0:
+ t.trust_indicator = 0
+ elif test_result_dict.get('trust_indicator') > 1:
+ t.trust_indicator = 1
+ else:
+ t.trust_indicator = test_result_dict.get('trust_indicator')
+ else:
+ t.trust_indicator = 0
+ else:
+ t.trust_indicator = 0
return t
def format(self):
@@ -188,7 +203,8 @@ class TestResult:
"details": self.details,
"build_tag": self.build_tag,
"scenario": self.scenario,
- "criteria": self.criteria
+ "criteria": self.criteria,
+ "trust_indicator": self.trust_indicator
}
def format_http(self):
@@ -204,6 +220,6 @@ class TestResult:
"details": self.details,
"build_tag": self.build_tag,
"scenario": self.scenario,
- "criteria": self.criteria
+ "criteria": self.criteria,
+ "trust_indicator": self.trust_indicator
}
-