author    Morgan Richomme <morgan.richomme@orange.com>  2016-03-28 19:15:11 +0000
committer Gerrit Code Review <gerrit@172.30.200.206>    2016-03-28 19:15:11 +0000
commit    344bc79ecf6efcc28fc1ac78aaa2e9708a4037ff (patch)
tree      179e8e3f7da0b415fc71ce22393200f8a79280b0 /utils/test/reporting/reporting-status.py
parent    9209de0961d4229ba535b20fe9385a9c7138c311 (diff)
parent    70e38d07f681b165291406cc4e010377c6202041 (diff)
Merge "Complete reporting status, process the scenario and get success criteria"
Diffstat (limited to 'utils/test/reporting/reporting-status.py')
-rw-r--r--  utils/test/reporting/reporting-status.py  256
1 file changed, 192 insertions(+), 64 deletions(-)
diff --git a/utils/test/reporting/reporting-status.py b/utils/test/reporting/reporting-status.py
index b27af4b14..e15bac9f7 100644
--- a/utils/test/reporting/reporting-status.py
+++ b/utils/test/reporting/reporting-status.py
@@ -1,16 +1,45 @@
from urllib2 import Request, urlopen, URLError
-import urllib2
import json
import jinja2
import os
-import random
+import re
+import requests
+import time
+import yaml
+
+# Declaration of the variables
+functest_test_list = ['vPing', 'vPing_userdata',
+ 'Tempest', 'Rally',
+ 'ODL', 'ONOS', 'vIMS']
+# functest_test_list = ['vPing']
+# functest_test_list = []
+companion_test_list = ['doctor/doctor-notification', 'promise/promise']
+# companion_test_list = []
+installers = ["apex", "compass", "fuel", "joid"]
+# installers = ["apex"]
+PERIOD = 10
+
+# Correspondence between the name of the test case and its name in the DB
+# ideally we should modify the DB to avoid such an interface...
+# '<name in the DB>': '<name in the config>'
+# I know it is ugly...
+test_match_matrix = {'vPing': 'vping_ssh',
+ 'vPing_userdata': 'vping_userdata',
+ 'ODL': 'odl',
+ 'ONOS': 'onos',
+ 'Tempest': 'tempest',
+ 'Rally': 'rally',
+ 'vIMS': 'vims',
+ 'doctor-notification': 'doctor',
+ 'promise': 'promise'}
class TestCase(object):
- def __init__(self, name, project, criteria=-1):
+ def __init__(self, name, project, criteria=-1, isRunnable=True):
self.name = name
self.project = project
self.criteria = criteria
+ self.isRunnable = isRunnable
def getName(self):
return self.name
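The hunk above hard-codes test_match_matrix to translate config-side test names into their DB-side names. As a minimal sketch (not part of this patch), the lookup with a hypothetical lowercase fallback for unmapped names behaves like this:

    # Excerpt of the mapping declared above; the fallback is an assumption.
    matrix = {'vPing': 'vping_ssh', 'Tempest': 'tempest'}

    def db_case_name(config_name, matrix):
        # Return the DB name if mapped, else guess the lowercased form.
        return matrix.get(config_name, config_name.lower())

    print(db_case_name('vPing', matrix))    # vping_ssh
    print(db_case_name('Promise', matrix))  # promise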
@@ -24,16 +53,65 @@ class TestCase(object):
def setCriteria(self, criteria):
self.criteria = criteria
-
-def getApiResults(case, installer):
+ def setIsRunnable(self, isRunnable):
+ self.isRunnable = isRunnable
+
+ def checkRunnable(self, installer, scenario, config):
+        # Re-use the Functest declarations from its configuration
+        # file config_functest.yaml (passed in here as config)
+        is_runnable = True
+        config_test = ""
+        TEST_ENV = config.get("test-dependencies")
+
+ # print " *********************** "
+ # print TEST_ENV
+ # print " ---------------------- "
+ # print "case = " + self.name
+ # print "installer = " + installer
+ # print "scenario = " + scenario
+ # print "project = " + self.project
+
+ # Retrieve test constraints
+        case_name_formatted = test_match_matrix[self.name]
+
+        try:
+            config_test = TEST_ENV[self.project][case_name_formatted]
+        except KeyError:
+            # not declared under the project key => look it up at top level
+            config_test = TEST_ENV[case_name_formatted]
+ except Exception, e:
+            print "Error [checkRunnable]:", e
+
+ # Retrieve test execution param
+ test_execution_context = {"installer": installer,
+ "scenario": scenario}
+ # By default we assume that all the tests are always runnable...
+    # if config_test is not empty => dependencies to be checked
+ if config_test is not None and len(config_test) > 0:
+ # possible criteria = ["installer", "scenario"]
+ # consider test criteria from config file
+        # and compare them against the CI execution context (env variables)
+ for criteria in config_test:
+ if re.search(config_test[criteria],
+ test_execution_context[criteria]) is None:
+ # print "Test "+ test + " cannot be run on the environment"
+ is_runnable = False
+ # print is_runnable
+ self.isRunnable = is_runnable
+
+
+def getApiResults(case, installer, scenario):
case = case.getName()
-
+    results = {'test_results': None}
# to remove proxy (to be removed at the end for local test only)
# proxy_handler = urllib2.ProxyHandler({})
# opener = urllib2.build_opener(proxy_handler)
# urllib2.install_opener(opener)
- url = "http://testresults.opnfv.org/testapi/results?case=" + case + "&period=30&installer=" + installer
- #url = "http://127.0.0.1:8000/results?case=" + case + "&period=30&installer=" + installer
+ # url = "http://127.0.0.1:8000/results?case=" + case + \
+ # "&period=30&installer=" + installer
+ url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
+ "&period=" + str(PERIOD) + "&installer=" + installer + \
+ "&scenario=" + scenario
request = Request(url)
try:
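checkRunnable above reduces to one rule: a case is runnable unless a constraint declared in config_functest.yaml fails to re.search against the execution context. A standalone sketch of that rule (the constraint values are made up for illustration):

    import re

    def is_runnable(constraints, context):
        # Every declared constraint must match its execution parameter.
        for key, pattern in constraints.items():
            if re.search(pattern, context[key]) is None:
                return False
        return True

    constraints = {'installer': 'fuel', 'scenario': 'odl'}
    context = {'installer': 'fuel', 'scenario': 'os-odl_l2-nofeature-ha'}
    print(is_runnable(constraints, context))  # True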
@@ -48,7 +126,18 @@ def getApiResults(case, installer):
def getScenarios(case, installer):
- results = getApiResults(case, installer)
+ case = case.getName()
+ url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
+ "&period=" + str(PERIOD) + "&installer=" + installer
+    request = Request(url)
+    results = {'test_results': None}
+
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'Got an error code:', e
+
test_results = results['test_results']
if test_results is not None:
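Both getApiResults and getScenarios build the testapi URL by string concatenation. A sketch of the same query built with urlencode instead (same endpoint and parameter names as above; this refactoring is not part of the patch):

    try:
        from urllib import urlencode           # Python 2
    except ImportError:
        from urllib.parse import urlencode     # Python 3

    BASE_URL = "http://testresults.opnfv.org/testapi/results"

    def results_url(case, installer, period, scenario=None):
        # Assemble the query string safely instead of concatenating.
        params = {'case': case, 'installer': installer, 'period': period}
        if scenario is not None:
            params['scenario'] = scenario
        return BASE_URL + "?" + urlencode(params)

    # e.g. results_url('tempest', 'fuel', 10, 'os-odl_l2-nofeature-ha')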
@@ -73,10 +162,22 @@ def getScenarioStats(scenario_results):
return scenario_stats
-def getResult(testCase, installer):
+def getNbtestOk(results):
+ nb_test_ok = 0
+ for r in results:
+ for k, v in r.iteritems():
+ try:
+ if "passed" in v:
+ nb_test_ok += 1
+            except Exception:
+ print "Cannot retrieve test status"
+ return nb_test_ok
+
+
+def getResult(testCase, installer, scenario):
# retrieve raw results
- results = getApiResults(testCase, installer)
+ results = getApiResults(testCase, installer, scenario)
# let's concentrate on test results only
test_results = results['test_results']
@@ -84,34 +185,44 @@ def getResult(testCase, installer):
if test_results is not None:
test_results.reverse()
- scenario_results = {}
+ scenario_results = []
- for r in test_results:
- if not r['version'] in scenario_results.keys():
- scenario_results[r['version']] = []
- scenario_results[r['version']].append(r)
+ # print " ---------------- "
+ # print test_results
+ # print " ---------------- "
+ # print "nb of results:" + str(len(test_results))
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- # For each scenario, we build a result object to deal with
- # results, criteria and error handling
- for result in scenario_results[s]:
- result["creation_date"] = result["creation_date"].split(".")[0]
-
- # Cannot be fully generic
- # need to look for specific criteria case by case
- # TODO add a criteria passed/failed in DB??
- # TODO result["Success_criteria"] = result["success_criteria"]
- # meanwhile just random....
- # and consider the last random arbitrarily
- # 4 levels for the results
- # 3: 4+ consecutive runs passing the success criteria
- # 2: <4 successful consecutive runs but passing the criteria
- # 1: close to pass the success criteria
- # 0: 0% success, not passing
- #
-
- return int(random.random()*4)+1
+ for r in test_results:
+ # print r["creation_date"]
+ # print r["criteria"]
+ scenario_results.append({r["creation_date"]: r["criteria"]})
+ # sort results
+ scenario_results.sort()
+ # 4 levels for the results
+ # 3: 4+ consecutive runs passing the success criteria
+ # 2: <4 successful consecutive runs but passing the criteria
+ # 1: close to pass the success criteria
+ # 0: 0% success, not passing
+ test_result_indicator = 0
+ nbTestOk = getNbtestOk(scenario_results)
+ # print "Nb test OK:"+ str(nbTestOk)
+    # classify according to the number of successful runs
+ if nbTestOk < 1:
+ test_result_indicator = 0
+ elif nbTestOk < 2:
+ test_result_indicator = 1
+ else:
+        # Check the last 4 runs
+        if (len(scenario_results) > 3):
+            last4runResults = scenario_results[-4:]
+            if getNbtestOk(last4runResults) > 3:
+ test_result_indicator = 3
+ else:
+ test_result_indicator = 2
+ else:
+ test_result_indicator = 2
+ print " >>>> Test indicator:" + str(test_result_indicator)
+ return test_result_indicator
# ******************************************************************************
# ******************************************************************************
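The scoring in getResult can be read as a small pure function over the list of {creation_date: criteria} records. A minimal sketch of the same four-level indicator, assuming level 3 requires all of the last four runs to pass, as the comments state:

    def count_ok(results):
        # results: list of {creation_date: criteria} records
        return sum(1 for r in results
                   for criteria in r.values() if "passed" in criteria)

    def indicator(results):
        nb_ok = count_ok(results)
        if nb_ok < 1:
            return 0  # no successful run at all
        if nb_ok < 2:
            return 1  # close to passing
        if len(results) > 3 and count_ok(results[-4:]) == 4:
            return 3  # 4+ consecutive passing runs
        return 2

    runs = [{'2016-03-25': 'passed'}, {'2016-03-26': 'passed'},
            {'2016-03-27': 'passed'}, {'2016-03-28': 'passed'}]
    print(indicator(runs))  # 3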
@@ -129,41 +240,57 @@ def getResult(testCase, installer):
# TODo create TestCriteria Object
-installers = ["apex", "compass", "fuel", "joid"]
-# init just tempest to get the scenario as all the scenarios run Temepst
+# init just tempest to get the list of scenarios
+# as all the scenarios run Tempest
tempest = TestCase("Tempest", "functest", -1)
-for installer in installers:
+# Retrieve the Functest configuration to detect which tests are relevant
+# according to the installer and the scenario
+response = requests.get('https://git.opnfv.org/cgit/functest/plain/testcases/config_functest.yaml')
+functest_yaml_config = yaml.load(response.text)
+print "****************************************"
+print "* Generating reporting..... *"
+print "****************************************"
+# For all the installers
+for installer in installers:
+ # get scenarios
scenario_results = getScenarios(tempest, installer)
scenario_stats = getScenarioStats(scenario_results)
items = {}
-
+ # For all the scenarios get results
for s, s_result in scenario_results.items():
-
- vPing = TestCase("vPing", "functest")
- vPing_userdata = TestCase("vPing_userdata", "functest")
- tempest = TestCase("Tempest", "functest")
- rally = TestCase("Rally", "functest")
- odl = TestCase("ODL", "functest")
- onos = TestCase("ONOS", "functest")
- ovno = TestCase("OVNO", "functest")
- vIMS = TestCase("vIMS", "functest")
- doctor = TestCase("doctor-notification", "doctor")
- promise = TestCase("promise", "promise")
- odl_vpn = TestCase("ODL VPN Service tests", "sdnvpn")
- bgpvpn_api = TestCase("OpenStack Neutron BGPVPN API extension tests",
- "sdnvpn")
- testCases = [vPing, vPing_userdata, tempest, rally, odl, onos, vIMS,
- doctor, promise]
-
+ testCases = []
+ # For each scenario declare the test cases
+ # Functest cases
+ for test_case in functest_test_list:
+ testCases.append(TestCase(test_case, "functest"))
+
+ # project/case
+ for test_case in companion_test_list:
+ test_split = test_case.split("/")
+ test_project = test_split[0]
+ test_case = test_split[1]
+ testCases.append(TestCase(test_case, test_project))
+
+    # Check whether each test case is runnable on this installer / scenario
+ for test_case in testCases:
+ test_case.checkRunnable(installer, s, functest_yaml_config)
+ # print "testcase %s is %s" % (test_case.getName(),
+ # test_case.isRunnable)
+
+ print "--------------------------"
+ print "%s / %s:" % (installer, s)
for testCase in testCases:
- result = getResult(testCase, installer)
- testCase.setCriteria(result)
- # print "case %s (%s) = %s " % (testCase.getName(), s, result)
- items[s] = testCases
-
+ time.sleep(1)
+ if testCase.isRunnable:
+ print " Searching results for case %s " % testCase.getName()
+ result = getResult(testCase, installer, s)
+ testCase.setCriteria(result)
+ items[s] = testCases
+ print "--------------------------"
+ print "****************************************"
templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
templateEnv = jinja2.Environment(loader=templateLoader)
@@ -172,7 +299,8 @@ for installer in installers:
outputText = template.render(scenario_stats=scenario_stats,
items=items,
- installer=installer)
+ installer=installer,
+ period=PERIOD)
with open("index-status-" + installer + ".html", "wb") as fh:
fh.write(outputText)
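For reference, a self-contained sketch of the final rendering step with an inline template (the real template file is not shown in this diff; the template string below is illustrative only):

    import jinja2

    template = jinja2.Template(
        "<h1>{{ installer }} (last {{ period }} days)</h1>\n"
        "{% for scenario, cases in items.items() %}"
        "<p>{{ scenario }}: {{ cases | length }} cases</p>"
        "{% endfor %}")

    html = template.render(installer="fuel", period=10,
                           items={"os-odl_l2-nofeature-ha": ["Tempest"]})
    with open("index-status-fuel.html", "w") as fh:
        fh.write(html)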