Diffstat (limited to 'utils/test/reporting/functest')
-rw-r--r--  utils/test/reporting/functest/reporting-status.py             | 87
-rw-r--r--  utils/test/reporting/functest/reporting-tempest.py            | 71
-rw-r--r--  utils/test/reporting/functest/reporting-vims.py               | 68
-rw-r--r--  utils/test/reporting/functest/reportingConf.py                |  8
-rw-r--r--  utils/test/reporting/functest/reportingUtils.py               | 38
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html |  2
-rw-r--r--  utils/test/reporting/functest/testCase.py                     | 58
7 files changed, 208 insertions(+), 124 deletions(-)
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index adbee36aa..b76f783fc 100644
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -8,8 +8,6 @@
#
import datetime
import jinja2
-import logging
-import os
import requests
import sys
import time
@@ -21,17 +19,7 @@ import testCase as tc
import scenarioResult as sr
# Logger
-logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
-logger = logging.getLogger()
-
-fileHandler = logging.FileHandler("{0}/{1}".format('.', conf.LOG_FILE))
-fileHandler.setFormatter(logFormatter)
-logger.addHandler(fileHandler)
-
-consoleHandler = logging.StreamHandler()
-consoleHandler.setFormatter(logFormatter)
-logger.addHandler(consoleHandler)
-logger.setLevel(conf.LOG_LEVEL)
+logger = utils.getLogger("Status")
# Initialization
testValid = []
@@ -48,11 +36,11 @@ response = requests.get(cf)
functest_yaml_config = yaml.load(response.text)
-logger.info("****************************************")
-logger.info("* Generating reporting..... *")
-logger.info("* Data retention = %s days *" % conf.PERIOD)
-logger.info("* *")
-logger.info("****************************************")
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % conf.PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")
@@ -111,27 +99,33 @@ for version in conf.versions:
for test_case in testValid:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- logger.debug("testcase %s is %s" % (test_case.getName(),
- test_case.isRunnable))
+ logger.debug("testcase %s is %s" %
+ (test_case.getDisplayName(),
+ test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
+ displayName = test_case.getDisplayName()
project = test_case.getProject()
nb_test_runnable_for_this_scenario += 1
logger.info(" Searching results for case %s " %
- (dbName))
+ (displayName))
result = utils.getResult(dbName, installer, s, version)
- logger.info(" >>>> Test score = " + str(result))
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
+ # at least 1 result for the test
+ if result > -1:
+ logger.info(" >>>> Test score = " + str(result))
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(name,
+ project,
+ "",
+ result,
+ True,
+ 1))
+ scenario_score = scenario_score + result
+ else:
+ logger.debug("No results found")
# 2) Manage the test cases for the scenario qualification
# concretely Tiers > 3
@@ -144,18 +138,23 @@ for version in conf.versions:
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
+ displayName = test_case.getDisplayName()
project = test_case.getProject()
logger.info(" Searching results for case %s " %
- (dbName))
+ (displayName))
result = utils.getResult(dbName, installer, s, version)
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 4))
+ # at least 1 result for the test
+ if result > -1:
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(name,
+ project,
+ "",
+ result,
+ True,
+ 4))
+ else:
+ logger.debug("No results found")
items[s] = testCases2BeDisplayed
except:
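
Both hunks above apply the same pattern: utils.getResult() can now return -1 (see the reportingUtils.py change further down), and a score of -1 means "no run in the retention window" and must be kept out of the scenario totals. A minimal sketch of the guard, with hypothetical scores standing in for API data:

    # Hypothetical per-test indicators; -1 is the new "no run" sentinel.
    scores = [3, -1, 2, 0, -1]

    scenario_score = 0
    displayed = []
    for result in scores:
        if result > -1:            # at least 1 result for the test
            displayed.append(result)
            scenario_score += result
        # else: the test is skipped and does not affect the score

    assert scenario_score == 5
    assert len(displayed) == 3
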
@@ -193,12 +192,10 @@ for version in conf.versions:
scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
logger.info("--------------------------")
- templateLoader = jinja2.FileSystemLoader(os.path.dirname
- (os.path.abspath
- (__file__)))
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
templateEnv = jinja2.Environment(loader=templateLoader)
- TEMPLATE_FILE = "./template/index-status-tmpl.html"
+ TEMPLATE_FILE = "/template/index-status-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_stats=scenario_stats,
@@ -208,6 +205,6 @@ for version in conf.versions:
period=conf.PERIOD,
version=version)
- with open("./release/" + version +
+ with open(conf.REPORTING_PATH + "/release/" + version +
"/index-status-" + installer + ".html", "wb") as fh:
fh.write(outputText)
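
All three scripts switch from paths derived via os.path.abspath(__file__) to conf.REPORTING_PATH, so the generators behave the same whether launched from the repo checkout or from a cron job with a different working directory. A standalone sketch of the Jinja2 pattern the patch converges on (REPORTING_PATH value taken from reportingConf.py below; template variables reduced to placeholders):

    import jinja2

    REPORTING_PATH = "."   # conf.REPORTING_PATH in the patch

    templateLoader = jinja2.FileSystemLoader(REPORTING_PATH)
    templateEnv = jinja2.Environment(loader=templateLoader)

    # The leading "/" is tolerated: FileSystemLoader normalizes the
    # template path and resolves it under its own root directory.
    template = templateEnv.get_template("/template/index-status-tmpl.html")
    outputText = template.render(scenario_stats={}, items={},
                                 period=10, version="master")

    # Assumes the release/<version> tree exists, as in the repo layout;
    # "wb" + text output matches the Python 2 scripts above.
    with open(REPORTING_PATH + "/release/master/index-status-apex.html",
              "wb") as fh:
        fh.write(outputText)
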
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index a065ef442..6da18c425 100644
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -1,25 +1,43 @@
from urllib2 import Request, urlopen, URLError
import json
import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
-installers = ["apex", "compass", "fuel", "joid"]
+installers = conf.installers
items = ["tests", "Success rate", "duration"]
-PERIOD = 7
-print "Generate Tempest automatic reporting"
+PERIOD = conf.PERIOD
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("* Generating reporting Tempest_smoke_serial *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("************************************************")
+
+logger.info("Success criteria: nb tests executed > %s s," +
+ "test duration < %s s," +
+ "success rate > %s " % (criteria_nb_test,
+ criteria_duration,
+ criteria_success_rate))
+
for installer in installers:
# we consider the Tempest results of the last PERIOD days
- url = "http://testresults.opnfv.org/test/api/v1/results?case=tempest_smoke_serial"
- request = Request(url + '&period=' + str(PERIOD)
- + '&installer=' + installer + '&version=master')
-
+ url = conf.URL_BASE + "?case=tempest_smoke_serial"
+ request = Request(url + '&period=' + str(PERIOD) +
+ '&installer=' + installer + '&version=master')
+ logger.info("Search tempest_smoke_serial results for installer %s"
+ % installer)
try:
response = urlopen(request)
k = response.read()
results = json.loads(k)
except URLError, e:
- print 'No kittez. Got an error code:', e
+ logger.error("Error code: %s" % e)
test_results = results['results']
test_results.reverse()
@@ -48,8 +66,8 @@ for installer in installers:
nb_tests_run = result['details']['tests']
nb_tests_failed = result['details']['failures']
if nb_tests_run != 0:
- success_rate = 100*(int(nb_tests_run)
- - int(nb_tests_failed))/int(nb_tests_run)
+ success_rate = 100*(int(nb_tests_run) -
+ int(nb_tests_failed)) / int(nb_tests_run)
else:
success_rate = 0
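
The reflow keeps the original arithmetic, and the order matters: these scripts run under Python 2, where / on two ints truncates, so multiplying by 100 before dividing is what keeps the percentage meaningful. A worked check under those Python 2 semantics:

    nb_tests_run = 210
    nb_tests_failed = 7

    # 100 * 203 / 210 -> 96 with Python 2 integer division
    success_rate = 100*(int(nb_tests_run) -
                        int(nb_tests_failed)) / int(nb_tests_run)
    assert success_rate == 96

    # Dividing first would truncate to 0 and lose the signal entirely
    assert (int(nb_tests_run) - int(nb_tests_failed)) / int(nb_tests_run) == 0
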
@@ -63,40 +81,49 @@ for installer in installers:
crit_time = False
# Expect that at least 165 tests are run
- if nb_tests_run >= 165:
+ if nb_tests_run >= criteria_nb_test:
crit_tests = True
# Expect that at least 90% of success
- if success_rate >= 90:
+ if success_rate >= criteria_success_rate:
crit_rate = True
# Expect that the suite duration is inferior to 30m
- if result['details']['duration'] < 1800:
+ if result['details']['duration'] < criteria_duration:
crit_time = True
result['criteria'] = {'tests': crit_tests,
'Success rate': crit_rate,
'duration': crit_time}
- # error management
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Nb Test run: %s" % nb_tests_run)
+ logger.debug("Test duration: %s"
+ % result['details']['duration'])
+ logger.debug("Success rate: %s" % success_rate)
+ except:
+ logger.error("Data format error")
+
+ # Error management
# ****************
try:
errors = result['details']['errors']
result['errors'] = errors.replace('{0}', '')
except:
- print "Error field not present (Brahamputra runs?)"
+            logger.error("Error field not present (Brahmaputra runs?)")
- mypath = os.path.abspath(__file__)
- tplLoader = jinja2.FileSystemLoader(os.path.dirname(mypath))
- templateEnv = jinja2.Environment(loader=tplLoader)
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+ templateEnv = jinja2.Environment(loader=templateLoader)
- TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
+ TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_results=scenario_results,
items=items,
installer=installer)
- with open("./release/master/index-tempest-" +
+ with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
installer + ".html", "wb") as fh:
fh.write(outputText)
-print "Tempest automatic reporting Done"
+logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index 4033687e8..d0436ed14 100644
--- a/utils/test/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -1,7 +1,11 @@
from urllib2 import Request, urlopen, URLError
import json
import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
+
+logger = utils.getLogger("vIMS")
+
def sig_test_format(sig_test):
nbPassed = 0
@@ -9,7 +13,7 @@ def sig_test_format(sig_test):
nbSkipped = 0
for data_test in sig_test:
if data_test['result'] == "Passed":
- nbPassed+= 1
+ nbPassed += 1
elif data_test['result'] == "Failed":
nbFailures += 1
elif data_test['result'] == "Skipped":
@@ -20,21 +24,29 @@ def sig_test_format(sig_test):
total_sig_test_result['skipped'] = nbSkipped
return total_sig_test_result
-installers = ["fuel", "compass", "joid", "apex"]
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("****************************************")
+logger.info("* Generating reporting vIMS *")
+logger.info("* Data retention = %s days *" % conf.PERIOD)
+logger.info("* *")
+logger.info("****************************************")
+installers = conf.installers
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("Start processing....")
for installer in installers:
- request = Request('http://testresults.opnfv.org/test/api/v1/results?case=vims&installer=' + installer)
+ logger.info("Search vIMS results for installer %s" % installer)
+ request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
try:
response = urlopen(request)
k = response.read()
results = json.loads(k)
except URLError, e:
- print 'No kittez. Got an error code:', e
+ logger.error("Error code: %s" % e)
test_results = results['results']
- test_results.reverse()
+
+ logger.debug("Results found: %s" % test_results)
scenario_results = {}
for r in test_results:
@@ -44,6 +56,7 @@ for installer in installers:
for s, s_result in scenario_results.items():
scenario_results[s] = s_result[0:5]
+ logger.debug("Search for success criteria")
for result in scenario_results[s]:
result["start_date"] = result["start_date"].split(".")[0]
sig_test = result['details']['sig_test']['result']
@@ -67,17 +80,34 @@ for installer in installers:
result['pr_step_ok'] = 0
if nb_step != 0:
result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
-
-
- templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
- templateEnv = jinja2.Environment( loader=templateLoader )
-
- TEMPLATE_FILE = "./template/index-vims-tmpl.html"
- template = templateEnv.get_template( TEMPLATE_FILE )
-
- outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer)
-
- with open("./release/master/index-vims-" + installer + ".html", "wb") as fh:
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Orchestrator deployment: %s s"
+ % result['details']['orchestrator']['duration'])
+ logger.debug("vIMS deployment: %s s"
+ % result['details']['vIMS']['duration'])
+ logger.debug("Signaling testing: %s s"
+ % result['details']['sig_test']['duration'])
+ logger.debug("Signaling testing results: %s"
+ % format_result)
+ except:
+ logger.error("Data badly formatted")
+ logger.debug("------------------------------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+ templateEnv = jinja2.Environment(loader=templateLoader)
+
+ TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ step_order=step_order,
+ installer=installer)
+
+ with open(conf.REPORTING_PATH +
+ "/release/master/index-vims-" +
+ installer + ".html", "wb") as fh:
fh.write(outputText)
-
+logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index 61410b414..a58eeecc9 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -13,14 +13,16 @@ installers = ["apex", "compass", "fuel", "joid"]
# installers = ["apex"]
# list of test cases declared in testcases.yaml but that must not be
# taken into account for the scoring
-blacklist = ["odl", "ovno", "security_scan"]
+blacklist = ["odl", "ovno", "security_scan", "copper", "moon"]
# versions = ["brahmaputra", "master"]
versions = ["master"]
PERIOD = 10
MAX_SCENARIO_CRITERIA = 18
# get the last 5 test results to determinate the success criteria
NB_TESTS = 5
+# REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
+REPORTING_PATH = "."
URL_BASE = 'http://testresults.opnfv.org/test/api/v1/results'
TEST_CONF = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
-LOG_LEVEL = "INFO"
-LOG_FILE = "reporting.log"
+LOG_LEVEL = "ERROR"
+LOG_FILE = REPORTING_PATH + "/reporting.log"
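
reportingConf.py is now the single place that defines installers, retention period, paths and the API endpoint, and every script imports it as conf. A sketch of a query URL assembled the way the patched scripts do:

    import reportingConf as conf

    installer = "fuel"   # any entry from conf.installers
    url = (conf.URL_BASE + "?case=tempest_smoke_serial" +
           "&period=" + str(conf.PERIOD) +
           "&installer=" + installer + "&version=master")
    # -> http://testresults.opnfv.org/test/api/v1/results?case=...&period=10
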
diff --git a/utils/test/reporting/functest/reportingUtils.py b/utils/test/reporting/functest/reportingUtils.py
index 2f06b8449..5051ffa95 100644
--- a/utils/test/reporting/functest/reportingUtils.py
+++ b/utils/test/reporting/functest/reportingUtils.py
@@ -7,8 +7,26 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
from urllib2 import Request, urlopen, URLError
+import logging
import json
-import reportingConf
+import reportingConf as conf
+
+
+def getLogger(module):
+ logFormatter = logging.Formatter("%(asctime)s [" +
+ module +
+ "] [%(levelname)-5.5s] %(message)s")
+ logger = logging.getLogger()
+
+    fileHandler = logging.FileHandler(conf.LOG_FILE)
+ fileHandler.setFormatter(logFormatter)
+ logger.addHandler(fileHandler)
+
+ consoleHandler = logging.StreamHandler()
+ consoleHandler.setFormatter(logFormatter)
+ logger.addHandler(consoleHandler)
+ logger.setLevel(conf.LOG_LEVEL)
+ return logger
def getApiResults(case, installer, scenario, version):
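
One caveat with this helper: logging.getLogger() without a name returns the root logger, so every call stacks another file handler and another console handler onto the same logger, and a process that built two reports would log each line twice. A possible variant using named loggers (a suggestion, not part of this patch):

    import logging

    def getLogger(module, log_file="./reporting.log", level="INFO"):
        # Named logger: repeat calls for the same module reuse the
        # existing handlers instead of piling new ones onto the root.
        logger = logging.getLogger(module)
        if not logger.handlers:
            fmt = logging.Formatter("%(asctime)s [" + module +
                                    "] [%(levelname)-5.5s] %(message)s")
            for handler in (logging.FileHandler(log_file),
                            logging.StreamHandler()):
                handler.setFormatter(fmt)
                logger.addHandler(handler)
            logger.setLevel(level)
        return logger
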
@@ -19,10 +37,10 @@ def getApiResults(case, installer, scenario, version):
# urllib2.install_opener(opener)
# url = "http://127.0.0.1:8000/results?case=" + case + \
# "&period=30&installer=" + installer
- url = (reportingConf.URL_BASE + "?case=" + case +
- "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+ url = (conf.URL_BASE + "?case=" + case +
+ "&period=" + str(conf.PERIOD) + "&installer=" + installer +
"&scenario=" + scenario + "&version=" + version +
- "&last=" + str(reportingConf.NB_TESTS))
+ "&last=" + str(conf.NB_TESTS))
request = Request(url)
try:
@@ -38,9 +56,8 @@ def getApiResults(case, installer, scenario, version):
def getScenarios(case, installer, version):
case = case.getName()
- print case
- url = (reportingConf.URL_BASE + "?case=" + case +
- "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+ url = (conf.URL_BASE + "?case=" + case +
+ "&period=" + str(conf.PERIOD) + "&installer=" + installer +
"&version=" + version)
request = Request(url)
@@ -115,11 +132,16 @@ def getResult(testCase, installer, scenario, version):
# 2: <4 successful consecutive runs but passing the criteria
# 1: close to pass the success criteria
# 0: 0% success, not passing
+ # -1: no run available
test_result_indicator = 0
nbTestOk = getNbtestOk(scenario_results)
+
# print "Nb test OK (last 10 days):"+ str(nbTestOk)
# check that we have at least 4 runs
- if nbTestOk < 1:
+ if len(scenario_results) < 1:
+ # No results available
+ test_result_indicator = -1
+ elif nbTestOk < 1:
test_result_indicator = 0
elif nbTestOk < 2:
test_result_indicator = 1
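
The new branch completes the indicator scale in the comment block above: -1 now distinguishes "no run available" from "ran and failed" (0). Restated as a self-contained sketch; the cut-offs for indicators 2 and 3 sit outside this hunk, so they are elided:

    def result_indicator(scenario_results, nb_test_ok):
        # Map the recent runs of one test case to a coarse score
        if len(scenario_results) < 1:
            return -1    # no run available (new in this patch)
        if nb_test_ok < 1:
            return 0     # runs exist, none passed
        if nb_test_ok < 2:
            return 1     # close to passing
        return 2         # placeholder; real 2/3 thresholds not shown here

    assert result_indicator([], 0) == -1
    assert result_indicator([{'criteria': 'FAILED'}], 0) == 0
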
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 89a1d1527..0c3fa9426 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -76,7 +76,7 @@
{% for test in items[scenario] -%}
<th>
{% if test.getCriteria() > -1 -%}
- {{test.getDbName() }}
+ {{test.getDisplayName() }}
{%- endif %}
{% if test.getTier() > 3 -%}
*
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index f0e8f5995..e19853a09 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -19,6 +19,28 @@ class TestCase(object):
self.criteria = criteria
self.isRunnable = isRunnable
self.tier = tier
+ display_name_matrix = {'healthcheck': 'healthcheck',
+ 'vping_ssh': 'vPing (ssh)',
+ 'vping_userdata': 'vPing (userdata)',
+ 'odl': 'ODL',
+ 'onos': 'ONOS',
+ 'ocl': 'OCL',
+ 'tempest_smoke_serial': 'Tempest (smoke)',
+ 'tempest_full_parallel': 'Tempest (full)',
+ 'rally_sanity': 'Rally (smoke)',
+ 'bgpvpn': 'bgpvpn',
+ 'rally_full': 'Rally (full)',
+ 'vims': 'vIMS',
+ 'doctor': 'Doctor',
+ 'promise': 'Promise',
+ 'moon': 'moon',
+ 'copper': 'copper',
+ 'security_scan': 'security'
+ }
+ try:
+ self.displayName = display_name_matrix[self.name]
+ except:
+ self.displayName = "unknown"
def getName(self):
return self.name
@@ -74,10 +96,10 @@ class TestCase(object):
self.isRunnable = is_runnable
def toString(self):
- testcase = ("Name=" + self.name + ";Criteria=" + str(self.criteria)
- + ";Project=" + self.project + ";Constraints="
- + str(self.constraints) + ";IsRunnable"
- + str(self.isRunnable))
+ testcase = ("Name=" + self.name + ";Criteria=" +
+ str(self.criteria) + ";Project=" + self.project +
+ ";Constraints=" + str(self.constraints) +
+ ";IsRunnable" + str(self.isRunnable))
return testcase
def getDbName(self):
@@ -98,31 +120,15 @@ class TestCase(object):
'rally_full': 'rally_full',
'vims': 'vims',
'doctor': 'doctor-notification',
- 'promise': 'promise'
+ 'promise': 'promise',
+ 'moon': 'moon',
+ 'copper': 'copper',
+ 'security_scan': 'security'
}
try:
return test_match_matrix[self.name]
except:
return "unknown"
- def getTestDisplayName(self):
- # Correspondance name of the test case / name in the DB
- test_match_matrix = {'healthcheck': 'healthcheck',
- 'vping_ssh': 'vPing (ssh)',
- 'vping_userdata': 'vPing (userdata)',
- 'odl': 'ODL',
- 'onos': 'ONOS',
- 'ocl': 'OCL',
- 'tempest_smoke_serial': 'Tempest (smoke)',
- 'tempest_full_parallel': 'Tempest (full)',
- 'rally_sanity': 'Rally (smoke)',
- 'bgpvpn': 'bgpvpn',
- 'rally_full': 'Rally (full)',
- 'vims': 'vIMS',
- 'doctor': 'Doctor',
- 'promise': 'Promise'
- }
- try:
- return test_match_matrix[self.name]
- except:
- return "unknown"
+ def getDisplayName(self):
+ return self.displayName
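
Net effect in testCase.py: the display name is resolved once in __init__ and read back through getDisplayName(), replacing the copy of the lookup table that lived in the removed (and misnamed) getTestDisplayName(). A usage sketch; the argument order follows the tc.TestCase(name, project, "", result, True, 1) calls in reporting-status.py above, so treat the signature as illustrative:

    case = TestCase("tempest_smoke_serial", "functest", "", -1, True, 1)
    case.getDisplayName()     # -> "Tempest (smoke)"
    case.getDbName()          # -> "tempest_smoke_serial"

    # A name absent from display_name_matrix falls back to "unknown"
    # ("some_new_case" is a hypothetical test case name):
    unknown = TestCase("some_new_case", "functest", "", -1, True, 1)
    unknown.getDisplayName()  # -> "unknown"
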