author:    Morgan Richomme <morgan.richomme@orange.com>  2016-04-26 14:24:56 +0200
committer: Morgan Richomme <morgan.richomme@orange.com>  2016-04-26 15:15:50 +0200
commit:    42c0c75a9e3564758aaaccadec9e1bad42b283eb (patch)
tree:      b4e407feb611c70b0b9dc5da7b7da1cf587cc7f6 /utils/test/reporting/functest
parent:    9bd71f3a00944616299fa2168105b57d74e549ea (diff)
Functest reporting refactoring
Integrate the notion of version (brahmaputra, master, ..) and change the directory structure.

Change-Id: Ieb8aed811ed4e1ab8738fb02e4db411da3d07ca2
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
Diffstat (limited to 'utils/test/reporting/functest')
-rw-r--r--  utils/test/reporting/functest/default.css                      | 56
-rw-r--r--  utils/test/reporting/functest/img/weather-clear.png            | bin 0 -> 1560 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-few-clouds.png       | bin 0 -> 1927 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-overcast.png         | bin 0 -> 1588 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-storm.png            | bin 0 -> 2137 bytes
-rw-r--r--  utils/test/reporting/functest/index.html                       | 52
-rw-r--r--  utils/test/reporting/functest/reporting-status.py              | 307
-rw-r--r--  utils/test/reporting/functest/reporting-tempest.py             | 99
-rw-r--r--  utils/test/reporting/functest/reporting-vims.py                | 83
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html  | 94
-rw-r--r--  utils/test/reporting/functest/template/index-tempest-tmpl.html | 90
-rw-r--r--  utils/test/reporting/functest/template/index-vims-tmpl.html    | 91
12 files changed, 872 insertions(+), 0 deletions(-)
diff --git a/utils/test/reporting/functest/default.css b/utils/test/reporting/functest/default.css
new file mode 100644
index 000000000..0e330e965
--- /dev/null
+++ b/utils/test/reporting/functest/default.css
@@ -0,0 +1,56 @@
+.panel-header-item {
+ position: relative;
+ display: inline-block;
+ padding-left: 17px;
+ padding-right: 17px;
+}
+
+.panel-pod-name {
+ margin-top: 10px;
+ margin-right: 27px;
+ float:right;
+ padding: 6px;
+}
+
+.panel-default > .panel-heading .badge {
+ background-color: #007e88;
+ position: relative;
+ display: inline-block;
+}
+
+.panel-default > .panel-heading .progress-bar {
+ height: 100%;
+ position: absolute;
+ left: 0;
+ top: 0;
+ width: 100%;
+    background-color: #0095a2;
+}
+.panel-default > .panel-heading h4 {
+ color: white;
+}
+
+.panel-default > .panel-heading {
+ background-color: #00ADBB;
+ overflow: hidden;
+ position: relative;
+ width: 100%;
+}
+
+th{
+ text-align: center;
+}
+
+td{
+ text-align: center;
+}
+
+.tr-danger {
+ background-color: #177870;
+ color: white;
+}
+
+.btn-more {
+ color: white;
+ background-color: #0095a2;
+}
\ No newline at end of file
diff --git a/utils/test/reporting/functest/img/weather-clear.png b/utils/test/reporting/functest/img/weather-clear.png
new file mode 100644
index 000000000..a0d967750
--- /dev/null
+++ b/utils/test/reporting/functest/img/weather-clear.png
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/functest/img/weather-few-clouds.png
new file mode 100644
index 000000000..acfa78398
--- /dev/null
+++ b/utils/test/reporting/functest/img/weather-few-clouds.png
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-overcast.png b/utils/test/reporting/functest/img/weather-overcast.png
new file mode 100644
index 000000000..4296246d0
--- /dev/null
+++ b/utils/test/reporting/functest/img/weather-overcast.png
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-storm.png b/utils/test/reporting/functest/img/weather-storm.png
new file mode 100644
index 000000000..956f0e20f
--- /dev/null
+++ b/utils/test/reporting/functest/img/weather-storm.png
Binary files differ
diff --git a/utils/test/reporting/functest/index.html b/utils/test/reporting/functest/index.html
new file mode 100644
index 000000000..af4033567
--- /dev/null
+++ b/utils/test/reporting/functest/index.html
@@ -0,0 +1,52 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Functest reporting page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="#">Home</a></li>
+ <li><a href="./index-status-apex.html">Status</a></li>
+ <li><a href="./index-tempest-apex.html">Tempest</a></li>
+ <li><a href="./index-vims-apex.html">vIMS</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-main">
+ <h2>Functest</h2>
+        This project develops test suites that cover functional test cases in OPNFV.
+        <br>The test suites are integrated into the continuous integration (CI) framework and used to evaluate/validate scenarios.
+        <br> Weekly meeting: every Tuesday, 8 AM UTC
+        <br> IRC channel: #opnfv-testperf
+
+ <br>
+        <h2>Useful Links</h2>
+        <ul>
+          <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
+          <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
+          <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
+          <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
+          <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
+        </ul>
+
+ </div>
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
new file mode 100644
index 000000000..9e6aeb1ad
--- /dev/null
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -0,0 +1,307 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+import os
+import re
+import requests
+import sys
+import time
+import yaml
+
+# Declaration of the variables
+functest_test_list = ['vPing', 'vPing_userdata',
+ 'Tempest', 'Rally',
+ 'ODL', 'ONOS', 'vIMS']
+# functest_test_list = ['vPing']
+companion_test_list = ['doctor/doctor-notification', 'promise/promise']
+# companion_test_list = []
+installers = ["apex", "compass", "fuel", "joid"]
+# installers = ["fuel"]
+versions = ["brahmaputra", "master"]
+# versions = ["master"]
+PERIOD = 10
+
+# Correspondence between the name of the test case and the name in the DB
+# ideally we should modify the DB to avoid such an interface...
+# '<name in the DB>': '<name in the config>'
+# I know it is ugly...
+test_match_matrix = {'vPing': 'vping_ssh',
+ 'vPing_userdata': 'vping_userdata',
+ 'ODL': 'odl',
+ 'ONOS': 'onos',
+ 'Tempest': 'tempest',
+ 'Rally': 'rally',
+ 'vIMS': 'vims',
+ 'doctor-notification': 'doctor',
+ 'promise': 'promise'}
+
+
+class TestCase(object):
+ def __init__(self, name, project, criteria=-1, isRunnable=True):
+ self.name = name
+ self.project = project
+ self.criteria = criteria
+ self.isRunnable = isRunnable
+
+ def getName(self):
+ return self.name
+
+ def getProject(self):
+ return self.project
+
+ def getCriteria(self):
+ return self.criteria
+
+ def setCriteria(self, criteria):
+ self.criteria = criteria
+
+ def setIsRunnable(self, isRunnable):
+ self.isRunnable = isRunnable
+
+ def checkRunnable(self, installer, scenario, config):
+        # Re-use the Functest declarations:
+        # retrieve the Functest configuration file config_functest.yaml
+        is_runnable = True
+        config_test = ""
+        TEST_ENV = config.get("test-dependencies")
+
+ # print " *********************** "
+ # print TEST_ENV
+ # print " ---------------------- "
+ # print "case = " + self.name
+ # print "installer = " + installer
+ # print "scenario = " + scenario
+ # print "project = " + self.project
+
+        # Retrieve test constraints
+        case_name_formatted = test_match_matrix[self.name]
+
+        try:
+            config_test = TEST_ENV[self.project][case_name_formatted]
+        except KeyError:
+            # if not defined in dependencies => no dependencies
+            config_test = TEST_ENV[case_name_formatted]
+        except Exception, e:
+            print "Error [getTestEnv]:", e
+
+ # Retrieve test execution param
+ test_execution_context = {"installer": installer,
+ "scenario": scenario}
+ # By default we assume that all the tests are always runnable...
+ # if test_env not empty => dependencies to be checked
+ if config_test is not None and len(config_test) > 0:
+            # possible criteria = ["installer", "scenario"]
+            # consider the test criteria from the config file and compare
+            # them against the CI run context (installer, scenario)
+ for criteria in config_test:
+ if re.search(config_test[criteria],
+ test_execution_context[criteria]) is None:
+ # print "Test "+ test + " cannot be run on the environment"
+ is_runnable = False
+ # print is_runnable
+ self.isRunnable = is_runnable
+
+
+def getApiResults(case, installer, scenario, version):
+ case = case.getName()
+    results = {'test_results': []}  # default shape if the API call fails
+ # to remove proxy (to be removed at the end for local test only)
+ # proxy_handler = urllib2.ProxyHandler({})
+ # opener = urllib2.build_opener(proxy_handler)
+ # urllib2.install_opener(opener)
+ # url = "http://127.0.0.1:8000/results?case=" + case + \
+ # "&period=30&installer=" + installer
+ url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
+ "&period=" + str(PERIOD) + "&installer=" + installer + \
+ "&scenario=" + scenario + "&version=" + version
+ request = Request(url)
+
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError, e:
+ print 'No kittez. Got an error code:', e
+
+ return results
+
+
+def getScenarios(case, installer, version):
+
+    case = case.getName()
+    results = {'test_results': []}  # default shape if the API call fails
+    url = "http://testresults.opnfv.org/testapi/results?case=" + case + \
+ "&period=" + str(PERIOD) + "&installer=" + installer + \
+ "&version=" + version
+ request = Request(url)
+
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError, e:
+ print 'Got an error code:', e
+
+ test_results = results['test_results']
+
+ if test_results is not None:
+ test_results.reverse()
+
+ scenario_results = {}
+
+ for r in test_results:
+ # Retrieve all the scenarios per installer
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ return scenario_results
+
+
+def getScenarioStats(scenario_results):
+ scenario_stats = {}
+ for k, v in scenario_results.iteritems():
+ scenario_stats[k] = len(v)
+
+ return scenario_stats
+
+
+def getNbtestOk(results):
+ nb_test_ok = 0
+ for r in results:
+ for k, v in r.iteritems():
+ try:
+ if "passed" in v:
+ nb_test_ok += 1
+ except:
+ print "Cannot retrieve test status"
+ return nb_test_ok
+
+
+def getResult(testCase, installer, scenario, version):
+
+ # retrieve raw results
+ results = getApiResults(testCase, installer, scenario, version)
+ # let's concentrate on test results only
+ test_results = results['test_results']
+
+ # if results found, analyze them
+ if test_results is not None:
+ test_results.reverse()
+
+ scenario_results = []
+
+ # print " ---------------- "
+ # print test_results
+ # print " ---------------- "
+ # print "nb of results:" + str(len(test_results))
+
+ for r in test_results:
+ # print r["creation_date"]
+ # print r["criteria"]
+ scenario_results.append({r["creation_date"]: r["criteria"]})
+ # sort results
+ scenario_results.sort()
+ # 4 levels for the results
+ # 3: 4+ consecutive runs passing the success criteria
+ # 2: <4 successful consecutive runs but passing the criteria
+    # 1: close to passing the success criteria
+ # 0: 0% success, not passing
+ test_result_indicator = 0
+ nbTestOk = getNbtestOk(scenario_results)
+ # print "Nb test OK:"+ str(nbTestOk)
+    # classify the indicator based on the number of passing runs
+ if nbTestOk < 1:
+ test_result_indicator = 0
+ elif nbTestOk < 2:
+ test_result_indicator = 1
+ else:
+ # Test the last 4 run
+ if (len(scenario_results) > 3):
+ last4runResults = scenario_results[-4:]
+            # indicator 3 requires all of the last 4 runs to pass
+            if getNbtestOk(last4runResults) > 3:
+ test_result_indicator = 3
+ else:
+ test_result_indicator = 2
+ else:
+ test_result_indicator = 2
+ print " >>>> Test indicator:" + str(test_result_indicator)
+ return test_result_indicator
+
+# ******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+
+# init just tempest to get the list of scenarios
+# as all the scenarios run Tempest
+tempest = TestCase("Tempest", "functest", -1)
+
+# Retrieve the Functest configuration to detect which tests are relevant
+# according to the installer, scenario
+response = requests.get('https://git.opnfv.org/cgit/functest/plain/testcases/config_functest.yaml')
+functest_yaml_config = yaml.load(response.text)
+
+print "****************************************"
+print "* Generating reporting..... *"
+print "****************************************"
+# For all the versions
+for version in versions:
+ # For all the installers
+ for installer in installers:
+ # get scenarios
+ scenario_results = getScenarios(tempest, installer, version)
+ scenario_stats = getScenarioStats(scenario_results)
+
+ items = {}
+ # For all the scenarios get results
+ for s, s_result in scenario_results.items():
+ testCases = []
+ # For each scenario declare the test cases
+ # Functest cases
+ for test_case in functest_test_list:
+ testCases.append(TestCase(test_case, "functest"))
+
+ # project/case
+ for test_case in companion_test_list:
+ test_split = test_case.split("/")
+ test_project = test_split[0]
+ test_case = test_split[1]
+ testCases.append(TestCase(test_case, test_project))
+
+ # Check if test case is runnable / installer, scenario
+ try:
+ for test_case in testCases:
+ test_case.checkRunnable(installer, s, functest_yaml_config)
+ # print "testcase %s is %s" % (test_case.getName(),
+ # test_case.isRunnable)
+ print "--------------------------"
+ print "installer %s, version %s, scenario %s:" % (installer, version, s)
+ for testCase in testCases:
+ time.sleep(1)
+ if testCase.isRunnable:
+ print " Searching results for case %s " % (testCase.getName())
+ result = getResult(testCase, installer, s, version)
+ testCase.setCriteria(result)
+ items[s] = testCases
+ print "--------------------------"
+ except:
+ print "installer %s, version %s, scenario %s" % (installer, version, s)
+ print "No data available , error %s " % (sys.exc_info()[0])
+
+ print "****************************************"
+ templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+ templateEnv = jinja2.Environment(loader=templateLoader)
+
+ TEMPLATE_FILE = "./template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_stats=scenario_stats,
+ items=items,
+ installer=installer,
+ period=PERIOD,
+ version=version)
+
+ with open("./release/" + version +
+ "/index-status-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
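
Note on the scoring rule above: getResult() reduces the recent runs of a test case to a 4-level indicator. Below is a minimal standalone sketch of that rule, for illustration only (the helper name "indicator" and the sample data are hypothetical, not part of the patch); it reuses getNbtestOk() and assumes the list of {creation_date: criteria} dicts that getResult() builds:

    # Illustrative sketch (not in the patch): the indicator rule of getResult().
    def indicator(scenario_results):
        # 3: the last 4 runs all passed the success criteria
        # 2: passing, but fewer than 4 consecutive successful runs
        # 1: a single passing run (close to passing the criteria)
        # 0: no passing run at all
        nb_ok = getNbtestOk(scenario_results)
        if nb_ok < 1:
            return 0
        if nb_ok < 2:
            return 1
        if len(scenario_results) > 3 and getNbtestOk(scenario_results[-4:]) > 3:
            return 3
        return 2

    # Two old failures followed by four passing runs score 3:
    sample = [{'2016-04-01': 'failed'}, {'2016-04-02': 'failed'},
              {'2016-04-03': 'passed'}, {'2016-04-04': 'passed'},
              {'2016-04-05': 'passed'}, {'2016-04-06': 'passed'}]
    print indicator(sample)  # -> 3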
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
new file mode 100644
index 000000000..563e53010
--- /dev/null
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -0,0 +1,99 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+import os
+
+installers = ["apex", "compass", "fuel", "joid"]
+items = ["tests", "Success rate", "duration"]
+
+for installer in installers:
+ # we consider the Tempest results of the last 7 days
+ url = "http://testresults.opnfv.org/testapi/results?case=Tempest"
+ request = Request(url + '&period=7&installer=' + installer)
+
+    results = {'test_results': []}  # default shape if the API call fails
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'No kittez. Got an error code:', e
+
+ test_results = results['test_results']
+ test_results.reverse()
+
+ scenario_results = {}
+ criteria = {}
+ errors = {}
+
+ for r in test_results:
+ # Retrieve all the scenarios per installer
+ if not r['version'] in scenario_results.keys():
+ scenario_results[r['version']] = []
+ scenario_results[r['version']].append(r)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ # For each scenario, we build a result object to deal with
+ # results, criteria and error handling
+ for result in scenario_results[s]:
+ result["creation_date"] = result["creation_date"].split(".")[0]
+
+ # retrieve results
+ # ****************
+ nb_tests_run = result['details']['tests']
+ if nb_tests_run != 0:
+ success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests'])
+ else:
+ success_rate = 0
+
+ result['details']["tests"] = nb_tests_run
+ result['details']["Success rate"] = str(success_rate) + "%"
+
+ # Criteria management
+ # *******************
+ crit_tests = False
+ crit_rate = False
+ crit_time = False
+
+ # Expect that at least 200 tests are run
+ if nb_tests_run >= 200:
+ crit_tests = True
+
+            # Expect a success rate of at least 90%
+ if success_rate >= 90:
+ crit_rate = True
+
+            # Expect the suite duration to be under 45 minutes (2700 s)
+ if result['details']['duration'] < 2700:
+ crit_time = True
+
+ result['criteria'] = {'tests': crit_tests,
+ 'Success rate': crit_rate,
+ 'duration': crit_time}
+
+ # error management
+ # ****************
+
+            # TODO: get information from the artifact based on the build tag
+            # to identify the errors of the associated run;
+            # the build tag is needed to wget the errors from the artifacts.
+            # The idea is to list the tests in error and provide a link
+            # to the complete artifact.
+            # Another option would be to put the errors in the DB
+            # (in the detail section)...
+ result['errors'] = {'tests': "",
+ 'Success rate': "",
+ 'duration': ""}
+
+ templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+ templateEnv = jinja2.Environment(loader=templateLoader)
+
+ TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ items=items,
+ installer=installer)
+
+ with open("./release/index-tempest-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
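
Note on the criteria above: the loop applies three thresholds (at least 200 tests run, a success rate of at least 90%, a duration under 2700 seconds, i.e. 45 minutes). A minimal sketch of the same checks as one predicate, for illustration only (the name "tempest_criteria" is hypothetical); the details dict matches the shape of the API results used above:

    # Illustrative sketch (not in the patch): the Tempest criteria as one helper.
    def tempest_criteria(details):
        nb_tests = int(details['tests'])
        failures = int(details['failures'])
        success_rate = 100 * (nb_tests - failures) / nb_tests if nb_tests else 0
        return {'tests': nb_tests >= 200,
                'Success rate': success_rate >= 90,
                'duration': details['duration'] < 2700}

    print tempest_criteria({'tests': 210, 'failures': 10, 'duration': 1800})
    # -> all three criteria are True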
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
new file mode 100644
index 000000000..78ca9f5b3
--- /dev/null
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -0,0 +1,83 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+import os
+
+def sig_test_format(sig_test):
+ nbPassed = 0
+ nbFailures = 0
+ nbSkipped = 0
+ for data_test in sig_test:
+ if data_test['result'] == "Passed":
+            nbPassed += 1
+ elif data_test['result'] == "Failed":
+ nbFailures += 1
+ elif data_test['result'] == "Skipped":
+ nbSkipped += 1
+ total_sig_test_result = {}
+ total_sig_test_result['passed'] = nbPassed
+ total_sig_test_result['failures'] = nbFailures
+ total_sig_test_result['skipped'] = nbSkipped
+ return total_sig_test_result
+
+installers = ["fuel", "compass", "joid", "apex"]
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+
+for installer in installers:
+ request = Request('http://testresults.opnfv.org/testapi/results?case=vIMS&installer=' + installer)
+
+    results = {'test_results': []}  # default shape if the API call fails
+    try:
+        response = urlopen(request)
+        k = response.read()
+        results = json.loads(k)
+    except URLError, e:
+        print 'No kittez. Got an error code:', e
+
+ test_results = results['test_results']
+ test_results.reverse()
+
+ scenario_results = {}
+ for r in test_results:
+ if not r['version'] in scenario_results.keys():
+ scenario_results[r['version']] = []
+ scenario_results[r['version']].append(r)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ for result in scenario_results[s]:
+ result["creation_date"] = result["creation_date"].split(".")[0]
+ sig_test = result['details']['sig_test']['result']
+ if not sig_test == "" and isinstance(sig_test, list):
+ format_result = sig_test_format(sig_test)
+ if format_result['failures'] > format_result['passed']:
+ result['details']['sig_test']['duration'] = 0
+ result['details']['sig_test']['result'] = format_result
+ nb_step_ok = 0
+ nb_step = len(result['details'])
+
+ for step_name, step_result in result['details'].items():
+ if step_result['duration'] != 0:
+ nb_step_ok += 1
+ m, s = divmod(step_result['duration'], 60)
+ m_display = ""
+ if int(m) != 0:
+ m_display += str(int(m)) + "m "
+ step_result['duration_display'] = m_display + str(int(s)) + "s"
+
+ result['pr_step_ok'] = 0
+ if nb_step != 0:
+ result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
+
+
+    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+    templateEnv = jinja2.Environment(loader=templateLoader)
+
+    TEMPLATE_FILE = "./template/index-vims-tmpl.html"
+    template = templateEnv.get_template(TEMPLATE_FILE)
+
+    outputText = template.render(scenario_results=scenario_results,
+                                 step_order=step_order,
+                                 installer=installer)
+
+    with open("./release/index-vims-" + installer + ".html", "wb") as fh:
+        fh.write(outputText)
+
+
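
Note on the duration formatting above: divmod() splits a step duration into minutes and seconds, and the minutes part is omitted when zero. A small sketch for reference (the helper name "duration_display" is hypothetical; the script stores the string in step_result['duration_display']):

    # Illustrative sketch (not in the patch): the "Xm Ys" formatting used above.
    def duration_display(seconds):
        m, s = divmod(seconds, 60)
        out = ""
        if int(m) != 0:
            out += str(int(m)) + "m "
        return out + str(int(s)) + "s"

    print duration_display(125)  # -> 2m 5s
    print duration_display(42)   # -> 42s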
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
new file mode 100644
index 000000000..604f2c8e4
--- /dev/null
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -0,0 +1,94 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Functest status page ({{version}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="80%">Scenario</th>
+ <th width="20%">Iteration</th>
+ </tr>
+ {% for scenario,iteration in scenario_stats.iteritems() -%}
+ <tr class="tr-ok">
+ <td>{{scenario}}</td>
+ <td>{{iteration}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+
+ {% for scenario, iteration in scenario_stats.iteritems() -%}
+ <div class="scenario-part">
+ <div class="page-header">
+        <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario}}</b></h3>
+ </div>
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <span class="panel-header-item">
+ </span>
+ </div>
+ <table class="table">
+ <tr>
+ {% for test in items[scenario] -%}
+ <th>{{test.getName() }}</th>
+ {%- endfor %}
+ </tr>
+ <tr class="tr-weather-weather">
+ {% for test in items[scenario] -%}
+ {% if test.isRunnable is sameas false -%}
+ <td>N.R</td>
+ {% elif test.getCriteria() > 2 -%}
+ <td><img src="../../img/weather-clear.png"></td>
+ {%- elif test.getCriteria() > 1 -%}
+ <td><img src="../../img/weather-few-clouds.png"></td>
+ {%- elif test.getCriteria() > 0 -%}
+ <td><img src="../../img/weather-overcast.png"></td>
+ {%- else -%}
+ <td><img src="../../img/weather-storm.png"></td>
+ {%- endif %}
+ {%- endfor %}
+ </tr>
+ </table>
+ </div>
+ </div>
+ {%- endfor %}
+ </div>
+ <div class="col-md-1"></div>
+</div>
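
Note on the weather icons above: the template translates the 0-3 indicator computed by reporting-status.py into one of the four PNG icons added in this patch ("N.R" is shown when a case is not runnable for the installer/scenario). The same mapping as a reference sketch in Python, reusing the TestCase class from reporting-status.py (the names "ICONS" and "icon_for" are hypothetical, not part of the patch):

    # Illustrative sketch (not in the patch): indicator -> icon, as in the template.
    ICONS = {3: 'weather-clear.png',       # last 4 runs all passed
             2: 'weather-few-clouds.png',  # passing, <4 consecutive successes
             1: 'weather-overcast.png',    # close to passing
             0: 'weather-storm.png'}       # not passing

    def icon_for(test):
        if not test.isRunnable:
            return None  # rendered as "N.R" in the template
        return ICONS[max(0, min(test.getCriteria(), 3))]

    print icon_for(TestCase('Tempest', 'functest', criteria=3))  # -> weather-clear.png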
diff --git a/utils/test/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html
new file mode 100644
index 000000000..be0b79734
--- /dev/null
+++ b/utils/test/reporting/functest/template/index-tempest-tmpl.html
@@ -0,0 +1,90 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Tempest status page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="index.html">Home</a></li>
+ <li><a href="index-tempest-apex.html">Apex</a></li>
+ <li><a href="index-tempest-compass.html">Compass</a></li>
+ <li><a href="index-tempest-fuel.html">Fuel</a></li>
+ <li><a href="index-tempest-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+ {% for scenario_name, results in scenario_results.iteritems() -%}
+ <div class="scenario-part">
+ <div class="page-header">
+        <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario_name}}</b></h3>
+ </div>
+ {% for result in results -%}
+ {% if loop.index > 2 -%}
+ <div class="panel panel-default" hidden>
+ {%- else -%}
+ <div class="panel panel-default">
+ {%- endif %}
+ <div class="panel-heading">
+ <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+ <span class="panel-header-item">
+ <h4><b>{{result.creation_date}}</b></h4>
+ </span>
+ <span class="badge panel-pod-name">{{result.pod_name}}</span>
+ </div>
+ <table class="table">
+ <tr>
+ <th width="20%">Item</th>
+ <th width="10%">Result</th>
+ <th width="10%">Status</th>
+ <th width="60%">Errors</th>
+ </tr>
+ {% for item in items -%}
+ {% if item in result.details.keys() -%}
+ {% if result.criteria[item] -%}
+ <tr class="tr-ok">
+ <td>{{item}}</td>
+ <td>{{result.details[item]}}</td>
+            <td><span class="glyphicon glyphicon-ok"></span></td>
+ <td>{{result.errors[item]}}</td>
+ </tr>
+ {%- else -%}
+ <tr class="tr-danger">
+ <td>{{item}}</td>
+ <td>{{result.details[item]}}</td>
+            <td><span class="glyphicon glyphicon-remove"></span></td>
+ <td>{{result.errors[item]}}</td>
+ </tr>
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ </table>
+ </div>
+ {%- endfor %}
+        <button type="button" class="btn btn-more">Show more results</button>
+ </div>
+ {%- endfor %}
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html
new file mode 100644
index 000000000..8858182c1
--- /dev/null
+++ b/utils/test/reporting/functest/template/index-vims-tmpl.html
@@ -0,0 +1,91 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">vIMS status page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="index.html">Home</a></li>
+ <li><a href="index-vims-fuel.html">Fuel</a></li>
+            <li><a href="index-vims-compass.html">Compass</a></li>
+ <li><a href="index-vims-joid.html">JOID</a></li>
+ <li><a href="index-vims-apex.html">APEX</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+ {% for scenario_name, results in scenario_results.iteritems() -%}
+ <div class="scenario-part">
+ <div class="page-header">
+        <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario_name}}</b></h3>
+ </div>
+ {% for result in results -%}
+ {% if loop.index > 2 -%}
+ <div class="panel panel-default" hidden>
+ {%- else -%}
+ <div class="panel panel-default">
+ {%- endif %}
+ <div class="panel-heading">
+ <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+ <span class="panel-header-item">
+ <h4><b>{{result.creation_date}}</b></h4>
+ </span>
+ <span class="badge panel-pod-name">{{result.pod_name}}</span>
+ </div>
+ <table class="table">
+ <tr>
+ <th width="20%">Step</th>
+ <th width="10%">Status</th>
+ <th width="10%">Duration</th>
+ <th width="60%">Result</th>
+ </tr>
+ {% for step_od_name in step_order -%}
+ {% if step_od_name in result.details.keys() -%}
+ {% set step_result = result.details[step_od_name] -%}
+ {% if step_result.duration != 0 -%}
+ <tr class="tr-ok">
+ <td>{{step_od_name}}</td>
+            <td><span class="glyphicon glyphicon-ok"></span></td>
+ <td><b>{{step_result.duration_display}}</b></td>
+ <td>{{step_result.result}}</td>
+ </tr>
+ {%- else -%}
+ <tr class="tr-danger">
+ <td>{{step_od_name}}</td>
+            <td><span class="glyphicon glyphicon-remove"></span></td>
+ <td><b>0s</b></td>
+ <td>{{step_result.result}}</td>
+ </tr>
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ </table>
+ </div>
+ {%- endfor %}
+        <button type="button" class="btn btn-more">Show more results</button>
+ </div>
+ {%- endfor %}
+ </div>
+ <div class="col-md-1"></div>
+</div>
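
Note on the progress bars in both templates above: the bar width is the pr_step_ok percentage computed in reporting-vims.py, i.e. the share of steps whose duration is non-zero (a zero duration marks a failed step). The Tempest template references the same attribute, which reporting-tempest.py does not set, so the bar renders empty there. A small sketch of the computation, for illustration only (the helper name "pr_step_ok" is hypothetical):

    # Illustrative sketch (not in the patch): share of successful steps, in percent.
    def pr_step_ok(details):
        steps = details.values()
        nb_ok = sum(1 for step in steps if step['duration'] != 0)
        return (float(nb_ok) / len(steps)) * 100 if steps else 0

    print pr_step_ok({'initialisation': {'duration': 30},
                      'orchestrator': {'duration': 0}})  # -> 50.0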