Diffstat (limited to 'utils')
-rw-r--r--  utils/create_pod_file.py                                        |   2
-rwxr-xr-x  utils/test/reporting/docker/reporting.sh                        |  20
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py               | 405
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html   |  30
-rw-r--r--  utils/test/reporting/functest/testCase.py                       |  59
-rw-r--r--  utils/test/reporting/js/trend-qtip.js                           |  76
-rw-r--r--  utils/test/reporting/qtip/__init__.py                           |   0
-rw-r--r--  utils/test/reporting/qtip/index.html                            |  51
-rw-r--r--  utils/test/reporting/qtip/reporting-status.py                   | 110
-rw-r--r--  utils/test/reporting/qtip/template/index-status-tmpl.html       |  86
-rw-r--r--  utils/test/reporting/reporting.yaml                             |   9
-rw-r--r--  utils/test/reporting/utils/reporting_utils.py                   |  76
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/testcase_models.py   |  15
13 files changed, 667 insertions(+), 272 deletions(-)
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index 7e30cc639..197e4933c 100644
--- a/utils/create_pod_file.py
+++ b/utils/create_pod_file.py
@@ -58,7 +58,7 @@ def create_file(handler):
Other installers use key file of each node.
"""
if not os.path.exists(os.path.dirname(args.filepath)):
- os.path.makedirs(os.path.dirname(args.filepath))
+ os.makedirs(os.path.dirname(args.filepath))
nodes = handler.nodes
node_list = []
index = 1
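
The one-line fix above swaps the nonexistent os.path.makedirs for os.makedirs; os.path only holds path-manipulation helpers, while directory creation lives in the os module. A minimal sketch of the guarded pattern the script relies on (the helper name is illustrative):

    import os

    def ensure_parent_dir(filepath):
        # Create the parent directory of filepath only if it is missing;
        # os.makedirs raises OSError when the directory already exists.
        parent = os.path.dirname(filepath)
        if parent and not os.path.exists(parent):
            os.makedirs(parent)
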
diff --git a/utils/test/reporting/docker/reporting.sh b/utils/test/reporting/docker/reporting.sh
index 1de13ae32..49f4517f7 100755
--- a/utils/test/reporting/docker/reporting.sh
+++ b/utils/test/reporting/docker/reporting.sh
@@ -3,7 +3,7 @@
export PYTHONPATH="${PYTHONPATH}:."
export CONFIG_REPORTING_YAML=./reporting.yaml
-declare -a versions=(colorado master)
+declare -a versions=(danube master)
declare -a projects=(functest storperf yardstick)
project=$1
@@ -32,6 +32,22 @@ cp -Rf js display
# yardstick |
# storperf |
+function report_project()
+{
+ project=$1
+ dir=$2
+ type=$3
+ echo "********************************"
+ echo " $project reporting "
+ echo "********************************"
+ python ./$dir/reporting-$type.py
+  if [ $? -eq 0 ]; then
+ echo "$project reporting $type...OK"
+ else
+ echo "$project reporting $type...KO"
+ fi
+}
+
if [ -z "$1" ]; then
echo "********************************"
echo " Functest reporting "
@@ -60,6 +76,8 @@ if [ -z "$1" ]; then
python ./storperf/reporting-status.py
echo "Storperf reporting status...OK"
+ report_project "QTIP" "qtip" "status"
+
else
if [ -z "$2" ]; then
reporting_type="status"
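
The new report_project helper factors the per-project banner and status echo out of the if/else ladder; note that the exit-status check needs an explicit comparison ([ $? -eq 0 ]), because a bare [ $? ] is true for any non-empty string, including "1". A rough Python equivalent of the helper, for illustration only:

    import subprocess

    def report_project(project, directory, report_type):
        # Run ./<directory>/reporting-<type>.py and report OK/KO from its exit code.
        print("*" * 32)
        print(" %s reporting " % project)
        print("*" * 32)
        cmd = ["python", "./%s/reporting-%s.py" % (directory, report_type)]
        rc = subprocess.call(cmd)
        status = "OK" if rc == 0 else "KO"
        print("%s reporting %s...%s" % (project, report_type, status))
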
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index af1d1d8a5..e700e047f 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -9,10 +9,8 @@
import datetime
import jinja2
import os
-import requests
import sys
import time
-import yaml
import testCase as tc
import scenarioResult as sr
@@ -43,9 +41,7 @@ log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-response = requests.get(cf)
-
-functest_yaml_config = yaml.safe_load(response.text)
+functest_yaml_config = rp_utils.getFunctestConfig()
logger.info("*******************************************")
logger.info("* *")
@@ -69,128 +65,116 @@ config_tiers = functest_yaml_config.get("tiers")
for tier in config_tiers:
if tier['order'] >= 0 and tier['order'] < 2:
for case in tier['testcases']:
- if case['name'] not in blacklist:
- testValid.append(tc.TestCase(case['name'],
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
"functest",
case['dependencies']))
elif tier['order'] == 2:
for case in tier['testcases']:
- if case['name'] not in blacklist:
- testValid.append(tc.TestCase(case['name'],
- case['name'],
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
+ case['case_name'],
case['dependencies']))
elif tier['order'] > 2:
for case in tier['testcases']:
- if case['name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['name'],
+ if case['case_name'] not in blacklist:
+ otherTestCases.append(tc.TestCase(case['case_name'],
"functest",
case['dependencies']))
logger.debug("Functest reporting start")
+
# For all the versions
for version in versions:
# For all the installers
+ scenario_directory = "./display/" + version + "/functest/"
+ scenario_file_name = scenario_directory + "scenario_history.txt"
+
+ # check that the directory exists, if not create it
+ # (first run on new version)
+ if not os.path.exists(scenario_directory):
+ os.makedirs(scenario_directory)
+
+ # initiate scenario file if it does not exist
+ if not os.path.isfile(scenario_file_name):
+ with open(scenario_file_name, "a") as my_file:
+ logger.debug("Create scenario file: %s" % scenario_file_name)
+ my_file.write("date,scenario,installer,detail,score\n")
+
for installer in installers:
+
# get scenarios
scenario_results = rp_utils.getScenarios(healthcheck,
installer,
version)
- scenario_stats = rp_utils.getScenarioStats(scenario_results)
- items = {}
- scenario_result_criteria = {}
- scenario_directory = "./display/" + version + "/functest/"
- scenario_file_name = scenario_directory + "scenario_history.txt"
-
- # check that the directory exists, if not create it
- # (first run on new version)
- if not os.path.exists(scenario_directory):
- os.makedirs(scenario_directory)
-
- # initiate scenario file if it does not exist
- if not os.path.isfile(scenario_file_name):
- with open(scenario_file_name, "a") as my_file:
- logger.debug("Create scenario file: %s" % scenario_file_name)
- my_file.write("date,scenario,installer,detail,score\n")
-
- # For all the scenarios get results
- for s, s_result in scenario_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s:" %
- (installer, version, s))
- logger.debug("Scenario results: %s" % s_result)
-
- # Green or Red light for a given scenario
- nb_test_runnable_for_this_scenario = 0
- scenario_score = 0
- # url of the last jenkins log corresponding to a given
- # scenario
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- logger.debug("Build tag: %s" % build_tag)
- s_url = rp_utils.getJenkinsUrl(build_tag)
- if s_url is None:
- s_url = "http://testresultS.opnfv.org/reporting"
- logger.info("last jenkins url: %s" % s_url)
- testCases2BeDisplayed = []
- # Check if test case is runnable / installer, scenario
- # for the test case used for Scenario validation
- try:
- # 1) Manage the test cases for the scenario validation
- # concretely Tiers 0-3
- for test_case in testValid:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
- time.sleep(1)
- if test_case.isRunnable:
- dbName = test_case.getDbName()
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
- logger.info(" Searching results for case %s " %
- (displayName))
- result = rp_utils.getResult(dbName, installer,
- s, version)
- # if no result set the value to 0
- if result < 0:
- result = 0
- logger.info(" >>>> Test score = " + str(result))
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases for the scenario qualification
- # concretely Tiers > 3
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
- time.sleep(1)
- if test_case.isRunnable:
- dbName = test_case.getDbName()
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- logger.info(" Searching results for case %s " %
- (displayName))
- result = rp_utils.getResult(dbName, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
+
+ # get nb of supported architecture (x86, aarch64)
+ architectures = rp_utils.getArchitectures(scenario_results)
+ logger.info("Supported architectures: {}".format(architectures))
+
+ for architecture in architectures:
+ logger.info("architecture: {}".format(architecture))
+ # Consider only the results for the selected architecture
+            # i.e. drop x86 for aarch64 and vice versa
+ filter_results = rp_utils.filterArchitecture(scenario_results,
+ architecture)
+ scenario_stats = rp_utils.getScenarioStats(filter_results)
+ items = {}
+ scenario_result_criteria = {}
+
+ # in case of more than 1 architecture supported
+ # precise the architecture
+ installer_display = installer
+ if (len(architectures) > 1):
+ installer_display = installer + "@" + architecture
+
+ # For all the scenarios get results
+ for s, s_result in filter_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s:" %
+ (installer, version, s))
+ logger.debug("Scenario results: %s" % s_result)
+
+ # Green or Red light for a given scenario
+ nb_test_runnable_for_this_scenario = 0
+ scenario_score = 0
+ # url of the last jenkins log corresponding to a given
+ # scenario
+ s_url = ""
+ if len(s_result) > 0:
+ build_tag = s_result[len(s_result)-1]['build_tag']
+ logger.debug("Build tag: %s" % build_tag)
+ s_url = rp_utils.getJenkinsUrl(build_tag)
+ if s_url is None:
+ s_url = "http://testresultS.opnfv.org/reporting"
+ logger.info("last jenkins url: %s" % s_url)
+ testCases2BeDisplayed = []
+ # Check if test case is runnable / installer, scenario
+ # for the test case used for Scenario validation
+ try:
+ # 1) Manage the test cases for the scenario validation
+ # concretely Tiers 0-3
+ for test_case in testValid:
+ test_case.checkRunnable(installer, s,
+ test_case.getConstraints())
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
+ time.sleep(1)
+ if test_case.isRunnable:
+ name = test_case.getName()
+ displayName = test_case.getDisplayName()
+ project = test_case.getProject()
+ nb_test_runnable_for_this_scenario += 1
+ logger.info(" Searching results for case %s " %
+ (displayName))
+ result = rp_utils.getResult(name, installer,
+ s, version)
+ # if no result set the value to 0
+ if result < 0:
+ result = 0
+ logger.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
@@ -198,91 +182,126 @@ for version in versions:
"",
result,
True,
- 4))
- else:
- logger.debug("No results found")
-
- items[s] = testCases2BeDisplayed
- except:
- logger.error("Error: installer %s, version %s, scenario %s" %
- (installer, version, s))
- logger.error("No data available: %s " % (sys.exc_info()[0]))
-
- # **********************************************
- # Evaluate the results for scenario validation
- # **********************************************
- # the validation criteria = nb runnable tests x 3
- # because each test case = 0,1,2 or 3
- scenario_criteria = nb_test_runnable_for_this_scenario * 3
- # if 0 runnable tests set criteria at a high value
- if scenario_criteria < 1:
- scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
-
- s_score = str(scenario_score) + "/" + str(scenario_criteria)
- s_score_percent = rp_utils.getScenarioPercent(scenario_score,
- scenario_criteria)
-
- s_status = "KO"
- if scenario_score < scenario_criteria:
- logger.info(">>>> scenario not OK, score = %s/%s" %
- (scenario_score, scenario_criteria))
+ 1))
+ scenario_score = scenario_score + result
+
+ # 2) Manage the test cases for the scenario qualification
+ # concretely Tiers > 3
+ for test_case in otherTestCases:
+ test_case.checkRunnable(installer, s,
+ test_case.getConstraints())
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
+ time.sleep(1)
+ if test_case.isRunnable:
+ name = test_case.getName()
+ displayName = test_case.getDisplayName()
+ project = test_case.getProject()
+ logger.info(" Searching results for case %s " %
+ (displayName))
+ result = rp_utils.getResult(name, installer,
+ s, version)
+ # at least 1 result for the test
+ if result > -1:
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(
+ name,
+ project,
+ "",
+ result,
+ True,
+ 4))
+ else:
+ logger.debug("No results found")
+
+ items[s] = testCases2BeDisplayed
+ except:
+ logger.error("Error: installer %s, version %s, scenario %s"
+ % (installer, version, s))
+ logger.error("No data available: %s" % (sys.exc_info()[0]))
+
+ # **********************************************
+ # Evaluate the results for scenario validation
+ # **********************************************
+ # the validation criteria = nb runnable tests x 3
+ # because each test case = 0,1,2 or 3
+ scenario_criteria = nb_test_runnable_for_this_scenario * 3
+ # if 0 runnable tests set criteria at a high value
+ if scenario_criteria < 1:
+ scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
+
+ s_score = str(scenario_score) + "/" + str(scenario_criteria)
+ s_score_percent = rp_utils.getScenarioPercent(
+ scenario_score,
+ scenario_criteria)
+
s_status = "KO"
- else:
- logger.info(">>>>> scenario OK, save the information")
- s_status = "OK"
- path_validation_file = ("./display/" + version +
- "/functest/" +
- "validated_scenario_history.txt")
- with open(path_validation_file, "a") as f:
- time_format = "%Y-%m-%d %H:%M"
- info = (datetime.datetime.now().strftime(time_format) +
- ";" + installer + ";" + s + "\n")
+ if scenario_score < scenario_criteria:
+ logger.info(">>>> scenario not OK, score = %s/%s" %
+ (scenario_score, scenario_criteria))
+ s_status = "KO"
+ else:
+ logger.info(">>>>> scenario OK, save the information")
+ s_status = "OK"
+ path_validation_file = ("./display/" + version +
+ "/functest/" +
+ "validated_scenario_history.txt")
+ with open(path_validation_file, "a") as f:
+ time_format = "%Y-%m-%d %H:%M"
+ info = (datetime.datetime.now().strftime(time_format) +
+ ";" + installer_display + ";" + s + "\n")
+ f.write(info)
+
+ # Save daily results in a file
+ with open(scenario_file_name, "a") as f:
+ info = (reportingDate + "," + s + "," + installer_display +
+ "," + s_score + "," +
+ str(round(s_score_percent)) + "\n")
f.write(info)
- # Save daily results in a file
- with open(scenario_file_name, "a") as f:
- info = (reportingDate + "," + s + "," + installer +
- "," + s_score + "," +
- str(round(s_score_percent)) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(s_status,
- s_score,
- s_score_percent,
- s_url)
- logger.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(
- loader=templateLoader, autoescape=True)
-
- TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_stats=scenario_stats,
- scenario_results=scenario_result_criteria,
- items=items,
- installer=installer,
- period=period,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/functest/status-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
-
- logger.info("Manage export CSV & PDF")
- rp_utils.export_csv(scenario_file_name, installer, version)
- logger.error("CSV generated...")
-
- # Generate outputs for export
- # pdf
- # TODO Change once web site updated...use the current one
- # to test pdf production
- url_pdf = rp_utils.get_config('general.url')
- pdf_path = ("./display/" + version +
- "/functest/status-" + installer + ".html")
- pdf_doc_name = ("./display/" + version +
- "/functest/status-" + installer + ".pdf")
- rp_utils.export_pdf(pdf_path, pdf_doc_name)
- logger.info("PDF generated...")
+ scenario_result_criteria[s] = sr.ScenarioResult(
+ s_status,
+ s_score,
+ s_score_percent,
+ s_url)
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(
+ loader=templateLoader, autoescape=True)
+
+ TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(
+ scenario_stats=scenario_stats,
+ scenario_results=scenario_result_criteria,
+ items=items,
+ installer=installer_display,
+ period=period,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/functest/status-" +
+ installer_display + ".html", "wb") as fh:
+ fh.write(outputText)
+
+ logger.info("Manage export CSV & PDF")
+ rp_utils.export_csv(scenario_file_name, installer_display, version)
+ logger.error("CSV generated...")
+
+ # Generate outputs for export
+ # pdf
+ # TODO Change once web site updated...use the current one
+ # to test pdf production
+ url_pdf = rp_utils.get_config('general.url')
+ pdf_path = ("./display/" + version +
+ "/functest/status-" + installer_display + ".html")
+ pdf_doc_name = ("./display/" + version +
+ "/functest/status-" + installer_display + ".pdf")
+ rp_utils.export_pdf(pdf_path, pdf_doc_name)
+ logger.info("PDF generated...")
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 52046c37f..cc4edaac5 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -15,27 +15,27 @@
{% for scenario in scenario_stats.iteritems() -%}
var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
{%- endfor %}
-
+
// assign success rate to the gauge
function updateReadings() {
{% for scenario,iteration in scenario_stats.iteritems() -%}
gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
{%- endfor %}
}
- updateReadings();
+ updateReadings();
}
-
+
// trend line management
- d3.csv("./scenario_history.csv", function(data) {
+ d3.csv("./scenario_history.txt", function(data) {
// ***************************************
// Create the trend line
{% for scenario,iteration in scenario_stats.iteritems() -%}
- // for scenario {{scenario}}
+ // for scenario {{scenario}}
// Filter results
- var trend{{loop.index}} = data.filter(function(row) {
+ var trend{{loop.index}} = data.filter(function(row) {
return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
})
- // Parse the date
+ // Parse the date
trend{{loop.index}}.forEach(function(d) {
d.date = parseDate(d.date);
d.score = +d.score
@@ -44,7 +44,7 @@
var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
// ****************************************
{%- endfor %}
- });
+ });
if ( !window.isLoaded ) {
window.addEventListener("load", function() {
onDocumentReady();
@@ -61,7 +61,7 @@ $(document).ready(function (){
});
})
</script>
-
+
</head>
<body>
<div class="container">
@@ -72,8 +72,8 @@ $(document).ready(function (){
<li class="active"><a href="../../index.html">Home</a></li>
<li><a href="status-apex.html">Apex</a></li>
<li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-daisy.html">Daisy</a></li>
- <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-fuel@x86.html">fuel@x86</a></li>
+ <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
<li><a href="status-joid.html">Joid</a></li>
</ul>
</nav>
@@ -134,13 +134,13 @@ $(document).ready(function (){
<tr class="tr-weather-weather">
{% for test in items[scenario] -%}
{% if test.getCriteria() > 2 -%}
- <td><img src="../../../img/weather-clear.png"></td>
+ <td><img src="../../img/weather-clear.png"></td>
{%- elif test.getCriteria() > 1 -%}
- <td><img src="../../../img/weather-few-clouds.png"></td>
+ <td><img src="../../img/weather-few-clouds.png"></td>
{%- elif test.getCriteria() > 0 -%}
- <td><img src="../../../img/weather-overcast.png"></td>
+ <td><img src="../../img/weather-overcast.png"></td>
{%- elif test.getCriteria() > -1 -%}
- <td><img src="../../../img/weather-storm.png"></td>
+ <td><img src="../../img/weather-storm.png"></td>
{%- endif %}
{%- endfor %}
</tr>
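
The image paths lose one ../ level because the status pages are now served one directory shallower. The criteria-to-icon mapping itself is unchanged; expressed in Python it amounts to (a sketch, icon file names taken from the template):

    def weather_icon(criteria):
        # Map a 0-3 test score to the weather image used by the template.
        if criteria > 2:
            return "weather-clear.png"
        elif criteria > 1:
            return "weather-few-clouds.png"
        elif criteria > 0:
            return "weather-overcast.png"
        return "weather-storm.png"
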
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index c89e619c0..9834f0753 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -33,27 +33,29 @@ class TestCase(object):
'bgpvpn': 'bgpvpn',
'rally_full': 'Rally (full)',
'vims': 'vIMS',
- 'doctor': 'Doctor',
+ 'doctor-notification': 'Doctor',
'promise': 'Promise',
'moon': 'Moon',
'copper': 'Copper',
'security_scan': 'Security',
'multisite': 'Multisite',
- 'domino': 'Domino',
- 'odl-sfc': 'SFC',
+ 'domino-multinode': 'Domino',
+ 'functest-odl-sfc': 'SFC',
'onos_sfc': 'SFC',
- 'parser': 'Parser',
+ 'parser-basics': 'Parser',
'connection_check': 'Health (connection)',
'api_check': 'Health (api)',
'snaps_smoke': 'SNAPS',
'snaps_health_check': 'Health (dhcp)',
- 'netready': 'Netready',
+ 'gluon_vping': 'Netready',
'fds': 'FDS',
'cloudify_ims': 'vIMS (Cloudify)',
'orchestra_ims': 'OpenIMS (OpenBaton)',
'opera_ims': 'vIMS (Open-O)',
'vyos_vrouter': 'vyos',
- 'barometer': 'Barometer'}
+ 'barometercollectd': 'Barometer',
+ 'odl_netvirt': 'Netvirt',
+ 'security_scan': 'Security'}
try:
self.displayName = display_name_matrix[self.name]
except:
@@ -119,50 +121,5 @@ class TestCase(object):
";IsRunnable" + str(self.isRunnable))
return testcase
- def getDbName(self):
- # Correspondance name of the test case / name in the DB
- # ideally we should modify the DB to avoid such interface....
- # '<name in the config>':'<name in the DB>'
- # I know it is uggly...
- test_match_matrix = {'healthcheck': 'healthcheck',
- 'vping_ssh': 'vping_ssh',
- 'vping_userdata': 'vping_userdata',
- 'odl': 'odl',
- 'onos': 'onos',
- 'ocl': 'ocl',
- 'tempest_smoke_serial': 'tempest_smoke_serial',
- 'tempest_full_parallel': 'tempest_full_parallel',
- 'tempest_defcore': 'tempest_defcore',
- 'refstack_defcore': 'refstack_defcore',
- 'rally_sanity': 'rally_sanity',
- 'bgpvpn': 'bgpvpn',
- 'rally_full': 'rally_full',
- 'vims': 'vims',
- 'doctor': 'doctor-notification',
- 'promise': 'promise',
- 'moon': 'moon_authentication',
- 'copper': 'copper-notification',
- 'security_scan': 'security',
- 'multisite': 'multisite',
- 'domino': 'domino-multinode',
- 'odl-sfc': 'functest-odl-sfc',
- 'onos_sfc': 'onos_sfc',
- 'parser': 'parser-basics',
- 'connection_check': 'connection_check',
- 'api_check': 'api_check',
- 'snaps_smoke': 'snaps_smoke',
- 'snaps_health_check': 'snaps_health_check',
- 'netready': 'gluon_vping',
- 'fds': 'fds',
- 'cloudify_ims': 'cloudify_ims',
- 'orchestra_ims': 'orchestra_ims',
- 'opera_ims': 'opera_ims',
- 'vyos_vrouter': 'vyos_vrouter',
- 'barometer': 'barometercollectd'}
- try:
- return test_match_matrix[self.name]
- except:
- return "unknown"
-
def getDisplayName(self):
return self.displayName
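
With getDbName removed, the reporting code keys test cases directly by their database names (case_name), so display_name_matrix now maps DB names straight to display labels (doctor-notification, domino-multinode, and so on). A hedged sketch of the same lookup with an explicit fallback instead of a bare except:

    DISPLAY_NAMES = {
        'doctor-notification': 'Doctor',
        'domino-multinode': 'Domino',
        'functest-odl-sfc': 'SFC',
    }

    def display_name(case_name):
        # Fall back to the raw DB name when no friendly label is known.
        return DISPLAY_NAMES.get(case_name, case_name)
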
diff --git a/utils/test/reporting/js/trend-qtip.js b/utils/test/reporting/js/trend-qtip.js
new file mode 100644
index 000000000..d4c8735d9
--- /dev/null
+++ b/utils/test/reporting/js/trend-qtip.js
@@ -0,0 +1,76 @@
+// ******************************************
+// Trend line for reporting
+// based on scenario_history.txt
+// where data looks like
+// date,scenario,installer,detail,score
+// 2016-09-22 13:12,os-nosdn-fdio-noha,apex,4/12,33.0
+// 2016-09-22 13:13,os-odl_l2-fdio-noha,apex,12/15,80.0
+// 2016-09-22 13:13,os-odl_l2-sfc-noha,apex,18/24,75.0
+// .....
+// ******************************************
+// Set the dimensions of the canvas / graph
+var trend_margin = {top: 20, right: 30, bottom: 50, left: 40},
+ trend_width = 300 - trend_margin.left - trend_margin.right,
+ trend_height = 130 - trend_margin.top - trend_margin.bottom;
+
+// Parse the date / time
+var parseDate = d3.time.format("%Y-%m-%d %H:%M").parse;
+
+// Set the ranges
+var trend_x = d3.time.scale().range([0, trend_width]);
+var trend_y = d3.scale.linear().range([trend_height, 0]);
+
+// Define the axes
+var trend_xAxis = d3.svg.axis().scale(trend_x)
+ .orient("bottom").ticks(2).tickFormat(d3.time.format("%m-%d"));
+
+var trend_yAxis = d3.svg.axis().scale(trend_y)
+ .orient("left").ticks(4, "s");
+
+// Define the line
+var valueline = d3.svg.line()
+ .x(function(d) { return trend_x(d.date); })
+ .y(function(d) { return trend_y(d.score); });
+
+var trend = function(container, trend_data) {
+
+ var trend_svg = d3.select(container)
+ .append("svg")
+ .attr("width", trend_width + trend_margin.left + trend_margin.right)
+ .attr("height", trend_height + trend_margin.top + trend_margin.bottom)
+ .attr("style", "font-size: small")
+ .append("g")
+ .attr("transform",
+ "translate(" + trend_margin.left + "," + trend_margin.top + ")");
+
+ // Scale the range of the data
+ trend_x.domain(d3.extent(trend_data, function(d) { return d.date; }));
+ trend_y.domain([0, d3.max(trend_data, function(d) { return d.score; })]);
+
+ // Add the X Axis
+ trend_svg.append("g")
+ .attr("class", "x axis")
+ .attr("transform", "translate(0," + trend_height + ")")
+ .call(trend_xAxis);
+
+ // Add the Y Axis
+ trend_svg.append("g")
+ .attr("class", "y axis")
+ .call(trend_yAxis);
+
+ // Add the valueline path.
+ trend_svg.append("path")
+ .attr("class", "line")
+ .attr("d", valueline(trend_data))
+ .attr("stroke", "steelblue")
+ .attr("fill", "none");
+
+ trend_svg.selectAll(".dot")
+ .data(trend_data)
+ .enter().append("circle")
+ .attr("r", 2.5)
+ .attr("cx", function(d) { return trend_x(d.date); })
+ .attr("cy", function(d) { return trend_y(d.score); });
+
+ return trend;
+}
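
The chart consumes rows of date,scenario,installer,detail,score, as documented in the header comment. A small Python sketch that parses such a history file into per-scenario score series (field names follow the header; the path is illustrative):

    import csv
    from collections import defaultdict
    from datetime import datetime

    def load_trends(path="scenario_history.txt"):
        # Group (timestamp, score) pairs by scenario name.
        trends = defaultdict(list)
        with open(path) as f:
            for row in csv.DictReader(f):
                when = datetime.strptime(row["date"], "%Y-%m-%d %H:%M")
                trends[row["scenario"]].append((when, float(row["score"])))
        return trends
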
diff --git a/utils/test/reporting/qtip/__init__.py b/utils/test/reporting/qtip/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/reporting/qtip/__init__.py
diff --git a/utils/test/reporting/qtip/index.html b/utils/test/reporting/qtip/index.html
new file mode 100644
index 000000000..0f9df8564
--- /dev/null
+++ b/utils/test/reporting/qtip/index.html
@@ -0,0 +1,51 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">QTIP reporting page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-main">
+ <h2>QTIP</h2>
+ QTIP is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
+ <br>The QTIP framework is deployed in several OPNFV community labs.
+ <br>It is installer, infrastructure and application independent.
+
+ <h2>Useful Links</h2>
+ <li><a href="https://wiki.opnfv.org/download/attachments/5734608/qtip%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">QTIP in Depth</a></li>
+ <li><a href="https://git.opnfv.org/cgit/qtip">QTIP Repo</a></li>
+ <li><a href="https://wiki.opnfv.org/display/qtip">QTIP Project</a></li>
+ <li><a href="https://build.opnfv.org/ci/view/qtip/">QTIP Jenkins page</a></li>
+ <li><a href="https://jira.opnfv.org/browse/QTIP-119?jql=project%20%3D%20QTIP">JIRA</a></li>
+
+ </div>
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/qtip/reporting-status.py b/utils/test/reporting/qtip/reporting-status.py
new file mode 100644
index 000000000..5967cf6b9
--- /dev/null
+++ b/utils/test/reporting/qtip/reporting-status.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import os
+
+import jinja2
+import utils.reporting_utils as rp_utils
+import utils.scenarioResult as sr
+
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Qtip-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
+
+
+def prepare_profile_file(version):
+ profile_dir = './display/{}/qtip'.format(version)
+ if not os.path.exists(profile_dir):
+ os.makedirs(profile_dir)
+
+    profile_file = '{}/scenario_history.txt'.format(profile_dir)
+ if not os.path.exists(profile_file):
+ with open(profile_file, 'w') as f:
+ info = 'date,scenario,installer,details,score\n'
+ f.write(info)
+ f.close()
+ return profile_file
+
+
+def profile_results(results, installer, profile_fd):
+ result_criterias = {}
+ for s_p, s_p_result in results.iteritems():
+ ten_criteria = len(s_p_result)
+ ten_score = sum(s_p_result)
+
+ LASTEST_TESTS = rp_utils.get_config(
+ 'general.nb_iteration_tests_success_criteria')
+ four_result = s_p_result[:LASTEST_TESTS]
+ four_criteria = len(four_result)
+ four_score = sum(four_result)
+
+ s_four_score = str(four_score / four_criteria)
+ s_ten_score = str(ten_score / ten_criteria)
+
+ info = '{},{},{},{},{}\n'.format(reportingDate,
+ s_p,
+ installer,
+ s_ten_score,
+ s_four_score)
+ profile_fd.write(info)
+ result_criterias[s_p] = sr.ScenarioResult('OK',
+ s_four_score,
+ s_ten_score,
+ '100')
+
+ logger.info("--------------------------")
+ return result_criterias
+
+
+def render_html(prof_results, installer, version):
+ template_loader = jinja2.FileSystemLoader(".")
+ template_env = jinja2.Environment(loader=template_loader,
+ autoescape=True)
+
+ template_file = "./qtip/template/index-status-tmpl.html"
+ template = template_env.get_template(template_file)
+
+ render_outcome = template.render(prof_results=prof_results,
+ installer=installer,
+ period=PERIOD,
+ version=version,
+ date=reportingDate)
+
+ with open('./display/{}/qtip/status-{}.html'.format(version, installer),
+ 'wb') as fh:
+ fh.write(render_outcome)
+
+
+def render_reporter():
+ for version in versions:
+ profile_file = prepare_profile_file(version)
+ profile_fd = open(profile_file, 'a')
+ for installer in installers:
+ results = rp_utils.getQtipResults(version, installer)
+ prof_results = profile_results(results, installer, profile_fd)
+ render_html(prof_results=prof_results,
+ installer=installer,
+ version=version)
+ profile_fd.close()
+ logger.info("Manage export CSV")
+ rp_utils.generate_csv(profile_file)
+ logger.info("CSV generated...")
+
+if __name__ == '__main__':
+ render_reporter()
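
profile_results averages every score in the retention window (the "ten days" figure) and, separately, the most recent nb_iteration_tests_success_criteria runs (the "four iterations" figure). The arithmetic in isolation, assuming the API returns results newest first and using float division for clarity (the script itself runs under Python 2 division semantics):

    def window_scores(scores, last_n=4):
        # scores is ordered most-recent-first.
        recent = scores[:last_n]
        four = float(sum(recent)) / len(recent)
        ten = float(sum(scores)) / len(scores)
        return four, ten

    # window_scores([80.0, 75.0, 90.0, 60.0, 50.0]) -> (76.25, 71.0)
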
diff --git a/utils/test/reporting/qtip/template/index-status-tmpl.html b/utils/test/reporting/qtip/template/index-status-tmpl.html
new file mode 100644
index 000000000..26da36ceb
--- /dev/null
+++ b/utils/test/reporting/qtip/template/index-status-tmpl.html
@@ -0,0 +1,86 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/trend-qtip.js"></script>
+ <script>
+ // trend line management
+ d3.csv("./scenario_history.csv", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario in prof_results.keys() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ </script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">QTIP status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="25%">Pod/Scenario</th>
+ <th width="25%">Trend</th>
+ <th width="25%">Last 4 Iterations</th>
+ <th width="25%">Last 10 Days</th>
+ </tr>
+ {% for scenario,result in prof_results.iteritems() -%}
+ <tr class="tr-ok">
+ <td>{{scenario}}</td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{prof_results[scenario].getFourDaysScore()}}</td>
+ <td>{{prof_results[scenario].getTenDaysScore()}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml
index 8c5ce1383..1692f481d 100644
--- a/utils/test/reporting/reporting.yaml
+++ b/utils/test/reporting/reporting.yaml
@@ -3,7 +3,6 @@ general:
installers:
- apex
- compass
- - daisy
- fuel
- joid
@@ -37,7 +36,6 @@ functest:
blacklist:
- ovno
- security_scan
- - rally_sanity
- healthcheck
- odl_netvirt
- aaa
@@ -45,13 +43,12 @@ functest:
- orchestra_ims
- juju_epc
- orchestra
- - promise
max_scenario_criteria: 50
test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
log_level: ERROR
jenkins_url: https://build.opnfv.org/ci/view/functest/job/
- exclude_noha: "False"
- exclude_virtual: "False"
+ exclude_noha: False
+ exclude_virtual: False
yardstick:
test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
@@ -63,6 +60,8 @@ storperf:
log_level: ERROR
qtip:
+ log_level: ERROR
+ period: 1
bottleneck:
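
The new qtip block gives the QTIP reporter its own log_level and retention period. Configuration is read through rp_utils.get_config with dotted keys such as qtip.period; a plausible sketch of that lookup (the real helper lives in utils/reporting_utils.py):

    import yaml

    def get_config(dotted_key, config_file="reporting.yaml"):
        # Walk the YAML tree following 'a.b.c' style keys.
        with open(config_file) as f:
            node = yaml.safe_load(f)
        for part in dotted_key.split("."):
            node = node[part]
        return node

    # get_config("qtip.period") -> 1
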
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py
index aab7a3f4f..599a93818 100644
--- a/utils/test/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/utils/reporting_utils.py
@@ -10,6 +10,7 @@ from urllib2 import Request, urlopen, URLError
import logging
import json
import os
+import requests
import pdfkit
import yaml
@@ -198,6 +199,35 @@ def getScenarioStatus(installer, version):
return result_dict
+def getQtipResults(version, installer):
+ period = get_config('qtip.period')
+ url_base = get_config('testapi.url')
+
+ url = ("http://" + url_base + "?project=qtip" +
+ "&installer=" + installer +
+ "&version=" + version + "&period=" + str(period))
+ request = Request(url)
+
+ try:
+ response = urlopen(request)
+ k = response.read()
+ response.close()
+ results = json.loads(k)['results']
+ except URLError as e:
+ print('Got an error code:', e)
+
+ result_dict = {}
+ if results:
+ for r in results:
+ key = '{}/{}'.format(r['pod_name'], r['scenario'])
+ if key not in result_dict.keys():
+ result_dict[key] = []
+ result_dict[key].append(r['details']['score'])
+
+ # return scenario_results
+ return result_dict
+
+
def getNbtestOk(results):
nb_test_ok = 0
for r in results:
@@ -299,6 +329,44 @@ def getScenarioPercent(scenario_score, scenario_criteria):
# *********
+# Functest
+# *********
+def getFunctestConfig(version=""):
+ config_file = get_config('functest.test_conf') + version
+ response = requests.get(config_file)
+ return yaml.safe_load(response.text)
+
+
+def getArchitectures(scenario_results):
+ supported_arch = ['x86']
+ if (len(scenario_results) > 0):
+ for scenario_result in scenario_results.values():
+ for value in scenario_result:
+ if ("armband" in value['build_tag']):
+ supported_arch.append('aarch64')
+ return supported_arch
+ return supported_arch
+
+
+def filterArchitecture(results, architecture):
+ filtered_results = {}
+    for name, result_list in results.items():
+        filtered_values = []
+        for value in result_list:
+            if (architecture == "x86"):
+                # drop aarch64 results
+                if ("armband" not in value['build_tag']):
+                    filtered_values.append(value)
+            elif (architecture == "aarch64"):
+ # drop x86 results
+ if ("armband" in value['build_tag']):
+ filtered_values.append(value)
+ if (len(filtered_values) > 0):
+ filtered_results[name] = filtered_values
+ return filtered_results
+
+
+# *********
# Yardstick
# *********
def subfind(given_list, pattern_list):
@@ -367,6 +435,14 @@ def export_csv(scenario_file_name, installer, version):
scenario_installer_file.close
+def generate_csv(scenario_file):
+ import shutil
+ # csv
+ # generate sub files based on scenario_history.txt
+ csv_file = scenario_file.replace('txt', 'csv')
+ shutil.copy2(scenario_file, csv_file)
+
+
def export_pdf(pdf_path, pdf_doc_name):
try:
pdfkit.from_file(pdf_path, pdf_doc_name)
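
Architecture detection hinges entirely on the armband marker inside build_tag: any result whose tag contains it counts as aarch64, everything else as x86. A usage sketch against hand-made results (data and tags are illustrative, and the helpers are assumed importable from utils.reporting_utils):

    from utils.reporting_utils import getArchitectures, filterArchitecture

    results = {
        "os-nosdn-nofeature-ha": [
            {"build_tag": "jenkins-functest-fuel-baremetal-daily-master-100"},
            {"build_tag": "jenkins-functest-armband-baremetal-daily-master-7"},
        ],
    }

    print(getArchitectures(results))               # ['x86', 'aarch64']
    print(filterArchitecture(results, "aarch64"))  # keeps only the armband entry
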
diff --git a/utils/test/testapi/opnfv_testapi/resources/testcase_models.py b/utils/test/testapi/opnfv_testapi/resources/testcase_models.py
index 86aed25ef..2379dfc4c 100644
--- a/utils/test/testapi/opnfv_testapi/resources/testcase_models.py
+++ b/utils/test/testapi/opnfv_testapi/resources/testcase_models.py
@@ -13,12 +13,13 @@ from opnfv_testapi.tornado_swagger import swagger
@swagger.model()
class TestcaseCreateRequest(models.ModelBase):
def __init__(self, name, url=None, description=None,
- tier=None, ci_loop=None, criteria=None,
- blocking=None, dependencies=None, run=None,
+ catalog_description=None, tier=None, ci_loop=None,
+ criteria=None, blocking=None, dependencies=None, run=None,
domains=None, tags=None, version=None):
self.name = name
self.url = url
self.description = description
+ self.catalog_description = catalog_description
self.tier = tier
self.ci_loop = ci_loop
self.criteria = criteria
@@ -34,11 +35,12 @@ class TestcaseCreateRequest(models.ModelBase):
@swagger.model()
class TestcaseUpdateRequest(models.ModelBase):
def __init__(self, name=None, description=None, project_name=None,
- tier=None, ci_loop=None, criteria=None,
- blocking=None, dependencies=None, run=None,
+ catalog_description=None, tier=None, ci_loop=None,
+ criteria=None, blocking=None, dependencies=None, run=None,
domains=None, tags=None, version=None, trust=None):
self.name = name
self.description = description
+ self.catalog_description = catalog_description
self.project_name = project_name
self.tier = tier
self.ci_loop = ci_loop
@@ -56,14 +58,15 @@ class TestcaseUpdateRequest(models.ModelBase):
class Testcase(models.ModelBase):
def __init__(self, _id=None, name=None, project_name=None,
description=None, url=None, creation_date=None,
- tier=None, ci_loop=None, criteria=None,
- blocking=None, dependencies=None, run=None,
+ catalog_description=None, tier=None, ci_loop=None,
+ criteria=None, blocking=None, dependencies=None, run=None,
domains=None, tags=None, version=None,
trust=None):
self._id = None
self.name = None
self.project_name = None
self.description = None
+ self.catalog_description = None
self.url = None
self.creation_date = None
self.tier = None
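
The optional catalog_description field is threaded through all three models so the test catalog can show a longer, user-facing blurb alongside the short description. A hedged example of a create request carrying it (field values are made up):

    from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest

    req = TestcaseCreateRequest(
        name="vping_ssh",
        url="https://git.opnfv.org/functest",
        description="ping between two VMs over ssh",
        catalog_description="Basic L3 connectivity check shown in the catalog")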