path: root/reporting
author     Morgan Richomme <morgan.richomme@orange.com>   2017-09-25 06:34:10 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>          2017-09-25 06:34:10 +0000
commit     94b28843c6d0ffc3289c208ab21211806d3d9ae5 (patch)
tree       49d040954e4d68cdbfd1c19c8f98cbef2565f238 /reporting
parent     7f2f6965e1f48922f89373eedbd6ae3049e83015 (diff)
parent     2bbdff796d8735148f6e386d16b1abf8dc8b07ab (diff)
Merge "cloudify_ims reporting fixes"
Diffstat (limited to 'reporting')
-rwxr-xr-x  reporting/reporting/functest/reporting-status.py             96
-rwxr-xr-x  reporting/reporting/functest/reporting-vims.py              232
-rw-r--r--  reporting/reporting/functest/template/index-vims-tmpl.html   13
-rw-r--r--  reporting/reporting/functest/testCase.py                      5
4 files changed, 182 insertions(+), 164 deletions(-)
diff --git a/reporting/reporting/functest/reporting-status.py b/reporting/reporting/functest/reporting-status.py
index 02bf67d..808c841 100755
--- a/reporting/reporting/functest/reporting-status.py
+++ b/reporting/reporting/functest/reporting-status.py
@@ -22,7 +22,7 @@ Functest reporting status
"""
# Logger
-logger = rp_utils.getLogger("Functest-Status")
+LOGGER = rp_utils.getLogger("Functest-Status")
# Initialization
testValid = []
@@ -46,16 +46,16 @@ exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
functest_yaml_config = rp_utils.getFunctestConfig()
-logger.info("*******************************************")
-logger.info("* *")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention: %s days *" % period)
-logger.info("* Log level: %s *" % log_level)
-logger.info("* *")
-logger.info("* Virtual PODs exluded: %s *" % exclude_virtual)
-logger.info("* NOHA scenarios excluded: %s *" % exclude_noha)
-logger.info("* *")
-logger.info("*******************************************")
+LOGGER.info("*******************************************")
+LOGGER.info("* *")
+LOGGER.info("* Generating reporting scenario status *")
+LOGGER.info("* Data retention: %s days *", period)
+LOGGER.info("* Log level: %s *", log_level)
+LOGGER.info("* *")
+LOGGER.info("* Virtual PODs exluded: %s *", exclude_virtual)
+LOGGER.info("* NOHA scenarios excluded: %s *", exclude_noha)
+LOGGER.info("* *")
+LOGGER.info("*******************************************")
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")
@@ -75,9 +75,9 @@ for tier in config_tiers:
elif tier['order'] == 2:
for case in tier['testcases']:
if case['case_name'] not in blacklist:
- testValid.append(tc.TestCase(case['case_name'],
- case['case_name'],
- case['dependencies']))
+ otherTestCases.append(tc.TestCase(case['case_name'],
+ case['case_name'],
+ case['dependencies']))
elif tier['order'] > 2:
for case in tier['testcases']:
if case['case_name'] not in blacklist:
@@ -85,7 +85,7 @@ for tier in config_tiers:
"functest",
case['dependencies']))
-logger.debug("Functest reporting start")
+LOGGER.debug("Functest reporting start")
# For all the versions
for version in versions:
@@ -101,7 +101,7 @@ for version in versions:
# initiate scenario file if it does not exist
if not os.path.isfile(scenario_file_name):
with open(scenario_file_name, "a") as my_file:
- logger.debug("Create scenario file: %s" % scenario_file_name)
+ LOGGER.debug("Create scenario file: %s", scenario_file_name)
my_file.write("date,scenario,installer,detail,score\n")
for installer in installers:
@@ -113,10 +113,10 @@ for version in versions:
version)
# get nb of supported architecture (x86, aarch64)
architectures = rp_utils.getArchitectures(scenario_results)
- logger.info("Supported architectures: {}".format(architectures))
+ LOGGER.info("Supported architectures: %s", architectures)
for architecture in architectures:
- logger.info("architecture: {}".format(architecture))
+ LOGGER.info("Architecture: %s", architecture)
# Consider only the results for the selected architecture
# i.e drop x86 for aarch64 and vice versa
filter_results = rp_utils.filterArchitecture(scenario_results,
@@ -133,10 +133,10 @@ for version in versions:
# For all the scenarios get results
for s, s_result in filter_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s:" %
- (installer, version, s))
- logger.debug("Scenario results: %s" % s_result)
+ LOGGER.info("---------------------------------")
+ LOGGER.info("installer %s, version %s, scenario %s:",
+ installer, version, s)
+ LOGGER.debug("Scenario results: %s", s_result)
# Green or Red light for a given scenario
nb_test_runnable_for_this_scenario = 0
@@ -146,11 +146,11 @@ for version in versions:
s_url = ""
if len(s_result) > 0:
build_tag = s_result[len(s_result)-1]['build_tag']
- logger.debug("Build tag: %s" % build_tag)
+ LOGGER.debug("Build tag: %s", build_tag)
s_url = rp_utils.getJenkinsUrl(build_tag)
if s_url is None:
s_url = "http://testresultS.opnfv.org/reporting"
- logger.info("last jenkins url: %s" % s_url)
+ LOGGER.info("last jenkins url: %s", s_url)
testCases2BeDisplayed = []
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
@@ -160,24 +160,24 @@ for version in versions:
for test_case in testValid:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
+ LOGGER.debug("testcase %s (%s) is %s",
+ test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable)
time.sleep(1)
if test_case.isRunnable:
name = test_case.getName()
displayName = test_case.getDisplayName()
project = test_case.getProject()
nb_test_runnable_for_this_scenario += 1
- logger.info(" Searching results for case %s " %
- (displayName))
+ LOGGER.info(" Searching results for case %s ",
+ displayName)
result = rp_utils.getResult(name, installer,
s, version)
# if no result set the value to 0
if result < 0:
result = 0
- logger.info(" >>>> Test score = " + str(result))
+ LOGGER.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
@@ -193,17 +193,17 @@ for version in versions:
for test_case in otherTestCases:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
+ LOGGER.debug("testcase %s (%s) is %s",
+ test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable)
time.sleep(1)
if test_case.isRunnable:
name = test_case.getName()
displayName = test_case.getDisplayName()
project = test_case.getProject()
- logger.info(" Searching results for case %s " %
- (displayName))
+ LOGGER.info(" Searching results for case %s ",
+ displayName)
result = rp_utils.getResult(name, installer,
s, version)
# at least 1 result for the test
@@ -218,13 +218,13 @@ for version in versions:
True,
4))
else:
- logger.debug("No results found")
+ LOGGER.debug("No results found")
items[s] = testCases2BeDisplayed
except Exception:
- logger.error("Error: installer %s, version %s, scenario %s"
- % (installer, version, s))
- logger.error("No data available: %s" % (sys.exc_info()[0]))
+ LOGGER.error("Error installer %s, version %s, scenario %s",
+ installer, version, s)
+ LOGGER.error("No data available: %s", sys.exc_info()[0])
# **********************************************
# Evaluate the results for scenario validation
@@ -243,11 +243,11 @@ for version in versions:
s_status = "KO"
if scenario_score < scenario_criteria:
- logger.info(">>>> scenario not OK, score = %s/%s" %
- (scenario_score, scenario_criteria))
+ LOGGER.info(">>>> scenario not OK, score = %s/%s",
+ scenario_score, scenario_criteria)
s_status = "KO"
else:
- logger.info(">>>>> scenario OK, save the information")
+ LOGGER.info(">>>>> scenario OK, save the information")
s_status = "OK"
path_validation_file = ("./display/" + version +
"/functest/" +
@@ -270,7 +270,7 @@ for version in versions:
s_score,
s_score_percent,
s_url)
- logger.info("--------------------------")
+ LOGGER.info("--------------------------")
templateLoader = jinja2.FileSystemLoader(".")
templateEnv = jinja2.Environment(
@@ -294,9 +294,9 @@ for version in versions:
installer_display + ".html", "wb") as fh:
fh.write(outputText)
- logger.info("Manage export CSV & PDF")
+ LOGGER.info("Manage export CSV & PDF")
rp_utils.export_csv(scenario_file_name, installer_display, version)
- logger.error("CSV generated...")
+ LOGGER.error("CSV generated...")
# Generate outputs for export
# pdf
@@ -306,4 +306,4 @@ for version in versions:
pdf_doc_name = ("./display/" + version +
"/functest/status-" + installer_display + ".pdf")
rp_utils.export_pdf(pdf_path, pdf_doc_name)
- logger.info("PDF generated...")
+ LOGGER.info("PDF generated...")
diff --git a/reporting/reporting/functest/reporting-vims.py b/reporting/reporting/functest/reporting-vims.py
index 14fddbe..3b25e91 100755
--- a/reporting/reporting/functest/reporting-vims.py
+++ b/reporting/reporting/functest/reporting-vims.py
@@ -1,112 +1,128 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+"""
+vIMS reporting status
+"""
from urllib2 import Request, urlopen, URLError
import json
import jinja2
-# manage conf
-import utils.reporting_utils as rp_utils
-
-logger = rp_utils.getLogger("vIMS")
-
-
-def sig_test_format(sig_test):
- nbPassed = 0
- nbFailures = 0
- nbSkipped = 0
- for data_test in sig_test:
- if data_test['result'] == "Passed":
- nbPassed += 1
- elif data_test['result'] == "Failed":
- nbFailures += 1
- elif data_test['result'] == "Skipped":
- nbSkipped += 1
- total_sig_test_result = {}
- total_sig_test_result['passed'] = nbPassed
- total_sig_test_result['failures'] = nbFailures
- total_sig_test_result['skipped'] = nbSkipped
- return total_sig_test_result
-
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-url_base = rp_utils.get_config('testapi.url')
-
-logger.info("****************************************")
-logger.info("* Generating reporting vIMS *")
-logger.info("* Data retention = %s days *" % period)
-logger.info("* *")
-logger.info("****************************************")
-
-installers = rp_utils.get_config('general.installers')
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
-logger.info("Start processing....")
+import reporting.utils.reporting_utils as rp_utils
+
+LOGGER = rp_utils.getLogger("vIMS")
+
+PERIOD = rp_utils.get_config('general.period')
+VERSIONS = rp_utils.get_config('general.versions')
+URL_BASE = rp_utils.get_config('testapi.url')
+
+LOGGER.info("****************************************")
+LOGGER.info("* Generating reporting vIMS *")
+LOGGER.info("* Data retention = %s days *", PERIOD)
+LOGGER.info("* *")
+LOGGER.info("****************************************")
+
+INSTALLERS = rp_utils.get_config('general.installers')
+STEP_ORDER = ["initialisation", "orchestrator", "vnf", "test_vnf"]
+LOGGER.info("Start vIMS reporting processing....")
# For all the versions
-for version in versions:
- for installer in installers:
- logger.info("Search vIMS results for installer: %s, version: %s"
- % (installer, version))
- request = Request("http://" + url_base + '?case=vims&installer=' +
- installer + '&version=' + version)
-
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as e:
- logger.error("Error code: %s" % e)
-
- test_results = results['results']
-
- logger.debug("Results found: %s" % test_results)
-
- scenario_results = {}
- for r in test_results:
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- logger.debug("Search for success criteria")
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
- sig_test = result['details']['sig_test']['result']
- if not sig_test == "" and isinstance(sig_test, list):
- format_result = sig_test_format(sig_test)
- if format_result['failures'] > format_result['passed']:
- result['details']['sig_test']['duration'] = 0
- result['details']['sig_test']['result'] = format_result
- nb_step_ok = 0
- nb_step = len(result['details'])
-
- for step_name, step_result in result['details'].items():
- if step_result['duration'] != 0:
- nb_step_ok += 1
- m, s = divmod(step_result['duration'], 60)
- m_display = ""
- if int(m) != 0:
- m_display += str(int(m)) + "m "
-
- step_result['duration_display'] = (m_display +
- str(int(s)) + "s")
-
- result['pr_step_ok'] = 0
- if nb_step != 0:
- result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
- try:
- logger.debug("Scenario %s, Installer %s"
- % (s_result[1]['scenario'], installer))
- res = result['details']['orchestrator']['duration']
- logger.debug("Orchestrator deployment: %s s"
- % res)
- logger.debug("vIMS deployment: %s s"
- % result['details']['vIMS']['duration'])
- logger.debug("Signaling testing: %s s"
- % result['details']['sig_test']['duration'])
- logger.debug("Signaling testing results: %s"
- % format_result)
- except Exception:
- logger.error("Data badly formatted")
- logger.debug("----------------------------------------")
+for version in VERSIONS:
+ for installer in INSTALLERS:
+
+ # get the number of supported architectures (x86, aarch64)
+ # get scenarios
+ scenario_results = rp_utils.getScenarios("functest",
+ "cloudify_ims",
+ installer,
+ version)
+
+ architectures = rp_utils.getArchitectures(scenario_results)
+ LOGGER.info("Supported architectures: %s", architectures)
+
+ for architecture in architectures:
+ LOGGER.info("Architecture: %s", architecture)
+ # Consider only the results for the selected architecture
+ # i.e drop x86 for aarch64 and vice versa
+ filter_results = rp_utils.filterArchitecture(scenario_results,
+ architecture)
+ scenario_stats = rp_utils.getScenarioStats(filter_results)
+ items = {}
+ scenario_result_criteria = {}
+
+ # in case of more than 1 architecture supported
+ # precise the architecture
+ installer_display = installer
+ if "fuel" in installer:
+ installer_display = installer + "@" + architecture
+
+ LOGGER.info("Search vIMS results for installer: %s, version: %s",
+ installer, version)
+ request = Request("http://" + URL_BASE + '?case=cloudify_ims&'
+ 'installer=' + installer + '&version=' + version)
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError as err:
+ LOGGER.error("Error code: %s", err)
+
+ test_results = results['results']
+
+ # LOGGER.debug("Results found: %s" % test_results)
+
+ scenario_results = {}
+ for r in test_results:
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ # LOGGER.debug("scenario result: %s" % scenario_results)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ for result in scenario_results[s]:
+ try:
+ format_result = result['details']['test_vnf']['result']
+
+ # round durations of the different steps
+ result['details']['orchestrator']['duration'] = round(
+ result['details']['orchestrator']['duration'], 1)
+ result['details']['vnf']['duration'] = round(
+ result['details']['vnf']['duration'], 1)
+ result['details']['test_vnf']['duration'] = round(
+ result['details']['test_vnf']['duration'], 1)
+
+ res_orch = \
+ result['details']['orchestrator']['duration']
+ res_vnf = result['details']['vnf']['duration']
+ res_test_vnf = \
+ result['details']['test_vnf']['duration']
+ res_signaling = \
+ result['details']['test_vnf']['result']['failures']
+
+ # Manage test result status
+ if res_signaling != 0:
+ LOGGER.debug("At least 1 signalig test FAIL")
+ result['details']['test_vnf']['status'] = "FAIL"
+ else:
+ LOGGER.debug("All signalig tests PASS")
+ result['details']['test_vnf']['status'] = "PASS"
+
+ LOGGER.debug("Scenario %s, Installer %s",
+ s_result[1]['scenario'], installer)
+ LOGGER.debug("Orchestrator deployment: %ss", res_orch)
+ LOGGER.debug("vIMS deployment: %ss", res_vnf)
+ LOGGER.debug("VNF testing: %ss", res_test_vnf)
+ LOGGER.debug("VNF testing results: %s", format_result)
+ except Exception as err: # pylint: disable=broad-except
+ LOGGER.error("Uncomplete data %s", err)
+ LOGGER.debug("----------------------------------------")
templateLoader = jinja2.FileSystemLoader(".")
templateEnv = jinja2.Environment(loader=templateLoader,
@@ -116,11 +132,11 @@ for version in versions:
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_results=scenario_results,
- step_order=step_order,
- installer=installer)
-
+ step_order=STEP_ORDER,
+ installer=installer_display)
+ LOGGER.debug("Generate html page for %s", installer_display)
with open("./display/" + version + "/functest/vims-" +
- installer + ".html", "wb") as fh:
+ installer_display + ".html", "wb") as fh:
fh.write(outputText)
-logger.info("vIMS report succesfully generated")
+LOGGER.info("vIMS report succesfully generated")
diff --git a/reporting/reporting/functest/template/index-vims-tmpl.html b/reporting/reporting/functest/template/index-vims-tmpl.html
index cd51607..9bd2b2f 100644
--- a/reporting/reporting/functest/template/index-vims-tmpl.html
+++ b/reporting/reporting/functest/template/index-vims-tmpl.html
@@ -22,11 +22,12 @@
<nav>
<ul class="nav nav-justified">
<li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="vims-fuel.html">Fuel</a></li>
+ <li><a href="vims-apex.html">Apex</a></li>
<li><a href="vims-compass.html">Compass</a></li>
<li><a href="vims-daisy.html">Daisy</a></li>
- <li><a href="vims-joid.html">JOID</a></li>
- <li><a href="vims-apex.html">APEX</a></li>
+ <li><a href="vims-fuel@x86.html">Fuel@x86</a></li>
+ <li><a href="vims-fuel@aarch64.html">Fuel@aarch64</a></li>
+ <li><a href="vims-joid.html">Joid</a></li>
</ul>
</nav>
</div>
@@ -58,17 +59,17 @@
<tr>
<th width="20%">Step</th>
<th width="10%">Status</th>
- <th width="10%">Duration</th>
+ <th width="10%">Duration(s)</th>
<th width="60%">Result</th>
</tr>
{% for step_od_name in step_order -%}
{% if step_od_name in result.details.keys() -%}
{% set step_result = result.details[step_od_name] -%}
- {% if step_result.duration != 0 -%}
+ {% if step_result.status == "PASS" -%}
<tr class="tr-ok">
<td>{{step_od_name}}</td>
<td><span class="glyphicon glyphicon-ok"></td>
- <td><b>{{step_result.duration_display}}</b></td>
+ <td><b>{{step_result.duration}}</b></td>
<td>{{step_result.result}}</td>
</tr>
{%- else -%}
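The template change above switches the green-row condition from a non-zero duration to the explicit status field set in reporting-vims.py, and labels the duration column in seconds. A minimal Jinja2 sketch of the new conditional, runnable on its own; 'tr-nok' for the failing branch is an assumption, since the patch excerpt only shows the tr-ok side:

    import jinja2

    TMPL = jinja2.Template(
        "{% for step in step_order if step in details %}"
        "{{ step }}: {{ 'tr-ok' if details[step].status == 'PASS'"
        " else 'tr-nok' }}\n"
        "{% endfor %}")

    # Steps missing from details (here: initialisation) are filtered out,
    # matching the template's "if step_od_name in result.details" guard.
    print(TMPL.render(
        step_order=["initialisation", "orchestrator", "vnf", "test_vnf"],
        details={'orchestrator': {'status': 'PASS'},
                 'vnf': {'status': 'PASS'},
                 'test_vnf': {'status': 'FAIL'}}))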
diff --git a/reporting/reporting/functest/testCase.py b/reporting/reporting/functest/testCase.py
index 9834f07..a182dd4 100644
--- a/reporting/reporting/functest/testCase.py
+++ b/reporting/reporting/functest/testCase.py
@@ -50,9 +50,10 @@ class TestCase(object):
'gluon_vping': 'Netready',
'fds': 'FDS',
'cloudify_ims': 'vIMS (Cloudify)',
- 'orchestra_ims': 'OpenIMS (OpenBaton)',
+ 'orchestra_openims': 'OpenIMS (OpenBaton)',
+ 'orchestra_clearwaterims': 'vIMS (OpenBaton)',
'opera_ims': 'vIMS (Open-O)',
- 'vyos_vrouter': 'vyos',
+ 'vyos_vrouter': 'vyos (Cloudify)',
'barometercollectd': 'Barometer',
'odl_netvirt': 'Netvirt',
'security_scan': 'Security'}
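The testCase.py hunk above splits the retired orchestra_ims entry into the two new OpenBaton cases and tags vyos_vrouter with its orchestrator. A short sketch of how such a display-name map is typically consumed; get_display_name and the fall-back-to-raw-name behaviour are assumptions for illustration, not code quoted from testCase.py:

    DISPLAY_NAMES = {
        'cloudify_ims': 'vIMS (Cloudify)',
        'orchestra_openims': 'OpenIMS (OpenBaton)',
        'orchestra_clearwaterims': 'vIMS (OpenBaton)',
        'vyos_vrouter': 'vyos (Cloudify)',
    }

    def get_display_name(case_name):
        # Unknown cases fall back to their raw case name.
        return DISPLAY_NAMES.get(case_name, case_name)

    assert get_display_name('vyos_vrouter') == 'vyos (Cloudify)'
    assert get_display_name('orchestra_ims') == 'orchestra_ims'  # retired id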