author    Morgan Richomme <morgan.richomme@orange.com>    2017-07-04 17:23:33 +0200
committer Morgan Richomme <morgan.richomme@orange.com>    2017-08-09 16:45:43 +0200
commit    980523c1c5fa65b4c4a786d6ddb1ddfb70a63bc6 (patch)
tree      68072820e868a4688036c2425195966167fdd8fb /reporting/reporting
parent    5eb90d88748ee25c0d1a858167d435a9498d175c (diff)
Initiate packaging of Testing reporting
Testing reporting provides:
- static reporting pages [1] (jinja2)
- landing pages including the test case catalogue [2] (angular)

It consumes the Test API to build web pages giving the status of the testing
projects (so far functest, yardstick, storperf and qtip).

Dockerization has been initiated [3]. The goal is to replace the static pages
hosted on testresults.opnfv.org by a Docker container that is regenerated and
redeployed whenever the reporting code changes. But the Docker image, and more
generally the testing reporting directory, must be refactored to:
- manage dependencies properly
- leverage tox (py27, docs, pylint, pep8)

This patch:
- sets up tox (py27, pep8, pylint, docs)
- integrates requirement management
- fixes pep8 errors
- introduces 1 dummy unit test

This patch does not:
- fix pylint errors
- create any doc

[1]: http://testresults.opnfv.org/reporting2/display/index.html
[2]: http://testresults.opnfv.org/reporting2/reporting/index.html
[3]: https://gerrit.opnfv.org/gerrit/#/c/36735/

Change-Id: I4613de7ca7036d6c6bbb8f58ade492b1d673599b
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
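The dummy unit test lands in reporting/reporting/tests/unit/utils/test_utils.py (28 lines in the diffstat below), whose hunk is not included in this excerpt. A minimal sketch of what such a test could look like, with the exercised helper assumed to be get_config() and the asserted value taken from reporting.yaml further below:

    import unittest

    from reporting.utils import reporting_utils as rp_utils


    class ReportingUtilsTesting(unittest.TestCase):
        # assumption: get_config() resolves dotted keys in reporting.yaml
        def test_get_config(self):
            self.assertEqual(rp_utils.get_config('general.period'), 10)


    if __name__ == "__main__":
        unittest.main(verbosity=2)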
Diffstat (limited to 'reporting/reporting')
-rw-r--r-- reporting/reporting/__init__.py 0
-rw-r--r-- reporting/reporting/functest/__init__.py 0
-rw-r--r-- reporting/reporting/functest/img/gauge_0.png bin 0 -> 3644 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_100.png bin 0 -> 3191 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_16.7.png bin 0 -> 3170 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_25.png bin 0 -> 3108 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_33.3.png bin 0 -> 3081 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_41.7.png bin 0 -> 3169 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_50.png bin 0 -> 3123 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_58.3.png bin 0 -> 3161 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_66.7.png bin 0 -> 3069 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_75.png bin 0 -> 3030 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_8.3.png bin 0 -> 2993 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_83.3.png bin 0 -> 3122 bytes
-rw-r--r-- reporting/reporting/functest/img/gauge_91.7.png bin 0 -> 3008 bytes
-rw-r--r-- reporting/reporting/functest/img/icon-nok.png bin 0 -> 2317 bytes
-rw-r--r-- reporting/reporting/functest/img/icon-ok.png bin 0 -> 4063 bytes
-rw-r--r-- reporting/reporting/functest/img/weather-clear.png bin 0 -> 1560 bytes
-rw-r--r-- reporting/reporting/functest/img/weather-few-clouds.png bin 0 -> 1927 bytes
-rw-r--r-- reporting/reporting/functest/img/weather-overcast.png bin 0 -> 1588 bytes
-rw-r--r-- reporting/reporting/functest/img/weather-storm.png bin 0 -> 2137 bytes
-rw-r--r-- reporting/reporting/functest/index.html 53
-rwxr-xr-x reporting/reporting/functest/reporting-status.py 309
-rwxr-xr-x reporting/reporting/functest/reporting-tempest.py 155
-rwxr-xr-x reporting/reporting/functest/reporting-vims.py 126
-rw-r--r-- reporting/reporting/functest/scenarioResult.py 29
-rw-r--r-- reporting/reporting/functest/template/index-status-tmpl.html 157
-rw-r--r-- reporting/reporting/functest/template/index-tempest-tmpl.html 95
-rw-r--r-- reporting/reporting/functest/template/index-vims-tmpl.html 92
-rw-r--r-- reporting/reporting/functest/testCase.py 125
-rw-r--r-- reporting/reporting/qtip/__init__.py 0
-rw-r--r-- reporting/reporting/qtip/index.html 51
-rw-r--r-- reporting/reporting/qtip/reporting-status.py 112
-rw-r--r-- reporting/reporting/qtip/template/index-status-tmpl.html 86
-rw-r--r-- reporting/reporting/reporting.yaml 68
-rw-r--r-- reporting/reporting/storperf/__init__.py 0
-rw-r--r-- reporting/reporting/storperf/reporting-status.py 145
-rw-r--r-- reporting/reporting/storperf/template/index-status-tmpl.html 110
-rw-r--r-- reporting/reporting/tests/__init__.py 0
-rw-r--r-- reporting/reporting/tests/unit/__init__.py 0
-rw-r--r-- reporting/reporting/tests/unit/utils/__init__.py 0
-rw-r--r-- reporting/reporting/tests/unit/utils/test_utils.py 28
-rw-r--r-- reporting/reporting/utils/__init__.py 0
-rw-r--r-- reporting/reporting/utils/reporting_utils.py 463
-rw-r--r-- reporting/reporting/utils/scenarioResult.py 33
-rw-r--r-- reporting/reporting/yardstick/__init__.py 0
-rw-r--r-- reporting/reporting/yardstick/img/gauge_0.png bin 0 -> 3644 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_100.png bin 0 -> 3191 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_16.7.png bin 0 -> 3170 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_25.png bin 0 -> 3108 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_33.3.png bin 0 -> 3081 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_41.7.png bin 0 -> 3169 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_50.png bin 0 -> 3123 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_58.3.png bin 0 -> 3161 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_66.7.png bin 0 -> 3069 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_75.png bin 0 -> 3030 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_8.3.png bin 0 -> 2993 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_83.3.png bin 0 -> 3122 bytes
-rw-r--r-- reporting/reporting/yardstick/img/gauge_91.7.png bin 0 -> 3008 bytes
-rw-r--r-- reporting/reporting/yardstick/img/icon-nok.png bin 0 -> 2317 bytes
-rw-r--r-- reporting/reporting/yardstick/img/icon-ok.png bin 0 -> 4063 bytes
-rw-r--r-- reporting/reporting/yardstick/img/weather-clear.png bin 0 -> 1560 bytes
-rw-r--r-- reporting/reporting/yardstick/img/weather-few-clouds.png bin 0 -> 1927 bytes
-rw-r--r-- reporting/reporting/yardstick/img/weather-overcast.png bin 0 -> 1588 bytes
-rw-r--r-- reporting/reporting/yardstick/img/weather-storm.png bin 0 -> 2137 bytes
-rw-r--r-- reporting/reporting/yardstick/index.html 51
-rw-r--r-- reporting/reporting/yardstick/reporting-status.py 120
-rw-r--r-- reporting/reporting/yardstick/scenarios.py 27
-rw-r--r-- reporting/reporting/yardstick/template/index-status-tmpl.html 110
69 files changed, 2545 insertions, 0 deletions
diff --git a/reporting/reporting/__init__.py b/reporting/reporting/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/__init__.py
diff --git a/reporting/reporting/functest/__init__.py b/reporting/reporting/functest/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/functest/__init__.py
diff --git a/reporting/reporting/functest/img/gauge_0.png b/reporting/reporting/functest/img/gauge_0.png
new file mode 100644
index 0000000..ecefc0e
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_0.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_100.png b/reporting/reporting/functest/img/gauge_100.png
new file mode 100644
index 0000000..e199e15
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_100.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_16.7.png b/reporting/reporting/functest/img/gauge_16.7.png
new file mode 100644
index 0000000..3e3993c
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_16.7.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_25.png b/reporting/reporting/functest/img/gauge_25.png
new file mode 100644
index 0000000..4923659
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_25.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_33.3.png b/reporting/reporting/functest/img/gauge_33.3.png
new file mode 100644
index 0000000..364574b
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_33.3.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_41.7.png b/reporting/reporting/functest/img/gauge_41.7.png
new file mode 100644
index 0000000..8c3e910
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_41.7.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_50.png b/reporting/reporting/functest/img/gauge_50.png
new file mode 100644
index 0000000..2874b9f
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_50.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_58.3.png b/reporting/reporting/functest/img/gauge_58.3.png
new file mode 100644
index 0000000..beedc8a
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_58.3.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_66.7.png b/reporting/reporting/functest/img/gauge_66.7.png
new file mode 100644
index 0000000..93f44d1
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_66.7.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_75.png b/reporting/reporting/functest/img/gauge_75.png
new file mode 100644
index 0000000..9fc261f
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_75.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_8.3.png b/reporting/reporting/functest/img/gauge_8.3.png
new file mode 100644
index 0000000..59f8657
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_8.3.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_83.3.png b/reporting/reporting/functest/img/gauge_83.3.png
new file mode 100644
index 0000000..27ae4ec
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_83.3.png
Binary files differ
diff --git a/reporting/reporting/functest/img/gauge_91.7.png b/reporting/reporting/functest/img/gauge_91.7.png
new file mode 100644
index 0000000..2808657
--- /dev/null
+++ b/reporting/reporting/functest/img/gauge_91.7.png
Binary files differ
diff --git a/reporting/reporting/functest/img/icon-nok.png b/reporting/reporting/functest/img/icon-nok.png
new file mode 100644
index 0000000..526b529
--- /dev/null
+++ b/reporting/reporting/functest/img/icon-nok.png
Binary files differ
diff --git a/reporting/reporting/functest/img/icon-ok.png b/reporting/reporting/functest/img/icon-ok.png
new file mode 100644
index 0000000..3a9de2e
--- /dev/null
+++ b/reporting/reporting/functest/img/icon-ok.png
Binary files differ
diff --git a/reporting/reporting/functest/img/weather-clear.png b/reporting/reporting/functest/img/weather-clear.png
new file mode 100644
index 0000000..a0d9677
--- /dev/null
+++ b/reporting/reporting/functest/img/weather-clear.png
Binary files differ
diff --git a/reporting/reporting/functest/img/weather-few-clouds.png b/reporting/reporting/functest/img/weather-few-clouds.png
new file mode 100644
index 0000000..acfa783
--- /dev/null
+++ b/reporting/reporting/functest/img/weather-few-clouds.png
Binary files differ
diff --git a/reporting/reporting/functest/img/weather-overcast.png b/reporting/reporting/functest/img/weather-overcast.png
new file mode 100644
index 0000000..4296246
--- /dev/null
+++ b/reporting/reporting/functest/img/weather-overcast.png
Binary files differ
diff --git a/reporting/reporting/functest/img/weather-storm.png b/reporting/reporting/functest/img/weather-storm.png
new file mode 100644
index 0000000..956f0e2
--- /dev/null
+++ b/reporting/reporting/functest/img/weather-storm.png
Binary files differ
diff --git a/reporting/reporting/functest/index.html b/reporting/reporting/functest/index.html
new file mode 100644
index 0000000..bb1bce2
--- /dev/null
+++ b/reporting/reporting/functest/index.html
@@ -0,0 +1,53 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Functest reporting page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-main">
+ <h2>Functest</h2>
+ This project develops test suites that cover functional test cases in OPNFV.
+ <br>The test suites are integrated in the continuous integration (CI) framework and used to evaluate/validate scenarios.
+ <br> Weekly meeting: every Tuesday 8 AM UTC
+ <br> IRC channel: #opnfv-testperf
+
+ <br>
+ <h2>Useful Links</h2>
+ <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
+ <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
+ <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
+ <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
+ <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
+
+ </div>
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/functest/reporting-status.py b/reporting/reporting/functest/reporting-status.py
new file mode 100755
index 0000000..48c4bb1
--- /dev/null
+++ b/reporting/reporting/functest/reporting-status.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+import sys
+import time
+
+import testCase as tc
+import scenarioResult as sr
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+"""Functest reporting status"""
+
+# Logger
+logger = rp_utils.getLogger("Functest-Status")
+
+# Initialization
+testValid = []
+otherTestCases = []
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+# init just connection_check to get the list of scenarios
+# as all the scenarios run connection_check
+healthcheck = tc.TestCase("connection_check", "functest", -1)
+
+# Retrieve the Functest configuration to detect which tests are relevant
+# according to the installer, scenario
+cf = rp_utils.get_config('functest.test_conf')
+period = rp_utils.get_config('general.period')
+versions = rp_utils.get_config('general.versions')
+installers = rp_utils.get_config('general.installers')
+blacklist = rp_utils.get_config('functest.blacklist')
+log_level = rp_utils.get_config('general.log.log_level')
+exclude_noha = rp_utils.get_config('functest.exclude_noha')
+exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
+
+functest_yaml_config = rp_utils.getFunctestConfig()
+
+logger.info("*******************************************")
+logger.info("* *")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention: %s days *" % period)
+logger.info("* Log level: %s *" % log_level)
+logger.info("* *")
+logger.info("* Virtual PODs exluded: %s *" % exclude_virtual)
+logger.info("* NOHA scenarios excluded: %s *" % exclude_noha)
+logger.info("* *")
+logger.info("*******************************************")
+
+# Retrieve test cases of Tier 1 (smoke)
+config_tiers = functest_yaml_config.get("tiers")
+
+# we consider Tier 0 (healthcheck), Tier 1 (smoke) and Tier 2 (features)
+# to validate scenarios
+# Tiers > 2 are not used to validate scenarios but we display the results anyway
+# tricky thing for the API as some tests are Functest tests
+# other tests are declared directly in the feature projects
+for tier in config_tiers:
+ if tier['order'] >= 0 and tier['order'] < 2:
+ for case in tier['testcases']:
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
+ "functest",
+ case['dependencies']))
+ elif tier['order'] == 2:
+ for case in tier['testcases']:
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
+ case['case_name'],
+ case['dependencies']))
+ elif tier['order'] > 2:
+ for case in tier['testcases']:
+ if case['case_name'] not in blacklist:
+ otherTestCases.append(tc.TestCase(case['case_name'],
+ "functest",
+ case['dependencies']))
+
+logger.debug("Functest reporting start")
+
+# For all the versions
+for version in versions:
+ # For all the installers
+ scenario_directory = "./display/" + version + "/functest/"
+ scenario_file_name = scenario_directory + "scenario_history.txt"
+
+ # check that the directory exists, if not create it
+ # (first run on new version)
+ if not os.path.exists(scenario_directory):
+ os.makedirs(scenario_directory)
+
+ # initiate scenario file if it does not exist
+ if not os.path.isfile(scenario_file_name):
+ with open(scenario_file_name, "a") as my_file:
+ logger.debug("Create scenario file: %s" % scenario_file_name)
+ my_file.write("date,scenario,installer,detail,score\n")
+
+ for installer in installers:
+
+ # get scenarios
+ scenario_results = rp_utils.getScenarios(healthcheck,
+ installer,
+ version)
+ # get the number of supported architectures (x86, aarch64)
+ architectures = rp_utils.getArchitectures(scenario_results)
+ logger.info("Supported architectures: {}".format(architectures))
+
+ for architecture in architectures:
+ logger.info("architecture: {}".format(architecture))
+ # Consider only the results for the selected architecture
+ # i.e drop x86 for aarch64 and vice versa
+ filter_results = rp_utils.filterArchitecture(scenario_results,
+ architecture)
+ scenario_stats = rp_utils.getScenarioStats(filter_results)
+ items = {}
+ scenario_result_criteria = {}
+
+ # if more than one architecture is supported,
+ # append the architecture to the installer name
+ installer_display = installer
+ if (len(architectures) > 1):
+ installer_display = installer + "@" + architecture
+
+ # For all the scenarios get results
+ for s, s_result in filter_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s:" %
+ (installer, version, s))
+ logger.debug("Scenario results: %s" % s_result)
+
+ # Green or Red light for a given scenario
+ nb_test_runnable_for_this_scenario = 0
+ scenario_score = 0
+ # url of the last jenkins log corresponding to a given
+ # scenario
+ s_url = ""
+ if len(s_result) > 0:
+ build_tag = s_result[len(s_result)-1]['build_tag']
+ logger.debug("Build tag: %s" % build_tag)
+ s_url = rp_utils.getJenkinsUrl(build_tag)
+ if s_url is None:
+ s_url = "http://testresultS.opnfv.org/reporting"
+ logger.info("last jenkins url: %s" % s_url)
+ testCases2BeDisplayed = []
+ # Check if test case is runnable / installer, scenario
+ # for the test case used for Scenario validation
+ try:
+ # 1) Manage the test cases for the scenario validation
+ # concretely Tiers 0-2
+ for test_case in testValid:
+ test_case.checkRunnable(installer, s,
+ test_case.getConstraints())
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
+ time.sleep(1)
+ if test_case.isRunnable:
+ name = test_case.getName()
+ displayName = test_case.getDisplayName()
+ project = test_case.getProject()
+ nb_test_runnable_for_this_scenario += 1
+ logger.info(" Searching results for case %s " %
+ (displayName))
+ result = rp_utils.getResult(name, installer,
+ s, version)
+ # if no result set the value to 0
+ if result < 0:
+ result = 0
+ logger.info(" >>>> Test score = " + str(result))
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(name,
+ project,
+ "",
+ result,
+ True,
+ 1))
+ scenario_score = scenario_score + result
+
+ # 2) Manage the test cases for the scenario qualification
+ # concretely Tiers > 2
+ for test_case in otherTestCases:
+ test_case.checkRunnable(installer, s,
+ test_case.getConstraints())
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
+ time.sleep(1)
+ if test_case.isRunnable:
+ name = test_case.getName()
+ displayName = test_case.getDisplayName()
+ project = test_case.getProject()
+ logger.info(" Searching results for case %s " %
+ (displayName))
+ result = rp_utils.getResult(name, installer,
+ s, version)
+ # at least 1 result for the test
+ if result > -1:
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(
+ name,
+ project,
+ "",
+ result,
+ True,
+ 4))
+ else:
+ logger.debug("No results found")
+
+ items[s] = testCases2BeDisplayed
+ except Exception:
+ logger.error("Error: installer %s, version %s, scenario %s"
+ % (installer, version, s))
+ logger.error("No data available: %s" % (sys.exc_info()[0]))
+
+ # **********************************************
+ # Evaluate the results for scenario validation
+ # **********************************************
+ # the validation criteria = nb runnable tests x 3
+ # because each test case = 0,1,2 or 3
+ scenario_criteria = nb_test_runnable_for_this_scenario * 3
+ # if 0 runnable tests set criteria at a high value
+ if scenario_criteria < 1:
+ scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
+
+ s_score = str(scenario_score) + "/" + str(scenario_criteria)
+ s_score_percent = rp_utils.getScenarioPercent(
+ scenario_score,
+ scenario_criteria)
+
+ s_status = "KO"
+ if scenario_score < scenario_criteria:
+ logger.info(">>>> scenario not OK, score = %s/%s" %
+ (scenario_score, scenario_criteria))
+ s_status = "KO"
+ else:
+ logger.info(">>>>> scenario OK, save the information")
+ s_status = "OK"
+ path_validation_file = ("./display/" + version +
+ "/functest/" +
+ "validated_scenario_history.txt")
+ with open(path_validation_file, "a") as f:
+ time_format = "%Y-%m-%d %H:%M"
+ info = (datetime.datetime.now().strftime(time_format) +
+ ";" + installer_display + ";" + s + "\n")
+ f.write(info)
+
+ # Save daily results in a file
+ with open(scenario_file_name, "a") as f:
+ info = (reportingDate + "," + s + "," + installer_display +
+ "," + s_score + "," +
+ str(round(s_score_percent)) + "\n")
+ f.write(info)
+
+ scenario_result_criteria[s] = sr.ScenarioResult(
+ s_status,
+ s_score,
+ s_score_percent,
+ s_url)
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(
+ loader=templateLoader, autoescape=True)
+
+ TEMPLATE_FILE = ("./reporting/functest/template"
+ "/index-status-tmpl.html")
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(
+ scenario_stats=scenario_stats,
+ scenario_results=scenario_result_criteria,
+ items=items,
+ installer=installer_display,
+ period=period,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/functest/status-" +
+ installer_display + ".html", "wb") as fh:
+ fh.write(outputText)
+
+ logger.info("Manage export CSV & PDF")
+ rp_utils.export_csv(scenario_file_name, installer_display, version)
+ logger.error("CSV generated...")
+
+ # Generate outputs for export
+ # pdf
+ # TODO Change once web site updated...use the current one
+ # to test pdf production
+ url_pdf = rp_utils.get_config('general.url')
+ pdf_path = ("./display/" + version +
+ "/functest/status-" + installer_display + ".html")
+ pdf_doc_name = ("./display/" + version +
+ "/functest/status-" + installer_display + ".pdf")
+ rp_utils.export_pdf(pdf_path, pdf_doc_name)
+ logger.info("PDF generated...")
diff --git a/reporting/reporting/functest/reporting-tempest.py b/reporting/reporting/functest/reporting-tempest.py
new file mode 100755
index 0000000..bc28856
--- /dev/null
+++ b/reporting/reporting/functest/reporting-tempest.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# SPDX-license-identifier: Apache-2.0
+
+from urllib2 import Request, urlopen, URLError
+from datetime import datetime
+import json
+import jinja2
+import os
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+installers = rp_utils.get_config('general.installers')
+items = ["tests", "Success rate", "duration"]
+
+CURRENT_DIR = os.getcwd()
+
+PERIOD = rp_utils.get_config('general.period')
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = rp_utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("* Generating reporting Tempest_smoke_serial *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
+
+# For all the versions
+for version in rp_utils.get_config('general.versions'):
+ for installer in installers:
+ # we consider the Tempest results of the last PERIOD days
+ url = ("http://" + rp_utils.get_config('testapi.url') +
+ "?case=tempest_smoke_serial")
+ request = Request(url + '&period=' + str(PERIOD) +
+ '&installer=' + installer +
+ '&version=' + version)
+ logger.info("Search tempest_smoke_serial results for installer %s"
+ " for version %s"
+ % (installer, version))
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError as e:
+ logger.error("Error code: %s" % e)
+ continue
+
+ test_results = results['results']
+
+ scenario_results = {}
+ criteria = {}
+ errors = {}
+
+ for r in test_results:
+ # Retrieve all the scenarios per installer
+ # (in Brahmaputra the version field was used;
+ # since Colorado the scenario field is used)
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ # For each scenario, we build a result object to deal with
+ # results, criteria and error handling
+ for result in scenario_results[s]:
+ result["start_date"] = result["start_date"].split(".")[0]
+
+ # retrieve results
+ # ****************
+ nb_tests_run = result['details']['tests']
+ nb_tests_failed = result['details']['failures']
+ if nb_tests_run != 0:
+ success_rate = 100 * ((int(nb_tests_run) -
+ int(nb_tests_failed)) /
+ float(nb_tests_run))
+ else:
+ success_rate = 0
+
+ result['details']["tests"] = nb_tests_run
+ result['details']["Success rate"] = str(success_rate) + "%"
+
+ # Criteria management
+ # *******************
+ crit_tests = False
+ crit_rate = False
+ crit_time = False
+
+ # Expect that at least 165 tests are run
+ if nb_tests_run >= criteria_nb_test:
+ crit_tests = True
+
+ # Expect a success rate of at least 90%
+ if success_rate >= criteria_success_rate:
+ crit_rate = True
+
+ # Expect the suite duration to be below 30 minutes
+ stop_date = datetime.strptime(result['stop_date'],
+ '%Y-%m-%d %H:%M:%S')
+ start_date = datetime.strptime(result['start_date'],
+ '%Y-%m-%d %H:%M:%S')
+
+ delta = stop_date - start_date
+ if (delta.total_seconds() < criteria_duration):
+ crit_time = True
+
+ result['criteria'] = {'tests': crit_tests,
+ 'Success rate': crit_rate,
+ 'duration': crit_time}
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Nb Test run: %s" % nb_tests_run)
+ logger.debug("Test duration: %s"
+ % result['details']['duration'])
+ logger.debug("Success rate: %s" % success_rate)
+ except Exception:
+ logger.error("Data format error")
+
+ # Error management
+ # ****************
+ try:
+ errors = result['details']['errors']
+ result['errors'] = errors.replace('{0}', '')
+ except Exception:
+ logger.error("Error field not present (Brahmaputra runs?)")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ TEMPLATE_FILE = "./reporting/functest/template/index-tempest-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ items=items,
+ installer=installer)
+
+ with open("./display/" + version +
+ "/functest/tempest-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
+logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/reporting/reporting/functest/reporting-vims.py b/reporting/reporting/functest/reporting-vims.py
new file mode 100755
index 0000000..14fddbe
--- /dev/null
+++ b/reporting/reporting/functest/reporting-vims.py
@@ -0,0 +1,126 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+logger = rp_utils.getLogger("vIMS")
+
+
+def sig_test_format(sig_test):
+ nbPassed = 0
+ nbFailures = 0
+ nbSkipped = 0
+ for data_test in sig_test:
+ if data_test['result'] == "Passed":
+ nbPassed += 1
+ elif data_test['result'] == "Failed":
+ nbFailures += 1
+ elif data_test['result'] == "Skipped":
+ nbSkipped += 1
+ total_sig_test_result = {}
+ total_sig_test_result['passed'] = nbPassed
+ total_sig_test_result['failures'] = nbFailures
+ total_sig_test_result['skipped'] = nbSkipped
+ return total_sig_test_result
+
+period = rp_utils.get_config('general.period')
+versions = rp_utils.get_config('general.versions')
+url_base = rp_utils.get_config('testapi.url')
+
+logger.info("****************************************")
+logger.info("* Generating reporting vIMS *")
+logger.info("* Data retention = %s days *" % period)
+logger.info("* *")
+logger.info("****************************************")
+
+installers = rp_utils.get_config('general.installers')
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("Start processing....")
+
+# For all the versions
+for version in versions:
+ for installer in installers:
+ logger.info("Search vIMS results for installer: %s, version: %s"
+ % (installer, version))
+ request = Request("http://" + url_base + '?case=vims&installer=' +
+ installer + '&version=' + version)
+
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError as e:
+ logger.error("Error code: %s" % e)
+ continue
+
+ test_results = results['results']
+
+ logger.debug("Results found: %s" % test_results)
+
+ scenario_results = {}
+ for r in test_results:
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ logger.debug("Search for success criteria")
+ for result in scenario_results[s]:
+ result["start_date"] = result["start_date"].split(".")[0]
+ sig_test = result['details']['sig_test']['result']
+ if not sig_test == "" and isinstance(sig_test, list):
+ format_result = sig_test_format(sig_test)
+ if format_result['failures'] > format_result['passed']:
+ result['details']['sig_test']['duration'] = 0
+ result['details']['sig_test']['result'] = format_result
+ nb_step_ok = 0
+ nb_step = len(result['details'])
+
+ for step_name, step_result in result['details'].items():
+ if step_result['duration'] != 0:
+ nb_step_ok += 1
+ m, s = divmod(step_result['duration'], 60)
+ m_display = ""
+ if int(m) != 0:
+ m_display += str(int(m)) + "m "
+
+ step_result['duration_display'] = (m_display +
+ str(int(s)) + "s")
+
+ result['pr_step_ok'] = 0
+ if nb_step != 0:
+ result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ res = result['details']['orchestrator']['duration']
+ logger.debug("Orchestrator deployment: %s s"
+ % res)
+ logger.debug("vIMS deployment: %s s"
+ % result['details']['vIMS']['duration'])
+ logger.debug("Signaling testing: %s s"
+ % result['details']['sig_test']['duration'])
+ logger.debug("Signaling testing results: %s"
+ % format_result)
+ except Exception:
+ logger.error("Data badly formatted")
+ logger.debug("----------------------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ TEMPLATE_FILE = "./reporting/functest/template/index-vims-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ step_order=step_order,
+ installer=installer)
+
+ with open("./display/" + version + "/functest/vims-" +
+ installer + ".html", "wb") as fh:
+ fh.write(outputText)
+
+logger.info("vIMS report succesfully generated")
diff --git a/reporting/reporting/functest/scenarioResult.py b/reporting/reporting/functest/scenarioResult.py
new file mode 100644
index 0000000..5a54eed
--- /dev/null
+++ b/reporting/reporting/functest/scenarioResult.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+
+class ScenarioResult(object):
+
+ def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
+ self.status = status
+ self.score = score
+ self.score_percent = score_percent
+ self.url_lastrun = url_lastrun
+
+ def getStatus(self):
+ return self.status
+
+ def getScore(self):
+ return self.score
+
+ def getScorePercent(self):
+ return self.score_percent
+
+ def getUrlLastRun(self):
+ return self.url_lastrun
diff --git a/reporting/reporting/functest/template/index-status-tmpl.html b/reporting/reporting/functest/template/index-status-tmpl.html
new file mode 100644
index 0000000..cc4edaa
--- /dev/null
+++ b/reporting/reporting/functest/template/index-status-tmpl.html
@@ -0,0 +1,157 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/gauge.js"></script>
+ <script type="text/javascript" src="../../js/trend.js"></script>
+ <script>
+ function onDocumentReady() {
+ // Gauge management
+ {% for scenario in scenario_stats.iteritems() -%}
+ var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+ {%- endfor %}
+
+ // assign success rate to the gauge
+ function updateReadings() {
+ {% for scenario,iteration in scenario_stats.iteritems() -%}
+ gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+ {%- endfor %}
+ }
+ updateReadings();
+ }
+
+ // trend line management
+ d3.csv("./scenario_history.txt", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario,iteration in scenario_stats.iteritems() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ if ( !window.isLoaded ) {
+ window.addEventListener("load", function() {
+ onDocumentReady();
+ }, false);
+ } else {
+ onDocumentReady();
+ }
+</script>
+<script type="text/javascript">
+$(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+})
+</script>
+
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="../../index.html">Home</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-fuel@x86.html">fuel@x86</a></li>
+ <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="40%">Scenario</th>
+ <th width="20%">Status</th>
+ <th width="20%">Trend</th>
+ <th width="10%">Score</th>
+ <th width="10%">Iteration</th>
+ </tr>
+ {% for scenario,iteration in scenario_stats.iteritems() -%}
+ <tr class="tr-ok">
+ <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
+ <td><div id="gaugeScenario{{loop.index}}"></div></td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{scenario_results[scenario].getScore()}}</td>
+ <td>{{iteration}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ {% for scenario, iteration in scenario_stats.iteritems() -%}
+ <div class="scenario-part">
+ <div class="page-header">
+ <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario}}</b></h3>
+ </div>
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <span class="panel-header-item">
+ </span>
+ </div>
+ <table class="table">
+ <tr>
+ {% for test in items[scenario] -%}
+ <th>
+ {% if test.getCriteria() > -1 -%}
+ {{test.getDisplayName() }}
+ {%- endif %}
+ {% if test.getTier() > 3 -%}
+ *
+ {%- endif %}
+ </th>
+ {%- endfor %}
+ </tr>
+ <tr class="tr-weather-weather">
+ {% for test in items[scenario] -%}
+ {% if test.getCriteria() > 2 -%}
+ <td><img src="../../img/weather-clear.png"></td>
+ {%- elif test.getCriteria() > 1 -%}
+ <td><img src="../../img/weather-few-clouds.png"></td>
+ {%- elif test.getCriteria() > 0 -%}
+ <td><img src="../../img/weather-overcast.png"></td>
+ {%- elif test.getCriteria() > -1 -%}
+ <td><img src="../../img/weather-storm.png"></td>
+ {%- endif %}
+ {%- endfor %}
+ </tr>
+ </table>
+ </div>
+ </div>
+ {%- endfor %}
+ see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
+ <div> <br>
+ <a href="./status-{{installer}}.pdf" class="myButtonPdf">Export to PDF</a> <a href="./scenario_history_{{installer}}.txt" class="myButtonCSV">Export to CSV</a>
+ </div>
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/functest/template/index-tempest-tmpl.html b/reporting/reporting/functest/template/index-tempest-tmpl.html
new file mode 100644
index 0000000..3a22227
--- /dev/null
+++ b/reporting/reporting/functest/template/index-tempest-tmpl.html
@@ -0,0 +1,95 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Tempest status page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="../../index.html">Home</a></li>
+ <li><a href="tempest-apex.html">Apex</a></li>
+ <li><a href="tempest-compass.html">Compass</a></li>
+ <li><a href="tempest-daisy.html">Daisy</a></li>
+ <li><a href="tempest-fuel.html">Fuel</a></li>
+ <li><a href="tempest-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+ {% for scenario_name, results in scenario_results.iteritems() -%}
+ <div class="scenario-part">
+ <div class="page-header">
+ <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
+ </div>
+ {% for result in results -%}
+ {% if loop.index > 2 -%}
+ <div class="panel panel-default" hidden>
+ {%- else -%}
+ <div class="panel panel-default">
+ {%- endif %}
+ <div class="panel-heading">
+ <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+ <span class="panel-header-item">
+ <h4><b>{{result.start_date}}</b></h4>
+ </span>
+ <span class="badge panel-pod-name">{{result.pod_name}}</span>
+ </div>
+ <table class="table">
+ <tr>
+ <th width="20%">Item</th>
+ <th width="10%">Result</th>
+ <th width="10%">Status</th>
+ <th width="60%">Errors</th>
+ </tr>
+ {% for item in items -%}
+ {% if item in result.details.keys() -%}
+ {% if result.criteria[item] -%}
+ <tr class="tr-ok">
+ <td>{{item}}</td>
+ <td>{{result.details[item]}}</td>
+ <td><span class="glyphicon glyphicon-ok"></td>
+ {% if item is equalto "Success rate" %}
+ <td>{{result.errors}}</td>
+ {% endif %}
+ </tr>
+ {%- else -%}
+ <tr class="tr-danger">
+ <td>{{item}}</td>
+ <td>{{result.details[item]}}</td>
+ <td><span class="glyphicon glyphicon-remove"></td>
+ {% if item is equalto "Success rate" %}
+ <td>{{result.errors}}</td>
+ {% endif %}
+ </tr>
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ </table>
+ </div>
+ {%- endfor %}
+ <button type="button" class="btn btn-more">More than two</button>
+ </div>
+ {%- endfor %}
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/functest/template/index-vims-tmpl.html b/reporting/reporting/functest/template/index-vims-tmpl.html
new file mode 100644
index 0000000..cd51607
--- /dev/null
+++ b/reporting/reporting/functest/template/index-vims-tmpl.html
@@ -0,0 +1,92 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">vIMS status page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="../../index.html">Home</a></li>
+ <li><a href="vims-fuel.html">Fuel</a></li>
+ <li><a href="vims-compass.html">Compass</a></li>
+ <li><a href="vims-daisy.html">Daisy</a></li>
+ <li><a href="vims-joid.html">JOID</a></li>
+ <li><a href="vims-apex.html">APEX</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+ {% for scenario_name, results in scenario_results.iteritems() -%}
+ <div class="scenario-part">
+ <div class="page-header">
+ <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
+ </div>
+ {% for result in results -%}
+ {% if loop.index > 2 -%}
+ <div class="panel panel-default" hidden>
+ {%- else -%}
+ <div class="panel panel-default">
+ {%- endif %}
+ <div class="panel-heading">
+ <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+ <span class="panel-header-item">
+ <h4><b>{{result.start_date}}</b></h4>
+ </span>
+ <span class="badge panel-pod-name">{{result.pod_name}}</span>
+ </div>
+ <table class="table">
+ <tr>
+ <th width="20%">Step</th>
+ <th width="10%">Status</th>
+ <th width="10%">Duration</th>
+ <th width="60%">Result</th>
+ </tr>
+ {% for step_od_name in step_order -%}
+ {% if step_od_name in result.details.keys() -%}
+ {% set step_result = result.details[step_od_name] -%}
+ {% if step_result.duration != 0 -%}
+ <tr class="tr-ok">
+ <td>{{step_od_name}}</td>
+ <td><span class="glyphicon glyphicon-ok"></td>
+ <td><b>{{step_result.duration_display}}</b></td>
+ <td>{{step_result.result}}</td>
+ </tr>
+ {%- else -%}
+ <tr class="tr-danger">
+ <td>{{step_od_name}}</td>
+ <td><span class="glyphicon glyphicon-remove"></td>
+ <td><b>0s</b></td>
+ <td>{{step_result.result}}</td>
+ </tr>
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ </table>
+ </div>
+ {%- endfor %}
+ <button type="button" class="btn btn-more">More than two</button>
+ </div>
+ {%- endfor %}
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/functest/testCase.py b/reporting/reporting/functest/testCase.py
new file mode 100644
index 0000000..9834f07
--- /dev/null
+++ b/reporting/reporting/functest/testCase.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import re
+
+
+class TestCase(object):
+
+ def __init__(self, name, project, constraints,
+ criteria=-1, isRunnable=True, tier=-1):
+ self.name = name
+ self.project = project
+ self.constraints = constraints
+ self.criteria = criteria
+ self.isRunnable = isRunnable
+ self.tier = tier
+ display_name_matrix = {'healthcheck': 'healthcheck',
+ 'vping_ssh': 'vPing (ssh)',
+ 'vping_userdata': 'vPing (userdata)',
+ 'odl': 'ODL',
+ 'onos': 'ONOS',
+ 'ocl': 'OCL',
+ 'tempest_smoke_serial': 'Tempest (smoke)',
+ 'tempest_full_parallel': 'Tempest (full)',
+ 'tempest_defcore': 'Tempest (Defcore)',
+ 'refstack_defcore': 'Refstack',
+ 'rally_sanity': 'Rally (smoke)',
+ 'bgpvpn': 'bgpvpn',
+ 'rally_full': 'Rally (full)',
+ 'vims': 'vIMS',
+ 'doctor-notification': 'Doctor',
+ 'promise': 'Promise',
+ 'moon': 'Moon',
+ 'copper': 'Copper',
+ 'security_scan': 'Security',
+ 'multisite': 'Multisite',
+ 'domino-multinode': 'Domino',
+ 'functest-odl-sfc': 'SFC',
+ 'onos_sfc': 'SFC',
+ 'parser-basics': 'Parser',
+ 'connection_check': 'Health (connection)',
+ 'api_check': 'Health (api)',
+ 'snaps_smoke': 'SNAPS',
+ 'snaps_health_check': 'Health (dhcp)',
+ 'gluon_vping': 'Netready',
+ 'fds': 'FDS',
+ 'cloudify_ims': 'vIMS (Cloudify)',
+ 'orchestra_ims': 'OpenIMS (OpenBaton)',
+ 'opera_ims': 'vIMS (Open-O)',
+ 'vyos_vrouter': 'vyos',
+ 'barometercollectd': 'Barometer',
+ 'odl_netvirt': 'Netvirt'}
+ try:
+ self.displayName = display_name_matrix[self.name]
+ except KeyError:
+ self.displayName = "unknown"
+
+ def getName(self):
+ return self.name
+
+ def getProject(self):
+ return self.project
+
+ def getConstraints(self):
+ return self.constraints
+
+ def getCriteria(self):
+ return self.criteria
+
+ def getTier(self):
+ return self.tier
+
+ def setCriteria(self, criteria):
+ self.criteria = criteria
+
+ def setIsRunnable(self, isRunnable):
+ self.isRunnable = isRunnable
+
+ def checkRunnable(self, installer, scenario, config):
+ # Re-use Functest declaration
+ # Retrieve Functest configuration file functest_config.yaml
+ is_runnable = True
+ config_test = config
+ # print " *********************** "
+ # print TEST_ENV
+ # print " ---------------------- "
+ # print "case = " + self.name
+ # print "installer = " + installer
+ # print "scenario = " + scenario
+ # print "project = " + self.project
+
+ # Retrieve test constraints
+ # Retrieve test execution param
+ test_execution_context = {"installer": installer,
+ "scenario": scenario}
+
+ # By default we assume that all the tests are always runnable...
+ # if test_env not empty => dependencies to be checked
+ if config_test is not None and len(config_test) > 0:
+ # possible criteria = ["installer", "scenario"]
+ # consider test criteria from config file
+ # compare against the CI environment through CI env variables
+ for criteria in config_test:
+ if re.search(config_test[criteria],
+ test_execution_context[criteria]) is None:
+ # print "Test "+ test + " cannot be run on the environment"
+ is_runnable = False
+ # print is_runnable
+ self.isRunnable = is_runnable
+
+ def toString(self):
+ testcase = ("Name=" + self.name + ";Criteria=" +
+ str(self.criteria) + ";Project=" + self.project +
+ ";Constraints=" + str(self.constraints) +
+ ";IsRunnable" + str(self.isRunnable))
+ return testcase
+
+ def getDisplayName(self):
+ return self.displayName
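checkRunnable() treats each constraint value as a regular expression matched (via re.search) against the current installer/scenario context; one non-matching criterion is enough to mark the case as not runnable. An illustrative use (the constraint values are invented):

    # Illustration of checkRunnable(); constraint values are invented
    constraints = {'installer': '(apex)|(fuel)', 'scenario': 'odl'}
    case = TestCase("odl", "functest", constraints)

    case.checkRunnable("apex", "os-odl_l2-nofeature-ha", case.getConstraints())
    print(case.isRunnable)   # True: both regexps match

    case.checkRunnable("joid", "os-odl_l2-nofeature-ha", case.getConstraints())
    print(case.isRunnable)   # False: "joid" does not match the installer regexp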
diff --git a/reporting/reporting/qtip/__init__.py b/reporting/reporting/qtip/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/qtip/__init__.py
diff --git a/reporting/reporting/qtip/index.html b/reporting/reporting/qtip/index.html
new file mode 100644
index 0000000..0f9df85
--- /dev/null
+++ b/reporting/reporting/qtip/index.html
@@ -0,0 +1,51 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">QTIP reporting page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-main">
+ <h2>QTIP</h2>
+ QTIP is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
+ <br>The QTIP framework is deployed in several OPNFV community labs.
+ <br>It is installer, infrastructure and application independent.
+
+ <h2>Useful Links</h2>
+ <li><a href="https://wiki.opnfv.org/download/attachments/5734608/qtip%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">QTIP in Depth</a></li>
+ <li><a href="https://git.opnfv.org/cgit/qtip">QTIP Repo</a></li>
+ <li><a href="https://wiki.opnfv.org/display/qtip">QTIP Project</a></li>
+ <li><a href="https://build.opnfv.org/ci/view/qtip/">QTIP Jenkins page</a></li>
+ <li><a href="https://jira.opnfv.org/browse/QTIP-119?jql=project%20%3D%20QTIP">JIRA</a></li>
+
+ </div>
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/qtip/reporting-status.py b/reporting/reporting/qtip/reporting-status.py
new file mode 100644
index 0000000..f0127b5
--- /dev/null
+++ b/reporting/reporting/qtip/reporting-status.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import os
+
+import jinja2
+import utils.reporting_utils as rp_utils
+import utils.scenarioResult as sr
+
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Qtip-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = {} days *".format(PERIOD))
+logger.info("* *")
+logger.info("*******************************************")
+
+
+def prepare_profile_file(version):
+ profile_dir = './display/{}/qtip'.format(version)
+ if not os.path.exists(profile_dir):
+ os.makedirs(profile_dir)
+
+ profile_file = "{}/{}/scenario_history.txt".format(profile_dir,
+ version)
+ if not os.path.exists(profile_file):
+ with open(profile_file, 'w') as f:
+ info = 'date,scenario,installer,details,score\n'
+ f.write(info)
+ f.close()
+ return profile_file
+
+
+def profile_results(results, installer, profile_fd):
+ result_criterias = {}
+ for s_p, s_p_result in results.iteritems():
+ ten_criteria = len(s_p_result)
+ ten_score = sum(s_p_result)
+
+ LATEST_TESTS = rp_utils.get_config(
+ 'general.nb_iteration_tests_success_criteria')
+ four_result = s_p_result[:LATEST_TESTS]
+ four_criteria = len(four_result)
+ four_score = sum(four_result)
+
+ s_four_score = str(four_score / four_criteria)
+ s_ten_score = str(ten_score / ten_criteria)
+
+ info = '{},{},{},{},{}\n'.format(reportingDate,
+ s_p,
+ installer,
+ s_ten_score,
+ s_four_score)
+ profile_fd.write(info)
+ result_criterias[s_p] = sr.ScenarioResult('OK',
+ s_four_score,
+ s_ten_score,
+ '100')
+
+ logger.info("--------------------------")
+ return result_criterias
+
+
+def render_html(prof_results, installer, version):
+ template_loader = jinja2.FileSystemLoader(".")
+ template_env = jinja2.Environment(loader=template_loader,
+ autoescape=True)
+
+ template_file = "./reporting/qtip/template/index-status-tmpl.html"
+ template = template_env.get_template(template_file)
+
+ render_outcome = template.render(prof_results=prof_results,
+ installer=installer,
+ period=PERIOD,
+ version=version,
+ date=reportingDate)
+
+ with open('./display/{}/qtip/status-{}.html'.format(version, installer),
+ 'wb') as fh:
+ fh.write(render_outcome)
+
+
+def render_reporter():
+ for version in versions:
+ profile_file = prepare_profile_file(version)
+ profile_fd = open(profile_file, 'a')
+ for installer in installers:
+ results = rp_utils.getQtipResults(version, installer)
+ prof_results = profile_results(results, installer, profile_fd)
+ render_html(prof_results=prof_results,
+ installer=installer,
+ version=version)
+ profile_fd.close()
+ logger.info("Manage export CSV")
+ rp_utils.generate_csv(profile_file)
+ logger.info("CSV generated...")
+
+
+if __name__ == '__main__':
+ render_reporter()
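profile_results() derives two averages per pod/scenario: one over the nb_iteration_tests_success_criteria (4, per reporting.yaml) most recent runs and one over all runs kept in the retention window. In outline (scores invented):

    # Outline of the two averages computed in profile_results()
    # (scores invented; the real ones come from getQtipResults()).
    s_p_result = [1, 1, 0, 1, 1, 1, 0, 1, 1, 1]       # most recent first
    four_result = s_p_result[:4]
    ten_score = sum(s_p_result) / len(s_p_result)     # whole-window average
    four_score = sum(four_result) / len(four_result)  # last-4 average
    # note: under the py27 target, / on ints truncates, as in the script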
diff --git a/reporting/reporting/qtip/template/index-status-tmpl.html b/reporting/reporting/qtip/template/index-status-tmpl.html
new file mode 100644
index 0000000..26da36c
--- /dev/null
+++ b/reporting/reporting/qtip/template/index-status-tmpl.html
@@ -0,0 +1,86 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/trend-qtip.js"></script>
+ <script>
+ // trend line management
+ d3.csv("./scenario_history.csv", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario in prof_results.keys() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ </script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">QTIP status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="25%">Pod/Scenario</th>
+ <th width="25%">Trend</th>
+ <th width="25%">Last 4 Iterations</th>
+ <th width="25%">Last 10 Days</th>
+ </tr>
+ {% for scenario,result in prof_results.iteritems() -%}
+ <tr class="tr-ok">
+ <td>{{scenario}}</td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{prof_results[scenario].getFourDaysScore()}}</td>
+ <td>{{prof_results[scenario].getTenDaysScore()}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/reporting.yaml b/reporting/reporting/reporting.yaml
new file mode 100644
index 0000000..1692f48
--- /dev/null
+++ b/reporting/reporting/reporting.yaml
@@ -0,0 +1,68 @@
+---
+general:
+ installers:
+ - apex
+ - compass
+ - fuel
+ - joid
+
+ versions:
+ - master
+ - danube
+
+ log:
+ log_file: reporting.log
+ log_level: ERROR
+
+ period: 10
+
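+  # number of most recent runs used for the "last 4 iterations" score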
+ nb_iteration_tests_success_criteria: 4
+
+ directories:
+ # Relative to the path where the repo is cloned:
+ dir_reporting: utils/tests/reporting/
+ dir_log: utils/tests/reporting/log/
+ dir_conf: utils/tests/reporting/conf/
+ dir_utils: utils/tests/reporting/utils/
+ dir_templates: utils/tests/reporting/templates/
+ dir_display: utils/tests/reporting/display/
+
+ url: testresults.opnfv.org/reporting/
+
+testapi:
+ url: testresults.opnfv.org/test/api/v1/results
+
+functest:
+ blacklist:
+ - ovno
+ - security_scan
+ - healthcheck
+ - odl_netvirt
+ - aaa
+ - cloudify_ims
+ - orchestra_ims
+ - juju_epc
+ - orchestra
+ max_scenario_criteria: 50
+ test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
+ log_level: ERROR
+ jenkins_url: https://build.opnfv.org/ci/view/functest/job/
+ exclude_noha: False
+ exclude_virtual: False
+
+yardstick:
+ test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
+ log_level: ERROR
+
+storperf:
+ test_list:
+ - snia_steady_state
+ log_level: ERROR
+
+qtip:
+ log_level: ERROR
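+  # data retention used for qtip result queries, in days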
+ period: 1
+
+bottleneck:
+
+vsperf:
diff --git a/reporting/reporting/storperf/__init__.py b/reporting/reporting/storperf/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/storperf/__init__.py
diff --git a/reporting/reporting/storperf/reporting-status.py b/reporting/reporting/storperf/reporting-status.py
new file mode 100644
index 0000000..0c188a3
--- /dev/null
+++ b/reporting/reporting/storperf/reporting-status.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+import utils.scenarioResult as sr
+
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Storperf-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
+
+# retrieve the list of storperf tests
+storperf_tests = rp_utils.get_config('storperf.test_list')
+logger.info("Storperf tests: %s" % storperf_tests)
+
+# For all the versions
+for version in versions:
+ # For all the installers
+ for installer in installers:
+ # get scenarios results data
+        # for the moment only the snia_steady_state case is considered
+        # (the full storperf.test_list above is not iterated yet)
+ scenario_results = rp_utils.getScenarios("snia_steady_state",
+ installer,
+ version)
+ # logger.info("scenario_results: %s" % scenario_results)
+
+ scenario_stats = rp_utils.getScenarioStats(scenario_results)
+ logger.info("scenario_stats: %s" % scenario_stats)
+ scenario_result_criteria = {}
+
+ # From each scenarios get results list
+ for s, s_result in scenario_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s", installer,
+ version, s)
+ ten_criteria = len(s_result)
+
+ ten_score = 0
+ for v in s_result:
+ if "PASS" in v['criteria']:
+ ten_score += 1
+
+ logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
+
+            four_score = 0
+            four_criteria = 0
+            try:
+                LASTEST_TESTS = rp_utils.get_config(
+                    'general.nb_iteration_tests_success_criteria')
+                s_result.sort(key=lambda x: x['start_date'])
+                four_result = s_result[-LASTEST_TESTS:]
+                logger.debug("four_result: {}".format(four_result))
+                logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
+                four_criteria = len(four_result)
+                for v in four_result:
+                    if "PASS" in v['criteria']:
+                        four_score += 1
+                logger.info("4 Score: %s / %s " % (four_score,
+                                                   four_criteria))
+            except Exception:
+                logger.error("Impossible to retrieve the four_score")
+
+            try:
+                s_status = (four_score * 100) / four_criteria
+            except ZeroDivisionError:
+                s_status = 0
+ logger.info("Score percent = %s" % str(s_status))
+ s_four_score = str(four_score) + '/' + str(four_criteria)
+ s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+ s_score_percent = str(s_status)
+
+ logger.debug(" s_status: {}".format(s_status))
+ if s_status == 100:
+ logger.info(">>>>> scenario OK, save the information")
+ else:
+ logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+ last 10 days = %s" % (s_four_score, s_ten_score))
+
+ s_url = ""
+            if len(s_result) > 0:
+                build_tag = s_result[-1]['build_tag']
+                logger.debug("Build tag: %s" % build_tag)
+                s_url = rp_utils.getJenkinsUrl(build_tag)
+ logger.info("last jenkins url: %s" % s_url)
+
+ # Save daily results in a file
+ path_validation_file = ("./display/" + version +
+ "/storperf/scenario_history.txt")
+
+ if not os.path.exists(path_validation_file):
+ with open(path_validation_file, 'w') as f:
+ info = 'date,scenario,installer,details,score\n'
+ f.write(info)
+
+ with open(path_validation_file, "a") as f:
+ info = (reportingDate + "," + s + "," + installer +
+ "," + s_ten_score + "," +
+ str(s_score_percent) + "\n")
+ f.write(info)
+
+ scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+ s_four_score,
+ s_ten_score,
+ s_score_percent,
+ s_url)
+
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ TEMPLATE_FILE = "./reporting/storperf/template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_result_criteria,
+ installer=installer,
+ period=PERIOD,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/storperf/status-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
diff --git a/reporting/reporting/storperf/template/index-status-tmpl.html b/reporting/reporting/storperf/template/index-status-tmpl.html
new file mode 100644
index 0000000..e872272
--- /dev/null
+++ b/reporting/reporting/storperf/template/index-status-tmpl.html
@@ -0,0 +1,110 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/gauge.js"></script>
+ <script type="text/javascript" src="../../js/trend.js"></script>
+ <script>
+ function onDocumentReady() {
+ // Gauge management
+ {% for scenario in scenario_results.keys() -%}
+ var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+ {%- endfor %}
+ // assign success rate to the gauge
+ function updateReadings() {
+ {% for scenario in scenario_results.keys() -%}
+ gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+ {%- endfor %}
+ }
+ updateReadings();
+ }
+
+ // trend line management
+ d3.csv("./scenario_history.txt", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario in scenario_results.keys() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ if ( !window.isLoaded ) {
+ window.addEventListener("load", function() {
+ onDocumentReady();
+ }, false);
+ } else {
+ onDocumentReady();
+ }
+ </script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Storperf status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="40%">Scenario</th>
+ <th width="20%">Status</th>
+ <th width="20%">Trend</th>
+ <th width="10%">Last 4 Iterations</th>
+ <th width="10%">Last 10 Days</th>
+ </tr>
+ {% for scenario,result in scenario_results.iteritems() -%}
+ <tr class="tr-ok">
+ <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
+ <td><div id="gaugeScenario{{loop.index}}"></div></td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
+ <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/tests/__init__.py b/reporting/reporting/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/tests/__init__.py
diff --git a/reporting/reporting/tests/unit/__init__.py b/reporting/reporting/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/tests/unit/__init__.py
diff --git a/reporting/reporting/tests/unit/utils/__init__.py b/reporting/reporting/tests/unit/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/tests/unit/utils/__init__.py
diff --git a/reporting/reporting/tests/unit/utils/test_utils.py b/reporting/reporting/tests/unit/utils/test_utils.py
new file mode 100644
index 0000000..9614d74
--- /dev/null
+++ b/reporting/reporting/tests/unit/utils/test_utils.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import logging
+import unittest
+
+from reporting.utils import reporting_utils
+
+
+class reportingUtilsTesting(unittest.TestCase):
+
+ logging.disable(logging.CRITICAL)
+
+ def setUp(self):
+ self.test = reporting_utils
+
+ def test_foo(self):
+ self.assertTrue(0 < 1)
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/reporting/reporting/utils/__init__.py b/reporting/reporting/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/utils/__init__.py
diff --git a/reporting/reporting/utils/reporting_utils.py b/reporting/reporting/utils/reporting_utils.py
new file mode 100644
index 0000000..6282091
--- /dev/null
+++ b/reporting/reporting/utils/reporting_utils.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import json
+import logging
+import os
+import shutil
+from urllib2 import Request, urlopen, URLError
+
+import pdfkit
+import requests
+import yaml
+
+
+# ----------------------------------------------------------
+#
+# YAML UTILS
+#
+# -----------------------------------------------------------
+def get_parameter_from_yaml(parameter, yaml_file):
+    """
+    Returns the value of a given parameter in a yaml file
+    parameter must be given in string format with dots
+    Example: general.openstack.image_name
+    """
+    with open(yaml_file) as f:
+        file_yaml = yaml.safe_load(f)
+    value = file_yaml
+    for element in parameter.split("."):
+        value = value.get(element)
+        if value is None:
+            raise ValueError("The parameter %s is not defined in"
+                             " %s" % (parameter, yaml_file))
+    return value
+
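+# e.g. get_parameter_from_yaml('general.period', 'reporting.yaml')
+# returns 10 with the reporting.yaml introduced by this patch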
+
+def get_config(parameter):
+ yaml_ = os.environ["CONFIG_REPORTING_YAML"]
+ return get_parameter_from_yaml(parameter, yaml_)
+
+
+# ----------------------------------------------------------
+#
+# LOGGER UTILS
+#
+# -----------------------------------------------------------
+def getLogger(module):
+ logFormatter = logging.Formatter("%(asctime)s [" +
+ module +
+ "] [%(levelname)-5.5s] %(message)s")
+ logger = logging.getLogger()
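+    # NB: no name is passed, so this is the root logger; calling
+    # getLogger() from several modules stacks duplicate handlers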
+ log_file = get_config('general.log.log_file')
+ log_level = get_config('general.log.log_level')
+
+ fileHandler = logging.FileHandler("{0}/{1}".format('.', log_file))
+ fileHandler.setFormatter(logFormatter)
+ logger.addHandler(fileHandler)
+
+ consoleHandler = logging.StreamHandler()
+ consoleHandler.setFormatter(logFormatter)
+ logger.addHandler(consoleHandler)
+ logger.setLevel(log_level)
+ return logger
+
+
+# ----------------------------------------------------------
+#
+# REPORTING UTILS
+#
+# -----------------------------------------------------------
+def getApiResults(case, installer, scenario, version):
+ results = json.dumps([])
+ period = get_config('general.period')
+ url_base = get_config('testapi.url')
+ nb_tests = get_config('general.nb_iteration_tests_success_criteria')
+
+ url = ("http://" + url_base + "?case=" + case +
+ "&period=" + str(period) + "&installer=" + installer +
+ "&scenario=" + scenario + "&version=" + version +
+ "&last=" + str(nb_tests))
+ request = Request(url)
+
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+    except URLError as err:
+        print 'Got an error code: {}'.format(err)
+
+ return results
+
+
+def getScenarios(case, installer, version):
+
+    try:
+        case = case.getName()
+    except AttributeError:
+        # case is not a test case object; accept a plain string
+        if not isinstance(case, str):
+            raise ValueError("Case cannot be evaluated")
+
+ period = get_config('general.period')
+ url_base = get_config('testapi.url')
+
+ url = ("http://" + url_base + "?case=" + case +
+ "&period=" + str(period) + "&installer=" + installer +
+ "&version=" + version)
+
+    test_results = None
+    try:
+ request = Request(url)
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ test_results = results['results']
+ try:
+ page = results['pagination']['total_pages']
+ if page > 1:
+ test_results = []
+ for i in range(1, page + 1):
+ url_page = url + "&page=" + str(i)
+ request = Request(url_page)
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ test_results += results['results']
+ except KeyError:
+        print 'No pagination detected'
+ except URLError as err:
+ print 'Got an error code: {}'.format(err)
+
+    if test_results is None:
+        test_results = []
+    test_results.reverse()
+    scenario_results = {}
+
+    # do we consider results from virtual pods?
+    # do we consider results for noha scenarios?
+    exclude_virtual_pod = get_config('functest.exclude_virtual')
+    exclude_noha = get_config('functest.exclude_noha')
+
+    for r in test_results:
+        # Retrieve all the scenarios per installer
+        if r['scenario'] not in scenario_results.keys():
+            scenario_results[r['scenario']] = []
+        if ((exclude_virtual_pod and "virtual" in r['pod_name']) or
+                (exclude_noha and "noha" in r['scenario'])):
+            print "excluding virtual pod or noha result..."
+        else:
+            scenario_results[r['scenario']].append(r)
+
+ return scenario_results
+
+
+def getScenarioStats(scenario_results):
+ scenario_stats = {}
+ for k, v in scenario_results.iteritems():
+ scenario_stats[k] = len(v)
+
+ return scenario_stats
+
+
+def getScenarioStatus(installer, version):
+ period = get_config('general.period')
+ url_base = get_config('testapi.url')
+
+ url = ("http://" + url_base + "?case=scenario_status" +
+ "&installer=" + installer +
+ "&version=" + version + "&period=" + str(period))
+ request = Request(url)
+
+    test_results = None
+    try:
+ response = urlopen(request)
+ k = response.read()
+ response.close()
+ results = json.loads(k)
+ test_results = results['results']
+ except URLError as e:
+ print 'Got an error code: {}'.format(e)
+
+ scenario_results = {}
+ result_dict = {}
+ if test_results is not None:
+ for r in test_results:
+ if r['stop_date'] != 'None' and r['criteria'] is not None:
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ for k, v in scenario_results.items():
+ # scenario_results[k] = v[:LASTEST_TESTS]
+ s_list = []
+ for element in v:
+ if element['criteria'] == 'SUCCESS':
+ s_list.append(1)
+ else:
+ s_list.append(0)
+ result_dict[k] = s_list
+
+ # return scenario_results
+ return result_dict
+
+
+def getQtipResults(version, installer):
+ period = get_config('qtip.period')
+ url_base = get_config('testapi.url')
+
+ url = ("http://" + url_base + "?project=qtip" +
+ "&installer=" + installer +
+ "&version=" + version + "&period=" + str(period))
+ request = Request(url)
+
+    results = None
+    try:
+ response = urlopen(request)
+ k = response.read()
+ response.close()
+ results = json.loads(k)['results']
+ except URLError as err:
+ print 'Got an error code: {}'.format(err)
+
+ result_dict = {}
+ if results:
+ for r in results:
+ key = '{}/{}'.format(r['pod_name'], r['scenario'])
+ if key not in result_dict.keys():
+ result_dict[key] = []
+ result_dict[key].append(r['details']['score'])
+
+ # return scenario_results
+ return result_dict
+
+
+def getNbtestOk(results):
+ nb_test_ok = 0
+ for r in results:
+ for k, v in r.iteritems():
+ try:
+ if "PASS" in v:
+ nb_test_ok += 1
+            except Exception:
+                print "Cannot retrieve test status"
+ return nb_test_ok
+
+
+def getResult(testCase, installer, scenario, version):
+
+ # retrieve raw results
+ results = getApiResults(testCase, installer, scenario, version)
+ # let's concentrate on test results only
+ test_results = results['results']
+
+ # if results found, analyze them
+ if test_results is not None:
+ test_results.reverse()
+
+ scenario_results = []
+
+ # print " ---------------- "
+ # print test_results
+ # print " ---------------- "
+ # print "nb of results:" + str(len(test_results))
+
+ for r in test_results:
+ # print r["start_date"]
+ # print r["criteria"]
+ scenario_results.append({r["start_date"]: r["criteria"]})
+ # sort results
+ scenario_results.sort()
+        # 5 levels for the results
+        # 3: 4+ consecutive runs passing the success criteria
+        # 2: <4 successful consecutive runs but passing the criteria
+        # 1: close to passing the success criteria
+        # 0: 0% success, not passing
+        # -1: no run available
+ test_result_indicator = 0
+ nbTestOk = getNbtestOk(scenario_results)
+
+ # print "Nb test OK (last 10 days):"+ str(nbTestOk)
+        # check that we have at least one run
+ if len(scenario_results) < 1:
+ # No results available
+ test_result_indicator = -1
+ elif nbTestOk < 1:
+ test_result_indicator = 0
+ elif nbTestOk < 2:
+ test_result_indicator = 1
+ else:
+ # Test the last 4 run
+ if (len(scenario_results) > 3):
+ last4runResults = scenario_results[-4:]
+ nbTestOkLast4 = getNbtestOk(last4runResults)
+ # print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
+ if nbTestOkLast4 > 3:
+ test_result_indicator = 3
+ else:
+ test_result_indicator = 2
+ else:
+ test_result_indicator = 2
+ return test_result_indicator
+
+
+def getJenkinsUrl(build_tag):
+ # e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246
+ # id = 246
+ # jenkins-functest-compass-huawei-pod5-daily-master-136
+ # id = 136
+ # note it is linked to jenkins format
+ # if this format changes...function to be adapted....
+    url_base = get_config('functest.jenkins_url')
+    jenkins_url = None
+    try:
+        build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
+        url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
+                  "/" + str(build_id[0]))
+        jenkins_url = url_base + url_id + "/console"
+    except Exception:
+        print 'Impossible to get jenkins url'
+
+    if "jenkins-" not in build_tag:
+        jenkins_url = None
+
+    return jenkins_url
+
+
+def getScenarioPercent(scenario_score, scenario_criteria):
+ score = 0.0
+ try:
+ score = float(scenario_score) / float(scenario_criteria) * 100
+    except Exception:
+        print 'Impossible to calculate the percentage score'
+ return score
+
+
+# *********
+# Functest
+# *********
+def getFunctestConfig(version=""):
+ config_file = get_config('functest.test_conf') + version
+ response = requests.get(config_file)
+ return yaml.safe_load(response.text)
+
+
+def getArchitectures(scenario_results):
+    supported_arch = ['x86']
+    for scenario_result in scenario_results.values():
+        for value in scenario_result:
+            if "armband" in value['build_tag']:
+                supported_arch.append('aarch64')
+                return supported_arch
+    return supported_arch
+
+
+def filterArchitecture(results, architecture):
+    filtered_results = {}
+    for name, case_results in results.items():
+        filtered_values = []
+        for value in case_results:
+            if architecture == "x86":
+                # drop aarch64 results
+                if "armband" not in value['build_tag']:
+                    filtered_values.append(value)
+            elif architecture == "aarch64":
+                # drop x86 results
+                if "armband" in value['build_tag']:
+                    filtered_values.append(value)
+        if len(filtered_values) > 0:
+            filtered_results[name] = filtered_values
+    return filtered_results
+
+
+# *********
+# Yardstick
+# *********
+def subfind(given_list, pattern_list):
+ LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
+ for i in range(len(given_list)):
+ if given_list[i] == pattern_list[0] and \
+ given_list[i:i + LASTEST_TESTS] == pattern_list:
+ return True
+ return False
+
+
+def _get_percent(status):
+
+ if status * 100 % 6:
+ return round(float(status) * 100 / 6, 1)
+ else:
+ return status * 100 / 6
+
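+# status is on a 0..6 scale; _get_percent maps it to the gauge values:
+#   6 -> 100, 5 -> 83.3, 4 -> 66.7, 3 -> 50, 2 -> 33.3, 1 -> 16.7, 0 -> 0
+# (matching the available gauge_*.png images)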
+
+def get_percent(four_list, ten_list):
+ four_score = 0
+ ten_score = 0
+
+ for v in four_list:
+ four_score += v
+ for v in ten_list:
+ ten_score += v
+
+ LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
+ if four_score == LASTEST_TESTS:
+ status = 6
+ elif subfind(ten_list, [1, 1, 1, 1]):
+ status = 5
+ elif ten_score == 0:
+ status = 0
+ else:
+ status = four_score + 1
+
+ return _get_percent(status)
+
+
+def _test():
+ status = getScenarioStatus("compass", "master")
+ print "status:++++++++++++++++++++++++"
+ print(json.dumps(status, indent=4))
+
+
+# ----------------------------------------------------------
+#
+# Export
+#
+# -----------------------------------------------------------
+
+def export_csv(scenario_file_name, installer, version):
+    # generate a per-installer csv file based on scenario_history.txt
+    scenario_installer_file_name = ("./display/" + version +
+                                    "/functest/scenario_history_" +
+                                    installer + ".csv")
+    with open(scenario_installer_file_name, "a") as scenario_installer_file:
+        with open(scenario_file_name, "r") as scenario_file:
+            scenario_installer_file.write(
+                "date,scenario,installer,detail,score\n")
+            for line in scenario_file:
+                if installer in line:
+                    scenario_installer_file.write(line)
+
+
+def generate_csv(scenario_file):
+    # generate a csv export of scenario_history.txt
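+    # the history file is already comma-separated, so copying it
+    # under a .csv extension is enough for the d3 trend charts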
+ csv_file = scenario_file.replace('txt', 'csv')
+ shutil.copy2(scenario_file, csv_file)
+
+
+def export_pdf(pdf_path, pdf_doc_name):
+ try:
+ pdfkit.from_file(pdf_path, pdf_doc_name)
+ except IOError:
+ print "Error but pdf generated anyway..."
+    except Exception:
+        print "impossible to generate PDF"
diff --git a/reporting/reporting/utils/scenarioResult.py b/reporting/reporting/utils/scenarioResult.py
new file mode 100644
index 0000000..6029d7f
--- /dev/null
+++ b/reporting/reporting/utils/scenarioResult.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+
+class ScenarioResult(object):
+ def __init__(self, status, four_days_score='', ten_days_score='',
+ score_percent=0.0, last_url=''):
+ self.status = status
+ self.four_days_score = four_days_score
+ self.ten_days_score = ten_days_score
+ self.score_percent = score_percent
+ self.last_url = last_url
+
+ def getStatus(self):
+ return self.status
+
+ def getTenDaysScore(self):
+ return self.ten_days_score
+
+ def getFourDaysScore(self):
+ return self.four_days_score
+
+ def getScorePercent(self):
+ return self.score_percent
+
+ def getLastUrl(self):
+ return self.last_url
diff --git a/reporting/reporting/yardstick/__init__.py b/reporting/reporting/yardstick/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/reporting/reporting/yardstick/__init__.py
diff --git a/reporting/reporting/yardstick/img/gauge_0.png b/reporting/reporting/yardstick/img/gauge_0.png
new file mode 100644
index 0000000..ecefc0e
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_0.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_100.png b/reporting/reporting/yardstick/img/gauge_100.png
new file mode 100644
index 0000000..e199e15
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_100.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_16.7.png b/reporting/reporting/yardstick/img/gauge_16.7.png
new file mode 100644
index 0000000..3e3993c
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_16.7.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_25.png b/reporting/reporting/yardstick/img/gauge_25.png
new file mode 100644
index 0000000..4923659
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_25.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_33.3.png b/reporting/reporting/yardstick/img/gauge_33.3.png
new file mode 100644
index 0000000..364574b
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_33.3.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_41.7.png b/reporting/reporting/yardstick/img/gauge_41.7.png
new file mode 100644
index 0000000..8c3e910
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_41.7.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_50.png b/reporting/reporting/yardstick/img/gauge_50.png
new file mode 100644
index 0000000..2874b9f
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_50.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_58.3.png b/reporting/reporting/yardstick/img/gauge_58.3.png
new file mode 100644
index 0000000..beedc8a
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_58.3.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_66.7.png b/reporting/reporting/yardstick/img/gauge_66.7.png
new file mode 100644
index 0000000..93f44d1
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_66.7.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_75.png b/reporting/reporting/yardstick/img/gauge_75.png
new file mode 100644
index 0000000..9fc261f
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_75.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_8.3.png b/reporting/reporting/yardstick/img/gauge_8.3.png
new file mode 100644
index 0000000..59f8657
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_8.3.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_83.3.png b/reporting/reporting/yardstick/img/gauge_83.3.png
new file mode 100644
index 0000000..27ae4ec
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_83.3.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/gauge_91.7.png b/reporting/reporting/yardstick/img/gauge_91.7.png
new file mode 100644
index 0000000..2808657
--- /dev/null
+++ b/reporting/reporting/yardstick/img/gauge_91.7.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/icon-nok.png b/reporting/reporting/yardstick/img/icon-nok.png
new file mode 100644
index 0000000..526b529
--- /dev/null
+++ b/reporting/reporting/yardstick/img/icon-nok.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/icon-ok.png b/reporting/reporting/yardstick/img/icon-ok.png
new file mode 100644
index 0000000..3a9de2e
--- /dev/null
+++ b/reporting/reporting/yardstick/img/icon-ok.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/weather-clear.png b/reporting/reporting/yardstick/img/weather-clear.png
new file mode 100644
index 0000000..a0d9677
--- /dev/null
+++ b/reporting/reporting/yardstick/img/weather-clear.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/weather-few-clouds.png b/reporting/reporting/yardstick/img/weather-few-clouds.png
new file mode 100644
index 0000000..acfa783
--- /dev/null
+++ b/reporting/reporting/yardstick/img/weather-few-clouds.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/weather-overcast.png b/reporting/reporting/yardstick/img/weather-overcast.png
new file mode 100644
index 0000000..4296246
--- /dev/null
+++ b/reporting/reporting/yardstick/img/weather-overcast.png
Binary files differ
diff --git a/reporting/reporting/yardstick/img/weather-storm.png b/reporting/reporting/yardstick/img/weather-storm.png
new file mode 100644
index 0000000..956f0e2
--- /dev/null
+++ b/reporting/reporting/yardstick/img/weather-storm.png
Binary files differ
diff --git a/reporting/reporting/yardstick/index.html b/reporting/reporting/yardstick/index.html
new file mode 100644
index 0000000..488f142
--- /dev/null
+++ b/reporting/reporting/yardstick/index.html
@@ -0,0 +1,51 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Yardstick reporting page</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="index-status-apex.html">Apex</a></li>
+ <li><a href="index-status-compass.html">Compass</a></li>
+ <li><a href="index-status-fuel.html">Fuel</a></li>
+ <li><a href="index-status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-main">
+ <h2>Yardstick</h2>
+ Yardstick is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
+ <br>The Yardstick framework is deployed in several OPNFV community labs.
+ <br>It is installer, infrastructure and application independent.
+
+ <h2>Useful Links</h2>
+ <li><a href="https://wiki.opnfv.org/download/attachments/5734608/yardstick%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">Yardstick in Depth</a></li>
+ <li><a href="https://git.opnfv.org/cgit/yardstick">Yardstick Repo</a></li>
+ <li><a href="https://wiki.opnfv.org/display/yardstick">Yardstick Project</a></li>
+ <li><a href="https://build.opnfv.org/ci/view/yardstick/">Yardstick Jenkins page</a></li>
+ <li><a href="https://jira.opnfv.org/browse/YARDSTICK-119?jql=project%20%3D%20YARDSTICK">JIRA</a></li>
+
+ </div>
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/reporting/reporting/yardstick/reporting-status.py b/reporting/reporting/yardstick/reporting-status.py
new file mode 100644
index 0000000..85c386b
--- /dev/null
+++ b/reporting/reporting/yardstick/reporting-status.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+
+import utils.scenarioResult as sr
+from scenarios import config as cf
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Yardstick-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
+
+
+# For all the versions
+for version in versions:
+ # For all the installers
+ for installer in installers:
+ # get scenarios results data
+ scenario_results = rp_utils.getScenarioStatus(installer, version)
+ if 'colorado' == version:
+ stable_result = rp_utils.getScenarioStatus(installer,
+ 'stable/colorado')
+ for k, v in stable_result.items():
+ if k not in scenario_results.keys():
+ scenario_results[k] = []
+ scenario_results[k] += stable_result[k]
+ scenario_result_criteria = {}
+
+ for s in scenario_results.keys():
+ if installer in cf.keys() and s in cf[installer].keys():
+ scenario_results.pop(s)
+
+ # From each scenarios get results list
+ for s, s_result in scenario_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s", installer,
+ version, s)
+
+            ten_criteria = len(s_result)
+            ten_score = sum(s_result)
+
+            LASTEST_TESTS = rp_utils.get_config(
+                'general.nb_iteration_tests_success_criteria')
+            four_result = s_result[:LASTEST_TESTS]
+            four_criteria = len(four_result)
+            four_score = sum(four_result)
+
+            s_score_percent = rp_utils.get_percent(four_result, s_result)
+            s_status = str(s_score_percent)
+            s_four_score = str(four_score) + '/' + str(four_criteria)
+            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+
+ if '100' == s_status:
+ logger.info(">>>>> scenario OK, save the information")
+ else:
+ logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+ last 10 days = %s" % (s_four_score, s_ten_score))
+
+ # Save daily results in a file
+ path_validation_file = ("./display/" + version +
+ "/yardstick/scenario_history.txt")
+
+ if not os.path.exists(path_validation_file):
+ with open(path_validation_file, 'w') as f:
+ info = 'date,scenario,installer,details,score\n'
+ f.write(info)
+
+ with open(path_validation_file, "a") as f:
+ info = (reportingDate + "," + s + "," + installer +
+ "," + s_ten_score + "," +
+ str(s_score_percent) + "\n")
+ f.write(info)
+
+ scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+ s_four_score,
+ s_ten_score,
+ s_score_percent)
+
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ TEMPLATE_FILE = "./reporting/yardstick/template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_result_criteria,
+ installer=installer,
+ period=PERIOD,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/yardstick/status-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
diff --git a/reporting/reporting/yardstick/scenarios.py b/reporting/reporting/yardstick/scenarios.py
new file mode 100644
index 0000000..26e8c8b
--- /dev/null
+++ b/reporting/reporting/yardstick/scenarios.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import requests
+import yaml
+
+import utils.reporting_utils as rp_utils
+
+yardstick_conf = rp_utils.get_config('yardstick.test_conf')
+response = requests.get(yardstick_conf)
+yaml_file = yaml.safe_load(response.text)
+reporting = yaml_file.get('reporting')
+
+config = {}
+
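+# build a dict of dicts: installer name -> scenario name -> True, e.g.
+# {'apex': {'os-nosdn-nofeature-ha': True}} (scenario name illustrative)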
+for element in reporting:
+ name = element['name']
+ scenarios = element['scenario']
+ for s in scenarios:
+ if name not in config:
+ config[name] = {}
+ config[name][s] = True
diff --git a/reporting/reporting/yardstick/template/index-status-tmpl.html b/reporting/reporting/yardstick/template/index-status-tmpl.html
new file mode 100644
index 0000000..77ba950
--- /dev/null
+++ b/reporting/reporting/yardstick/template/index-status-tmpl.html
@@ -0,0 +1,110 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/gauge.js"></script>
+ <script type="text/javascript" src="../../js/trend.js"></script>
+ <script>
+ function onDocumentReady() {
+ // Gauge management
+ {% for scenario in scenario_results.keys() -%}
+ var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+ {%- endfor %}
+ // assign success rate to the gauge
+ function updateReadings() {
+ {% for scenario in scenario_results.keys() -%}
+ gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+ {%- endfor %}
+ }
+ updateReadings();
+ }
+
+ // trend line management
+ d3.csv("./scenario_history.csv", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario in scenario_results.keys() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ if ( !window.isLoaded ) {
+ window.addEventListener("load", function() {
+ onDocumentReady();
+ }, false);
+ } else {
+ onDocumentReady();
+ }
+ </script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Yardstick status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="40%">Scenario</th>
+ <th width="20%">Status</th>
+ <th width="20%">Trend</th>
+ <th width="10%">Last 4 Iterations</th>
+ <th width="10%">Last 10 Days</th>
+ </tr>
+ {% for scenario,result in scenario_results.iteritems() -%}
+ <tr class="tr-ok">
+ <td>{{scenario}}</td>
+ <td><div id="gaugeScenario{{loop.index}}"></div></td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
+ <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ </div>
+ <div class="col-md-1"></div>
+</div>