Diffstat (limited to 'utils/test/reporting/reporting')
-rw-r--r--  utils/test/reporting/reporting/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/bottlenecks/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/bottlenecks/reporting-status.py  149
-rw-r--r--  utils/test/reporting/reporting/bottlenecks/template/index-status-tmpl.html  114
-rw-r--r--  utils/test/reporting/reporting/functest/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_0.png  bin 3644 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_100.png  bin 3191 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_16.7.png  bin 3170 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_25.png  bin 3108 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_33.3.png  bin 3081 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_41.7.png  bin 3169 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_50.png  bin 3123 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_58.3.png  bin 3161 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_66.7.png  bin 3069 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_75.png  bin 3030 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_8.3.png  bin 2993 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_83.3.png  bin 3122 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_91.7.png  bin 3008 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/icon-nok.png  bin 2317 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/icon-ok.png  bin 4063 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-clear.png  bin 1560 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-few-clouds.png  bin 1927 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-overcast.png  bin 1588 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-storm.png  bin 2137 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/index.html  53
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-status.py  335
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-tempest.py  163
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-vims.py  142
-rw-r--r--  utils/test/reporting/reporting/functest/scenarioResult.py  29
-rw-r--r--  utils/test/reporting/reporting/functest/template/index-status-tmpl.html  183
-rw-r--r--  utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html  95
-rw-r--r--  utils/test/reporting/reporting/functest/template/index-vims-tmpl.html  93
-rw-r--r--  utils/test/reporting/reporting/functest/testCase.py  126
-rw-r--r--  utils/test/reporting/reporting/qtip/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/qtip/index.html  51
-rw-r--r--  utils/test/reporting/reporting/qtip/reporting-status.py  106
-rw-r--r--  utils/test/reporting/reporting/qtip/template/index-status-tmpl.html  87
-rw-r--r--  utils/test/reporting/reporting/reporting.yaml  70
-rw-r--r--  utils/test/reporting/reporting/storperf/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/storperf/reporting-status.py  145
-rw-r--r--  utils/test/reporting/reporting/storperf/template/index-status-tmpl.html  110
-rw-r--r--  utils/test/reporting/reporting/tests/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/tests/unit/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/tests/unit/utils/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/tests/unit/utils/test_utils.py  28
-rw-r--r--  utils/test/reporting/reporting/utils/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/utils/reporting_utils.py  566
-rw-r--r--  utils/test/reporting/reporting/utils/scenarioResult.py  33
-rw-r--r--  utils/test/reporting/reporting/vsperf/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/vsperf/reporting-status.py  138
-rw-r--r--  utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html  114
-rw-r--r--  utils/test/reporting/reporting/yardstick/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_0.png  bin 3644 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_100.png  bin 3191 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_16.7.png  bin 3170 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_25.png  bin 3108 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_33.3.png  bin 3081 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_41.7.png  bin 3169 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_50.png  bin 3123 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_58.3.png  bin 3161 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_66.7.png  bin 3069 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_75.png  bin 3030 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_8.3.png  bin 2993 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_83.3.png  bin 3122 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/gauge_91.7.png  bin 3008 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/icon-nok.png  bin 2317 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/icon-ok.png  bin 4063 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/weather-clear.png  bin 1560 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png  bin 1927 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/weather-overcast.png  bin 1588 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/img/weather-storm.png  bin 2137 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/yardstick/index.html  51
-rw-r--r--  utils/test/reporting/reporting/yardstick/reporting-status.py  169
-rw-r--r--  utils/test/reporting/reporting/yardstick/scenarios.py  27
-rw-r--r--  utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html  111
75 files changed, 0 insertions, 3288 deletions
diff --git a/utils/test/reporting/reporting/__init__.py b/utils/test/reporting/reporting/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/bottlenecks/__init__.py b/utils/test/reporting/reporting/bottlenecks/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/bottlenecks/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/bottlenecks/reporting-status.py b/utils/test/reporting/reporting/bottlenecks/reporting-status.py
deleted file mode 100644
index 225227ac3..000000000
--- a/utils/test/reporting/reporting/bottlenecks/reporting-status.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-import reporting.utils.scenarioResult as sr
-
-INSTALLERS = rp_utils.get_config('general.installers')
-VERSIONS = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-LOGGER = rp_utils.getLogger("Bottlenecks-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-LOGGER.info("*******************************************")
-LOGGER.info("* Generating reporting scenario status *")
-LOGGER.info("* Data retention = %s days *", PERIOD)
-LOGGER.info("* *")
-LOGGER.info("*******************************************")
-
-# retrieve the list of bottlenecks tests
-BOTTLENECKS_TESTS = rp_utils.get_config('bottlenecks.test_list')
-LOGGER.info("Bottlenecks tests: %s", BOTTLENECKS_TESTS)
-
-# For all the versions
-for version in VERSIONS:
- # For all the installers
- for installer in INSTALLERS:
- # get scenarios results data
- if version != 'master':
- new_version = "stable/{}".format(version)
- else:
- new_version = version
- scenario_results = rp_utils.getScenarios("bottlenecks",
- "posca_factor_ping",
- installer,
- new_version)
- LOGGER.info("scenario_results: %s", scenario_results)
-
- scenario_stats = rp_utils.getScenarioStats(scenario_results)
- LOGGER.info("scenario_stats: %s", scenario_stats)
- items = {}
- scenario_result_criteria = {}
-
- # From each scenarios get results list
- for s, s_result in scenario_results.items():
- LOGGER.info("---------------------------------")
- LOGGER.info("installer %s, version %s, scenario %s", installer,
- version, s)
- ten_criteria = len(s_result)
-
- ten_score = 0
- for v in s_result:
- if "PASS" in v['criteria']:
- ten_score += 1
-
- LOGGER.info("ten_score: %s / %s", (ten_score, ten_criteria))
-
- four_score = 0
- try:
- LASTEST_TESTS = rp_utils.get_config(
- 'general.nb_iteration_tests_success_criteria')
- s_result.sort(key=lambda x: x['start_date'])
- four_result = s_result[-LASTEST_TESTS:]
- LOGGER.debug("four_result: {}".format(four_result))
- LOGGER.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
- # logger.debug("four_result: {}".format(four_result))
- four_criteria = len(four_result)
- for v in four_result:
- if "PASS" in v['criteria']:
- four_score += 1
- LOGGER.info("4 Score: %s / %s ", (four_score,
- four_criteria))
- except Exception:
- LOGGER.error("Impossible to retrieve the four_score")
-
- try:
- s_status = (four_score * 100) / four_criteria
- except Exception:
- s_status = 0
- LOGGER.info("Score percent = %s", str(s_status))
- s_four_score = str(four_score) + '/' + str(four_criteria)
- s_ten_score = str(ten_score) + '/' + str(ten_criteria)
- s_score_percent = str(s_status)
-
- LOGGER.debug(" s_status: %s", s_status)
- if s_status == 100:
- LOGGER.info(">>>>> scenario OK, save the information")
- else:
- LOGGER.info(">>>> scenario not OK, last 4 iterations = %s, \
- last 10 days = %s", (s_four_score, s_ten_score))
-
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- LOGGER.debug("Build tag: %s", build_tag)
- s_url = rp_utils.getJenkinsUrl(build_tag)
- LOGGER.info("last jenkins url: %s", s_url)
-
- # Save daily results in a file
- path_validation_file = ("./display/" + version +
- "/bottlenecks/scenario_history.txt")
-
- if not os.path.exists(path_validation_file):
- with open(path_validation_file, 'w') as f:
- info = 'date,scenario,installer,details,score\n'
- f.write(info)
-
- with open(path_validation_file, "a") as f:
- info = (reportingDate + "," + s + "," + installer +
- "," + s_ten_score + "," +
- str(s_score_percent) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(s_status,
- s_four_score,
- s_ten_score,
- s_score_percent,
- s_url)
-
- LOGGER.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = ("./reporting/bottlenecks/template"
- "/index-status-tmpl.html")
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_result_criteria,
- installer=installer,
- period=PERIOD,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/bottlenecks/status-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
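For reference, the scoring rule implemented in the deleted script above (PASS count over the whole retention window plus PASS count over the last four runs) reduces to a short standalone helper. The sketch below is illustrative only; the function name and the sample results are hypothetical and not part of the deleted module, and integer division mirrors the Python 2 behaviour of the original.

# Minimal sketch of the Bottlenecks scoring rule, assuming result dicts
# with 'start_date' and 'criteria' keys as returned by the test API.
def score(results, last_n=4):
    ordered = sorted(results, key=lambda r: r['start_date'])
    ten_score = sum(1 for r in ordered if "PASS" in r['criteria'])
    last = ordered[-last_n:]
    four_score = sum(1 for r in last if "PASS" in r['criteria'])
    percent = (four_score * 100) // len(last) if last else 0
    return ten_score, len(ordered), four_score, len(last), percent

sample = [{'start_date': '2017-11-0%d' % i, 'criteria': 'PASS'} for i in range(1, 6)]
print(score(sample))  # (5, 5, 4, 4, 100)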
diff --git a/utils/test/reporting/reporting/bottlenecks/template/index-status-tmpl.html b/utils/test/reporting/reporting/bottlenecks/template/index-status-tmpl.html
deleted file mode 100644
index c4497ac1b..000000000
--- a/utils/test/reporting/reporting/bottlenecks/template/index-status-tmpl.html
+++ /dev/null
@@ -1,114 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_results.keys() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario in scenario_results.keys() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario in scenario_results.keys() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
- </script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Bottlenecks status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-fuel.html">Fuel</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- <div><h1>Reported values represent the percentage of completed
- CI tests (posca_factor_ping) during the reporting period, where results
- were communicated to the Test Database.</h1></div>
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Last 4 Iterations</th>
- <th width="10%">Last 10 Days</th>
- </tr>
- {% for scenario,result in scenario_results.iteritems() -%}
- <tr class="tr-ok">
- <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
- <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
- </tr>
- {%- endfor %}
- </table>
- </div>
-
-
- </div>
- <div class="col-md-1"></div>
-</div>
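The d3 trend line in this template reads ./scenario_history.txt, the CSV file appended by the Bottlenecks reporting script above. For clarity, a typical file looks like the following (scenario, installer and values are illustrative):

date,scenario,installer,details,score
2018-01-15 14:00,os-nosdn-nofeature-ha,fuel,9/10,100
2018-01-16 14:00,os-nosdn-nofeature-ha,fuel,10/10,100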
diff --git a/utils/test/reporting/reporting/functest/__init__.py b/utils/test/reporting/reporting/functest/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/functest/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/functest/img/gauge_0.png b/utils/test/reporting/reporting/functest/img/gauge_0.png
deleted file mode 100644
index ecefc0e66..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_0.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_100.png b/utils/test/reporting/reporting/functest/img/gauge_100.png
deleted file mode 100644
index e199e1561..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_100.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_16.7.png b/utils/test/reporting/reporting/functest/img/gauge_16.7.png
deleted file mode 100644
index 3e3993c3b..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_16.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_25.png b/utils/test/reporting/reporting/functest/img/gauge_25.png
deleted file mode 100644
index 4923659b9..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_25.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_33.3.png b/utils/test/reporting/reporting/functest/img/gauge_33.3.png
deleted file mode 100644
index 364574b4a..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_33.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_41.7.png b/utils/test/reporting/reporting/functest/img/gauge_41.7.png
deleted file mode 100644
index 8c3e910fa..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_41.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_50.png b/utils/test/reporting/reporting/functest/img/gauge_50.png
deleted file mode 100644
index 2874b9fcf..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_50.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_58.3.png b/utils/test/reporting/reporting/functest/img/gauge_58.3.png
deleted file mode 100644
index beedc8aa9..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_58.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_66.7.png b/utils/test/reporting/reporting/functest/img/gauge_66.7.png
deleted file mode 100644
index 93f44d133..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_66.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_75.png b/utils/test/reporting/reporting/functest/img/gauge_75.png
deleted file mode 100644
index 9fc261ff8..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_75.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_8.3.png b/utils/test/reporting/reporting/functest/img/gauge_8.3.png
deleted file mode 100644
index 59f86571e..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_8.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_83.3.png b/utils/test/reporting/reporting/functest/img/gauge_83.3.png
deleted file mode 100644
index 27ae4ec54..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_83.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_91.7.png b/utils/test/reporting/reporting/functest/img/gauge_91.7.png
deleted file mode 100644
index 280865714..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_91.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/icon-nok.png b/utils/test/reporting/reporting/functest/img/icon-nok.png
deleted file mode 100644
index 526b5294b..000000000
--- a/utils/test/reporting/reporting/functest/img/icon-nok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/icon-ok.png b/utils/test/reporting/reporting/functest/img/icon-ok.png
deleted file mode 100644
index 3a9de2e89..000000000
--- a/utils/test/reporting/reporting/functest/img/icon-ok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-clear.png b/utils/test/reporting/reporting/functest/img/weather-clear.png
deleted file mode 100644
index a0d967750..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-clear.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/reporting/functest/img/weather-few-clouds.png
deleted file mode 100644
index acfa78398..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-few-clouds.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-overcast.png b/utils/test/reporting/reporting/functest/img/weather-overcast.png
deleted file mode 100644
index 4296246d0..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-overcast.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-storm.png b/utils/test/reporting/reporting/functest/img/weather-storm.png
deleted file mode 100644
index 956f0e20f..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-storm.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/index.html b/utils/test/reporting/reporting/functest/index.html
deleted file mode 100644
index bb1bce209..000000000
--- a/utils/test/reporting/reporting/functest/index.html
+++ /dev/null
@@ -1,53 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Functest reporting page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-main">
- <h2>Functest</h2>
- This project develops test suites that cover functional test cases in OPNFV.
- <br>The test suites are integrated into the continuous integration (CI) framework and used to evaluate/validate scenarios.
- <br> Weekly meeting: every Tuesday 8 AM UTC
- <br> IRC channel: #opnfv-testperf
-
- <br>
- <h2>Useful Links</h2>
- <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
- <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
- <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
- <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
- <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
-
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
deleted file mode 100755
index 592f92996..000000000
--- a/utils/test/reporting/reporting/functest/reporting-status.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-import sys
-import time
-
-import jinja2
-
-import reporting.functest.testCase as tc
-import reporting.functest.scenarioResult as sr
-import reporting.utils.reporting_utils as rp_utils
-
-"""
-Functest reporting status
-"""
-
-# Logger
-LOGGER = rp_utils.getLogger("Functest-Status")
-
-# Initialization
-testValid = []
-otherTestCases = []
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-# init just connection_check to get the list of scenarios
-# as all the scenarios run connection_check
-healthcheck = tc.TestCase("connection_check", "functest", -1)
-
-# Retrieve the Functest configuration to detect which tests are relevant
-# according to the installer, scenario
-cf = rp_utils.get_config('functest.test_conf')
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-installers = rp_utils.get_config('general.installers')
-blacklist = rp_utils.get_config('functest.blacklist')
-log_level = rp_utils.get_config('general.log.log_level')
-exclude_noha = rp_utils.get_config('functest.exclude_noha')
-exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-
-functest_yaml_config = rp_utils.getFunctestConfig()
-
-LOGGER.info("*******************************************")
-LOGGER.info("* *")
-LOGGER.info("* Generating reporting scenario status *")
-LOGGER.info("* Data retention: %s days *", period)
-LOGGER.info("* Log level: %s *", log_level)
-LOGGER.info("* *")
-LOGGER.info("* Virtual PODs exluded: %s *", exclude_virtual)
-LOGGER.info("* NOHA scenarios excluded: %s *", exclude_noha)
-LOGGER.info("* *")
-LOGGER.info("*******************************************")
-
-# Retrieve test cases of Tier 1 (smoke)
-config_tiers = functest_yaml_config.get("tiers")
-
-# we consider Tier 0 (healthcheck) and Tier 1 (smoke)
-# to validate scenarios
-# Tiers >= 2 (features and above) are not used to validate scenarios,
-# but their results are displayed anyway
-# tricky thing for the API as some tests are Functest tests
-# other tests are declared directly in the feature projects
-for tier in config_tiers:
- if tier['order'] >= 0 and tier['order'] < 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- testValid.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
- elif tier['order'] == 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- case['case_name'],
- case['dependencies']))
- elif tier['order'] > 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
-
-LOGGER.debug("Functest reporting start")
-
-# For all the versions
-for version in versions:
- # For all the installers
- scenario_directory = "./display/" + version + "/functest/"
- scenario_file_name = scenario_directory + "scenario_history.txt"
-
- # check that the directory exists, if not create it
- # (first run on new version)
- if not os.path.exists(scenario_directory):
- os.makedirs(scenario_directory)
-
- # initiate scenario file if it does not exist
- if not os.path.isfile(scenario_file_name):
- with open(scenario_file_name, "a") as my_file:
- LOGGER.debug("Create scenario file: %s", scenario_file_name)
- my_file.write("date,scenario,installer,detail,score\n")
-
- for installer in installers:
-
- # get scenarios
- scenario_results = rp_utils.getScenarios("functest",
- "connection_check",
- installer,
- version)
- # get nb of supported architecture (x86, aarch64)
- architectures = rp_utils.getArchitectures(scenario_results)
- LOGGER.info("Supported architectures: %s", architectures)
-
- for architecture in architectures:
- LOGGER.info("Architecture: %s", architecture)
- # Consider only the results for the selected architecture
- # i.e drop x86 for aarch64 and vice versa
- filter_results = rp_utils.filterArchitecture(scenario_results,
- architecture)
- scenario_stats = rp_utils.getScenarioStats(filter_results)
- items = {}
- scenario_result_criteria = {}
-
- # in case of more than 1 architecture supported
- # precise the architecture
- installer_display = installer
- if "fuel" in installer:
- installer_display = installer + "@" + architecture
-
- # For all the scenarios get results
- for s, s_result in filter_results.items():
- LOGGER.info("---------------------------------")
- LOGGER.info("installer %s, version %s, scenario %s:",
- installer, version, s)
- LOGGER.debug("Scenario results: %s", s_result)
-
- # Green or Red light for a given scenario
- nb_test_runnable_for_this_scenario = 0
- scenario_score = 0
- # url of the last jenkins log corresponding to a given
- # scenario
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- LOGGER.debug("Build tag: %s", build_tag)
- s_url = rp_utils.getJenkinsUrl(build_tag)
- if s_url is None:
- s_url = "http://testresultS.opnfv.org/reporting"
- LOGGER.info("last jenkins url: %s", s_url)
- testCases2BeDisplayed = []
- # Check if test case is runnable / installer, scenario
- # for the test case used for Scenario validation
- try:
- # 1) Manage the test cases for the scenario validation
- # concretely Tiers 0-1
- for test_case in testValid:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- LOGGER.debug("testcase %s (%s) is %s",
- test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable)
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
- LOGGER.info(" Searching results for case %s ",
- displayName)
- if "fuel" in installer:
- result = rp_utils.getCaseScoreFromBuildTag(
- name,
- s_result)
- else:
- result = rp_utils.getCaseScore(name, installer,
- s, version)
- # if no result set the value to 0
- if result < 0:
- result = 0
- LOGGER.info(" >>>> Test score = " + str(result))
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases for the scenario qualification
- # concretely Tiers >= 2
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- LOGGER.debug("testcase %s (%s) is %s",
- test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable)
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- LOGGER.info(" Searching results for case %s ",
- displayName)
- if "fuel" in installer:
- result = rp_utils.getCaseScoreFromBuildTag(
- name,
- s_result)
- else:
- result = rp_utils.getCaseScore(name, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(
- name,
- project,
- "",
- result,
- True,
- 4))
- else:
- LOGGER.debug("No results found")
-
- items[s] = testCases2BeDisplayed
- except Exception: # pylint: disable=broad-except
- LOGGER.error("Error installer %s, version %s, scenario %s",
- installer, version, s)
- LOGGER.error("No data available: %s", sys.exc_info()[0])
-
- # **********************************************
- # Evaluate the results for scenario validation
- # **********************************************
- # the validation criteria = nb runnable tests x 3
- # because each test case can get
- # 0 point (never PASS)
- # 1 point at least (PASS once over the time window)
- # 2 points (PASS more than once but 1 FAIL on the last 4)
- # 3 points PASS on the last 4 iterations
- # e.g. 1 scenario = 10 cases
- # 1 iteration : max score = 10 (10x1)
- # 2 iterations : max score = 20 (10x2)
- # 3 iterations : max score = 20
- # 4 or more iterations : max score = 30 (10x3)
- LOGGER.info("Number of iterations for this scenario: %s",
- len(s_result))
- if len(s_result) > 3:
- k_score = 3
- elif len(s_result) < 2:
- k_score = 1
- else:
- k_score = 2
-
- scenario_criteria = nb_test_runnable_for_this_scenario*k_score
-
- # score for reporting
- s_score = str(scenario_score) + "/" + str(scenario_criteria)
- s_score_percent = rp_utils.getScenarioPercent(
- scenario_score,
- scenario_criteria)
-
- s_status = "KO"
- if scenario_score < scenario_criteria:
- LOGGER.info(">>>> scenario not OK, score = %s/%s",
- scenario_score, scenario_criteria)
- s_status = "KO"
- else:
- LOGGER.info(">>>>> scenario OK, save the information")
- s_status = "OK"
- path_validation_file = ("./display/" + version +
- "/functest/" +
- "validated_scenario_history.txt")
- with open(path_validation_file, "a") as f:
- time_format = "%Y-%m-%d %H:%M"
- info = (datetime.datetime.now().strftime(time_format) +
- ";" + installer_display + ";" + s + "\n")
- f.write(info)
-
- # Save daily results in a file
- with open(scenario_file_name, "a") as f:
- info = (reportingDate + "," + s + "," + installer_display +
- "," + s_score + "," +
- str(round(s_score_percent)) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(
- s_status,
- s_score,
- s_score_percent,
- s_url)
- LOGGER.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(
- loader=templateLoader, autoescape=True)
-
- TEMPLATE_FILE = ("./reporting/functest/template"
- "/index-status-tmpl.html")
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(
- scenario_stats=scenario_stats,
- scenario_results=scenario_result_criteria,
- items=items,
- installer=installer_display,
- period=period,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/functest/status-" +
- installer_display + ".html", "wb") as fh:
- fh.write(outputText)
-
- LOGGER.info("Manage export CSV & PDF")
- rp_utils.export_csv(scenario_file_name, installer_display, version)
- LOGGER.error("CSV generated...")
-
- # Generate outputs for export
- # pdf
- url_pdf = rp_utils.get_config('general.url')
- pdf_path = ("./display/" + version +
- "/functest/status-" + installer_display + ".html")
- pdf_doc_name = ("./display/" + version +
- "/functest/status-" + installer_display + ".pdf")
- rp_utils.export_pdf(pdf_path, pdf_doc_name)
- LOGGER.info("PDF generated...")
diff --git a/utils/test/reporting/reporting/functest/reporting-tempest.py b/utils/test/reporting/reporting/functest/reporting-tempest.py
deleted file mode 100755
index d78d9a19d..000000000
--- a/utils/test/reporting/reporting/functest/reporting-tempest.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# SPDX-license-identifier: Apache-2.0
-
-from datetime import datetime
-import json
-import os
-
-from urllib2 import Request, urlopen, URLError
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-
-INSTALLERS = rp_utils.get_config('general.installers')
-ITEMS = ["tests", "Success rate", "duration"]
-
-CURRENT_DIR = os.getcwd()
-
-PERIOD = rp_utils.get_config('general.period')
-CRITERIA_NB_TEST = 100
-CRITERIA_DURATION = 1800
-CRITERIA_SUCCESS_RATE = 100
-
-logger = rp_utils.getLogger("Tempest")
-logger.info("************************************************")
-logger.info("* Generating reporting Tempest_smoke_serial *")
-logger.info("* Data retention = %s days *", PERIOD)
-logger.info("* *")
-logger.info("************************************************")
-
-logger.info("Success criteria:")
-logger.info("nb tests executed > %s s ", CRITERIA_NB_TEST)
-logger.info("test duration < %s s ", CRITERIA_DURATION)
-logger.info("success rate > %s ", CRITERIA_SUCCESS_RATE)
-
-# For all the versions
-for version in rp_utils.get_config('general.versions'):
- for installer in INSTALLERS:
- # we consider the Tempest results of the last PERIOD days
- url = ("http://" + rp_utils.get_config('testapi.url') +
- "?case=tempest_smoke_serial&period=" + str(PERIOD) +
- "&installer=" + installer + "&version=" + version)
- request = Request(url)
- logger.info(("Search tempest_smoke_serial results for installer %s"
- " for version %s"), installer, version)
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as err:
- logger.error("Error code: %s", err)
- logger.debug("request sent: %s", url)
- logger.debug("Results from API: %s", results)
- test_results = results['results']
- logger.debug("Test results: %s", test_results)
- scenario_results = {}
- criteria = {}
- errors = {}
-
- for r in test_results:
- # Retrieve all the scenarios per installer
- # In Brahmaputra the version field was used;
- # since Colorado the scenario field is used
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- logger.debug("Scenario results: %s", scenario_results)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- # For each scenario, we build a result object to deal with
- # results, criteria and error handling
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
- logger.debug("start_date= %s", result["start_date"])
-
- # retrieve results
- # ****************
- nb_tests_run = result['details']['tests']
- nb_tests_failed = result['details']['failures']
- logger.debug("nb_tests_run= %s", nb_tests_run)
- logger.debug("nb_tests_failed= %s", nb_tests_failed)
-
- try:
- success_rate = (100 * (int(nb_tests_run) -
- int(nb_tests_failed)) /
- int(nb_tests_run))
- except ZeroDivisionError:
- success_rate = 0
-
- result['details']["tests"] = nb_tests_run
- result['details']["Success rate"] = str(success_rate) + "%"
-
- logger.info("nb_tests_run= %s", result['details']["tests"])
- logger.info("test rate = %s",
- result['details']["Success rate"])
-
- # Criteria management
- # *******************
- crit_tests = False
- crit_rate = False
- crit_time = False
-
- # Expect that at least CRITERIA_NB_TEST tests are run
- if nb_tests_run >= CRITERIA_NB_TEST:
- crit_tests = True
-
- # Expect a success rate of at least CRITERIA_SUCCESS_RATE %
- if success_rate >= CRITERIA_SUCCESS_RATE:
- crit_rate = True
-
- # Expect the suite duration to stay below CRITERIA_DURATION (30 min)
- stop_date = datetime.strptime(result['stop_date'],
- '%Y-%m-%d %H:%M:%S')
- start_date = datetime.strptime(result['start_date'],
- '%Y-%m-%d %H:%M:%S')
-
- delta = stop_date - start_date
-
- if delta.total_seconds() < CRITERIA_DURATION:
- crit_time = True
-
- result['criteria'] = {'tests': crit_tests,
- 'Success rate': crit_rate,
- 'duration': crit_time}
- try:
- logger.debug("Nb Test run: %s", nb_tests_run)
- logger.debug("Test duration: %s", delta)
- logger.debug("Success rate: %s", success_rate)
- except Exception: # pylint: disable=broad-except
- logger.error("Data format error")
-
- # Error management
- # ****************
- try:
- errors = result['details']['errors']
- logger.info("errors: %s", errors)
- result['errors'] = errors
- except Exception: # pylint: disable=broad-except
- logger.error("Error field not present (Brahamputra runs?)")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/functest/template/index-tempest-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- items=ITEMS,
- installer=installer)
-
- with open("./display/" + version +
- "/functest/tempest-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
-logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/reporting/functest/reporting-vims.py b/utils/test/reporting/reporting/functest/reporting-vims.py
deleted file mode 100755
index 3b25e911d..000000000
--- a/utils/test/reporting/reporting/functest/reporting-vims.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-vIMS reporting status
-"""
-from urllib2 import Request, urlopen, URLError
-import json
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-
-LOGGER = rp_utils.getLogger("vIMS")
-
-PERIOD = rp_utils.get_config('general.period')
-VERSIONS = rp_utils.get_config('general.versions')
-URL_BASE = rp_utils.get_config('testapi.url')
-
-LOGGER.info("****************************************")
-LOGGER.info("* Generating reporting vIMS *")
-LOGGER.info("* Data retention = %s days *", PERIOD)
-LOGGER.info("* *")
-LOGGER.info("****************************************")
-
-INSTALLERS = rp_utils.get_config('general.installers')
-STEP_ORDER = ["initialisation", "orchestrator", "vnf", "test_vnf"]
-LOGGER.info("Start vIMS reporting processing....")
-
-# For all the versions
-for version in VERSIONS:
- for installer in INSTALLERS:
-
- # get nb of supported architecture (x86, aarch64)
- # get scenarios
- scenario_results = rp_utils.getScenarios("functest",
- "cloudify_ims",
- installer,
- version)
-
- architectures = rp_utils.getArchitectures(scenario_results)
- LOGGER.info("Supported architectures: %s", architectures)
-
- for architecture in architectures:
- LOGGER.info("Architecture: %s", architecture)
- # Consider only the results for the selected architecture
- # i.e drop x86 for aarch64 and vice versa
- filter_results = rp_utils.filterArchitecture(scenario_results,
- architecture)
- scenario_stats = rp_utils.getScenarioStats(filter_results)
- items = {}
- scenario_result_criteria = {}
-
- # in case of more than 1 architecture supported
- # precise the architecture
- installer_display = installer
- if "fuel" in installer:
- installer_display = installer + "@" + architecture
-
- LOGGER.info("Search vIMS results for installer: %s, version: %s",
- installer, version)
- request = Request("http://" + URL_BASE + '?case=cloudify_ims&'
- 'installer=' + installer + '&version=' + version)
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as err:
- LOGGER.error("Error code: %s", err)
-
- test_results = results['results']
-
- # LOGGER.debug("Results found: %s" % test_results)
-
- scenario_results = {}
- for r in test_results:
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- # LOGGER.debug("scenario result: %s" % scenario_results)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- for result in scenario_results[s]:
- try:
- format_result = result['details']['test_vnf']['result']
-
- # round durations of the different steps
- result['details']['orchestrator']['duration'] = round(
- result['details']['orchestrator']['duration'], 1)
- result['details']['vnf']['duration'] = round(
- result['details']['vnf']['duration'], 1)
- result['details']['test_vnf']['duration'] = round(
- result['details']['test_vnf']['duration'], 1)
-
- res_orch = \
- result['details']['orchestrator']['duration']
- res_vnf = result['details']['vnf']['duration']
- res_test_vnf = \
- result['details']['test_vnf']['duration']
- res_signaling = \
- result['details']['test_vnf']['result']['failures']
-
- # Manage test result status
- if res_signaling != 0:
- LOGGER.debug("At least 1 signalig test FAIL")
- result['details']['test_vnf']['status'] = "FAIL"
- else:
- LOGGER.debug("All signalig tests PASS")
- result['details']['test_vnf']['status'] = "PASS"
-
- LOGGER.debug("Scenario %s, Installer %s",
- s_result[1]['scenario'], installer)
- LOGGER.debug("Orchestrator deployment: %ss", res_orch)
- LOGGER.debug("vIMS deployment: %ss", res_vnf)
- LOGGER.debug("VNF testing: %ss", res_test_vnf)
- LOGGER.debug("VNF testing results: %s", format_result)
- except Exception as err: # pylint: disable=broad-except
- LOGGER.error("Uncomplete data %s", err)
- LOGGER.debug("----------------------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/functest/template/index-vims-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- step_order=STEP_ORDER,
- installer=installer_display)
- LOGGER.debug("Generate html page for %s", installer_display)
- with open("./display/" + version + "/functest/vims-" +
- installer_display + ".html", "wb") as fh:
- fh.write(outputText)
-
-LOGGER.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/reporting/functest/scenarioResult.py b/utils/test/reporting/reporting/functest/scenarioResult.py
deleted file mode 100644
index 5a54eed96..000000000
--- a/utils/test/reporting/reporting/functest/scenarioResult.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-class ScenarioResult(object):
-
- def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
- self.status = status
- self.score = score
- self.score_percent = score_percent
- self.url_lastrun = url_lastrun
-
- def getStatus(self):
- return self.status
-
- def getScore(self):
- return self.score
-
- def getScorePercent(self):
- return self.score_percent
-
- def getUrlLastRun(self):
- return self.url_lastrun
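As used by reporting-status.py above, ScenarioResult is a plain value object built once per scenario. A minimal usage sketch follows; the score, percentage and Jenkins URL are hypothetical example values.

# Mirrors the call made in reporting-status.py (values are made up).
result = ScenarioResult("OK", "27/30", 90, "https://build.opnfv.org/ci/job/example/42/")
print(result.getStatus(), result.getScore(), result.getScorePercent())  # OK 27/30 90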
diff --git a/utils/test/reporting/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/reporting/functest/template/index-status-tmpl.html
deleted file mode 100644
index 50fc648aa..000000000
--- a/utils/test/reporting/reporting/functest/template/index-status-tmpl.html
+++ /dev/null
@@ -1,183 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_stats.iteritems() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
-
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
-</script>
-<script type="text/javascript">
-$(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
-})
-</script>
-
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-daisy.html">Daisy</a></li>
- <li><a href="status-fuel@x86.html">fuel@x86</a></li>
- <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">HA Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Score</th>
- <th width="10%">Iteration</th>
- </tr>
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- <tr class="tr-ok">
- {% if '-ha' in scenario -%}
- <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getScore()}}</td>
- <td>{{iteration}}</td>
- {%- endif %}
- </tr>
- {%- endfor %}
- <br>
- </table>
- <br>
- <table class="table">
- <tr>
- <th width="40%">NOHA Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Score</th>
- <th width="10%">Iteration</th>
- </tr>
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- <tr class="tr-ok">
- {% if '-noha' in scenario -%}
- <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getScore()}}</td>
- <td>{{iteration}}</td>
- {%- endif %}
- </tr>
- {%- endfor %}
- </table>
-
- </div>
-
-
- {% for scenario, iteration in scenario_stats.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario}}</b></h3>
- </div>
- <div class="panel panel-default">
- <div class="panel-heading">
- <span class="panel-header-item">
- </span>
- </div>
- <table class="table">
- <tr>
- {% for test in items[scenario] -%}
- <th>
- {% if test.getCriteria() > -1 -%}
- {{test.getDisplayName() }}
- {%- endif %}
- {% if test.getTier() > 3 -%}
- *
- {%- endif %}
- </th>
- {%- endfor %}
- </tr>
- <tr class="tr-weather-weather">
- {% for test in items[scenario] -%}
- {% if test.getCriteria() > 2 -%}
- <td><img src="../../img/weather-clear.png"></td>
- {%- elif test.getCriteria() > 1 -%}
- <td><img src="../../img/weather-few-clouds.png"></td>
- {%- elif test.getCriteria() > 0 -%}
- <td><img src="../../img/weather-overcast.png"></td>
- {%- elif test.getCriteria() > -1 -%}
- <td><img src="../../img/weather-storm.png"></td>
- {%- endif %}
- {%- endfor %}
- </tr>
- </table>
- </div>
- </div>
- {%- endfor %}
- see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
- <div> <br>
- <a href="./status-{{installer}}.pdf" class="myButtonPdf">Export to PDF</a> <a href="./scenario_history_{{installer}}.txt" class="myButtonCSV">Export to CSV</a>
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
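The weather icons in the per-scenario tables of this template map directly to the per-test score set by reporting-status.py (0 to 3 points). For reference, the template logic is equivalent to the following sketch (the helper name is illustrative):

# Score-to-icon mapping implemented by the template above.
def weather_icon(score):
    if score > 2:
        return "weather-clear.png"       # PASS on the last 4 runs
    elif score > 1:
        return "weather-few-clouds.png"  # PASS more than once, 1 FAIL in the last 4
    elif score > 0:
        return "weather-overcast.png"    # at least one PASS in the window
    elif score > -1:
        return "weather-storm.png"       # never PASS
    return None                          # no result: no icon is rendered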
diff --git a/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
deleted file mode 100644
index 3a222276e..000000000
--- a/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
+++ /dev/null
@@ -1,95 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Tempest status page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="tempest-apex.html">Apex</a></li>
- <li><a href="tempest-compass.html">Compass</a></li>
- <li><a href="tempest-daisy.html">Daisy</a></li>
- <li><a href="tempest-fuel.html">Fuel</a></li>
- <li><a href="tempest-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- {% for scenario_name, results in scenario_results.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
- </div>
- {% for result in results -%}
- {% if loop.index > 2 -%}
- <div class="panel panel-default" hidden>
- {%- else -%}
- <div class="panel panel-default">
- {%- endif %}
- <div class="panel-heading">
- <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
- <span class="panel-header-item">
- <h4><b>{{result.start_date}}</b></h4>
- </span>
- <span class="badge panel-pod-name">{{result.pod_name}}</span>
- </div>
- <table class="table">
- <tr>
- <th width="20%">Item</th>
- <th width="10%">Result</th>
- <th width="10%">Status</th>
- <th width="60%">Errors</th>
- </tr>
- {% for item in items -%}
- {% if item in result.details.keys() -%}
- {% if result.criteria[item] -%}
- <tr class="tr-ok">
- <td>{{item}}</td>
- <td>{{result.details[item]}}</td>
- <td><span class="glyphicon glyphicon-ok"></td>
- {% if item is equalto "Success rate" %}
- <td>{{result.errors}}</td>
- {% endif %}
- </tr>
- {%- else -%}
- <tr class="tr-danger">
- <td>{{item}}</td>
- <td>{{result.details[item]}}</td>
- <td><span class="glyphicon glyphicon-remove"></td>
- {% if item is equalto "Success rate" %}
- <td>{{result.errors}}</td>
- {% endif %}
- </tr>
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- </table>
- </div>
- {%- endfor %}
- <button type="button" class="btn btn-more">More than two</button>
- </div>
- {%- endfor %}
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
deleted file mode 100644
index 9bd2b2f66..000000000
--- a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
+++ /dev/null
@@ -1,93 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">vIMS status page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="vims-apex.html">Apex</a></li>
- <li><a href="vims-compass.html">Compass</a></li>
- <li><a href="vims-daisy.html">Daisy</a></li>
- <li><a href="vims-fuel@x86.html">Fuel@x86</a></li>
- <li><a href="vims-fuel@aarch64.html">Fuel@aarch64</a></li>
- <li><a href="vims-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- {% for scenario_name, results in scenario_results.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
-                    <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario_name}}</b></h3>
- </div>
- {% for result in results -%}
- {% if loop.index > 2 -%}
- <div class="panel panel-default" hidden>
- {%- else -%}
- <div class="panel panel-default">
- {%- endif %}
- <div class="panel-heading">
- <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
- <span class="panel-header-item">
- <h4><b>{{result.start_date}}</b></h4>
- </span>
- <span class="badge panel-pod-name">{{result.pod_name}}</span>
- </div>
- <table class="table">
- <tr>
- <th width="20%">Step</th>
- <th width="10%">Status</th>
- <th width="10%">Duration(s)</th>
- <th width="60%">Result</th>
- </tr>
- {% for step_od_name in step_order -%}
- {% if step_od_name in result.details.keys() -%}
- {% set step_result = result.details[step_od_name] -%}
- {% if step_result.status == "PASS" -%}
- <tr class="tr-ok">
- <td>{{step_od_name}}</td>
-                            <td><span class="glyphicon glyphicon-ok"></span></td>
- <td><b>{{step_result.duration}}</b></td>
- <td>{{step_result.result}}</td>
- </tr>
- {%- else -%}
- <tr class="tr-danger">
- <td>{{step_od_name}}</td>
-                            <td><span class="glyphicon glyphicon-remove"></span></td>
- <td><b>0s</b></td>
- <td>{{step_result.result}}</td>
- </tr>
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- </table>
- </div>
- {%- endfor %}
- <button type="button" class="btn btn-more">More than two</button>
- </div>
- {%- endfor %}
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/functest/testCase.py b/utils/test/reporting/reporting/functest/testCase.py
deleted file mode 100644
index a182dd4cf..000000000
--- a/utils/test/reporting/reporting/functest/testCase.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import re
-
-
-class TestCase(object):
-
- def __init__(self, name, project, constraints,
- criteria=-1, isRunnable=True, tier=-1):
- self.name = name
- self.project = project
- self.constraints = constraints
- self.criteria = criteria
- self.isRunnable = isRunnable
- self.tier = tier
- display_name_matrix = {'healthcheck': 'healthcheck',
- 'vping_ssh': 'vPing (ssh)',
- 'vping_userdata': 'vPing (userdata)',
- 'odl': 'ODL',
- 'onos': 'ONOS',
- 'ocl': 'OCL',
- 'tempest_smoke_serial': 'Tempest (smoke)',
- 'tempest_full_parallel': 'Tempest (full)',
- 'tempest_defcore': 'Tempest (Defcore)',
- 'refstack_defcore': 'Refstack',
- 'rally_sanity': 'Rally (smoke)',
- 'bgpvpn': 'bgpvpn',
- 'rally_full': 'Rally (full)',
- 'vims': 'vIMS',
- 'doctor-notification': 'Doctor',
- 'promise': 'Promise',
- 'moon': 'Moon',
- 'copper': 'Copper',
- 'multisite': 'Multisite',
- 'domino-multinode': 'Domino',
- 'functest-odl-sfc': 'SFC',
- 'onos_sfc': 'SFC',
- 'parser-basics': 'Parser',
- 'connection_check': 'Health (connection)',
- 'api_check': 'Health (api)',
- 'snaps_smoke': 'SNAPS',
- 'snaps_health_check': 'Health (dhcp)',
- 'gluon_vping': 'Netready',
- 'fds': 'FDS',
- 'cloudify_ims': 'vIMS (Cloudify)',
- 'orchestra_openims': 'OpenIMS (OpenBaton)',
- 'orchestra_clearwaterims': 'vIMS (OpenBaton)',
- 'opera_ims': 'vIMS (Open-O)',
- 'vyos_vrouter': 'vyos (Cloudify)',
- 'barometercollectd': 'Barometer',
- 'odl_netvirt': 'Netvirt',
- 'security_scan': 'Security'}
- try:
- self.displayName = display_name_matrix[self.name]
-        except KeyError:
- self.displayName = "unknown"
-
- def getName(self):
- return self.name
-
- def getProject(self):
- return self.project
-
- def getConstraints(self):
- return self.constraints
-
- def getCriteria(self):
- return self.criteria
-
- def getTier(self):
- return self.tier
-
- def setCriteria(self, criteria):
- self.criteria = criteria
-
- def setIsRunnable(self, isRunnable):
- self.isRunnable = isRunnable
-
- def checkRunnable(self, installer, scenario, config):
- # Re-use Functest declaration
- # Retrieve Functest configuration file functest_config.yaml
- is_runnable = True
- config_test = config
- # print " *********************** "
- # print TEST_ENV
- # print " ---------------------- "
- # print "case = " + self.name
- # print "installer = " + installer
- # print "scenario = " + scenario
- # print "project = " + self.project
-
- # Retrieve test constraints
- # Retrieve test execution param
- test_execution_context = {"installer": installer,
- "scenario": scenario}
-
- # By default we assume that all the tests are always runnable...
- # if test_env not empty => dependencies to be checked
- if config_test is not None and len(config_test) > 0:
- # possible criteria = ["installer", "scenario"]
- # consider test criteria from config file
-            # compare them with the CI env variables
- for criteria in config_test:
- if re.search(config_test[criteria],
- test_execution_context[criteria]) is None:
- # print "Test "+ test + " cannot be run on the environment"
- is_runnable = False
- # print is_runnable
- self.isRunnable = is_runnable
-
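checkRunnable() above matches every constraint key from the Functest configuration against the current installer/scenario pair with re.search; if any pattern fails to match, the test is flagged as not runnable. A minimal sketch of that matching, using hypothetical constraint values (the real constraints come from Functest's testcases.yaml):

    # Sketch only: the constraint and context values below are hypothetical.
    import re

    constraints = {"installer": "apex|fuel", "scenario": "odl"}
    context = {"installer": "apex", "scenario": "os-odl_l2-nofeature-ha"}

    runnable = all(re.search(constraints[criteria], context[criteria])
                   for criteria in constraints)
    print runnable  # True: "apex" matches "apex|fuel", "odl" is found in the scenario
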
- def toString(self):
- testcase = ("Name=" + self.name + ";Criteria=" +
- str(self.criteria) + ";Project=" + self.project +
- ";Constraints=" + str(self.constraints) +
-                    ";IsRunnable=" + str(self.isRunnable))
- return testcase
-
- def getDisplayName(self):
- return self.displayName
diff --git a/utils/test/reporting/reporting/qtip/__init__.py b/utils/test/reporting/reporting/qtip/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/qtip/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/qtip/index.html b/utils/test/reporting/reporting/qtip/index.html
deleted file mode 100644
index 0f9df8564..000000000
--- a/utils/test/reporting/reporting/qtip/index.html
+++ /dev/null
@@ -1,51 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">QTIP reporting page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-main">
- <h2>QTIP</h2>
- QTIP is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
- <br>The QTIP framework is deployed in several OPNFV community labs.
- <br>It is installer, infrastructure and application independent.
-
- <h2>Useful Links</h2>
- <li><a href="https://wiki.opnfv.org/download/attachments/5734608/qtip%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">QTIP in Depth</a></li>
- <li><a href="https://git.opnfv.org/cgit/qtip">QTIP Repo</a></li>
- <li><a href="https://wiki.opnfv.org/display/qtip">QTIP Project</a></li>
- <li><a href="https://build.opnfv.org/ci/view/qtip/">QTIP Jenkins page</a></li>
- <li><a href="https://jira.opnfv.org/browse/QTIP-119?jql=project%20%3D%20QTIP">JIRA</a></li>
-
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/qtip/reporting-status.py b/utils/test/reporting/reporting/qtip/reporting-status.py
deleted file mode 100644
index 524338ca0..000000000
--- a/utils/test/reporting/reporting/qtip/reporting-status.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-
-import jinja2
-import reporting.utils.reporting_utils as rp_utils
-import reporting.utils.scenarioResult as sr
-
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-logger = rp_utils.getLogger("Qtip-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention = {} days *".format(PERIOD))
-logger.info("* *")
-logger.info("*******************************************")
-
-
-def prepare_profile_file(version):
- profile_dir = './display/{}/qtip'.format(version)
- if not os.path.exists(profile_dir):
- os.makedirs(profile_dir)
-
- profile_file = "{}/scenario_history.txt".format(profile_dir)
- if not os.path.exists(profile_file):
- with open(profile_file, 'w') as f:
- info = 'date,scenario,installer,details,score\n'
- f.write(info)
- f.close()
- return profile_file
-
-
-def profile_results(results, installer, profile_fd):
- result_criterias = {}
- for s_p, s_p_result in results.iteritems():
- avg_last_ten = sum([int(s) for s in s_p_result]) / len(s_p_result)
-
- LASTEST_TESTS = rp_utils.get_config(
- 'general.nb_iteration_tests_success_criteria')
- last_four = s_p_result[-LASTEST_TESTS:]
- avg_last_four = sum([int(s) for s in last_four]) / len(last_four)
-
- info = '{},{},{},{},{}\n'.format(reportingDate,
- s_p,
- installer,
- '',
- avg_last_four)
- profile_fd.write(info)
- result_criterias[s_p] = sr.ScenarioResult('OK',
- avg_last_four,
- avg_last_ten,
- '100')
-
- logger.info("--------------------------")
- return result_criterias
-
-
-def render_html(prof_results, installer, version):
- template_loader = jinja2.FileSystemLoader(".")
- template_env = jinja2.Environment(loader=template_loader,
- autoescape=True)
-
- template_file = "./reporting/qtip/template/index-status-tmpl.html"
- template = template_env.get_template(template_file)
-
- render_outcome = template.render(prof_results=prof_results,
- installer=installer,
- period=PERIOD,
- version=version,
- date=reportingDate)
-
- with open('./display/{}/qtip/status-{}.html'.format(version, installer),
- 'wb') as fh:
- fh.write(render_outcome)
-
-
-def render_reporter():
- for version in versions:
- profile_file = prepare_profile_file(version)
- profile_fd = open(profile_file, 'a')
- for installer in installers:
- results = rp_utils.getQtipResults(version, installer)
- prof_results = profile_results(results, installer, profile_fd)
- render_html(prof_results=prof_results,
- installer=installer,
- version=version)
- profile_fd.close()
- logger.info("Manage export CSV")
- rp_utils.generate_csv(profile_file)
- logger.info("CSV generated...")
-
-
-if __name__ == '__main__':
- render_reporter()
diff --git a/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html b/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
deleted file mode 100644
index f55f78144..000000000
--- a/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
+++ /dev/null
@@ -1,87 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/trend-qtip.js"></script>
- <script>
- // trend line management
- d3.csv("./scenario_history.csv", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario in prof_results.keys() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- </script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">QTIP status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <!--<li><a href="status-compass.html">Compass</a></li>-->
- <!--<li><a href="status-daisy.html">Daisy</a></li>-->
- <!--<li><a href="status-fuel.html">Fuel</a></li>-->
- <!--<li><a href="status-joid.html">Joid</a></li>-->
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="25%">Pod/Scenario</th>
- <th width="25%">Scoring Trend</th>
- <th width="25%">Avg. in Last 4 Runs</th>
- <th width="25%">Avg. in Last 10 Days</th>
- </tr>
- {% for scenario,result in prof_results.iteritems() -%}
- <tr class="tr-ok">
- <td>{{scenario}}</td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{prof_results[scenario].getFourDaysScore()}}</td>
- <td>{{prof_results[scenario].getTenDaysScore()}}</td>
- </tr>
- {%- endfor %}
- </table>
- </div>
-
-
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/reporting.yaml b/utils/test/reporting/reporting/reporting.yaml
deleted file mode 100644
index 8123d0135..000000000
--- a/utils/test/reporting/reporting/reporting.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-general:
- installers:
- - apex
- - compass
- - daisy
- - fuel
- - joid
-
- versions:
- - master
- - euphrates
-
- log:
- log_file: reporting.log
- log_level: ERROR
-
- period: 10
-
- nb_iteration_tests_success_criteria: 4
-
- directories:
- # Relative to the path where the repo is cloned:
- dir_reporting: utils/tests/reporting/
- dir_log: utils/tests/reporting/log/
- dir_conf: utils/tests/reporting/conf/
- dir_utils: utils/tests/reporting/utils/
- dir_templates: utils/tests/reporting/templates/
- dir_display: utils/tests/reporting/display/
-
- url: testresults.opnfv.org/reporting/
-
-testapi:
- url: testresults.opnfv.org/test/api/v1/results
-
-functest:
- blacklist:
- - odl_netvirt
- - juju_epc
- - tempest_full_parallel
- - rally_full
- max_scenario_criteria: 50
- test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
- log_level: ERROR
- jenkins_url: https://build.opnfv.org/ci/view/functest/job/
- # yamllint disable rule:truthy
- exclude_noha: False
- exclude_virtual: False
- # yamllint enable
-
-yardstick:
- test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
- log_level: ERROR
-
-storperf:
- test_list:
- - snia_steady_state
- log_level: ERROR
-
-qtip:
- log_level: ERROR
- period: 10
-
-bottlenecks:
- test_list:
- - posca_factor_ping
- - posca_factor_system_bandwidth
- log_level: ERROR
-
-vsperf:
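The reporting scripts read this file through reporting_utils.get_config(), which resolves dotted parameter paths (e.g. 'general.period') against the YAML tree; the file location is taken from the CONFIG_REPORTING_YAML environment variable. A minimal sketch of that lookup, assuming the file is available locally as reporting.yaml:

    # Sketch of the dotted-path lookup performed by get_parameter_from_yaml()
    # (defined in utils/reporting_utils.py further down); the local file name
    # is an assumption.
    import yaml

    with open("reporting.yaml") as config_file:
        config = yaml.safe_load(config_file)

    value = config
    for element in "general.period".split("."):
        value = value.get(element)
    print value  # 10
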
diff --git a/utils/test/reporting/reporting/storperf/__init__.py b/utils/test/reporting/reporting/storperf/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/storperf/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/storperf/reporting-status.py b/utils/test/reporting/reporting/storperf/reporting-status.py
deleted file mode 100644
index 103b80fd9..000000000
--- a/utils/test/reporting/reporting/storperf/reporting-status.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-import reporting.utils.scenarioResult as sr
-
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-logger = rp_utils.getLogger("Storperf-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention = %s days *" % PERIOD)
-logger.info("* *")
-logger.info("*******************************************")
-
-# retrieve the list of storperf tests
-storperf_tests = rp_utils.get_config('storperf.test_list')
-logger.info("Storperf tests: %s" % storperf_tests)
-
-# For all the versions
-for version in versions:
- # For all the installers
- for installer in installers:
- # get scenarios results data
- # for the moment we consider only 1 case snia_steady_state
- scenario_results = rp_utils.getScenarios("storperf",
- "snia_steady_state",
- installer,
- version)
- # logger.info("scenario_results: %s" % scenario_results)
-
- scenario_stats = rp_utils.getScenarioStats(scenario_results)
- logger.info("scenario_stats: %s" % scenario_stats)
- items = {}
- scenario_result_criteria = {}
-
- # From each scenarios get results list
- for s, s_result in scenario_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s", installer,
- version, s)
- ten_criteria = len(s_result)
-
- ten_score = 0
- for v in s_result:
- if "PASS" in v['criteria']:
- ten_score += 1
-
- logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
-
- four_score = 0
- try:
- LASTEST_TESTS = rp_utils.get_config(
- 'general.nb_iteration_tests_success_criteria')
- s_result.sort(key=lambda x: x['start_date'])
- four_result = s_result[-LASTEST_TESTS:]
- logger.debug("four_result: {}".format(four_result))
- logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
- # logger.debug("four_result: {}".format(four_result))
- four_criteria = len(four_result)
- for v in four_result:
- if "PASS" in v['criteria']:
- four_score += 1
- logger.info("4 Score: %s / %s " % (four_score,
- four_criteria))
-            except Exception:
- logger.error("Impossible to retrieve the four_score")
-
- try:
- s_status = (four_score * 100) / four_criteria
-            except ZeroDivisionError:
- s_status = 0
- logger.info("Score percent = %s" % str(s_status))
- s_four_score = str(four_score) + '/' + str(four_criteria)
- s_ten_score = str(ten_score) + '/' + str(ten_criteria)
- s_score_percent = str(s_status)
-
- logger.debug(" s_status: {}".format(s_status))
- if s_status == 100:
- logger.info(">>>>> scenario OK, save the information")
- else:
- logger.info(">>>> scenario not OK, last 4 iterations = %s, \
- last 10 days = %s" % (s_four_score, s_ten_score))
-
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- logger.debug("Build tag: %s" % build_tag)
-                s_url = rp_utils.getJenkinsUrl(build_tag)
- logger.info("last jenkins url: %s" % s_url)
-
- # Save daily results in a file
- path_validation_file = ("./display/" + version +
- "/storperf/scenario_history.txt")
-
- if not os.path.exists(path_validation_file):
- with open(path_validation_file, 'w') as f:
- info = 'date,scenario,installer,details,score\n'
- f.write(info)
-
- with open(path_validation_file, "a") as f:
- info = (reportingDate + "," + s + "," + installer +
- "," + s_ten_score + "," +
- str(s_score_percent) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(s_status,
- s_four_score,
- s_ten_score,
- s_score_percent,
- s_url)
-
- logger.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/storperf/template/index-status-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_result_criteria,
- installer=installer,
- period=PERIOD,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/storperf/status-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
diff --git a/utils/test/reporting/reporting/storperf/template/index-status-tmpl.html b/utils/test/reporting/reporting/storperf/template/index-status-tmpl.html
deleted file mode 100644
index e872272c3..000000000
--- a/utils/test/reporting/reporting/storperf/template/index-status-tmpl.html
+++ /dev/null
@@ -1,110 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_results.keys() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario in scenario_results.keys() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario in scenario_results.keys() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
- </script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Storperf status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-fuel.html">Fuel</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Last 4 Iterations</th>
- <th width="10%">Last 10 Days</th>
- </tr>
- {% for scenario,result in scenario_results.iteritems() -%}
- <tr class="tr-ok">
- <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
- <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
- </tr>
- {%- endfor %}
- </table>
- </div>
-
-
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/tests/__init__.py b/utils/test/reporting/reporting/tests/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/tests/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/tests/unit/__init__.py b/utils/test/reporting/reporting/tests/unit/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/tests/unit/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/tests/unit/utils/__init__.py b/utils/test/reporting/reporting/tests/unit/utils/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/tests/unit/utils/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/tests/unit/utils/test_utils.py b/utils/test/reporting/reporting/tests/unit/utils/test_utils.py
deleted file mode 100644
index 9614d74ff..000000000
--- a/utils/test/reporting/reporting/tests/unit/utils/test_utils.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-import unittest
-
-from reporting.utils import reporting_utils
-
-
-class reportingUtilsTesting(unittest.TestCase):
-
- logging.disable(logging.CRITICAL)
-
- def setUp(self):
- self.test = reporting_utils
-
- def test_foo(self):
- self.assertTrue(0 < 1)
-
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/utils/test/reporting/reporting/utils/__init__.py b/utils/test/reporting/reporting/utils/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/utils/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/utils/reporting_utils.py b/utils/test/reporting/reporting/utils/reporting_utils.py
deleted file mode 100644
index 58a0c6233..000000000
--- a/utils/test/reporting/reporting/utils/reporting_utils.py
+++ /dev/null
@@ -1,566 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import logging
-import json
-import os
-import requests
-import pdfkit
-import yaml
-
-from urllib2 import Request, urlopen, URLError
-
-
-# ----------------------------------------------------------
-#
-# YAML UTILS
-#
-# -----------------------------------------------------------
-def get_parameter_from_yaml(parameter, config_file):
- """
- Returns the value of a given parameter in file.yaml
- parameter must be given in string format with dots
- Example: general.openstack.image_name
- """
- with open(config_file) as my_file:
- file_yaml = yaml.safe_load(my_file)
- my_file.close()
- value = file_yaml
- for element in parameter.split("."):
- value = value.get(element)
- if value is None:
- raise ValueError("The parameter %s is not defined in"
- " reporting.yaml" % parameter)
- return value
-
-
-def get_config(parameter):
- """
- Get configuration parameter from yaml configuration file
- """
- yaml_ = os.environ["CONFIG_REPORTING_YAML"]
- return get_parameter_from_yaml(parameter, yaml_)
-
-
-# ----------------------------------------------------------
-#
-# LOGGER UTILS
-#
-# -----------------------------------------------------------
-def getLogger(module):
- """
- Get Logger
- """
- log_formatter = logging.Formatter("%(asctime)s [" +
- module +
- "] [%(levelname)-5.5s] %(message)s")
- logger = logging.getLogger()
- log_file = get_config('general.log.log_file')
- log_level = get_config('general.log.log_level')
-
- file_handler = logging.FileHandler("{0}/{1}".format('.', log_file))
- file_handler.setFormatter(log_formatter)
- logger.addHandler(file_handler)
-
- console_handler = logging.StreamHandler()
- console_handler.setFormatter(log_formatter)
- logger.addHandler(console_handler)
- logger.setLevel(log_level)
- return logger
-
-
-# ----------------------------------------------------------
-#
-# REPORTING UTILS
-#
-# -----------------------------------------------------------
-def getApiResults(case, installer, scenario, version):
- """
- Get Results by calling the API
- """
- results = json.dumps([])
- # to remove proxy (to be removed at the end for local test only)
- # proxy_handler = urllib2.ProxyHandler({})
- # opener = urllib2.build_opener(proxy_handler)
- # urllib2.install_opener(opener)
- # url = "http://127.0.0.1:8000/results?case=" + case + \
- # "&period=30&installer=" + installer
- period = get_config('general.period')
- url_base = get_config('testapi.url')
- nb_tests = get_config('general.nb_iteration_tests_success_criteria')
-
- url = ("http://" + url_base + "?case=" + case +
- "&period=" + str(period) + "&installer=" + installer +
- "&scenario=" + scenario + "&version=" + version +
- "&last=" + str(nb_tests))
- request = Request(url)
-
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError:
-        print "Error when retrieving results from API"
-
- return results
-
-
-def getScenarios(project, case, installer, version):
- """
- Get the list of Scenarios
- """
- test_results = None
- scenario_results = None
- period = get_config('general.period')
- url_base = get_config('testapi.url')
-
- url = ("http://" + url_base +
- "?installer=" + installer +
- "&period=" + str(period))
-
- if version is not None:
- url += "&version=" + version
-
- if project is not None:
- url += "&project=" + project
-
- if case is not None:
- url += "&case=" + case
-
- try:
- request = Request(url)
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- test_results = results['results']
- try:
- page = results['pagination']['total_pages']
- if page > 1:
- test_results = []
- for i in range(1, page + 1):
- url_page = url + "&page=" + str(i)
- request = Request(url_page)
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- test_results += results['results']
- except KeyError:
- print "No pagination detected"
- except URLError as err:
- print 'Got an error code: {}'.format(err)
-
- if test_results is not None:
- test_results.reverse()
- scenario_results = {}
-
- for my_result in test_results:
- # Retrieve all the scenarios per installer
- if not my_result['scenario'] in scenario_results.keys():
- scenario_results[my_result['scenario']] = []
- # Do we consider results from virtual pods ...
- # Do we consider results for non HA scenarios...
- exclude_virtual_pod = get_config('functest.exclude_virtual')
- exclude_noha = get_config('functest.exclude_noha')
- if ((exclude_virtual_pod and "virtual" in my_result['pod_name']) or
- (exclude_noha and "noha" in my_result['scenario'])):
- print "exclude virtual pod results..."
- else:
- scenario_results[my_result['scenario']].append(my_result)
-
- return scenario_results
-
-
-def getScenarioStats(scenario_results):
- """
-    Get the number of occurrences of each scenario over the defined PERIOD
- """
- scenario_stats = {}
- for res_k, res_v in scenario_results.iteritems():
- scenario_stats[res_k] = len(res_v)
- return scenario_stats
-
-
-def getScenarioStatus(installer, version):
- """
-    Get the status of a scenario for Yardstick
-    """
-    test_results = None
-    period = get_config('general.period')
- url_base = get_config('testapi.url')
-
- url = ("http://" + url_base + "?case=scenario_status" +
- "&installer=" + installer +
- "&version=" + version + "&period=" + str(period))
- request = Request(url)
-
- try:
- response = urlopen(request)
- k = response.read()
- response.close()
- results = json.loads(k)
- test_results = results['results']
- except URLError:
- print "GetScenarioStatus: error when calling the API"
-
- x86 = 'x86'
- aarch64 = 'aarch64'
- scenario_results = {x86: {}, aarch64: {}}
- result_dict = {x86: {}, aarch64: {}}
- if test_results is not None:
- for test_r in test_results:
- if (test_r['stop_date'] != 'None' and
- test_r['criteria'] is not None):
- scenario_name = test_r['scenario']
- if 'arm' in test_r['pod_name']:
- if not test_r['scenario'] in scenario_results[aarch64]:
- scenario_results[aarch64][scenario_name] = []
- scenario_results[aarch64][scenario_name].append(test_r)
- else:
- if not test_r['scenario'] in scenario_results[x86]:
- scenario_results[x86][scenario_name] = []
- scenario_results[x86][scenario_name].append(test_r)
-
- for key in scenario_results:
- for scen_k, scen_v in scenario_results[key].items():
- # scenario_results[k] = v[:LASTEST_TESTS]
- s_list = []
- for element in scen_v:
- if element['criteria'] == 'PASS':
- s_list.append(1)
- else:
- s_list.append(0)
- result_dict[key][scen_k] = s_list
-
- # return scenario_results
- return result_dict
-
-
-def getQtipResults(version, installer):
- """
- Get QTIP results
- """
-    results = None
-    period = get_config('qtip.period')
- url_base = get_config('testapi.url')
-
- url = ("http://" + url_base + "?project=qtip" +
- "&installer=" + installer +
- "&version=" + version + "&period=" + str(period))
- request = Request(url)
-
- try:
- response = urlopen(request)
- k = response.read()
- response.close()
- results = json.loads(k)['results']
- except URLError as err:
- print 'Got an error code: {}'.format(err)
-
- result_dict = {}
- if results:
- for r in results:
- key = '{}/{}'.format(r['pod_name'], r['scenario'])
- if key not in result_dict.keys():
- result_dict[key] = []
- result_dict[key].append(r['details']['score'])
-
- # return scenario_results
- return result_dict
-
-
-def getNbtestOk(results):
- """
-    Count the number of passed tests, based on the default criteria (PASS)
- """
- nb_test_ok = 0
- for my_result in results:
- for res_k, res_v in my_result.iteritems():
- try:
- if "PASS" in res_v:
- nb_test_ok += 1
- except Exception:
- print "Cannot retrieve test status"
- return nb_test_ok
-
-
-def getCaseScore(testCase, installer, scenario, version):
- """
- Get Result for a given Functest Testcase
- """
- # retrieve raw results
- results = getApiResults(testCase, installer, scenario, version)
- # let's concentrate on test results only
- test_results = results['results']
-
- # if results found, analyze them
- if test_results is not None:
- test_results.reverse()
-
- scenario_results = []
-
- # print " ---------------- "
- # print test_results
- # print " ---------------- "
- # print "nb of results:" + str(len(test_results))
-
- for res_r in test_results:
- # print r["start_date"]
- # print r["criteria"]
- scenario_results.append({res_r["start_date"]: res_r["criteria"]})
- # sort results
- scenario_results.sort()
- # 4 levels for the results
- # 3: 4+ consecutive runs passing the success criteria
- # 2: <4 successful consecutive runs but passing the criteria
- # 1: close to pass the success criteria
- # 0: 0% success, not passing
- # -1: no run available
- test_result_indicator = 0
- nbTestOk = getNbtestOk(scenario_results)
-
- # print "Nb test OK (last 10 days):"+ str(nbTestOk)
- # check that we have at least 4 runs
- if len(scenario_results) < 1:
- # No results available
- test_result_indicator = -1
- elif nbTestOk < 1:
- test_result_indicator = 0
- elif nbTestOk < 2:
- test_result_indicator = 1
- else:
- # Test the last 4 run
- if len(scenario_results) > 3:
- last4runResults = scenario_results[-4:]
- nbTestOkLast4 = getNbtestOk(last4runResults)
- # print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
- if nbTestOkLast4 > 3:
- test_result_indicator = 3
- else:
- test_result_indicator = 2
- else:
- test_result_indicator = 2
- return test_result_indicator
-
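getCaseScore() reduces the last runs of a test case to the 4-level indicator documented in the comments above (-1 when no run is available). A short sketch of the counting on hypothetical results, covering the branch where at least two runs passed and the last four runs decide between levels 2 and 3:

    # Hypothetical criteria of the last runs over the period, oldest first.
    runs = ["PASS", "PASS", "FAIL", "PASS", "PASS", "PASS", "PASS"]
    nb_ok = sum(1 for criteria in runs if "PASS" in criteria)            # 6
    nb_ok_last_4 = sum(1 for criteria in runs[-4:] if "PASS" in criteria)
    # nb_ok >= 2, so only the last 4 runs matter:
    # 4 consecutive PASS -> indicator 3, otherwise indicator 2
    indicator = 3 if nb_ok_last_4 > 3 else 2
    print indicator  # 3
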
-
-def getCaseScoreFromBuildTag(testCase, s_results):
- """
- Get Results for a given Functest Testcase with arch filtering
- """
- url_base = get_config('testapi.url')
- nb_tests = get_config('general.nb_iteration_tests_success_criteria')
- test_result_indicator = 0
- # architecture is not a result field...so we cannot use getResult as it is
- res_matrix = []
- try:
- for s_result in s_results:
- build_tag = s_result['build_tag']
- d = s_result['start_date']
- res_matrix.append({'date': d,
- 'build_tag': build_tag})
- # sort res_matrix
- filter_res_matrix = sorted(res_matrix, key=lambda k: k['date'],
- reverse=True)[:nb_tests]
- for my_res in filter_res_matrix:
- url = ("http://" + url_base + "?case=" + testCase +
- "&build_tag=" + my_res['build_tag'])
- request = Request(url)
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- if "PASS" in results['results'][0]['criteria']:
- test_result_indicator += 1
-    except Exception:
- print "No results found for this case"
- if test_result_indicator > 2:
- test_result_indicator = test_result_indicator - 1
-
- return test_result_indicator
-
-
-def getJenkinsUrl(build_tag):
- """
-    Get the Jenkins URL corresponding to the last CI test run
- e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246
- id = 246
- jenkins-functest-compass-huawei-pod5-daily-master-136
- id = 136
- note it is linked to jenkins format
- if this format changes...function to be adapted....
- """
-    url_base = get_config('functest.jenkins_url')
-    jenkins_url = None
-    try:
- build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
- url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
- "/" + str(build_id[0]))
- jenkins_url = url_base + url_id + "/console"
- except Exception:
- print 'Impossible to get jenkins url:'
-
- if "jenkins-" not in build_tag:
- jenkins_url = None
-
- return jenkins_url
-
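For the second build_tag quoted in the docstring above, the slicing resolves as follows (values taken directly from that example):

    build_tag = "jenkins-functest-compass-huawei-pod5-daily-master-136"
    build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]   # [136]
    url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
              "/" + str(build_id[0]))
    print url_id  # functest-compass-huawei-pod5-daily-master/136
    # jenkins_url = <functest.jenkins_url> + url_id + "/console"
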
-
-def getScenarioPercent(scenario_score, scenario_criteria):
- """
- Get success rate of the scenario (in %)
- """
- score = 0.0
- try:
- score = float(scenario_score) / float(scenario_criteria) * 100
- except Exception:
- print 'Impossible to calculate the percentage score'
- return score
-
-
-# *********
-# Functest
-# *********
-def getFunctestConfig(version=""):
- """
- Get Functest configuration
- """
- config_file = get_config('functest.test_conf') + version
- response = requests.get(config_file)
- return yaml.safe_load(response.text)
-
-
-def getArchitectures(scenario_results):
- """
- Get software architecture (x86 or Aarch64)
- """
- supported_arch = ['x86']
- if len(scenario_results) > 0:
- for scenario_result in scenario_results.values():
- for value in scenario_result:
- if "armband" in value['build_tag']:
- supported_arch.append('aarch64')
- return supported_arch
- return supported_arch
-
-
-def filterArchitecture(results, architecture):
- """
- Restrict the list of results based on given architecture
- """
- filtered_results = {}
- for name, res in results.items():
- filtered_values = []
- for value in res:
-            if architecture == "x86":
- # drop aarch64 results
- if ("armband" not in value['build_tag']):
- filtered_values.append(value)
-            elif architecture == "aarch64":
- # drop x86 results
- if ("armband" in value['build_tag']):
- filtered_values.append(value)
- if (len(filtered_values) > 0):
- filtered_results[name] = filtered_values
- return filtered_results
-
-
-# *********
-# Yardstick
-# *********
-def subfind(given_list, pattern_list):
- """
- Yardstick util function
- """
- LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
- for i in range(len(given_list)):
- if given_list[i] == pattern_list[0] and \
- given_list[i:i + LASTEST_TESTS] == pattern_list:
- return True
- return False
-
-
-def _get_percent(status):
- """
- Yardstick util function to calculate success rate
- """
- if status * 100 % 6:
- return round(float(status) * 100 / 6, 1)
- else:
- return status * 100 / 6
-
-
-def get_percent(four_list, ten_list):
- """
- Yardstick util function to calculate success rate
- """
- four_score = 0
- ten_score = 0
-
- for res_v in four_list:
- four_score += res_v
- for res_v in ten_list:
- ten_score += res_v
-
- LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
- if four_score == LASTEST_TESTS:
- status = 6
- elif subfind(ten_list, [1, 1, 1, 1]):
- status = 5
- elif ten_score == 0:
- status = 0
- else:
- status = four_score + 1
-
- return _get_percent(status)
-
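get_percent() and _get_percent() above map the last-4 and last-10 run vectors onto a six-step score expressed as a percentage (status * 100 / 6, rounded to one decimal when it does not divide evenly). A short sketch with hypothetical run vectors, assuming nb_iteration_tests_success_criteria is 4 as set in reporting.yaml:

    # 1 = PASS, 0 = FAIL, newest results last; the vectors are hypothetical.
    ten_list = [0, 1, 1, 1, 1, 0, 1, 0, 1, 1]

    print get_percent([1, 1, 1, 1], ten_list)   # 100  (all of the last 4 runs passed)
    print get_percent([1, 1, 0, 1], ten_list)   # 83.3 (ten_list holds 4 consecutive passes)
    print get_percent([0, 0, 0, 0], [0] * 10)   # 0    (no pass at all over the period)
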
-
-def _test():
- """
- Yardstick util function (test)
- """
- status = getScenarioStatus("compass", "master")
- print "status:++++++++++++++++++++++++"
- print json.dumps(status, indent=4)
-
-
-# ----------------------------------------------------------
-#
-# Export
-#
-# -----------------------------------------------------------
-
-def export_csv(scenario_file_name, installer, version):
- """
- Generate sub files based on scenario_history.txt
- """
- scenario_installer_file_name = ("./display/" + version +
- "/functest/scenario_history_" +
- installer + ".csv")
- scenario_installer_file = open(scenario_installer_file_name, "a")
- with open(scenario_file_name, "r") as scenario_file:
- scenario_installer_file.write("date,scenario,installer,detail,score\n")
- for line in scenario_file:
- if installer in line:
- scenario_installer_file.write(line)
-    scenario_installer_file.close()
-
-
-def generate_csv(scenario_file):
- """
- Generate sub files based on scenario_history.txt
- """
- import shutil
- csv_file = scenario_file.replace('txt', 'csv')
- shutil.copy2(scenario_file, csv_file)
-
-
-def export_pdf(pdf_path, pdf_doc_name):
- """
- Export results to pdf
- """
- try:
- pdfkit.from_file(pdf_path, pdf_doc_name)
- except IOError:
- print "Error but pdf generated anyway..."
- except Exception:
- print "impossible to generate PDF"
diff --git a/utils/test/reporting/reporting/utils/scenarioResult.py b/utils/test/reporting/reporting/utils/scenarioResult.py
deleted file mode 100644
index 6029d7f42..000000000
--- a/utils/test/reporting/reporting/utils/scenarioResult.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-class ScenarioResult(object):
- def __init__(self, status, four_days_score='', ten_days_score='',
- score_percent=0.0, last_url=''):
- self.status = status
- self.four_days_score = four_days_score
- self.ten_days_score = ten_days_score
- self.score_percent = score_percent
- self.last_url = last_url
-
- def getStatus(self):
- return self.status
-
- def getTenDaysScore(self):
- return self.ten_days_score
-
- def getFourDaysScore(self):
- return self.four_days_score
-
- def getScorePercent(self):
- return self.score_percent
-
- def getLastUrl(self):
- return self.last_url
diff --git a/utils/test/reporting/reporting/vsperf/__init__.py b/utils/test/reporting/reporting/vsperf/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/vsperf/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/vsperf/reporting-status.py b/utils/test/reporting/reporting/vsperf/reporting-status.py
deleted file mode 100644
index fc4cc677d..000000000
--- a/utils/test/reporting/reporting/vsperf/reporting-status.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-import reporting.utils.scenarioResult as sr
-
-installers = rp_utils.get_config('general.installers')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-logger = rp_utils.getLogger("Vsperf-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention = %s days *" % PERIOD)
-logger.info("* *")
-logger.info("*******************************************")
-
-# vsperf results are only reported for the master version
-versions = {'master'}
-
-# For all the versions
-for version in versions:
- # For all the installers
- for installer in installers:
- scenario_results = rp_utils.getScenarios("vsperf",
- None,
- installer,
- None)
- items = {}
- scenario_result_criteria = {}
- logger.info("installer %s, version %s, scenario ", installer, version)
-
- # From each scenarios get results list
- for s, s_result in scenario_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s", installer,
- version, s)
- ten_criteria = len(s_result)
-
- ten_score = 0
- for v in s_result:
- if "PASS" in v['criteria']:
- ten_score += 1
-
- logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
-
- four_score = 0
- try:
- LASTEST_TESTS = rp_utils.get_config(
- 'general.nb_iteration_tests_success_criteria')
- s_result.sort(key=lambda x: x['start_date'])
- four_result = s_result[-LASTEST_TESTS:]
- logger.debug("four_result: {}".format(four_result))
- logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
- # logger.debug("four_result: {}".format(four_result))
- four_criteria = len(four_result)
- for v in four_result:
- if "PASS" in v['criteria']:
- four_score += 1
- logger.info("4 Score: %s / %s " % (four_score,
- four_criteria))
- except Exception:
- logger.error("Impossible to retrieve the four_score")
-
- try:
- s_status = (four_score * 100) / four_criteria
- except ZeroDivisionError:
- s_status = 0
- logger.info("Score percent = %s" % str(s_status))
- s_four_score = str(four_score) + '/' + str(four_criteria)
- s_ten_score = str(ten_score) + '/' + str(ten_criteria)
- s_score_percent = str(s_status)
-
- logger.debug(" s_status: {}".format(s_status))
- if s_status == 100:
- logger.info(">>>>> scenario OK, save the information")
- else:
- logger.info(">>>> scenario not OK, last 4 iterations = %s, \
- last 10 days = %s" % (s_four_score, s_ten_score))
-
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- logger.debug("Build tag: %s" % build_tag)
-                s_url = rp_utils.getJenkinsUrl(build_tag)
- logger.info("last jenkins url: %s" % s_url)
-
- # Save daily results in a file
- path_validation_file = ("./display/" + version +
- "/vsperf/scenario_history.txt")
-
- if not os.path.exists(path_validation_file):
- with open(path_validation_file, 'w') as f:
- info = 'date,scenario,installer,details,score\n'
- f.write(info)
-
- with open(path_validation_file, "a") as f:
- info = (reportingDate + "," + s + "," + installer +
- "," + s_ten_score + "," +
- str(s_score_percent) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(s_status,
- s_four_score,
- s_ten_score,
- s_score_percent,
- s_url)
-
- logger.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/vsperf/template/index-status-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_result_criteria,
- installer=installer,
- period=PERIOD,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/vsperf/status-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
diff --git a/utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html b/utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html
deleted file mode 100644
index 7e06ef66b..000000000
--- a/utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html
+++ /dev/null
@@ -1,114 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_results.keys() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario in scenario_results.keys() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario in scenario_results.keys() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
- </script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Vsperf status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-fuel.html">Fuel</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-        <div><p>Reported values represent the percentage of completed
-        CI tests during the reporting period, where results
-        were communicated to the Test Database.</p></div>
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Last 4 Iterations</th>
- <th width="10%">Last 10 Days</th>
- </tr>
- {% for scenario,result in scenario_results.iteritems() -%}
- <tr class="tr-ok">
- <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
- <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
- </tr>
- {%- endfor %}
- </table>
- </div>
-
-
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/yardstick/__init__.py b/utils/test/reporting/reporting/yardstick/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/yardstick/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_0.png b/utils/test/reporting/reporting/yardstick/img/gauge_0.png
deleted file mode 100644
index ecefc0e66..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_0.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_100.png b/utils/test/reporting/reporting/yardstick/img/gauge_100.png
deleted file mode 100644
index e199e1561..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_100.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_16.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_16.7.png
deleted file mode 100644
index 3e3993c3b..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_16.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_25.png b/utils/test/reporting/reporting/yardstick/img/gauge_25.png
deleted file mode 100644
index 4923659b9..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_25.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_33.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_33.3.png
deleted file mode 100644
index 364574b4a..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_33.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_41.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_41.7.png
deleted file mode 100644
index 8c3e910fa..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_41.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_50.png b/utils/test/reporting/reporting/yardstick/img/gauge_50.png
deleted file mode 100644
index 2874b9fcf..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_50.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_58.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_58.3.png
deleted file mode 100644
index beedc8aa9..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_58.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_66.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_66.7.png
deleted file mode 100644
index 93f44d133..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_66.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_75.png b/utils/test/reporting/reporting/yardstick/img/gauge_75.png
deleted file mode 100644
index 9fc261ff8..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_75.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_8.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_8.3.png
deleted file mode 100644
index 59f86571e..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_8.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_83.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_83.3.png
deleted file mode 100644
index 27ae4ec54..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_83.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_91.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_91.7.png
deleted file mode 100644
index 280865714..000000000
--- a/utils/test/reporting/reporting/yardstick/img/gauge_91.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/icon-nok.png b/utils/test/reporting/reporting/yardstick/img/icon-nok.png
deleted file mode 100644
index 526b5294b..000000000
--- a/utils/test/reporting/reporting/yardstick/img/icon-nok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/icon-ok.png b/utils/test/reporting/reporting/yardstick/img/icon-ok.png
deleted file mode 100644
index 3a9de2e89..000000000
--- a/utils/test/reporting/reporting/yardstick/img/icon-ok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-clear.png b/utils/test/reporting/reporting/yardstick/img/weather-clear.png
deleted file mode 100644
index a0d967750..000000000
--- a/utils/test/reporting/reporting/yardstick/img/weather-clear.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png b/utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png
deleted file mode 100644
index acfa78398..000000000
--- a/utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-overcast.png b/utils/test/reporting/reporting/yardstick/img/weather-overcast.png
deleted file mode 100644
index 4296246d0..000000000
--- a/utils/test/reporting/reporting/yardstick/img/weather-overcast.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-storm.png b/utils/test/reporting/reporting/yardstick/img/weather-storm.png
deleted file mode 100644
index 956f0e20f..000000000
--- a/utils/test/reporting/reporting/yardstick/img/weather-storm.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/yardstick/index.html b/utils/test/reporting/reporting/yardstick/index.html
deleted file mode 100644
index 488f1421d..000000000
--- a/utils/test/reporting/reporting/yardstick/index.html
+++ /dev/null
@@ -1,51 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Yardstick reporting page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-main">
- <h2>Yardstick</h2>
- Yardstick is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
- <br>The Yardstick framework is deployed in several OPNFV community labs.
- <br>It is installer, infrastructure and application independent.
-
- <h2>Useful Links</h2>
- <li><a href="https://wiki.opnfv.org/download/attachments/5734608/yardstick%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">Yardstick in Depth</a></li>
- <li><a href="https://git.opnfv.org/cgit/yardstick">Yardstick Repo</a></li>
- <li><a href="https://wiki.opnfv.org/display/yardstick">Yardstick Project</a></li>
- <li><a href="https://build.opnfv.org/ci/view/yardstick/">Yardstick Jenkins page</a></li>
- <li><a href="https://jira.opnfv.org/browse/YARDSTICK-119?jql=project%20%3D%20YARDSTICK">JIRA</a></li>
-
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/yardstick/reporting-status.py b/utils/test/reporting/reporting/yardstick/reporting-status.py
deleted file mode 100644
index 10cacf006..000000000
--- a/utils/test/reporting/reporting/yardstick/reporting-status.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-
-import jinja2
-
-from reporting.utils.scenarioResult import ScenarioResult
-from reporting.utils import reporting_utils as utils
-from scenarios import config as blacklist
-
-
-# Logger
-LOG = utils.getLogger("Yardstick-Status")
-
-
-def get_scenario_data(version, installer):
- scenarios = utils.getScenarioStatus(installer, version)
-
- if 'colorado' == version:
- data = utils.getScenarioStatus(installer, 'stable/colorado')
- for archi, value in data.items():
- for k, v in value.items():
- if k not in scenarios[archi]:
- scenarios[archi][k] = []
- scenarios[archi][k].extend(data[archi][k])
-
- for archi, value in scenarios.items():
- for scenario in value:
- if installer in blacklist and scenario in blacklist[installer]:
- scenarios[archi].pop(scenario)
-
- return scenarios
-
-
-def write_history_data(version,
- scenario,
- installer,
- archi,
- ten_score,
- percent):
- # Save daily results in a file
- history_file = './display/{}/yardstick/scenario_history.txt'.format(
- version)
-
- if not os.path.exists(history_file):
- with open(history_file, 'w') as f:
- f.write('date,scenario,installer,details,score\n')
-
- date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
- if installer == 'fuel':
- installer = '{}@{}'.format(installer, archi)
- with open(history_file, "a") as f:
- info = '{},{},{},{},{}\n'.format(date,
- scenario,
- installer,
- ten_score,
- percent)
- f.write(info)
-
-
-def generate_page(scenario_data, installer, period, version, architecture):
- date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
- templateLoader = jinja2.FileSystemLoader(".")
- template_env = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- template_file = "./reporting/yardstick/template/index-status-tmpl.html"
- template = template_env.get_template(template_file)
-
- if installer == 'fuel':
- installer = '{}@{}'.format(installer, architecture)
-
- output_text = template.render(scenario_results=scenario_data,
- installer=installer,
- period=period,
- version=version,
- date=date)
-
- page_file = './display/{}/yardstick/status-{}.html'.format(version,
- installer)
- with open(page_file, 'wb') as f:
- f.write(output_text)
-
-
-def do_statistic(data):
- ten_score = 0
- for v in data:
- ten_score += v
-
- last_count = utils.get_config(
- 'general.nb_iteration_tests_success_criteria')
- last_data = data[:last_count]
- last_score = 0
- for v in last_data:
- last_score += v
-
- percent = utils.get_percent(last_data, data)
- status = str(percent)
- last_score = '{}/{}'.format(last_score, len(last_data))
- ten_score = '{}/{}'.format(ten_score, len(data))
-
- if '100' == status:
- LOG.info(">>>>> scenario OK, save the information")
- else:
- LOG.info(">>>> scenario not OK, last 4 iterations = %s, \
- last 10 days = %s" % (last_score, ten_score))
-
- return last_score, ten_score, percent, status
-
-
-def generate_reporting_page(version, installer, archi, scenarios, period):
- scenario_data = {}
-
- # From each scenarios get results list
- for scenario, data in scenarios.items():
- LOG.info("---------------------------------")
-
- LOG.info("installer %s, version %s, scenario %s",
- installer,
- version,
- scenario)
- last_score, ten_score, percent, status = do_statistic(data)
- write_history_data(version,
- scenario,
- installer,
- archi,
- ten_score,
- percent)
- scenario_data[scenario] = ScenarioResult(status,
- last_score,
- ten_score,
- percent)
-
- LOG.info("--------------------------")
- if scenario_data:
- generate_page(scenario_data, installer, period, version, archi)
-
-
-def main():
- installers = utils.get_config('general.installers')
- versions = utils.get_config('general.versions')
- period = utils.get_config('general.period')
-
- LOG.info("*******************************************")
- LOG.info("* Generating reporting scenario status *")
- LOG.info("* Data retention = %s days *" % period)
- LOG.info("* *")
- LOG.info("*******************************************")
-
- # For all the versions
- for version in versions:
- # For all the installers
- for installer in installers:
- # get scenarios results data
- scenarios = get_scenario_data(version, installer)
- for k, v in scenarios.items():
- generate_reporting_page(version, installer, k, v, period)
-
-
-if __name__ == '__main__':
- main()
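
For context on the removed reporting-status.py above: do_statistic() reduces a scenario's run history to two ratios ("last 4 iterations" and "last 10 days") plus a percentage that drives the status gauge, while write_history_data() appends one CSV row per scenario to display/<version>/yardstick/scenario_history.txt for the trend line. Below is a minimal, self-contained sketch of that scoring; the percentage formula is an assumption for illustration (the real helper, utils.get_percent(), lives in reporting.utils.reporting_utils and is not part of this diff).

# Sketch of the scoring done by do_statistic(); the percent formula
# (passing share of the most recent window) is assumed, not taken
# verbatim from reporting_utils.
def score_scenario(results, last_count=4):
    """results: list of 0/1 run outcomes, newest first.
    Returns (last_score, ten_score, percent)."""
    last_data = results[:last_count]              # most recent iterations
    last_score = '{}/{}'.format(sum(last_data), len(last_data))
    ten_score = '{}/{}'.format(sum(results), len(results))
    percent = 100.0 * sum(last_data) / len(last_data) if last_data else 0
    return last_score, ten_score, percent

# Example: 3 of the last 4 runs passed, 7 of 10 overall.
print(score_scenario([1, 1, 0, 1, 0, 1, 1, 0, 1, 1]))
# ('3/4', '7/10', 75.0)
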
diff --git a/utils/test/reporting/reporting/yardstick/scenarios.py b/utils/test/reporting/reporting/yardstick/scenarios.py
deleted file mode 100644
index 7504493b2..000000000
--- a/utils/test/reporting/reporting/yardstick/scenarios.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import requests
-import yaml
-
-import reporting.utils.reporting_utils as rp_utils
-
-yardstick_conf = rp_utils.get_config('yardstick.test_conf')
-response = requests.get(yardstick_conf)
-yaml_file = yaml.safe_load(response.text)
-reporting = yaml_file.get('reporting')
-
-config = {}
-
-for element in reporting:
- name = element['name']
- scenarios = element['scenario']
- for s in scenarios:
- if name not in config:
- config[name] = {}
- config[name][s] = True
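
The removed scenarios.py turns the 'reporting' section of the Yardstick test configuration (fetched from the URL configured as yardstick.test_conf) into a per-installer blacklist dict keyed by scenario name, which reporting-status.py imports as "blacklist". A short sketch of the input shape this loop expects and the dict it produces follows; the installer and scenario names are made up for illustration.

# Shape inferred from the parsing loop above; requires PyYAML.
import yaml

sample = """
reporting:
  - name: joid
    scenario:
      - os-nosdn-lxd-ha
      - os-nosdn-lxd-noha
"""
config = {}
for element in yaml.safe_load(sample)['reporting']:
    name = element['name']
    for s in element['scenario']:
        config.setdefault(name, {})[s] = True

print(config)
# {'joid': {'os-nosdn-lxd-ha': True, 'os-nosdn-lxd-noha': True}}
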
diff --git a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
deleted file mode 100644
index 3db32e531..000000000
--- a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
+++ /dev/null
@@ -1,111 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_results.keys() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario in scenario_results.keys() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario in scenario_results.keys() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
- </script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Yardstick status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-fuel@x86.html">Fuel@x86</a></li>
- <li><a href="status-fuel@aarch64.html">Fuel@aarch64</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Last 4 Iterations</th>
- <th width="10%">Last 10 Days</th>
- </tr>
- {% for scenario,result in scenario_results.iteritems() -%}
- <tr class="tr-ok">
- <td>{{scenario}}</td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
- <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
- </tr>
- {%- endfor %}
- </table>
- </div>
-
-
- </div>
- <div class="col-md-1"></div>
-</div>
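
The removed template above renders one gauge and one d3 trend line per scenario; the trend data comes from ./scenario_history.txt, filtered client-side by scenario and installer name. A small sketch of the file it expects is shown below, using the header written by write_history_data() in the removed reporting-status.py; the dates, scenario names and scores are illustrative only.

# Produce a sample scenario_history.txt for the d3.csv() call in the
# template; all values here are placeholders.
rows = [
    'date,scenario,installer,details,score',
    '2017-11-07 14:00,os-nosdn-nofeature-ha,fuel@x86,9/10,100',
    '2017-11-07 14:00,os-odl_l2-nofeature-ha,fuel@x86,7/10,75',
]
with open('scenario_history.txt', 'w') as f:
    f.write('\n'.join(rows) + '\n')
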