Diffstat (limited to 'utils/test')
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-status.py                24
-rw-r--r--  utils/test/reporting/reporting/utils/reporting_utils.py                     38
-rw-r--r--  utils/test/reporting/reporting/yardstick/reporting-status.py               256
-rw-r--r--  utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html     3
-rw-r--r--  utils/test/testapi/htmlize/doc-build.sh                                      21
-rw-r--r--  utils/test/testapi/htmlize/htmlize.py                                        57
-rw-r--r--  utils/test/testapi/htmlize/push-doc-artifact.sh                              27
7 files changed, 197 insertions, 229 deletions
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
index 267803e6c..c71e00f3b 100755
--- a/utils/test/reporting/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/reporting/functest/reporting-status.py
@@ -230,12 +230,26 @@ for version in versions:
# Evaluate the results for scenario validation
# **********************************************
# the validation criteria = nb runnable tests x 3
- # because each test case = 0,1,2 or 3
- scenario_criteria = nb_test_runnable_for_this_scenario * 3
- # if 0 runnable tests set criteria at a high value
- if scenario_criteria < 1:
- scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
+ # because each test case can get
+ # 0 points (never PASS)
+ # 1 point (at least one PASS over the time window)
+ # 2 points (more than one PASS but a FAIL within the last 4 runs)
+ # 3 points (PASS on each of the last 4 iterations)
+ # e.g. 1 scenario = 10 cases
+ # 1 iteration: max score = 10 (10x1)
+ # 2 iterations: max score = 20 (10x2)
+ # 3 iterations: max score = 20 (10x2)
+ # 4 or more iterations: max score = 30 (10x3)
+ if len(s_result) > 3:
+ k_score = 3
+ elif len(s_result) < 2:
+ k_score = 1
+ else:
+ k_score = 2
+
+ scenario_criteria = nb_test_runnable_for_this_scenario*k_score
+ # score for reporting
s_score = str(scenario_score) + "/" + str(scenario_criteria)
s_score_percent = rp_utils.getScenarioPercent(
scenario_score,
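
For reference, a minimal standalone sketch of the weighting rule the hunk above introduces; the helper name and the example figures are illustrative and not part of the patch:

# Sketch only: how scenario_criteria is derived from the number of runnable
# tests and the number of observed iterations.
def compute_scenario_criteria(nb_runnable_tests, nb_iterations):
    if nb_iterations > 3:
        k_score = 3      # 4 or more iterations: each case can earn up to 3 points
    elif nb_iterations < 2:
        k_score = 1      # a single iteration: each case can earn at most 1 point
    else:
        k_score = 2      # 2 or 3 iterations: each case can earn at most 2 points
    return nb_runnable_tests * k_score

# e.g. 10 runnable cases observed over 4 iterations -> 10 * 3 = 30
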
diff --git a/utils/test/reporting/reporting/utils/reporting_utils.py b/utils/test/reporting/reporting/utils/reporting_utils.py
index 235bd6ef9..65267ca11 100644
--- a/utils/test/reporting/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/reporting/utils/reporting_utils.py
@@ -186,7 +186,6 @@ def getScenarioStats(scenario_results):
def getScenarioStatus(installer, version):
"""
Get the status of a scenario for Yardstick
- they used criteria SUCCESS (default: PASS)
"""
period = get_config('general.period')
url_base = get_config('testapi.url')
@@ -205,25 +204,34 @@ def getScenarioStatus(installer, version):
except URLError:
print "GetScenarioStatus: error when calling the API"
- scenario_results = {}
- result_dict = {}
+ x86 = 'x86'
+ aarch64 = 'aarch64'
+ scenario_results = {x86: {}, aarch64: {}}
+ result_dict = {x86: {}, aarch64: {}}
if test_results is not None:
for test_r in test_results:
if (test_r['stop_date'] != 'None' and
test_r['criteria'] is not None):
- if not test_r['scenario'] in scenario_results.keys():
- scenario_results[test_r['scenario']] = []
- scenario_results[test_r['scenario']].append(test_r)
-
- for scen_k, scen_v in scenario_results.items():
- # scenario_results[k] = v[:LASTEST_TESTS]
- s_list = []
- for element in scen_v:
- if element['criteria'] == 'SUCCESS':
- s_list.append(1)
+ scenario_name = test_r['scenario']
+ if 'arm' in test_r['pod_name']:
+ if not test_r['scenario'] in scenario_results[aarch64]:
+ scenario_results[aarch64][scenario_name] = []
+ scenario_results[aarch64][scenario_name].append(test_r)
else:
- s_list.append(0)
- result_dict[scen_k] = s_list
+ if not test_r['scenario'] in scenario_results[x86]:
+ scenario_results[x86][scenario_name] = []
+ scenario_results[x86][scenario_name].append(test_r)
+
+ for key in scenario_results:
+ for scen_k, scen_v in scenario_results[key].items():
+ # scenario_results[k] = v[:LASTEST_TESTS]
+ s_list = []
+ for element in scen_v:
+ if element['criteria'] == 'PASS':
+ s_list.append(1)
+ else:
+ s_list.append(0)
+ result_dict[key][scen_k] = s_list
# return scenario_results
return result_dict
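
A hedged sketch of the structure the rewritten getScenarioStatus() now returns: results keyed first by architecture, then by scenario name, with 1 for a PASS run and 0 otherwise. The scenario names and run outcomes below are invented for illustration.

# Illustration only; not part of the patch.
result_dict = {
    'x86': {'os-nosdn-nofeature-ha': [1, 1, 0, 1]},
    'aarch64': {'os-nosdn-nofeature-ha': [1, 0, 1, 1]},
}
for archi, scenarios in result_dict.items():
    for scenario, runs in scenarios.items():
        print("%s %s: %d PASS out of %d runs" %
              (archi, scenario, sum(runs), len(runs)))
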
diff --git a/utils/test/reporting/reporting/yardstick/reporting-status.py b/utils/test/reporting/reporting/yardstick/reporting-status.py
index 6584f4e8d..10cacf006 100644
--- a/utils/test/reporting/reporting/yardstick/reporting-status.py
+++ b/utils/test/reporting/reporting/yardstick/reporting-status.py
@@ -11,109 +11,159 @@ import os
import jinja2
-import reporting.utils.scenarioResult as sr
-import reporting.utils.reporting_utils as rp_utils
-from scenarios import config as cf
+from reporting.utils.scenarioResult import ScenarioResult
+from reporting.utils import reporting_utils as utils
+from scenarios import config as blacklist
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
# Logger
-logger = rp_utils.getLogger("Yardstick-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention = %s days *" % PERIOD)
-logger.info("* *")
-logger.info("*******************************************")
-
-
-# For all the versions
-for version in versions:
- # For all the installers
- for installer in installers:
- # get scenarios results data
- scenario_results = rp_utils.getScenarioStatus(installer, version)
- if 'colorado' == version:
- stable_result = rp_utils.getScenarioStatus(installer,
- 'stable/colorado')
- for k, v in stable_result.items():
- if k not in scenario_results.keys():
- scenario_results[k] = []
- scenario_results[k] += stable_result[k]
- scenario_result_criteria = {}
-
- for s in scenario_results.keys():
- if installer in cf.keys() and s in cf[installer].keys():
- scenario_results.pop(s)
-
- # From each scenarios get results list
- for s, s_result in scenario_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s", installer,
- version, s)
-
- ten_criteria = len(s_result)
- ten_score = 0
- for v in s_result:
- ten_score += v
-
- LASTEST_TESTS = rp_utils.get_config(
- 'general.nb_iteration_tests_success_criteria')
- four_result = s_result[:LASTEST_TESTS]
- four_criteria = len(four_result)
- four_score = 0
- for v in four_result:
- four_score += v
-
- s_status = str(rp_utils.get_percent(four_result, s_result))
- s_four_score = str(four_score) + '/' + str(four_criteria)
- s_ten_score = str(ten_score) + '/' + str(ten_criteria)
- s_score_percent = rp_utils.get_percent(four_result, s_result)
-
- if '100' == s_status:
- logger.info(">>>>> scenario OK, save the information")
- else:
- logger.info(">>>> scenario not OK, last 4 iterations = %s, \
- last 10 days = %s" % (s_four_score, s_ten_score))
-
- # Save daily results in a file
- path_validation_file = ("./display/" + version +
- "/yardstick/scenario_history.txt")
-
- if not os.path.exists(path_validation_file):
- with open(path_validation_file, 'w') as f:
- info = 'date,scenario,installer,details,score\n'
- f.write(info)
-
- with open(path_validation_file, "a") as f:
- info = (reportingDate + "," + s + "," + installer +
- "," + s_ten_score + "," +
- str(s_score_percent) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(s_status,
- s_four_score,
- s_ten_score,
- s_score_percent)
-
- logger.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/yardstick/template/index-status-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_result_criteria,
- installer=installer,
- period=PERIOD,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/yardstick/status-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
+LOG = utils.getLogger("Yardstick-Status")
+
+
+def get_scenario_data(version, installer):
+ scenarios = utils.getScenarioStatus(installer, version)
+
+ if 'colorado' == version:
+ data = utils.getScenarioStatus(installer, 'stable/colorado')
+ for archi, value in data.items():
+ for k, v in value.items():
+ if k not in scenarios[archi]:
+ scenarios[archi][k] = []
+ scenarios[archi][k].extend(data[archi][k])
+
+ for archi, value in scenarios.items():
+ for scenario in value:
+ if installer in blacklist and scenario in blacklist[installer]:
+ scenarios[archi].pop(scenario)
+
+ return scenarios
+
+
+def write_history_data(version,
+ scenario,
+ installer,
+ archi,
+ ten_score,
+ percent):
+ # Save daily results in a file
+ history_file = './display/{}/yardstick/scenario_history.txt'.format(
+ version)
+
+ if not os.path.exists(history_file):
+ with open(history_file, 'w') as f:
+ f.write('date,scenario,installer,details,score\n')
+
+ date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+ if installer == 'fuel':
+ installer = '{}@{}'.format(installer, archi)
+ with open(history_file, "a") as f:
+ info = '{},{},{},{},{}\n'.format(date,
+ scenario,
+ installer,
+ ten_score,
+ percent)
+ f.write(info)
+
+
+def generate_page(scenario_data, installer, period, version, architecture):
+ date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ template_env = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ template_file = "./reporting/yardstick/template/index-status-tmpl.html"
+ template = template_env.get_template(template_file)
+
+ if installer == 'fuel':
+ installer = '{}@{}'.format(installer, architecture)
+
+ output_text = template.render(scenario_results=scenario_data,
+ installer=installer,
+ period=period,
+ version=version,
+ date=date)
+
+ page_file = './display/{}/yardstick/status-{}.html'.format(version,
+ installer)
+ with open(page_file, 'wb') as f:
+ f.write(output_text)
+
+
+def do_statistic(data):
+ ten_score = 0
+ for v in data:
+ ten_score += v
+
+ last_count = utils.get_config(
+ 'general.nb_iteration_tests_success_criteria')
+ last_data = data[:last_count]
+ last_score = 0
+ for v in last_data:
+ last_score += v
+
+ percent = utils.get_percent(last_data, data)
+ status = str(percent)
+ last_score = '{}/{}'.format(last_score, len(last_data))
+ ten_score = '{}/{}'.format(ten_score, len(data))
+
+ if '100' == status:
+ LOG.info(">>>>> scenario OK, save the information")
+ else:
+ LOG.info(">>>> scenario not OK, last 4 iterations = %s, \
+ last 10 days = %s" % (last_score, ten_score))
+
+ return last_score, ten_score, percent, status
+
+
+def generate_reporting_page(version, installer, archi, scenarios, period):
+ scenario_data = {}
+
+ # From each scenarios get results list
+ for scenario, data in scenarios.items():
+ LOG.info("---------------------------------")
+
+ LOG.info("installer %s, version %s, scenario %s",
+ installer,
+ version,
+ scenario)
+ last_score, ten_score, percent, status = do_statistic(data)
+ write_history_data(version,
+ scenario,
+ installer,
+ archi,
+ ten_score,
+ percent)
+ scenario_data[scenario] = ScenarioResult(status,
+ last_score,
+ ten_score,
+ percent)
+
+ LOG.info("--------------------------")
+ if scenario_data:
+ generate_page(scenario_data, installer, period, version, archi)
+
+
+def main():
+ installers = utils.get_config('general.installers')
+ versions = utils.get_config('general.versions')
+ period = utils.get_config('general.period')
+
+ LOG.info("*******************************************")
+ LOG.info("* Generating reporting scenario status *")
+ LOG.info("* Data retention = %s days *" % period)
+ LOG.info("* *")
+ LOG.info("*******************************************")
+
+ # For all the versions
+ for version in versions:
+ # For all the installers
+ for installer in installers:
+ # get scenarios results data
+ scenarios = get_scenario_data(version, installer)
+ for k, v in scenarios.items():
+ generate_reporting_page(version, installer, k, v, period)
+
+
+if __name__ == '__main__':
+ main()
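
To make the refactored flow concrete, here is a hedged walk-through of what do_statistic() computes for a sample run history, assuming general.nb_iteration_tests_success_criteria is configured to 4; the data is invented.

# Illustration only; run history is newest-first, 1 = PASS, 0 = FAIL.
data = [1, 1, 1, 1, 0, 1, 1, 1, 1, 0]
last_data = data[:4]                                           # last 4 iterations
last_score = '{}/{}'.format(sum(last_data), len(last_data))    # '4/4'
ten_score = '{}/{}'.format(sum(data), len(data))               # '8/10'
# percent/status come from utils.get_percent(last_data, data); when the
# resulting status string is '100' the scenario is logged as OK and stored
# in a ScenarioResult for the rendered page.
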
diff --git a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
index f9b852490..3db32e531 100644
--- a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
+++ b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
@@ -70,7 +70,8 @@
<li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
<li><a href="status-apex.html">Apex</a></li>
<li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-fuel@x86.html">Fuel@x86</a></li>
+ <li><a href="status-fuel@aarch64.html">Fuel@aarch64</a></li>
<li><a href="status-joid.html">Joid</a></li>
</ul>
</nav>
diff --git a/utils/test/testapi/htmlize/doc-build.sh b/utils/test/testapi/htmlize/doc-build.sh
deleted file mode 100644
index 33560ceea..000000000
--- a/utils/test/testapi/htmlize/doc-build.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-
-# Create virtual environment
-virtualenv $WORKSPACE/testapi_venv
-source $WORKSPACE/testapi_venv/bin/activate
-
-# Swgger Codegen Tool
-url="http://repo1.maven.org/maven2/io/swagger/swagger-codegen-cli/2.2.1/swagger-codegen-cli-2.2.1.jar"
-
-# Check for jar file locally and in the repo
-if [ ! -f swagger-codegen-cli.jar ];
-then
- wget http://repo1.maven.org/maven2/io/swagger/swagger-codegen-cli/2.2.1/swagger-codegen-cli-2.2.1.jar -O swagger-codegen-cli.jar
-fi
-
-# Install Pre-requistics
-pip install requests
-
-python ./utils/test/testapi/htmlize/htmlize.py -o ${WORKSPACE}/
diff --git a/utils/test/testapi/htmlize/htmlize.py b/utils/test/testapi/htmlize/htmlize.py
deleted file mode 100644
index da6a6cf91..000000000
--- a/utils/test/testapi/htmlize/htmlize.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python
-
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import argparse
-import requests
-import json
-import os
-
-
-def main(args):
-
- # Merging two specs
- api_response = requests.get(args.api_declaration_url)
- api_response = json.loads(api_response.content)
- resource_response = requests.get(args.resource_listing_url)
- resource_response = json.loads(resource_response.content)
- resource_response['models'] = api_response['models']
- resource_response['apis'] = api_response['apis']
-
- # Storing the swagger specs
- with open('specs.json', 'w') as outfile:
- json.dump(resource_response, outfile)
-
- # Generating html page
- cmd = 'java -jar swagger-codegen-cli.jar generate \
- -i specs.json -l html2 -o %s' % (args.output_directory)
- if os.system(cmd) == 0:
- exit(0)
- else:
- exit(1)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Create \
- Swagger Spec documentation')
- parser.add_argument('-ru', '--resource-listing-url',
- type=str,
- required=False,
- default=('http://testresults.opnfv.org'
- '/test/swagger/resources.json'),
- help='Resource Listing Spec File')
- parser.add_argument('-au', '--api-declaration-url',
- type=str,
- required=False,
- default=('http://testresults.opnfv.org'
- '/test/swagger/APIs'),
- help='API Declaration Spec File')
- parser.add_argument('-o', '--output-directory',
- required=True,
- default='./',
- help='Output Directory where the \
- file should be stored')
- main(parser.parse_args())
diff --git a/utils/test/testapi/htmlize/push-doc-artifact.sh b/utils/test/testapi/htmlize/push-doc-artifact.sh
deleted file mode 100644
index 4cf1988b0..000000000
--- a/utils/test/testapi/htmlize/push-doc-artifact.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-export PATH=$PATH:/usr/local/bin/
-
-project=$PROJECT
-workspace=$WORKSPACE
-artifact_dir="$project/docs"
-
-set +e
-gsutil&>/dev/null
-if [ $? != 0 ]; then
- echo "Not possible to push results to artifact: gsutil not installed"
- exit 1
-else
- gsutil ls gs://artifacts.opnfv.org/"$project"/ &>/dev/null
- if [ $? != 0 ]; then
- echo "Not possible to push results to artifact: gsutil not installed."
- exit 1
- else
- echo "Uploading document to artifact $artifact_dir"
- gsutil cp "$workspace"/index.html gs://artifacts.opnfv.org/"$artifact_dir"/testapi.html >/dev/null 2>&1
- echo "Document can be found at http://artifacts.opnfv.org/releng/docs/testapi.html"
- fi
-fi