Diffstat (limited to 'utils/test/reporting/reporting/functest')
-rw-r--r--  utils/test/reporting/reporting/functest/__init__.py  0
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_0.png  bin 3644 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_100.png  bin 3191 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_16.7.png  bin 3170 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_25.png  bin 3108 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_33.3.png  bin 3081 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_41.7.png  bin 3169 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_50.png  bin 3123 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_58.3.png  bin 3161 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_66.7.png  bin 3069 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_75.png  bin 3030 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_8.3.png  bin 2993 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_83.3.png  bin 3122 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/gauge_91.7.png  bin 3008 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/icon-nok.png  bin 2317 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/icon-ok.png  bin 4063 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-clear.png  bin 1560 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-few-clouds.png  bin 1927 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-overcast.png  bin 1588 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/img/weather-storm.png  bin 2137 -> 0 bytes
-rw-r--r--  utils/test/reporting/reporting/functest/index.html  53
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-status.py  335
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-tempest.py  163
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-vims.py  142
-rw-r--r--  utils/test/reporting/reporting/functest/scenarioResult.py  29
-rw-r--r--  utils/test/reporting/reporting/functest/template/index-status-tmpl.html  183
-rw-r--r--  utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html  95
-rw-r--r--  utils/test/reporting/reporting/functest/template/index-vims-tmpl.html  93
-rw-r--r--  utils/test/reporting/reporting/functest/testCase.py  126
29 files changed, 0 insertions, 1219 deletions
diff --git a/utils/test/reporting/reporting/functest/__init__.py b/utils/test/reporting/reporting/functest/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/reporting/functest/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/reporting/functest/img/gauge_0.png b/utils/test/reporting/reporting/functest/img/gauge_0.png
deleted file mode 100644
index ecefc0e66..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_0.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_100.png b/utils/test/reporting/reporting/functest/img/gauge_100.png
deleted file mode 100644
index e199e1561..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_100.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_16.7.png b/utils/test/reporting/reporting/functest/img/gauge_16.7.png
deleted file mode 100644
index 3e3993c3b..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_16.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_25.png b/utils/test/reporting/reporting/functest/img/gauge_25.png
deleted file mode 100644
index 4923659b9..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_25.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_33.3.png b/utils/test/reporting/reporting/functest/img/gauge_33.3.png
deleted file mode 100644
index 364574b4a..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_33.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_41.7.png b/utils/test/reporting/reporting/functest/img/gauge_41.7.png
deleted file mode 100644
index 8c3e910fa..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_41.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_50.png b/utils/test/reporting/reporting/functest/img/gauge_50.png
deleted file mode 100644
index 2874b9fcf..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_50.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_58.3.png b/utils/test/reporting/reporting/functest/img/gauge_58.3.png
deleted file mode 100644
index beedc8aa9..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_58.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_66.7.png b/utils/test/reporting/reporting/functest/img/gauge_66.7.png
deleted file mode 100644
index 93f44d133..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_66.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_75.png b/utils/test/reporting/reporting/functest/img/gauge_75.png
deleted file mode 100644
index 9fc261ff8..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_75.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_8.3.png b/utils/test/reporting/reporting/functest/img/gauge_8.3.png
deleted file mode 100644
index 59f86571e..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_8.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_83.3.png b/utils/test/reporting/reporting/functest/img/gauge_83.3.png
deleted file mode 100644
index 27ae4ec54..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_83.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_91.7.png b/utils/test/reporting/reporting/functest/img/gauge_91.7.png
deleted file mode 100644
index 280865714..000000000
--- a/utils/test/reporting/reporting/functest/img/gauge_91.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/icon-nok.png b/utils/test/reporting/reporting/functest/img/icon-nok.png
deleted file mode 100644
index 526b5294b..000000000
--- a/utils/test/reporting/reporting/functest/img/icon-nok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/icon-ok.png b/utils/test/reporting/reporting/functest/img/icon-ok.png
deleted file mode 100644
index 3a9de2e89..000000000
--- a/utils/test/reporting/reporting/functest/img/icon-ok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-clear.png b/utils/test/reporting/reporting/functest/img/weather-clear.png
deleted file mode 100644
index a0d967750..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-clear.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/reporting/functest/img/weather-few-clouds.png
deleted file mode 100644
index acfa78398..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-few-clouds.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-overcast.png b/utils/test/reporting/reporting/functest/img/weather-overcast.png
deleted file mode 100644
index 4296246d0..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-overcast.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-storm.png b/utils/test/reporting/reporting/functest/img/weather-storm.png
deleted file mode 100644
index 956f0e20f..000000000
--- a/utils/test/reporting/reporting/functest/img/weather-storm.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/reporting/functest/index.html b/utils/test/reporting/reporting/functest/index.html
deleted file mode 100644
index bb1bce209..000000000
--- a/utils/test/reporting/reporting/functest/index.html
+++ /dev/null
@@ -1,53 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Functest reporting page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-main">
- <h2>Functest</h2>
- This project develops test suites that cover functional test cases in OPNFV.
- <br>The test suites are integrated into the continuous integration (CI) framework and used to evaluate/validate scenarios.
- <br> Weekly meeting: every Tuesday 8 AM UTC
- <br> IRC channel: #opnfv-testperf
-
- <br>
- <h2>Useful Links</h2>
- <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
- <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
- <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
- <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
- <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
-
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
deleted file mode 100755
index 592f92996..000000000
--- a/utils/test/reporting/reporting/functest/reporting-status.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-import sys
-import time
-
-import jinja2
-
-import reporting.functest.testCase as tc
-import reporting.functest.scenarioResult as sr
-import reporting.utils.reporting_utils as rp_utils
-
-"""
-Functest reporting status
-"""
-
-# Logger
-LOGGER = rp_utils.getLogger("Functest-Status")
-
-# Initialization
-testValid = []
-otherTestCases = []
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-# init just connection_check to get the list of scenarios
-# as all the scenarios run connection_check
-healthcheck = tc.TestCase("connection_check", "functest", -1)
-
-# Retrieve the Functest configuration to detect which tests are relevant
-# according to the installer, scenario
-cf = rp_utils.get_config('functest.test_conf')
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-installers = rp_utils.get_config('general.installers')
-blacklist = rp_utils.get_config('functest.blacklist')
-log_level = rp_utils.get_config('general.log.log_level')
-exclude_noha = rp_utils.get_config('functest.exclude_noha')
-exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-
-functest_yaml_config = rp_utils.getFunctestConfig()
-
-LOGGER.info("*******************************************")
-LOGGER.info("* *")
-LOGGER.info("* Generating reporting scenario status *")
-LOGGER.info("* Data retention: %s days *", period)
-LOGGER.info("* Log level: %s *", log_level)
-LOGGER.info("* *")
-LOGGER.info("* Virtual PODs exluded: %s *", exclude_virtual)
-LOGGER.info("* NOHA scenarios excluded: %s *", exclude_noha)
-LOGGER.info("* *")
-LOGGER.info("*******************************************")
-
-# Retrieve the tiers and their test cases
-config_tiers = functest_yaml_config.get("tiers")
-
-# Tier 0 (healthcheck) and Tier 1 (smoke) test cases are used
-# to validate scenarios
-# Tiers >= 2 are not used to validate scenarios, but we display the results anyway
-# tricky thing for the API, as some tests are Functest tests while
-# other tests are declared directly in the feature projects
-for tier in config_tiers:
- if tier['order'] >= 0 and tier['order'] < 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- testValid.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
- elif tier['order'] == 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- case['case_name'],
- case['dependencies']))
- elif tier['order'] > 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
-
-LOGGER.debug("Functest reporting start")
-
-# For all the versions
-for version in versions:
- # For all the installers
- scenario_directory = "./display/" + version + "/functest/"
- scenario_file_name = scenario_directory + "scenario_history.txt"
-
- # check that the directory exists, if not create it
- # (first run on new version)
- if not os.path.exists(scenario_directory):
- os.makedirs(scenario_directory)
-
- # initiate scenario file if it does not exist
- if not os.path.isfile(scenario_file_name):
- with open(scenario_file_name, "a") as my_file:
- LOGGER.debug("Create scenario file: %s", scenario_file_name)
- my_file.write("date,scenario,installer,detail,score\n")
-
- for installer in installers:
-
- # get scenarios
- scenario_results = rp_utils.getScenarios("functest",
- "connection_check",
- installer,
- version)
- # get the supported architectures (x86, aarch64)
- architectures = rp_utils.getArchitectures(scenario_results)
- LOGGER.info("Supported architectures: %s", architectures)
-
- for architecture in architectures:
- LOGGER.info("Architecture: %s", architecture)
- # Consider only the results for the selected architecture
- # i.e. drop x86 for aarch64 and vice versa
- filter_results = rp_utils.filterArchitecture(scenario_results,
- architecture)
- scenario_stats = rp_utils.getScenarioStats(filter_results)
- items = {}
- scenario_result_criteria = {}
-
- # if more than 1 architecture is supported
- # specify the architecture in the installer name
- installer_display = installer
- if "fuel" in installer:
- installer_display = installer + "@" + architecture
-
- # For all the scenarios get results
- for s, s_result in filter_results.items():
- LOGGER.info("---------------------------------")
- LOGGER.info("installer %s, version %s, scenario %s:",
- installer, version, s)
- LOGGER.debug("Scenario results: %s", s_result)
-
- # Green or Red light for a given scenario
- nb_test_runnable_for_this_scenario = 0
- scenario_score = 0
- # url of the last jenkins log corresponding to a given
- # scenario
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- LOGGER.debug("Build tag: %s", build_tag)
- s_url = rp_utils.getJenkinsUrl(build_tag)
- if s_url is None:
- s_url = "http://testresultS.opnfv.org/reporting"
- LOGGER.info("last jenkins url: %s", s_url)
- testCases2BeDisplayed = []
- # Check whether each test case is runnable for this installer/scenario,
- # starting with the test cases used for scenario validation
- try:
- # 1) Manage the test cases used for scenario validation
- # concretely Tiers 0 and 1
- for test_case in testValid:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- LOGGER.debug("testcase %s (%s) is %s",
- test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable)
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
- LOGGER.info(" Searching results for case %s ",
- displayName)
- if "fuel" in installer:
- result = rp_utils.getCaseScoreFromBuildTag(
- name,
- s_result)
- else:
- result = rp_utils.getCaseScore(name, installer,
- s, version)
- # if no result set the value to 0
- if result < 0:
- result = 0
- LOGGER.info(" >>>> Test score = " + str(result))
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases used for scenario qualification
- # concretely Tiers >= 2
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- LOGGER.debug("testcase %s (%s) is %s",
- test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable)
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- LOGGER.info(" Searching results for case %s ",
- displayName)
- if "fuel" in installer:
- result = rp_utils.getCaseScoreFromBuildTag(
- name,
- s_result)
- else:
- result = rp_utils.getCaseScore(name, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(
- name,
- project,
- "",
- result,
- True,
- 4))
- else:
- LOGGER.debug("No results found")
-
- items[s] = testCases2BeDisplayed
- except Exception: # pylint: disable=broad-except
- LOGGER.error("Error installer %s, version %s, scenario %s",
- installer, version, s)
- LOGGER.error("No data available: %s", sys.exc_info()[0])
-
- # **********************************************
- # Evaluate the results for scenario validation
- # **********************************************
- # the validation criteria = nb runnable tests x k_score
- # because each test case can get
- # 0 point (never PASS)
- # 1 point at least (PASS once over the time window)
- # 2 points (PASS more than once but 1 FAIL on the last 4)
- # 3 points PASS on the last 4 iterations
- # e.g. 1 scenario = 10 cases
- # 1 iteration : max score = 10 (10x1)
- # 2 iterations : max score = 20 (10x2)
- # 3 iterations : max score = 20
- # 4 or more iterations : max score = 30 (10x3)
- LOGGER.info("Number of iterations for this scenario: %s",
- len(s_result))
- if len(s_result) > 3:
- k_score = 3
- elif len(s_result) < 2:
- k_score = 1
- else:
- k_score = 2
-
- scenario_criteria = nb_test_runnable_for_this_scenario*k_score
-
- # score for reporting
- s_score = str(scenario_score) + "/" + str(scenario_criteria)
- s_score_percent = rp_utils.getScenarioPercent(
- scenario_score,
- scenario_criteria)
-
- s_status = "KO"
- if scenario_score < scenario_criteria:
- LOGGER.info(">>>> scenario not OK, score = %s/%s",
- scenario_score, scenario_criteria)
- s_status = "KO"
- else:
- LOGGER.info(">>>>> scenario OK, save the information")
- s_status = "OK"
- path_validation_file = ("./display/" + version +
- "/functest/" +
- "validated_scenario_history.txt")
- with open(path_validation_file, "a") as f:
- time_format = "%Y-%m-%d %H:%M"
- info = (datetime.datetime.now().strftime(time_format) +
- ";" + installer_display + ";" + s + "\n")
- f.write(info)
-
- # Save daily results in a file
- with open(scenario_file_name, "a") as f:
- info = (reportingDate + "," + s + "," + installer_display +
- "," + s_score + "," +
- str(round(s_score_percent)) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(
- s_status,
- s_score,
- s_score_percent,
- s_url)
- LOGGER.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(
- loader=templateLoader, autoescape=True)
-
- TEMPLATE_FILE = ("./reporting/functest/template"
- "/index-status-tmpl.html")
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(
- scenario_stats=scenario_stats,
- scenario_results=scenario_result_criteria,
- items=items,
- installer=installer_display,
- period=period,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/functest/status-" +
- installer_display + ".html", "wb") as fh:
- fh.write(outputText)
-
- LOGGER.info("Manage export CSV & PDF")
- rp_utils.export_csv(scenario_file_name, installer_display, version)
- LOGGER.error("CSV generated...")
-
- # Generate outputs for export
- # pdf
- url_pdf = rp_utils.get_config('general.url')
- pdf_path = ("./display/" + version +
- "/functest/status-" + installer_display + ".html")
- pdf_doc_name = ("./display/" + version +
- "/functest/status-" + installer_display + ".pdf")
- rp_utils.export_pdf(pdf_path, pdf_doc_name)
- LOGGER.info("PDF generated...")
diff --git a/utils/test/reporting/reporting/functest/reporting-tempest.py b/utils/test/reporting/reporting/functest/reporting-tempest.py
deleted file mode 100755
index d78d9a19d..000000000
--- a/utils/test/reporting/reporting/functest/reporting-tempest.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# SPDX-license-identifier: Apache-2.0
-
-from datetime import datetime
-import json
-import os
-
-from urllib2 import Request, urlopen, URLError
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-
-INSTALLERS = rp_utils.get_config('general.installers')
-ITEMS = ["tests", "Success rate", "duration"]
-
-CURRENT_DIR = os.getcwd()
-
-PERIOD = rp_utils.get_config('general.period')
-CRITERIA_NB_TEST = 100
-CRITERIA_DURATION = 1800
-CRITERIA_SUCCESS_RATE = 100
-
-logger = rp_utils.getLogger("Tempest")
-logger.info("************************************************")
-logger.info("* Generating reporting Tempest_smoke_serial *")
-logger.info("* Data retention = %s days *", PERIOD)
-logger.info("* *")
-logger.info("************************************************")
-
-logger.info("Success criteria:")
-logger.info("nb tests executed > %s s ", CRITERIA_NB_TEST)
-logger.info("test duration < %s s ", CRITERIA_DURATION)
-logger.info("success rate > %s ", CRITERIA_SUCCESS_RATE)
-
-# For all the versions
-for version in rp_utils.get_config('general.versions'):
- for installer in INSTALLERS:
- # we consider the Tempest results of the last PERIOD days
- url = ("http://" + rp_utils.get_config('testapi.url') +
- "?case=tempest_smoke_serial&period=" + str(PERIOD) +
- "&installer=" + installer + "&version=" + version)
- request = Request(url)
- logger.info(("Search tempest_smoke_serial results for installer %s"
- " for version %s"), installer, version)
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as err:
- logger.error("Error code: %s", err)
- logger.debug("request sent: %s", url)
- logger.debug("Results from API: %s", results)
- test_results = results['results']
- logger.debug("Test results: %s", test_results)
- scenario_results = {}
- criteria = {}
- errors = {}
-
- for r in test_results:
- # Retrieve all the scenarios per installer
- # In Brahmaputra the version field was used;
- # since Colorado the scenario field is used
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- logger.debug("Scenario results: %s", scenario_results)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- # For each scenario, we build a result object to deal with
- # results, criteria and error handling
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
- logger.debug("start_date= %s", result["start_date"])
-
- # retrieve results
- # ****************
- nb_tests_run = result['details']['tests']
- nb_tests_failed = result['details']['failures']
- logger.debug("nb_tests_run= %s", nb_tests_run)
- logger.debug("nb_tests_failed= %s", nb_tests_failed)
-
- try:
- success_rate = (100 * (int(nb_tests_run) -
- int(nb_tests_failed)) /
- int(nb_tests_run))
- except ZeroDivisionError:
- success_rate = 0
-
- result['details']["tests"] = nb_tests_run
- result['details']["Success rate"] = str(success_rate) + "%"
-
- logger.info("nb_tests_run= %s", result['details']["tests"])
- logger.info("test rate = %s",
- result['details']["Success rate"])
-
- # Criteria management
- # *******************
- crit_tests = False
- crit_rate = False
- crit_time = False
-
- # Expect that at least CRITERIA_NB_TEST tests are run
- if nb_tests_run >= CRITERIA_NB_TEST:
- crit_tests = True
-
- # Expect a success rate of at least CRITERIA_SUCCESS_RATE percent
- if success_rate >= CRITERIA_SUCCESS_RATE:
- crit_rate = True
-
- # Expect the suite duration to stay under CRITERIA_DURATION seconds
- stop_date = datetime.strptime(result['stop_date'],
- '%Y-%m-%d %H:%M:%S')
- start_date = datetime.strptime(result['start_date'],
- '%Y-%m-%d %H:%M:%S')
-
- delta = stop_date - start_date
-
- if delta.total_seconds() < CRITERIA_DURATION:
- crit_time = True
-
- result['criteria'] = {'tests': crit_tests,
- 'Success rate': crit_rate,
- 'duration': crit_time}
- try:
- logger.debug("Nb Test run: %s", nb_tests_run)
- logger.debug("Test duration: %s", delta)
- logger.debug("Success rate: %s", success_rate)
- except Exception: # pylint: disable=broad-except
- logger.error("Data format error")
-
- # Error management
- # ****************
- try:
- errors = result['details']['errors']
- logger.info("errors: %s", errors)
- result['errors'] = errors
- except Exception: # pylint: disable=broad-except
- logger.error("Error field not present (Brahamputra runs?)")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/functest/template/index-tempest-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- items=ITEMS,
- installer=installer)
-
- with open("./display/" + version +
- "/functest/tempest-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
-logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/reporting/functest/reporting-vims.py b/utils/test/reporting/reporting/functest/reporting-vims.py
deleted file mode 100755
index 3b25e911d..000000000
--- a/utils/test/reporting/reporting/functest/reporting-vims.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-"""
-vIMS reporting status
-"""
-from urllib2 import Request, urlopen, URLError
-import json
-import jinja2
-
-import reporting.utils.reporting_utils as rp_utils
-
-LOGGER = rp_utils.getLogger("vIMS")
-
-PERIOD = rp_utils.get_config('general.period')
-VERSIONS = rp_utils.get_config('general.versions')
-URL_BASE = rp_utils.get_config('testapi.url')
-
-LOGGER.info("****************************************")
-LOGGER.info("* Generating reporting vIMS *")
-LOGGER.info("* Data retention = %s days *", PERIOD)
-LOGGER.info("* *")
-LOGGER.info("****************************************")
-
-INSTALLERS = rp_utils.get_config('general.installers')
-STEP_ORDER = ["initialisation", "orchestrator", "vnf", "test_vnf"]
-LOGGER.info("Start vIMS reporting processing....")
-
-# For all the versions
-for version in VERSIONS:
- for installer in INSTALLERS:
-
- # get the supported architectures (x86, aarch64)
- # get scenarios
- scenario_results = rp_utils.getScenarios("functest",
- "cloudify_ims",
- installer,
- version)
-
- architectures = rp_utils.getArchitectures(scenario_results)
- LOGGER.info("Supported architectures: %s", architectures)
-
- for architecture in architectures:
- LOGGER.info("Architecture: %s", architecture)
- # Consider only the results for the selected architecture
- # i.e. drop x86 for aarch64 and vice versa
- filter_results = rp_utils.filterArchitecture(scenario_results,
- architecture)
- scenario_stats = rp_utils.getScenarioStats(filter_results)
- items = {}
- scenario_result_criteria = {}
-
- # if more than 1 architecture is supported
- # specify the architecture in the installer name
- installer_display = installer
- if "fuel" in installer:
- installer_display = installer + "@" + architecture
-
- LOGGER.info("Search vIMS results for installer: %s, version: %s",
- installer, version)
- request = Request("http://" + URL_BASE + '?case=cloudify_ims&'
- 'installer=' + installer + '&version=' + version)
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as err:
- LOGGER.error("Error code: %s", err)
-
- test_results = results['results']
-
- # LOGGER.debug("Results found: %s" % test_results)
-
- scenario_results = {}
- for r in test_results:
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- # LOGGER.debug("scenario result: %s" % scenario_results)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- for result in scenario_results[s]:
- try:
- format_result = result['details']['test_vnf']['result']
-
- # round durations of the different steps
- result['details']['orchestrator']['duration'] = round(
- result['details']['orchestrator']['duration'], 1)
- result['details']['vnf']['duration'] = round(
- result['details']['vnf']['duration'], 1)
- result['details']['test_vnf']['duration'] = round(
- result['details']['test_vnf']['duration'], 1)
-
- res_orch = \
- result['details']['orchestrator']['duration']
- res_vnf = result['details']['vnf']['duration']
- res_test_vnf = \
- result['details']['test_vnf']['duration']
- res_signaling = \
- result['details']['test_vnf']['result']['failures']
-
- # Manage test result status
- if res_signaling != 0:
- LOGGER.debug("At least 1 signalig test FAIL")
- result['details']['test_vnf']['status'] = "FAIL"
- else:
- LOGGER.debug("All signalig tests PASS")
- result['details']['test_vnf']['status'] = "PASS"
-
- LOGGER.debug("Scenario %s, Installer %s",
- s_result[1]['scenario'], installer)
- LOGGER.debug("Orchestrator deployment: %ss", res_orch)
- LOGGER.debug("vIMS deployment: %ss", res_vnf)
- LOGGER.debug("VNF testing: %ss", res_test_vnf)
- LOGGER.debug("VNF testing results: %s", format_result)
- except Exception as err: # pylint: disable=broad-except
- LOGGER.error("Uncomplete data %s", err)
- LOGGER.debug("----------------------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./reporting/functest/template/index-vims-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- step_order=STEP_ORDER,
- installer=installer_display)
- LOGGER.debug("Generate html page for %s", installer_display)
- with open("./display/" + version + "/functest/vims-" +
- installer_display + ".html", "wb") as fh:
- fh.write(outputText)
-
-LOGGER.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/reporting/functest/scenarioResult.py b/utils/test/reporting/reporting/functest/scenarioResult.py
deleted file mode 100644
index 5a54eed96..000000000
--- a/utils/test/reporting/reporting/functest/scenarioResult.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-class ScenarioResult(object):
-
- def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
- self.status = status
- self.score = score
- self.score_percent = score_percent
- self.url_lastrun = url_lastrun
-
- def getStatus(self):
- return self.status
-
- def getScore(self):
- return self.score
-
- def getScorePercent(self):
- return self.score_percent
-
- def getUrlLastRun(self):
- return self.url_lastrun
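ScenarioResult is a plain value object; the status template consumed it through its getters, feeding getScorePercent() to the gauge widget and getUrlLastRun() to the scenario link. Illustrative usage only (the URL is made up):

    result = ScenarioResult('OK', '27/30', 90.0,
                            'https://build.opnfv.org/ci/view/functest/')
    print(result.getStatus(), result.getScore(), result.getScorePercent())
    # OK 27/30 90.0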
diff --git a/utils/test/reporting/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/reporting/functest/template/index-status-tmpl.html
deleted file mode 100644
index 50fc648aa..000000000
--- a/utils/test/reporting/reporting/functest/template/index-status-tmpl.html
+++ /dev/null
@@ -1,183 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_stats.iteritems() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
-
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
-</script>
-<script type="text/javascript">
-$(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
-})
-</script>
-
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-daisy.html">Daisy</a></li>
- <li><a href="status-fuel@x86.html">fuel@x86</a></li>
- <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">HA Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Score</th>
- <th width="10%">Iteration</th>
- </tr>
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- <tr class="tr-ok">
- {% if '-ha' in scenario -%}
- <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getScore()}}</td>
- <td>{{iteration}}</td>
- {%- endif %}
- </tr>
- {%- endfor %}
- <br>
- </table>
- <br>
- <table class="table">
- <tr>
- <th width="40%">NOHA Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Score</th>
- <th width="10%">Iteration</th>
- </tr>
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- <tr class="tr-ok">
- {% if '-noha' in scenario -%}
- <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getScore()}}</td>
- <td>{{iteration}}</td>
- {%- endif %}
- </tr>
- {%- endfor %}
- </table>
-
- </div>
-
-
- {% for scenario, iteration in scenario_stats.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario}}</b></h3>
- </div>
- <div class="panel panel-default">
- <div class="panel-heading">
- <span class="panel-header-item">
- </span>
- </div>
- <table class="table">
- <tr>
- {% for test in items[scenario] -%}
- <th>
- {% if test.getCriteria() > -1 -%}
- {{test.getDisplayName() }}
- {%- endif %}
- {% if test.getTier() > 3 -%}
- *
- {%- endif %}
- </th>
- {%- endfor %}
- </tr>
- <tr class="tr-weather-weather">
- {% for test in items[scenario] -%}
- {% if test.getCriteria() > 2 -%}
- <td><img src="../../img/weather-clear.png"></td>
- {%- elif test.getCriteria() > 1 -%}
- <td><img src="../../img/weather-few-clouds.png"></td>
- {%- elif test.getCriteria() > 0 -%}
- <td><img src="../../img/weather-overcast.png"></td>
- {%- elif test.getCriteria() > -1 -%}
- <td><img src="../../img/weather-storm.png"></td>
- {%- endif %}
- {%- endfor %}
- </tr>
- </table>
- </div>
- </div>
- {%- endfor %}
- see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
- <div> <br>
- <a href="./status-{{installer}}.pdf" class="myButtonPdf">Export to PDF</a> <a href="./scenario_history_{{installer}}.txt" class="myButtonCSV">Export to CSV</a>
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
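The weather icons in the template above are the only scoring logic on the page: they encode the per-case criteria value computed by reporting-status.py on its 0-3 scale. A sketch of that mapping:

    def weather_icon(criteria):
        # mirrors the template branches: 3 -> clear, 2 -> few clouds,
        # 1 -> overcast, 0 -> storm; below 0 means "not run", no icon
        if criteria > 2:
            return 'weather-clear.png'
        if criteria > 1:
            return 'weather-few-clouds.png'
        if criteria > 0:
            return 'weather-overcast.png'
        if criteria > -1:
            return 'weather-storm.png'
        return None

    print([weather_icon(score) for score in (3, 2, 1, 0, -1)])
    # ['weather-clear.png', 'weather-few-clouds.png',
    #  'weather-overcast.png', 'weather-storm.png', None]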
diff --git a/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
deleted file mode 100644
index 3a222276e..000000000
--- a/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
+++ /dev/null
@@ -1,95 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Tempest status page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="tempest-apex.html">Apex</a></li>
- <li><a href="tempest-compass.html">Compass</a></li>
- <li><a href="tempest-daisy.html">Daisy</a></li>
- <li><a href="tempest-fuel.html">Fuel</a></li>
- <li><a href="tempest-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- {% for scenario_name, results in scenario_results.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
- </div>
- {% for result in results -%}
- {% if loop.index > 2 -%}
- <div class="panel panel-default" hidden>
- {%- else -%}
- <div class="panel panel-default">
- {%- endif %}
- <div class="panel-heading">
- <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
- <span class="panel-header-item">
- <h4><b>{{result.start_date}}</b></h4>
- </span>
- <span class="badge panel-pod-name">{{result.pod_name}}</span>
- </div>
- <table class="table">
- <tr>
- <th width="20%">Item</th>
- <th width="10%">Result</th>
- <th width="10%">Status</th>
- <th width="60%">Errors</th>
- </tr>
- {% for item in items -%}
- {% if item in result.details.keys() -%}
- {% if result.criteria[item] -%}
- <tr class="tr-ok">
- <td>{{item}}</td>
- <td>{{result.details[item]}}</td>
- <td><span class="glyphicon glyphicon-ok"></td>
- {% if item is equalto "Success rate" %}
- <td>{{result.errors}}</td>
- {% endif %}
- </tr>
- {%- else -%}
- <tr class="tr-danger">
- <td>{{item}}</td>
- <td>{{result.details[item]}}</td>
- <td><span class="glyphicon glyphicon-remove"></td>
- {% if item is equalto "Success rate" %}
- <td>{{result.errors}}</td>
- {% endif %}
- </tr>
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- </table>
- </div>
- {%- endfor %}
- <button type="button" class="btn btn-more">More than two</button>
- </div>
- {%- endfor %}
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
deleted file mode 100644
index 9bd2b2f66..000000000
--- a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
+++ /dev/null
@@ -1,93 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">vIMS status page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="vims-apex.html">Apex</a></li>
- <li><a href="vims-compass.html">Compass</a></li>
- <li><a href="vims-daisy.html">Daisy</a></li>
- <li><a href="vims-fuel@x86.html">Fuel@x86</a></li>
- <li><a href="vims-fuel@aarch64.html">Fuel@aarch64</a></li>
- <li><a href="vims-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- {% for scenario_name, results in scenario_results.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
- </div>
- {% for result in results -%}
- {% if loop.index > 2 -%}
- <div class="panel panel-default" hidden>
- {%- else -%}
- <div class="panel panel-default">
- {%- endif %}
- <div class="panel-heading">
- <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
- <span class="panel-header-item">
- <h4><b>{{result.start_date}}</b></h4>
- </span>
- <span class="badge panel-pod-name">{{result.pod_name}}</span>
- </div>
- <table class="table">
- <tr>
- <th width="20%">Step</th>
- <th width="10%">Status</th>
- <th width="10%">Duration(s)</th>
- <th width="60%">Result</th>
- </tr>
- {% for step_od_name in step_order -%}
- {% if step_od_name in result.details.keys() -%}
- {% set step_result = result.details[step_od_name] -%}
- {% if step_result.status == "PASS" -%}
- <tr class="tr-ok">
- <td>{{step_od_name}}</td>
- <td><span class="glyphicon glyphicon-ok"></td>
- <td><b>{{step_result.duration}}</b></td>
- <td>{{step_result.result}}</td>
- </tr>
- {%- else -%}
- <tr class="tr-danger">
- <td>{{step_od_name}}</td>
- <td><span class="glyphicon glyphicon-remove"></td>
- <td><b>0s</b></td>
- <td>{{step_result.result}}</td>
- </tr>
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- </table>
- </div>
- {%- endfor %}
- <button type="button" class="btn btn-more">More than two</button>
- </div>
- {%- endfor %}
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting/functest/testCase.py b/utils/test/reporting/reporting/functest/testCase.py
deleted file mode 100644
index a182dd4cf..000000000
--- a/utils/test/reporting/reporting/functest/testCase.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import re
-
-
-class TestCase(object):
-
- def __init__(self, name, project, constraints,
- criteria=-1, isRunnable=True, tier=-1):
- self.name = name
- self.project = project
- self.constraints = constraints
- self.criteria = criteria
- self.isRunnable = isRunnable
- self.tier = tier
- display_name_matrix = {'healthcheck': 'healthcheck',
- 'vping_ssh': 'vPing (ssh)',
- 'vping_userdata': 'vPing (userdata)',
- 'odl': 'ODL',
- 'onos': 'ONOS',
- 'ocl': 'OCL',
- 'tempest_smoke_serial': 'Tempest (smoke)',
- 'tempest_full_parallel': 'Tempest (full)',
- 'tempest_defcore': 'Tempest (Defcore)',
- 'refstack_defcore': 'Refstack',
- 'rally_sanity': 'Rally (smoke)',
- 'bgpvpn': 'bgpvpn',
- 'rally_full': 'Rally (full)',
- 'vims': 'vIMS',
- 'doctor-notification': 'Doctor',
- 'promise': 'Promise',
- 'moon': 'Moon',
- 'copper': 'Copper',
- 'security_scan': 'Security',
- 'multisite': 'Multisite',
- 'domino-multinode': 'Domino',
- 'functest-odl-sfc': 'SFC',
- 'onos_sfc': 'SFC',
- 'parser-basics': 'Parser',
- 'connection_check': 'Health (connection)',
- 'api_check': 'Health (api)',
- 'snaps_smoke': 'SNAPS',
- 'snaps_health_check': 'Health (dhcp)',
- 'gluon_vping': 'Netready',
- 'fds': 'FDS',
- 'cloudify_ims': 'vIMS (Cloudify)',
- 'orchestra_openims': 'OpenIMS (OpenBaton)',
- 'orchestra_clearwaterims': 'vIMS (OpenBaton)',
- 'opera_ims': 'vIMS (Open-O)',
- 'vyos_vrouter': 'vyos (Cloudify)',
- 'barometercollectd': 'Barometer',
- 'odl_netvirt': 'Netvirt',
- 'security_scan': 'Security'}
- try:
- self.displayName = display_name_matrix[self.name]
- except KeyError:
- self.displayName = "unknown"
-
- def getName(self):
- return self.name
-
- def getProject(self):
- return self.project
-
- def getConstraints(self):
- return self.constraints
-
- def getCriteria(self):
- return self.criteria
-
- def getTier(self):
- return self.tier
-
- def setCriteria(self, criteria):
- self.criteria = criteria
-
- def setIsRunnable(self, isRunnable):
- self.isRunnable = isRunnable
-
- def checkRunnable(self, installer, scenario, config):
- # Re-use Functest declaration
- # Retrieve Functest configuration file functest_config.yaml
- is_runnable = True
- config_test = config
- # print " *********************** "
- # print TEST_ENV
- # print " ---------------------- "
- # print "case = " + self.name
- # print "installer = " + installer
- # print "scenario = " + scenario
- # print "project = " + self.project
-
- # Retrieve test constraints
- # Retrieve test execution param
- test_execution_context = {"installer": installer,
- "scenario": scenario}
-
- # By default we assume that all the tests are always runnable...
- # if test_env not empty => dependencies to be checked
- if config_test is not None and len(config_test) > 0:
- # possible criteria = ["installer", "scenario"]
- # take the test constraints from the config file
- # and compare them against the CI execution context
- for criteria in config_test:
- if re.search(config_test[criteria],
- test_execution_context[criteria]) is None:
- # print "Test "+ test + " cannot be run on the environment"
- is_runnable = False
- # print is_runnable
- self.isRunnable = is_runnable
-
- def toString(self):
- testcase = ("Name=" + self.name + ";Criteria=" +
- str(self.criteria) + ";Project=" + self.project +
- ";Constraints=" + str(self.constraints) +
- ";IsRunnable" + str(self.isRunnable))
- return testcase
-
- def getDisplayName(self):
- return self.displayName
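checkRunnable() above boils down to matching each declared constraint regex against the CI execution context. A standalone sketch of that check (the example constraint dict is hypothetical, shaped like the dependencies read from the Functest config):

    import re

    def is_runnable(installer, scenario, constraints):
        # e.g. constraints = {'installer': 'fuel', 'scenario': 'ha'}
        context = {'installer': installer, 'scenario': scenario}
        if not constraints:
            return True  # no declared dependency: always runnable
        return all(re.search(pattern, context[key])
                   for key, pattern in constraints.items())

    print(is_runnable('fuel', 'os-odl-nofeature-ha', {'installer': 'fuel'}))  # True
    print(is_runnable('joid', 'os-odl-nofeature-ha', {'installer': 'fuel'}))  # False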