path: root/utils/test/reporting/functest
Diffstat (limited to 'utils/test/reporting/functest')
-rw-r--r--  utils/test/reporting/functest/__init__.py                       0
-rw-r--r--  utils/test/reporting/functest/img/gauge_0.png                   bin 3644 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_100.png                 bin 3191 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_16.7.png                bin 3170 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_25.png                  bin 3108 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_33.3.png                bin 3081 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_41.7.png                bin 3169 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_50.png                  bin 3123 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_58.3.png                bin 3161 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_66.7.png                bin 3069 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_75.png                  bin 3030 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_8.3.png                 bin 2993 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_83.3.png                bin 3122 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/gauge_91.7.png                bin 3008 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/icon-nok.png                  bin 2317 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/icon-ok.png                   bin 4063 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-clear.png             bin 1560 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-few-clouds.png        bin 1927 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-overcast.png          bin 1588 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/img/weather-storm.png             bin 2137 -> 0 bytes
-rw-r--r--  utils/test/reporting/functest/index.html                        53
-rwxr-xr-x  utils/test/reporting/functest/reporting-status.py               306
-rwxr-xr-x  utils/test/reporting/functest/reporting-tempest.py              155
-rwxr-xr-x  utils/test/reporting/functest/reporting-vims.py                 126
-rw-r--r--  utils/test/reporting/functest/scenarioResult.py                 29
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html   157
-rw-r--r--  utils/test/reporting/functest/template/index-tempest-tmpl.html  95
-rw-r--r--  utils/test/reporting/functest/template/index-vims-tmpl.html     92
-rw-r--r--  utils/test/reporting/functest/testCase.py                       125
29 files changed, 0 insertions, 1138 deletions
diff --git a/utils/test/reporting/functest/__init__.py b/utils/test/reporting/functest/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/utils/test/reporting/functest/__init__.py
+++ /dev/null
diff --git a/utils/test/reporting/functest/img/gauge_0.png b/utils/test/reporting/functest/img/gauge_0.png
deleted file mode 100644
index ecefc0e66..000000000
--- a/utils/test/reporting/functest/img/gauge_0.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_100.png b/utils/test/reporting/functest/img/gauge_100.png
deleted file mode 100644
index e199e1561..000000000
--- a/utils/test/reporting/functest/img/gauge_100.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_16.7.png b/utils/test/reporting/functest/img/gauge_16.7.png
deleted file mode 100644
index 3e3993c3b..000000000
--- a/utils/test/reporting/functest/img/gauge_16.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_25.png b/utils/test/reporting/functest/img/gauge_25.png
deleted file mode 100644
index 4923659b9..000000000
--- a/utils/test/reporting/functest/img/gauge_25.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_33.3.png b/utils/test/reporting/functest/img/gauge_33.3.png
deleted file mode 100644
index 364574b4a..000000000
--- a/utils/test/reporting/functest/img/gauge_33.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_41.7.png b/utils/test/reporting/functest/img/gauge_41.7.png
deleted file mode 100644
index 8c3e910fa..000000000
--- a/utils/test/reporting/functest/img/gauge_41.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_50.png b/utils/test/reporting/functest/img/gauge_50.png
deleted file mode 100644
index 2874b9fcf..000000000
--- a/utils/test/reporting/functest/img/gauge_50.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_58.3.png b/utils/test/reporting/functest/img/gauge_58.3.png
deleted file mode 100644
index beedc8aa9..000000000
--- a/utils/test/reporting/functest/img/gauge_58.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_66.7.png b/utils/test/reporting/functest/img/gauge_66.7.png
deleted file mode 100644
index 93f44d133..000000000
--- a/utils/test/reporting/functest/img/gauge_66.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_75.png b/utils/test/reporting/functest/img/gauge_75.png
deleted file mode 100644
index 9fc261ff8..000000000
--- a/utils/test/reporting/functest/img/gauge_75.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_8.3.png b/utils/test/reporting/functest/img/gauge_8.3.png
deleted file mode 100644
index 59f86571e..000000000
--- a/utils/test/reporting/functest/img/gauge_8.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_83.3.png b/utils/test/reporting/functest/img/gauge_83.3.png
deleted file mode 100644
index 27ae4ec54..000000000
--- a/utils/test/reporting/functest/img/gauge_83.3.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/gauge_91.7.png b/utils/test/reporting/functest/img/gauge_91.7.png
deleted file mode 100644
index 280865714..000000000
--- a/utils/test/reporting/functest/img/gauge_91.7.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/icon-nok.png b/utils/test/reporting/functest/img/icon-nok.png
deleted file mode 100644
index 526b5294b..000000000
--- a/utils/test/reporting/functest/img/icon-nok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/icon-ok.png b/utils/test/reporting/functest/img/icon-ok.png
deleted file mode 100644
index 3a9de2e89..000000000
--- a/utils/test/reporting/functest/img/icon-ok.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-clear.png b/utils/test/reporting/functest/img/weather-clear.png
deleted file mode 100644
index a0d967750..000000000
--- a/utils/test/reporting/functest/img/weather-clear.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/functest/img/weather-few-clouds.png
deleted file mode 100644
index acfa78398..000000000
--- a/utils/test/reporting/functest/img/weather-few-clouds.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-overcast.png b/utils/test/reporting/functest/img/weather-overcast.png
deleted file mode 100644
index 4296246d0..000000000
--- a/utils/test/reporting/functest/img/weather-overcast.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/img/weather-storm.png b/utils/test/reporting/functest/img/weather-storm.png
deleted file mode 100644
index 956f0e20f..000000000
--- a/utils/test/reporting/functest/img/weather-storm.png
+++ /dev/null
Binary files differ
diff --git a/utils/test/reporting/functest/index.html b/utils/test/reporting/functest/index.html
deleted file mode 100644
index bb1bce209..000000000
--- a/utils/test/reporting/functest/index.html
+++ /dev/null
@@ -1,53 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Functest reporting page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-main">
- <h2>Functest</h2>
- This project develops test suites that cover functional test cases in OPNFV.
- <br>The test suites are integrated in the continuous integration (CI) framework and used to evaluate/validate scenarios.
- <br> Weekly meeting: every Tuesday 8 AM UTC
- <br> IRC channel: #opnfv-testperf
-
- <br>
- <h2>Useful Links</h2>
- <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
- <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
- <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
- <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
- <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
-
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
deleted file mode 100755
index 77ab7840f..000000000
--- a/utils/test/reporting/functest/reporting-status.py
+++ /dev/null
@@ -1,306 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import jinja2
-import os
-import sys
-import time
-
-import testCase as tc
-import scenarioResult as sr
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-# Logger
-logger = rp_utils.getLogger("Functest-Status")
-
-# Initialization
-testValid = []
-otherTestCases = []
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-# init just connection_check to get the list of scenarios
-# as all the scenarios run connection_check
-healthcheck = tc.TestCase("connection_check", "functest", -1)
-
-# Retrieve the Functest configuration to detect which tests are relevant
-# according to the installer and scenario
-cf = rp_utils.get_config('functest.test_conf')
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-installers = rp_utils.get_config('general.installers')
-blacklist = rp_utils.get_config('functest.blacklist')
-log_level = rp_utils.get_config('general.log.log_level')
-exclude_noha = rp_utils.get_config('functest.exclude_noha')
-exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-
-functest_yaml_config = rp_utils.getFunctestConfig()
-
-logger.info("*******************************************")
-logger.info("* *")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention: %s days *" % period)
-logger.info("* Log level: %s *" % log_level)
-logger.info("* *")
-logger.info("* Virtual PODs excluded: %s *" % exclude_virtual)
-logger.info("* NOHA scenarios excluded: %s *" % exclude_noha)
-logger.info("* *")
-logger.info("*******************************************")
-
-# Retrieve test cases of Tier 1 (smoke)
-config_tiers = functest_yaml_config.get("tiers")
-
-# we consider Tier 0 (healthcheck), Tier 1 (smoke) and Tier 2 (features)
-# to validate scenarios
-# Tiers > 2 are not used to validate scenarios but we display the results anyway
-# this is tricky for the API as some tests are Functest tests
-# while others are declared directly in the feature projects
-for tier in config_tiers:
- if tier['order'] >= 0 and tier['order'] < 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- testValid.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
- elif tier['order'] == 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- testValid.append(tc.TestCase(case['case_name'],
- case['case_name'],
- case['dependencies']))
- elif tier['order'] > 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
-
-logger.debug("Functest reporting start")
-
-# For all the versions
-for version in versions:
- # For all the installers
- scenario_directory = "./display/" + version + "/functest/"
- scenario_file_name = scenario_directory + "scenario_history.txt"
-
- # check that the directory exists, if not create it
- # (first run on new version)
- if not os.path.exists(scenario_directory):
- os.makedirs(scenario_directory)
-
- # initiate scenario file if it does not exist
- if not os.path.isfile(scenario_file_name):
- with open(scenario_file_name, "a") as my_file:
- logger.debug("Create scenario file: %s" % scenario_file_name)
- my_file.write("date,scenario,installer,detail,score\n")
-
- for installer in installers:
-
- # get scenarios
- scenario_results = rp_utils.getScenarios(healthcheck,
- installer,
- version)
- # get the number of supported architectures (x86, aarch64)
- architectures = rp_utils.getArchitectures(scenario_results)
- logger.info("Supported architectures: {}".format(architectures))
-
- for architecture in architectures:
- logger.info("architecture: {}".format(architecture))
- # Consider only the results for the selected architecture
- # i.e. drop x86 for aarch64 and vice versa
- filter_results = rp_utils.filterArchitecture(scenario_results,
- architecture)
- scenario_stats = rp_utils.getScenarioStats(filter_results)
- items = {}
- scenario_result_criteria = {}
-
- # if more than one architecture is supported,
- # append the architecture to the installer name
- installer_display = installer
- if (len(architectures) > 1):
- installer_display = installer + "@" + architecture
-
- # For all the scenarios get results
- for s, s_result in filter_results.items():
- logger.info("---------------------------------")
- logger.info("installer %s, version %s, scenario %s:" %
- (installer, version, s))
- logger.debug("Scenario results: %s" % s_result)
-
- # Green or Red light for a given scenario
- nb_test_runnable_for_this_scenario = 0
- scenario_score = 0
- # url of the last jenkins log corresponding to a given
- # scenario
- s_url = ""
- if len(s_result) > 0:
- build_tag = s_result[len(s_result)-1]['build_tag']
- logger.debug("Build tag: %s" % build_tag)
- s_url = rp_utils.getJenkinsUrl(build_tag)
- if s_url is None:
- s_url = "http://testresults.opnfv.org/reporting"
- logger.info("last jenkins url: %s" % s_url)
- testCases2BeDisplayed = []
- # Check whether the test case is runnable on this installer/scenario
- # for the test cases used for scenario validation
- try:
- # 1) Manage the test cases for the scenario validation
- # concretely Tiers 0-2
- for test_case in testValid:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
- logger.info(" Searching results for case %s " %
- (displayName))
- result = rp_utils.getResult(name, installer,
- s, version)
- # if no result set the value to 0
- if result < 0:
- result = 0
- logger.info(" >>>> Test score = " + str(result))
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases for the scenario qualification
- # concretely Tiers > 2
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- logger.debug("testcase %s (%s) is %s" %
- (test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable))
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- logger.info(" Searching results for case %s " %
- (displayName))
- result = rp_utils.getResult(name, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(
- name,
- project,
- "",
- result,
- True,
- 4))
- else:
- logger.debug("No results found")
-
- items[s] = testCases2BeDisplayed
- except Exception:
- logger.error("Error: installer %s, version %s, scenario %s"
- % (installer, version, s))
- logger.error("No data available: %s" % (sys.exc_info()[0]))
-
- # **********************************************
- # Evaluate the results for scenario validation
- # **********************************************
- # the validation criteria = nb runnable tests x 3
- # because each test case = 0,1,2 or 3
- scenario_criteria = nb_test_runnable_for_this_scenario * 3
- # if 0 runnable tests set criteria at a high value
- if scenario_criteria < 1:
- scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA
-
- s_score = str(scenario_score) + "/" + str(scenario_criteria)
- s_score_percent = rp_utils.getScenarioPercent(
- scenario_score,
- scenario_criteria)
-
- s_status = "KO"
- if scenario_score < scenario_criteria:
- logger.info(">>>> scenario not OK, score = %s/%s" %
- (scenario_score, scenario_criteria))
- s_status = "KO"
- else:
- logger.info(">>>>> scenario OK, save the information")
- s_status = "OK"
- path_validation_file = ("./display/" + version +
- "/functest/" +
- "validated_scenario_history.txt")
- with open(path_validation_file, "a") as f:
- time_format = "%Y-%m-%d %H:%M"
- info = (datetime.datetime.now().strftime(time_format) +
- ";" + installer_display + ";" + s + "\n")
- f.write(info)
-
- # Save daily results in a file
- with open(scenario_file_name, "a") as f:
- info = (reportingDate + "," + s + "," + installer_display +
- "," + s_score + "," +
- str(round(s_score_percent)) + "\n")
- f.write(info)
-
- scenario_result_criteria[s] = sr.ScenarioResult(
- s_status,
- s_score,
- s_score_percent,
- s_url)
- logger.info("--------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(
- loader=templateLoader, autoescape=True)
-
- TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(
- scenario_stats=scenario_stats,
- scenario_results=scenario_result_criteria,
- items=items,
- installer=installer_display,
- period=period,
- version=version,
- date=reportingDate)
-
- with open("./display/" + version +
- "/functest/status-" +
- installer_display + ".html", "wb") as fh:
- fh.write(outputText)
-
- logger.info("Manage export CSV & PDF")
- rp_utils.export_csv(scenario_file_name, installer_display, version)
- logger.info("CSV generated...")
-
- # Generate outputs for export
- # pdf
- # TODO Change once web site updated...use the current one
- # to test pdf production
- url_pdf = rp_utils.get_config('general.url')
- pdf_path = ("./display/" + version +
- "/functest/status-" + installer_display + ".html")
- pdf_doc_name = ("./display/" + version +
- "/functest/status-" + installer_display + ".pdf")
- rp_utils.export_pdf(pdf_path, pdf_doc_name)
- logger.info("PDF generated...")
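
The scoring rule implemented above reduces each runnable test case to a score between 0 and 3 and only validates a scenario when the total reaches the theoretical maximum (number of runnable tests x 3). A minimal sketch of that rule in isolation; the input scores are made up, and the percentage is assumed to be the plain ratio computed by rp_utils.getScenarioPercent:

    # Sketch of the scenario validation rule; real scores come from
    # rp_utils.getResult() for each runnable test case.
    MAX_SCORE_PER_TEST = 3

    def scenario_status(test_scores):
        scenario_score = sum(test_scores)
        scenario_criteria = len(test_scores) * MAX_SCORE_PER_TEST
        if scenario_criteria < 1:
            scenario_criteria = 50  # MAX_SCENARIO_CRITERIA guard
        percent = 100.0 * scenario_score / scenario_criteria
        status = "OK" if scenario_score >= scenario_criteria else "KO"
        return "%s/%s" % (scenario_score, scenario_criteria), percent, status

    # three runnable tests, one of them flaky: the scenario is not validated
    print(scenario_status([3, 3, 2]))  # ('8/9', 88.88..., 'KO')
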
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
deleted file mode 100755
index 0304298b4..000000000
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# SPDX-license-identifier: Apache-2.0
-
-from urllib2 import Request, urlopen, URLError
-from datetime import datetime
-import json
-import jinja2
-import os
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-installers = rp_utils.get_config('general.installers')
-items = ["tests", "Success rate", "duration"]
-
-CURRENT_DIR = os.getcwd()
-
-PERIOD = rp_utils.get_config('general.period')
-criteria_nb_test = 165
-criteria_duration = 1800
-criteria_success_rate = 90
-
-logger = rp_utils.getLogger("Tempest")
-logger.info("************************************************")
-logger.info("* Generating reporting Tempest_smoke_serial *")
-logger.info("* Data retention = %s days *" % PERIOD)
-logger.info("* *")
-logger.info("************************************************")
-
-logger.info("Success criteria:")
-logger.info("nb tests executed > %s " % criteria_nb_test)
-logger.info("test duration < %s s " % criteria_duration)
-logger.info("success rate > %s %% " % criteria_success_rate)
-
-# For all the versions
-for version in rp_utils.get_config('general.versions'):
- for installer in installers:
- # we consider the Tempest results of the last PERIOD days
- url = ("http://" + rp_utils.get_config('testapi.url') +
- "?case=tempest_smoke_serial")
- request = Request(url + '&period=' + str(PERIOD) +
- '&installer=' + installer +
- '&version=' + version)
- logger.info("Search tempest_smoke_serial results for installer %s"
- " for version %s"
- % (installer, version))
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as e:
- logger.error("Error code: %s" % e)
-
- test_results = results['results']
-
- scenario_results = {}
- criteria = {}
- errors = {}
-
- for r in test_results:
- # Retrieve all the scenarios per installer
- # In Brahmaputra the version field was used;
- # since Colorado the scenario field is used
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- # For each scenario, we build a result object to deal with
- # results, criteria and error handling
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
-
- # retrieve results
- # ****************
- nb_tests_run = result['details']['tests']
- nb_tests_failed = result['details']['failures']
- if nb_tests_run != 0:
- success_rate = (100.0 * (int(nb_tests_run) -
- int(nb_tests_failed)) /
- int(nb_tests_run))
- else:
- success_rate = 0
-
- result['details']["tests"] = nb_tests_run
- result['details']["Success rate"] = str(success_rate) + "%"
-
- # Criteria management
- # *******************
- crit_tests = False
- crit_rate = False
- crit_time = False
-
- # Expect that at least 165 tests are run
- if nb_tests_run >= criteria_nb_test:
- crit_tests = True
-
- # Expect a success rate of at least 90%
- if success_rate >= criteria_success_rate:
- crit_rate = True
-
- # Expect the suite duration to be under 30 minutes
- stop_date = datetime.strptime(result['stop_date'],
- '%Y-%m-%d %H:%M:%S')
- start_date = datetime.strptime(result['start_date'],
- '%Y-%m-%d %H:%M:%S')
-
- delta = stop_date - start_date
- if (delta.total_seconds() < criteria_duration):
- crit_time = True
-
- result['criteria'] = {'tests': crit_tests,
- 'Success rate': crit_rate,
- 'duration': crit_time}
- try:
- logger.debug("Scenario %s, Installer %s"
- % (s_result[1]['scenario'], installer))
- logger.debug("Nb Test run: %s" % nb_tests_run)
- logger.debug("Test duration: %s"
- % result['details']['duration'])
- logger.debug("Success rate: %s" % success_rate)
- except Exception:
- logger.error("Data format error")
-
- # Error management
- # ****************
- try:
- errors = result['details']['errors']
- result['errors'] = errors.replace('{0}', '')
- except Exception:
- logger.error("Error field not present (Brahmaputra runs?)")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./functest/template/index-tempest-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- items=items,
- installer=installer)
-
- with open("./display/" + version +
- "/functest/tempest-" + installer + ".html", "wb") as fh:
- fh.write(outputText)
-logger.info("Tempest automatic reporting successfully generated.")
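
The three pass/fail criteria applied to each Tempest run above (test count, success rate, duration) condense into one small predicate. A sketch with the same thresholds and invented sample numbers; the float arithmetic matters, since integer division would truncate the rate under Python 2:

    # Tempest criteria check; thresholds mirror the script, inputs are examples.
    CRITERIA_NB_TEST = 165
    CRITERIA_SUCCESS_RATE = 90
    CRITERIA_DURATION = 1800  # seconds

    def tempest_criteria(nb_run, nb_failed, duration_s):
        rate = 100.0 * (nb_run - nb_failed) / nb_run if nb_run else 0
        return {'tests': nb_run >= CRITERIA_NB_TEST,
                'Success rate': rate >= CRITERIA_SUCCESS_RATE,
                'duration': duration_s < CRITERIA_DURATION}

    print(tempest_criteria(200, 5, 1500))
    # {'tests': True, 'Success rate': True, 'duration': True}
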
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
deleted file mode 100755
index b236b8963..000000000
--- a/utils/test/reporting/functest/reporting-vims.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from urllib2 import Request, urlopen, URLError
-import json
-import jinja2
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-logger = rp_utils.getLogger("vIMS")
-
-
-def sig_test_format(sig_test):
- nbPassed = 0
- nbFailures = 0
- nbSkipped = 0
- for data_test in sig_test:
- if data_test['result'] == "Passed":
- nbPassed += 1
- elif data_test['result'] == "Failed":
- nbFailures += 1
- elif data_test['result'] == "Skipped":
- nbSkipped += 1
- total_sig_test_result = {}
- total_sig_test_result['passed'] = nbPassed
- total_sig_test_result['failures'] = nbFailures
- total_sig_test_result['skipped'] = nbSkipped
- return total_sig_test_result
-
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-url_base = rp_utils.get_config('testapi.url')
-
-logger.info("****************************************")
-logger.info("* Generating reporting vIMS *")
-logger.info("* Data retention = %s days *" % period)
-logger.info("* *")
-logger.info("****************************************")
-
-installers = rp_utils.get_config('general.installers')
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
-logger.info("Start processing....")
-
-# For all the versions
-for version in versions:
- for installer in installers:
- logger.info("Search vIMS results for installer: %s, version: %s"
- % (installer, version))
- request = Request("http://" + url_base + '?case=vims&installer=' +
- installer + '&version=' + version)
-
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError as e:
- logger.error("Error code: %s" % e)
-
- test_results = results['results']
-
- logger.debug("Results found: %s" % test_results)
-
- scenario_results = {}
- for r in test_results:
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- logger.debug("Search for success criteria")
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
- sig_test = result['details']['sig_test']['result']
- if not sig_test == "" and isinstance(sig_test, list):
- format_result = sig_test_format(sig_test)
- if format_result['failures'] > format_result['passed']:
- result['details']['sig_test']['duration'] = 0
- result['details']['sig_test']['result'] = format_result
- nb_step_ok = 0
- nb_step = len(result['details'])
-
- for step_name, step_result in result['details'].items():
- if step_result['duration'] != 0:
- nb_step_ok += 1
- m, s = divmod(step_result['duration'], 60)
- m_display = ""
- if int(m) != 0:
- m_display += str(int(m)) + "m "
-
- step_result['duration_display'] = (m_display +
- str(int(s)) + "s")
-
- result['pr_step_ok'] = 0
- if nb_step != 0:
- result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
- try:
- logger.debug("Scenario %s, Installer %s"
- % (s_result[1]['scenario'], installer))
- res = result['details']['orchestrator']['duration']
- logger.debug("Orchestrator deployment: %s s"
- % res)
- logger.debug("vIMS deployment: %s s"
- % result['details']['vIMS']['duration'])
- logger.debug("Signaling testing: %s s"
- % result['details']['sig_test']['duration'])
- logger.debug("Signaling testing results: %s"
- % format_result)
- except Exception:
- logger.error("Data badly formatted")
- logger.debug("----------------------------------------")
-
- templateLoader = jinja2.FileSystemLoader(".")
- templateEnv = jinja2.Environment(loader=templateLoader,
- autoescape=True)
-
- TEMPLATE_FILE = "./functest/template/index-vims-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- step_order=step_order,
- installer=installer)
-
- with open("./display/" + version + "/functest/vims-" +
- installer + ".html", "wb") as fh:
- fh.write(outputText)
-
-logger.info("vIMS report successfully generated")
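
The vIMS page derives its per-run progress bar from the step durations: a step counts as passed when its duration is non-zero, and each passed step gets an "Xm Ys" display string. A compact sketch of that accounting; the details dict is invented:

    # Step accounting as in the loop above; 'details' is a made-up example.
    def step_summary(details):
        nb_step_ok = 0
        for step in details.values():
            if step['duration'] != 0:
                nb_step_ok += 1
                m, s = divmod(step['duration'], 60)
                step['duration_display'] = ("%dm " % m if m else "") + "%ds" % s
        return 100.0 * nb_step_ok / len(details) if details else 0

    details = {'orchestrator': {'duration': 732}, 'vIMS': {'duration': 0}}
    print(step_summary(details))                        # 50.0
    print(details['orchestrator']['duration_display'])  # 12m 12s
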
diff --git a/utils/test/reporting/functest/scenarioResult.py b/utils/test/reporting/functest/scenarioResult.py
deleted file mode 100644
index 5a54eed96..000000000
--- a/utils/test/reporting/functest/scenarioResult.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-class ScenarioResult(object):
-
- def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
- self.status = status
- self.score = score
- self.score_percent = score_percent
- self.url_lastrun = url_lastrun
-
- def getStatus(self):
- return self.status
-
- def getScore(self):
- return self.score
-
- def getScorePercent(self):
- return self.score_percent
-
- def getUrlLastRun(self):
- return self.url_lastrun
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
deleted file mode 100644
index cc4edaac5..000000000
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ /dev/null
@@ -1,157 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
- <script type="text/javascript" src="../../js/gauge.js"></script>
- <script type="text/javascript" src="../../js/trend.js"></script>
- <script>
- function onDocumentReady() {
- // Gauge management
- {% for scenario in scenario_stats.iteritems() -%}
- var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
- {%- endfor %}
-
- // assign success rate to the gauge
- function updateReadings() {
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
- {%- endfor %}
- }
- updateReadings();
- }
-
- // trend line management
- d3.csv("./scenario_history.txt", function(data) {
- // ***************************************
- // Create the trend line
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- // for scenario {{scenario}}
- // Filter results
- var trend{{loop.index}} = data.filter(function(row) {
- return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
- })
- // Parse the date
- trend{{loop.index}}.forEach(function(d) {
- d.date = parseDate(d.date);
- d.score = +d.score
- });
- // Draw the trend line
- var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
- // ****************************************
- {%- endfor %}
- });
- if ( !window.isLoaded ) {
- window.addEventListener("load", function() {
- onDocumentReady();
- }, false);
- } else {
- onDocumentReady();
- }
-</script>
-<script type="text/javascript">
-$(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
-})
-</script>
-
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="status-apex.html">Apex</a></li>
- <li><a href="status-compass.html">Compass</a></li>
- <li><a href="status-fuel@x86.html">fuel@x86</a></li>
- <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
- <li><a href="status-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
-
- <div class="scenario-overview">
- <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
- <table class="table">
- <tr>
- <th width="40%">Scenario</th>
- <th width="20%">Status</th>
- <th width="20%">Trend</th>
- <th width="10%">Score</th>
- <th width="10%">Iteration</th>
- </tr>
- {% for scenario,iteration in scenario_stats.iteritems() -%}
- <tr class="tr-ok">
- <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
- <td><div id="gaugeScenario{{loop.index}}"></div></td>
- <td><div id="trend_svg{{loop.index}}"></div></td>
- <td>{{scenario_results[scenario].getScore()}}</td>
- <td>{{iteration}}</td>
- </tr>
- {%- endfor %}
- </table>
- </div>
-
-
- {% for scenario, iteration in scenario_stats.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario}}</b></h3>
- </div>
- <div class="panel panel-default">
- <div class="panel-heading">
- <span class="panel-header-item">
- </span>
- </div>
- <table class="table">
- <tr>
- {% for test in items[scenario] -%}
- <th>
- {% if test.getCriteria() > -1 -%}
- {{test.getDisplayName() }}
- {%- endif %}
- {% if test.getTier() > 3 -%}
- *
- {%- endif %}
- </th>
- {%- endfor %}
- </tr>
- <tr class="tr-weather-weather">
- {% for test in items[scenario] -%}
- {% if test.getCriteria() > 2 -%}
- <td><img src="../../img/weather-clear.png"></td>
- {%- elif test.getCriteria() > 1 -%}
- <td><img src="../../img/weather-few-clouds.png"></td>
- {%- elif test.getCriteria() > 0 -%}
- <td><img src="../../img/weather-overcast.png"></td>
- {%- elif test.getCriteria() > -1 -%}
- <td><img src="../../img/weather-storm.png"></td>
- {%- endif %}
- {%- endfor %}
- </tr>
- </table>
- </div>
- </div>
- {%- endfor %}
- see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
- <div> <br>
- <a href="./status-{{installer}}.pdf" class="myButtonPdf">Export to PDF</a> <a href="./scenario_history_{{installer}}.txt" class="myButtonCSV">Export to CSV</a>
- </div>
- </div>
- <div class="col-md-1"></div>
-</div>
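
The trend line in this template is drawn from the scenario_history.txt file that reporting-status.py appends to on each run; its columns are date,scenario,installer,detail,score, where detail holds the raw score string and score the rounded percentage. Illustrative rows, values invented:

    date,scenario,installer,detail,score
    2017-06-01 08:00,os-nosdn-nofeature-ha,fuel,24/27,89.0
    2017-06-02 08:00,os-odl_l2-bgpvpn-ha,fuel@x86,27/27,100.0
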
diff --git a/utils/test/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html
deleted file mode 100644
index 3a222276e..000000000
--- a/utils/test/reporting/functest/template/index-tempest-tmpl.html
+++ /dev/null
@@ -1,95 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">Tempest status page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="tempest-apex.html">Apex</a></li>
- <li><a href="tempest-compass.html">Compass</a></li>
- <li><a href="tempest-daisy.html">Daisy</a></li>
- <li><a href="tempest-fuel.html">Fuel</a></li>
- <li><a href="tempest-joid.html">Joid</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- {% for scenario_name, results in scenario_results.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario_name}}</b></h3>
- </div>
- {% for result in results -%}
- {% if loop.index > 2 -%}
- <div class="panel panel-default" hidden>
- {%- else -%}
- <div class="panel panel-default">
- {%- endif %}
- <div class="panel-heading">
- <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
- <span class="panel-header-item">
- <h4><b>{{result.start_date}}</b></h4>
- </span>
- <span class="badge panel-pod-name">{{result.pod_name}}</span>
- </div>
- <table class="table">
- <tr>
- <th width="20%">Item</th>
- <th width="10%">Result</th>
- <th width="10%">Status</th>
- <th width="60%">Errors</th>
- </tr>
- {% for item in items -%}
- {% if item in result.details.keys() -%}
- {% if result.criteria[item] -%}
- <tr class="tr-ok">
- <td>{{item}}</td>
- <td>{{result.details[item]}}</td>
- <td><span class="glyphicon glyphicon-ok"></span></td>
- {% if item is equalto "Success rate" %}
- <td>{{result.errors}}</td>
- {% endif %}
- </tr>
- {%- else -%}
- <tr class="tr-danger">
- <td>{{item}}</td>
- <td>{{result.details[item]}}</td>
- <td><span class="glyphicon glyphicon-remove"></span></td>
- {% if item is equalto "Success rate" %}
- <td>{{result.errors}}</td>
- {% endif %}
- </tr>
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- </table>
- </div>
- {%- endfor %}
- <button type="button" class="btn btn-more">More than two</button>
- </div>
- {%- endfor %}
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html
deleted file mode 100644
index cd51607b7..000000000
--- a/utils/test/reporting/functest/template/index-vims-tmpl.html
+++ /dev/null
@@ -1,92 +0,0 @@
- <html>
- <head>
- <meta charset="utf-8">
- <!-- Bootstrap core CSS -->
- <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
- <link href="../../css/default.css" rel="stylesheet">
- <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
- <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
- <script type="text/javascript">
- $(document).ready(function (){
- $(".btn-more").click(function() {
- $(this).hide();
- $(this).parent().find(".panel-default").show();
- });
- })
- </script>
- </head>
- <body>
- <div class="container">
- <div class="masthead">
- <h3 class="text-muted">vIMS status page</h3>
- <nav>
- <ul class="nav nav-justified">
- <li class="active"><a href="../../index.html">Home</a></li>
- <li><a href="vims-fuel.html">Fuel</a></li>
- <li><a href="vims-compass.html">Compass</a></li>
- <li><a href="vims-daisy.html">Daisy</a></li>
- <li><a href="vims-joid.html">JOID</a></li>
- <li><a href="vims-apex.html">APEX</a></li>
- </ul>
- </nav>
- </div>
-<div class="row">
- <div class="col-md-1"></div>
- <div class="col-md-10">
- <div class="page-header">
- <h2>{{installer}}</h2>
- </div>
- {% for scenario_name, results in scenario_results.iteritems() -%}
- <div class="scenario-part">
- <div class="page-header">
- <h3><span class="glyphicon glyphicon-chevron-right"></span> <b>{{scenario_name}}</b></h3>
- </div>
- {% for result in results -%}
- {% if loop.index > 2 -%}
- <div class="panel panel-default" hidden>
- {%- else -%}
- <div class="panel panel-default">
- {%- endif %}
- <div class="panel-heading">
- <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
- <span class="panel-header-item">
- <h4><b>{{result.start_date}}</b></h4>
- </span>
- <span class="badge panel-pod-name">{{result.pod_name}}</span>
- </div>
- <table class="table">
- <tr>
- <th width="20%">Step</th>
- <th width="10%">Status</th>
- <th width="10%">Duration</th>
- <th width="60%">Result</th>
- </tr>
- {% for step_od_name in step_order -%}
- {% if step_od_name in result.details.keys() -%}
- {% set step_result = result.details[step_od_name] -%}
- {% if step_result.duration != 0 -%}
- <tr class="tr-ok">
- <td>{{step_od_name}}</td>
- <td><span class="glyphicon glyphicon-ok"></span></td>
- <td><b>{{step_result.duration_display}}</b></td>
- <td>{{step_result.result}}</td>
- </tr>
- {%- else -%}
- <tr class="tr-danger">
- <td>{{step_od_name}}</td>
- <td><span class="glyphicon glyphicon-remove"></span></td>
- <td><b>0s</b></td>
- <td>{{step_result.result}}</td>
- </tr>
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- </table>
- </div>
- {%- endfor %}
- <button type="button" class="btn btn-more">More than two</button>
- </div>
- {%- endfor %}
- </div>
- <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
deleted file mode 100644
index 9834f0753..000000000
--- a/utils/test/reporting/functest/testCase.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import re
-
-
-class TestCase(object):
-
- def __init__(self, name, project, constraints,
- criteria=-1, isRunnable=True, tier=-1):
- self.name = name
- self.project = project
- self.constraints = constraints
- self.criteria = criteria
- self.isRunnable = isRunnable
- self.tier = tier
- display_name_matrix = {'healthcheck': 'healthcheck',
- 'vping_ssh': 'vPing (ssh)',
- 'vping_userdata': 'vPing (userdata)',
- 'odl': 'ODL',
- 'onos': 'ONOS',
- 'ocl': 'OCL',
- 'tempest_smoke_serial': 'Tempest (smoke)',
- 'tempest_full_parallel': 'Tempest (full)',
- 'tempest_defcore': 'Tempest (Defcore)',
- 'refstack_defcore': 'Refstack',
- 'rally_sanity': 'Rally (smoke)',
- 'bgpvpn': 'bgpvpn',
- 'rally_full': 'Rally (full)',
- 'vims': 'vIMS',
- 'doctor-notification': 'Doctor',
- 'promise': 'Promise',
- 'moon': 'Moon',
- 'copper': 'Copper',
- 'security_scan': 'Security',
- 'multisite': 'Multisite',
- 'domino-multinode': 'Domino',
- 'functest-odl-sfc': 'SFC',
- 'onos_sfc': 'SFC',
- 'parser-basics': 'Parser',
- 'connection_check': 'Health (connection)',
- 'api_check': 'Health (api)',
- 'snaps_smoke': 'SNAPS',
- 'snaps_health_check': 'Health (dhcp)',
- 'gluon_vping': 'Netready',
- 'fds': 'FDS',
- 'cloudify_ims': 'vIMS (Cloudify)',
- 'orchestra_ims': 'OpenIMS (OpenBaton)',
- 'opera_ims': 'vIMS (Open-O)',
- 'vyos_vrouter': 'vyos',
- 'barometercollectd': 'Barometer',
- 'odl_netvirt': 'Netvirt'}
- try:
- self.displayName = display_name_matrix[self.name]
- except KeyError:
- self.displayName = "unknown"
-
- def getName(self):
- return self.name
-
- def getProject(self):
- return self.project
-
- def getConstraints(self):
- return self.constraints
-
- def getCriteria(self):
- return self.criteria
-
- def getTier(self):
- return self.tier
-
- def setCriteria(self, criteria):
- self.criteria = criteria
-
- def setIsRunnable(self, isRunnable):
- self.isRunnable = isRunnable
-
- def checkRunnable(self, installer, scenario, config):
- # Re-use Functest declaration
- # Retrieve Functest configuration file functest_config.yaml
- is_runnable = True
- config_test = config
- # print " *********************** "
- # print TEST_ENV
- # print " ---------------------- "
- # print "case = " + self.name
- # print "installer = " + installer
- # print "scenario = " + scenario
- # print "project = " + self.project
-
- # Retrieve test constraints
- # Retrieve test execution param
- test_execution_context = {"installer": installer,
- "scenario": scenario}
-
- # By default we assume that all the tests are always runnable...
- # if test_env not empty => dependencies to be checked
- if config_test is not None and len(config_test) > 0:
- # possible criteria = ["installer", "scenario"]
- # consider test criteria from config file
- # compare against the CI environment through CI env variables
- for criteria in config_test:
- if re.search(config_test[criteria],
- test_execution_context[criteria]) is None:
- # print "Test "+ test + " cannot be run on the environment"
- is_runnable = False
- # print is_runnable
- self.isRunnable = is_runnable
-
- def toString(self):
- testcase = ("Name=" + self.name + ";Criteria=" +
- str(self.criteria) + ";Project=" + self.project +
- ";Constraints=" + str(self.constraints) +
- ";IsRunnable=" + str(self.isRunnable))
- return testcase
-
- def getDisplayName(self):
- return self.displayName
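
checkRunnable above declares a test case not runnable as soon as one of its constraint regexes fails to match the execution context. The same logic as a standalone sketch; the constraints dict is hypothetical, real ones come from the Functest configuration:

    import re

    # Constraint matching as in TestCase.checkRunnable().
    def is_runnable(installer, scenario, constraints):
        context = {"installer": installer, "scenario": scenario}
        if not constraints:
            return True  # no declared dependency: always runnable
        return all(re.search(constraints[key], context[key])
                   for key in constraints)

    print(is_runnable("fuel", "os-odl_l2-bgpvpn-ha", {"scenario": "bgpvpn"}))    # True
    print(is_runnable("joid", "os-nosdn-nofeature-ha", {"scenario": "bgpvpn"}))  # False
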