-rw-r--r--   reporting/css/default.css                                       4
-rwxr-xr-x   reporting/docker/reporting.sh                                   5
-rwxr-xr-x   reporting/reporting/functest/reporting-status.py              100
-rw-r--r--   reporting/reporting/functest/template/index-status-tmpl.html   25
-rw-r--r--   reporting/reporting/functest/testCase.py                       60
-rw-r--r--   reporting/reporting/reporting.yaml                              4
6 files changed, 88 insertions(+), 110 deletions(-)
diff --git a/reporting/css/default.css b/reporting/css/default.css
index e32fa5f..686e886 100644
--- a/reporting/css/default.css
+++ b/reporting/css/default.css
@@ -26,6 +26,7 @@
width: 100%;
background-color: #0095a2
}
+
.panel-default > .panel-heading h4 {
color: white;
}
@@ -55,7 +56,7 @@ td{
background-color: #0095a2;
}
-h1 {
+h1 {
display: block;
font-size: 2em;
margin-top: 0.67em;
@@ -191,4 +192,3 @@ h2 {
position:relative;
top:1px;
}
-
diff --git a/reporting/docker/reporting.sh b/reporting/docker/reporting.sh
index 8ca7eac..8008bf6 100755
--- a/reporting/docker/reporting.sh
+++ b/reporting/docker/reporting.sh
@@ -25,11 +25,6 @@ cp -Rf 3rd_party/html/* display
cp -Rf img display
cp -Rf js display
-for i in "${versions[@]}"
-do
- cp -Rf html/functest.html display/$i/functest
-done
-
# if nothing is specified, run all the reporting generation
# project | option
# $1 | $2
diff --git a/reporting/reporting/functest/reporting-status.py b/reporting/reporting/functest/reporting-status.py
index 44ab921..e36aede 100755
--- a/reporting/reporting/functest/reporting-status.py
+++ b/reporting/reporting/functest/reporting-status.py
@@ -41,6 +41,7 @@ blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
+tiers_for_scoring = {'healthcheck', 'smoke', 'vnf', 'features'}
LOGGER.info("*******************************************")
LOGGER.info("* *")
@@ -56,10 +57,9 @@ LOGGER.info("*******************************************")
# For all the versions
for version in versions:
testValid = []
- otherTestCases = []
# Retrieve test cases of Tier 1 (smoke)
version_config = ""
- if version != "master":
+ if (version != "master" and version != "latest"):
version_config = "?h=stable/" + version
functest_yaml_config = rp_utils.getFunctestConfig(version_config)
config_tiers = functest_yaml_config.get("tiers")
@@ -71,25 +71,17 @@ for version in versions:
# tricky thing for the API as some tests are Functest tests
# other tests are declared directly in the feature projects
for tier in config_tiers:
- if tier['order'] >= 0 and tier['order'] < 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- testValid.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
- elif tier['order'] == 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- case['case_name'],
- case['dependencies']))
- elif tier['order'] > 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
+ for case in tier['testcases']:
+ try:
+ dependencies = case['dependencies']
+ except KeyError:
+ dependencies = ""
+ if case['case_name'] not in blacklist:
+ testValid.append(tc.TestCase(case['case_name'],
+ "functest",
+ dependencies,
+ tier=tier['name']))
LOGGER.debug("Functest reporting start")
# For all the installers
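
Note on the collapsed loop above: it treats a test case's 'dependencies' key as optional via try/except KeyError. An equivalent expression of the same intent, shown only as a sketch (the sample dict is illustrative):

    case = {'case_name': 'vping_ssh'}  # sample entry without 'dependencies'
    dependencies = case.get('dependencies', "")  # defaults to "" when absent
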
@@ -158,10 +150,8 @@ for version in versions:
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
try:
- # 1) Manage the test cases for the scenario validation
- # concretely Tiers 0-3
for test_case in testValid:
- test_case.checkRunnable(installer, s,
+ test_case.checkRunnable(installer, s, architecture,
test_case.getConstraints())
LOGGER.debug("testcase %s (%s) is %s",
test_case.getDisplayName(),
@@ -172,7 +162,8 @@ for version in versions:
name = test_case.getName()
displayName = test_case.getDisplayName()
project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
+ if test_case.getTier() in tiers_for_scoring:
+ nb_test_runnable_for_this_scenario += 1
LOGGER.info(" Searching results for case %s ",
displayName)
if "fuel" in installer:
@@ -188,53 +179,18 @@ for version in versions:
LOGGER.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases for the scenario qualification
- # concretely Tiers > 3
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- LOGGER.debug("testcase %s (%s) is %s",
- test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable)
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- LOGGER.info(" Searching results for case %s ",
- displayName)
- if "fuel" in installer:
- result = rp_utils.getCaseScoreFromBuildTag(
- name,
- s_result)
- else:
- result = rp_utils.getCaseScore(name, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(
- name,
- project,
- "",
- result,
- True,
- 4))
- else:
- LOGGER.debug("No results found")
+ testCases2BeDisplayed.append(
+ tc.TestCase(name,
+ project,
+ "",
+ result,
+ True,
+ tier=test_case.getTier()))
+ if test_case.getTier() in tiers_for_scoring:
+ scenario_score = scenario_score + result
items[s] = testCases2BeDisplayed
- except Exception: # pylint: disable=broad-except
+            except KeyError:
LOGGER.error("Error installer %s, version %s, scenario %s",
installer, version, s)
LOGGER.error("No data available: %s", sys.exc_info()[0])
@@ -262,6 +218,12 @@ for version in versions:
else:
k_score = 2
+            # TODO: for the scoring we should consider these tiers:
+            # - Healthcheck
+            # - Smoke
+            # - Vnf
+            # - components
+
scenario_criteria = nb_test_runnable_for_this_scenario*k_score
# score for reporting
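
To make the scoring change concrete, here is a small self-contained sketch of the arithmetic set up above: only runnable test cases whose tier is in tiers_for_scoring count toward the accumulated scenario_score and the runnable-test total, and scenario_criteria is that total times k_score. The sample results and the k_score value are illustrative assumptions, not data from the commit.

    # Illustrative sketch of the tier-filtered scoring introduced above
    tiers_for_scoring = {'healthcheck', 'smoke', 'vnf', 'features'}
    k_score = 3  # assumed here; the script derives it from scenario history

    # (tier, result) pairs standing in for one scenario's runnable tests
    results = [('healthcheck', 3), ('smoke', 2),
               ('benchmarking', 3), ('vnf', 1)]

    scenario_score = 0
    nb_test_runnable_for_this_scenario = 0
    for tier, result in results:
        if tier in tiers_for_scoring:  # 'benchmarking' is ignored for scoring
            nb_test_runnable_for_this_scenario += 1
            scenario_score += result

    scenario_criteria = nb_test_runnable_for_this_scenario * k_score
    print("%s/%s" % (scenario_score, scenario_criteria))  # 6/9
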
diff --git a/reporting/reporting/functest/template/index-status-tmpl.html b/reporting/reporting/functest/template/index-status-tmpl.html
index 50fc648..48b5a2d 100644
--- a/reporting/reporting/functest/template/index-status-tmpl.html
+++ b/reporting/reporting/functest/template/index-status-tmpl.html
@@ -144,33 +144,32 @@ $(document).ready(function (){
<span class="panel-header-item">
</span>
</div>
+ {% for tier in ['healthcheck', 'smoke', 'vnf', 'features'] -%}
<table class="table">
<tr>
+ <h2>{{tier}}</h2>
{% for test in items[scenario] -%}
- <th>
- {% if test.getCriteria() > -1 -%}
- {{test.getDisplayName() }}
+ {% if test.getCriteria() > -1 and test.getTier() == tier -%}
+ <th>{{test.getDisplayName() }}</th>
{%- endif %}
- {% if test.getTier() > 3 -%}
- *
- {%- endif %}
- </th>
- {%- endfor %}
+ {%- endfor %}
</tr>
<tr class="tr-weather-weather">
- {% for test in items[scenario] -%}
- {% if test.getCriteria() > 2 -%}
+ {% for test in items[scenario] -%}
+ {% if test.getCriteria() > 2 and test.getTier() == tier -%}
<td><img src="../../img/weather-clear.png"></td>
- {%- elif test.getCriteria() > 1 -%}
+ {%- elif test.getCriteria() > 1 and test.getTier() == tier -%}
<td><img src="../../img/weather-few-clouds.png"></td>
- {%- elif test.getCriteria() > 0 -%}
+ {%- elif test.getCriteria() > 0 and test.getTier() == tier -%}
<td><img src="../../img/weather-overcast.png"></td>
- {%- elif test.getCriteria() > -1 -%}
+ {%- elif test.getCriteria() > -1 and test.getTier() == tier -%}
<td><img src="../../img/weather-storm.png"></td>
{%- endif %}
{%- endfor %}
</tr>
</table>
+ <br><hr>
+ {%- endfor %}
</div>
</div>
{%- endfor %}
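
The weather-icon branches above bucket each test's criteria value with the same threshold ladder repeated four times; a compact Python rendering of that ladder for reference (the helper name is hypothetical, thresholds are copied from the template):

    # Hypothetical helper mirroring the Jinja threshold ladder above
    def weather_icon(criteria):
        if criteria > 2:
            return "weather-clear.png"
        elif criteria > 1:
            return "weather-few-clouds.png"
        elif criteria > 0:
            return "weather-overcast.png"
        elif criteria > -1:
            return "weather-storm.png"
        return None  # criteria == -1: no result, nothing is rendered

    assert weather_icon(3) == "weather-clear.png"
    assert weather_icon(0) == "weather-storm.png"
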
diff --git a/reporting/reporting/functest/testCase.py b/reporting/reporting/functest/testCase.py
index dff3f8c..5240533 100644
--- a/reporting/reporting/functest/testCase.py
+++ b/reporting/reporting/functest/testCase.py
@@ -43,10 +43,10 @@ class TestCase(object):
'functest-odl-sfc': 'SFC',
'onos_sfc': 'SFC',
'parser-basics': 'Parser',
- 'connection_check': 'Health (connection)',
- 'api_check': 'Health (api)',
+ 'connection_check': 'connectivity',
+ 'api_check': 'api',
'snaps_smoke': 'SNAPS',
- 'snaps_health_check': 'Health (dhcp)',
+ 'snaps_health_check': 'dhcp',
'gluon_vping': 'Netready',
'fds': 'FDS',
'cloudify_ims': 'vIMS (Cloudify)',
@@ -68,7 +68,18 @@ class TestCase(object):
'barbican': 'barbican',
'juju_epc': 'vEPC (Juju)',
'shaker': 'shaker',
- 'neutron_trunk': 'Neutron trunk'}
+ 'neutron_trunk': 'Neutron trunk',
+ 'tempest_scenario': 'tempest_scenario',
+ 'networking-bgpvpn': 'networking-bgpvpn',
+ 'networking-sfc': 'networking-sfc',
+ 'tempest_full': 'Tempest (full)',
+ 'cloudify': 'cloudify',
+ 'heat_ims': 'vIMS (Heat)',
+ 'vmtp': 'vmtp',
+ 'tempest_smoke': 'Tempest (smoke)',
+ 'neutron-tempest-plugin-api': 'Neutron API',
+ 'vgpu': 'vgpu',
+ 'stor4nfv_os': 'stor4nfv_os'}
try:
self.displayName = display_name_matrix[self.name]
except:
@@ -80,22 +91,22 @@ class TestCase(object):
def getProject(self):
return self.project
- def getConstraints(self):
- return self.constraints
-
def getCriteria(self):
return self.criteria
def getTier(self):
return self.tier
+ def getConstraints(self):
+ return self.constraints
+
def setCriteria(self, criteria):
self.criteria = criteria
def setIsRunnable(self, isRunnable):
self.isRunnable = isRunnable
- def checkRunnable(self, installer, scenario, config):
+ def checkRunnable(self, installer, scenario, arch, config):
# Re-use Functest declaration
# Retrieve Functest configuration file functest_config.yaml
is_runnable = True
@@ -110,27 +121,36 @@ class TestCase(object):
# Retrieve test constraints
# Retrieve test execution param
- test_execution_context = {"installer": installer,
- "scenario": scenario}
+ test_execution_context = {"INSTALLER_TYPE": installer,
+ "DEPLOY_SCENARIO": scenario,
+ "POD_ARCH": arch}
+
+ # 3 types of constraints
+ # INSTALLER_TYPE
+ # DEPLOY_SCENARIO
+ # POD_ARCH
# By default we assume that all the tests are always runnable...
# if test_env not empty => dependencies to be checked
- if config_test is not None and len(config_test) > 0:
- # possible criteria = ["installer", "scenario"]
- # consider test criteria from config file
- # compare towards CI env through CI en variable
- for criteria in config_test:
- if re.search(config_test[criteria],
- test_execution_context[criteria]) is None:
- # print "Test "+ test + " cannot be run on the environment"
- is_runnable = False
+ try:
+ if config_test is not None and len(config_test) > 0:
+                # possible criteria: INSTALLER_TYPE, DEPLOY_SCENARIO, POD_ARCH
+                # consider the test criteria from the config file and
+                # compare them against the CI environment variables
+ for criterias in config_test:
+ for criteria_key, criteria_value in criterias.iteritems():
+ if re.search(
+ criteria_value,
+ test_execution_context[criteria_key]) is None:
+ is_runnable = False
+ except AttributeError:
+ is_runnable = False
# print is_runnable
self.isRunnable = is_runnable
def toString(self):
testcase = ("Name=" + self.name + ";Criteria=" +
str(self.criteria) + ";Project=" + self.project +
- ";Constraints=" + str(self.constraints) +
";IsRunnable" + str(self.isRunnable))
return testcase
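
A minimal standalone sketch of the reworked constraint check in checkRunnable, assuming config_test is a list of {variable: regex} dicts as the new loop implies; the sample context and regex are illustrative, and .items() stands in for the Python 2 .iteritems() used by the commit:

    import re

    # CI execution context, keyed by the three constraint types listed above
    test_execution_context = {"INSTALLER_TYPE": "fuel",
                              "DEPLOY_SCENARIO": "os-nosdn-nofeature-ha",
                              "POD_ARCH": "aarch64"}

    # Example constraint list as it might appear in testcases.yaml
    config_test = [{"DEPLOY_SCENARIO": "os-(nosdn|odl)-.*-ha"}]

    is_runnable = True
    try:
        for constraints in config_test:
            for key, pattern in constraints.items():
                # any constraint that fails to match disqualifies the test
                if re.search(pattern, test_execution_context[key]) is None:
                    is_runnable = False
    except AttributeError:
        # a constraints entry was not a dict of {variable: regex}
        is_runnable = False

    print(is_runnable)  # True for this sample context
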
diff --git a/reporting/reporting/reporting.yaml b/reporting/reporting/reporting.yaml
index b2f7d07..ce27b90 100644
--- a/reporting/reporting/reporting.yaml
+++ b/reporting/reporting/reporting.yaml
@@ -36,9 +36,11 @@ testapi:
functest:
blacklist:
- odl_netvirt
- - juju_epc
- tempest_full_parallel
+ - tempest_full
- rally_full
+ - heat_ims
+ - tempest_scenario
max_scenario_criteria: 50
test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
log_level: ERROR