Diffstat (limited to 'reporting/reporting/functest')
-rwxr-xr-x  reporting/reporting/functest/reporting-status.py              123
-rw-r--r--  reporting/reporting/functest/template/index-status-tmpl.html   25
-rw-r--r--  reporting/reporting/functest/testCase.py                        73
3 files changed, 109 insertions(+), 112 deletions(-)
diff --git a/reporting/reporting/functest/reporting-status.py b/reporting/reporting/functest/reporting-status.py
index 552080d..e36aede 100755
--- a/reporting/reporting/functest/reporting-status.py
+++ b/reporting/reporting/functest/reporting-status.py
@@ -25,8 +25,6 @@ Functest reporting status
LOGGER = rp_utils.getLogger("Functest-Status")
# Initialization
-testValid = []
-otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# init just connection_check to get the list of scenarios
@@ -43,8 +41,7 @@ blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-
-functest_yaml_config = rp_utils.getFunctestConfig()
+tiers_for_scoring = {'healthcheck', 'smoke', 'vnf', 'features'}
LOGGER.info("*******************************************")
LOGGER.info("* *")
@@ -57,38 +54,36 @@ LOGGER.info("* NOHA scenarios excluded: %s *", exclude_noha)
LOGGER.info("* *")
LOGGER.info("*******************************************")
-# Retrieve test cases of Tier 1 (smoke)
-config_tiers = functest_yaml_config.get("tiers")
+# For all the versions
+for version in versions:
+ testValid = []
+ # Retrieve test cases of Tier 1 (smoke)
+ version_config = ""
+ if version not in ("master", "latest"):
+ version_config = "?h=stable/" + version
+ functest_yaml_config = rp_utils.getFunctestConfig(version_config)
+ config_tiers = functest_yaml_config.get("tiers")
+
+ # we consider Tier 0 (Healthcheck), Tier 1 (smoke) and
+ # Tier 2 (features) to validate scenarios
+ # Tiers > 2 are not used to validate scenarios, but we display
+ # the results anyway
+ # tricky for the API: some tests are Functest tests,
+ # others are declared directly in the feature projects
+ for tier in config_tiers:
-# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features)
-# to validate scenarios
-# Tier > 2 are not used to validate scenarios but we display the results anyway
-# tricky thing for the API as some tests are Functest tests
-# other tests are declared directly in the feature projects
-for tier in config_tiers:
- if tier['order'] >= 0 and tier['order'] < 2:
for case in tier['testcases']:
+ try:
+ dependencies = case['dependencies']
+ except KeyError:
+ dependencies = ""
if case['case_name'] not in blacklist:
testValid.append(tc.TestCase(case['case_name'],
"functest",
- case['dependencies']))
- elif tier['order'] == 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- case['case_name'],
- case['dependencies']))
- elif tier['order'] > 2:
- for case in tier['testcases']:
- if case['case_name'] not in blacklist:
- otherTestCases.append(tc.TestCase(case['case_name'],
- "functest",
- case['dependencies']))
-
-LOGGER.debug("Functest reporting start")
+ dependencies,
+ tier=tier['name']))
+ LOGGER.debug("Functest reporting start")
-# For all the versions
-for version in versions:
# For all the installers
scenario_directory = "./display/" + version + "/functest/"
scenario_file_name = scenario_directory + "scenario_history.txt"
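
Note: a minimal standalone sketch of the branch-selection logic introduced above, assuming rp_utils.getFunctestConfig() appends the suffix to the cgit URL of the functest testcases YAML (the helper name config_suffix is hypothetical; the patch inlines this logic):

    def config_suffix(version):
        # master/latest read the default branch; named releases pin a
        # stable branch through cgit's "?h=" URL parameter
        if version in ("master", "latest"):
            return ""
        return "?h=stable/" + version

    assert config_suffix("master") == ""
    assert config_suffix("gambia") == "?h=stable/gambia"
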
@@ -155,10 +150,8 @@ for version in versions:
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
try:
- # 1) Manage the test cases for the scenario validation
- # concretely Tiers 0-3
for test_case in testValid:
- test_case.checkRunnable(installer, s,
+ test_case.checkRunnable(installer, s, architecture,
test_case.getConstraints())
LOGGER.debug("testcase %s (%s) is %s",
test_case.getDisplayName(),
@@ -169,7 +162,8 @@ for version in versions:
name = test_case.getName()
displayName = test_case.getDisplayName()
project = test_case.getProject()
- nb_test_runnable_for_this_scenario += 1
+ if test_case.getTier() in tiers_for_scoring:
+ nb_test_runnable_for_this_scenario += 1
LOGGER.info(" Searching results for case %s ",
displayName)
if "fuel" in installer:
@@ -185,53 +179,18 @@ for version in versions:
LOGGER.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 1))
- scenario_score = scenario_score + result
-
- # 2) Manage the test cases for the scenario qualification
- # concretely Tiers > 3
- for test_case in otherTestCases:
- test_case.checkRunnable(installer, s,
- test_case.getConstraints())
- LOGGER.debug("testcase %s (%s) is %s",
- test_case.getDisplayName(),
- test_case.getName(),
- test_case.isRunnable)
- time.sleep(1)
- if test_case.isRunnable:
- name = test_case.getName()
- displayName = test_case.getDisplayName()
- project = test_case.getProject()
- LOGGER.info(" Searching results for case %s ",
- displayName)
- if "fuel" in installer:
- result = rp_utils.getCaseScoreFromBuildTag(
- name,
- s_result)
- else:
- result = rp_utils.getCaseScore(name, installer,
- s, version)
- # at least 1 result for the test
- if result > -1:
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(
- name,
- project,
- "",
- result,
- True,
- 4))
- else:
- LOGGER.debug("No results found")
+ testCases2BeDisplayed.append(
+ tc.TestCase(name,
+ project,
+ "",
+ result,
+ True,
+ tier=test_case.getTier()))
+ if test_case.getTier() in tiers_for_scoring:
+ scenario_score = scenario_score + result
items[s] = testCases2BeDisplayed
- except Exception: # pylint: disable=broad-except
+ except KeyError:
LOGGER.error("Error installer %s, version %s, scenario %s",
installer, version, s)
LOGGER.error("No data available: %s", sys.exc_info()[0])
@@ -259,6 +218,12 @@ for version in versions:
else:
k_score = 2
+ # TODO for the scoring we should consider 3 tiers
+ # - Healthcheck
+ # - Smoke
+ # - Vnf
+ # components
+
scenario_criteria = nb_test_runnable_for_this_scenario*k_score
# score for reporting
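
Note: after this change only the healthcheck/smoke/vnf/features tiers feed the scenario score, while every runnable case is still collected for display. A minimal sketch of the resulting scoring rule (the function and variable names below are illustrative, not the module's API):

    tiers_for_scoring = {'healthcheck', 'smoke', 'vnf', 'features'}

    def scenario_scoring(cases, k_score):
        # cases: iterable of (tier, result) pairs, result in 0..3
        scored = [result for tier, result in cases
                  if tier in tiers_for_scoring]
        scenario_score = sum(scored)               # points actually earned
        scenario_criteria = len(scored) * k_score  # maximum reachable score
        return scenario_score, scenario_criteria

    # two smoke cases plus a benchmarking case that is shown but not scored
    print(scenario_scoring([('smoke', 3), ('smoke', 2),
                            ('benchmarking', 3)], k_score=3))  # -> (5, 6)
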
diff --git a/reporting/reporting/functest/template/index-status-tmpl.html b/reporting/reporting/functest/template/index-status-tmpl.html
index 50fc648..48b5a2d 100644
--- a/reporting/reporting/functest/template/index-status-tmpl.html
+++ b/reporting/reporting/functest/template/index-status-tmpl.html
@@ -144,33 +144,32 @@ $(document).ready(function (){
<span class="panel-header-item">
</span>
</div>
+ {% for tier in ['healthcheck', 'smoke', 'vnf', 'features'] -%}
+ <h2>{{tier}}</h2>
<table class="table">
<tr>
{% for test in items[scenario] -%}
- <th>
- {% if test.getCriteria() > -1 -%}
- {{test.getDisplayName() }}
+ {% if test.getCriteria() > -1 and test.getTier() == tier -%}
+ <th>{{test.getDisplayName() }}</th>
{%- endif %}
- {% if test.getTier() > 3 -%}
- *
- {%- endif %}
- </th>
- {%- endfor %}
+ {%- endfor %}
</tr>
<tr class="tr-weather-weather">
- {% for test in items[scenario] -%}
- {% if test.getCriteria() > 2 -%}
+ {% for test in items[scenario] -%}
+ {% if test.getCriteria() > 2 and test.getTier() == tier -%}
<td><img src="../../img/weather-clear.png"></td>
- {%- elif test.getCriteria() > 1 -%}
+ {%- elif test.getCriteria() > 1 and test.getTier() == tier -%}
<td><img src="../../img/weather-few-clouds.png"></td>
- {%- elif test.getCriteria() > 0 -%}
+ {%- elif test.getCriteria() > 0 and test.getTier() == tier -%}
<td><img src="../../img/weather-overcast.png"></td>
- {%- elif test.getCriteria() > -1 -%}
+ {%- elif test.getCriteria() > -1 and test.getTier() == tier -%}
<td><img src="../../img/weather-storm.png"></td>
{%- endif %}
{%- endfor %}
</tr>
</table>
+ <br><hr>
+ {%- endfor %}
</div>
</div>
{%- endfor %}
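
Note: the template now renders one table per tier and filters the flat items[scenario] list on test.getTier() in every pass. A reduced, runnable sketch of that filtering, where Case is a stand-in for tc.TestCase exposing only the getters the template calls:

    from jinja2 import Template

    class Case(object):
        def __init__(self, name, tier, criteria):
            self.name, self.tier, self.criteria = name, tier, criteria
        def getDisplayName(self):
            return self.name
        def getTier(self):
            return self.tier
        def getCriteria(self):
            return self.criteria

    TMPL = Template(
        "{% for tier in ['healthcheck', 'smoke'] -%}\n"
        "{{tier}}:"
        "{% for test in cases %}"
        "{% if test.getCriteria() > -1 and test.getTier() == tier %}"
        " {{test.getDisplayName()}}"
        "{% endif %}"
        "{% endfor %}\n"
        "{% endfor %}")

    print(TMPL.render(cases=[Case('api_check', 'healthcheck', 3),
                             Case('vping_ssh', 'smoke', 2),
                             Case('vgpu', 'benchmarking', -1)]))
    # healthcheck: api_check
    # smoke: vping_ssh        (vgpu has no result, so it stays hidden)
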
diff --git a/reporting/reporting/functest/testCase.py b/reporting/reporting/functest/testCase.py
index d114f8a..fba3216 100644
--- a/reporting/reporting/functest/testCase.py
+++ b/reporting/reporting/functest/testCase.py
@@ -26,7 +26,9 @@ class TestCase(object):
'onos': 'ONOS',
'ocl': 'OCL',
'tempest_smoke_serial': 'Tempest (smoke)',
+ 'tempest_smoke': 'Tempest (smoke)',
'tempest_full_parallel': 'Tempest (full)',
+ 'tempest_full': 'Tempest (full)',
'tempest_defcore': 'Tempest (Defcore)',
'refstack_defcore': 'Refstack',
'rally_sanity': 'Rally (smoke)',
@@ -43,13 +45,14 @@ class TestCase(object):
'functest-odl-sfc': 'SFC',
'onos_sfc': 'SFC',
'parser-basics': 'Parser',
- 'connection_check': 'Health (connection)',
- 'api_check': 'Health (api)',
+ 'connection_check': 'connectivity',
+ 'api_check': 'api',
'snaps_smoke': 'SNAPS',
- 'snaps_health_check': 'Health (dhcp)',
+ 'snaps_health_check': 'dhcp',
'gluon_vping': 'Netready',
'fds': 'FDS',
'cloudify_ims': 'vIMS (Cloudify)',
+ 'cloudify': 'Cloudify',
'orchestra_openims': 'OpenIMS (OpenBaton)',
'orchestra_clearwaterims': 'vIMS (OpenBaton)',
'opera_ims': 'vIMS (Open-O)',
@@ -58,8 +61,25 @@
'odl_netvirt': 'Netvirt',
'security_scan': 'Security',
'patrole': 'Patrole',
+ 'tenantnetwork1': 'tenant network 1',
+ 'tenantnetwork2': 'tenant network 2',
+ 'vmready1': 'vm ready 1',
+ 'vmready2': 'vm ready 2',
+ 'singlevm1': 'single vm 1',
+ 'singlevm2': 'single vm 2',
+ 'cinder_test': 'cinder tests',
+ 'barbican': 'barbican',
+ 'vmtp': 'vmtp',
'juju_epc': 'vEPC (Juju)',
- 'neutron_trunk': 'Neutron trunk'}
+ 'shaker': 'shaker',
+ 'neutron_trunk': 'Neutron trunk',
+ 'tempest_scenario': 'tempest_scenario',
+ 'networking-bgpvpn': 'networking-bgpvpn',
+ 'networking-sfc': 'networking-sfc',
+ 'heat_ims': 'vIMS (Heat)',
+ 'neutron-tempest-plugin-api': 'Neutron API',
+ 'vgpu': 'vgpu',
+ 'stor4nfv_os': 'stor4nfv_os'}
try:
self.displayName = display_name_matrix[self.name]
except:
@@ -71,22 +95,22 @@ class TestCase(object):
def getProject(self):
return self.project
- def getConstraints(self):
- return self.constraints
-
def getCriteria(self):
return self.criteria
def getTier(self):
return self.tier
+ def getConstraints(self):
+ return self.constraints
+
def setCriteria(self, criteria):
self.criteria = criteria
def setIsRunnable(self, isRunnable):
self.isRunnable = isRunnable
- def checkRunnable(self, installer, scenario, config):
+ def checkRunnable(self, installer, scenario, arch, config):
# Re-use Functest declaration
# Retrieve Functest configuration file functest_config.yaml
is_runnable = True
@@ -101,27 +125,36 @@ class TestCase(object):
# Retrieve test constraints
# Retrieve test execution param
- test_execution_context = {"installer": installer,
- "scenario": scenario}
+ test_execution_context = {"INSTALLER_TYPE": installer,
+ "DEPLOY_SCENARIO": scenario,
+ "POD_ARCH": arch}
+
+ # 3 types of constraints
+ # INSTALLER_TYPE
+ # DEPLOY_SCENARIO
+ # POD_ARCH
# By default we assume that all the tests are always runnable...
# if test_env not empty => dependencies to be checked
- if config_test is not None and len(config_test) > 0:
- # possible criteria = ["installer", "scenario"]
- # consider test criteria from config file
- # compare towards CI env through CI en variable
- for criteria in config_test:
- if re.search(config_test[criteria],
- test_execution_context[criteria]) is None:
- # print "Test "+ test + " cannot be run on the environment"
- is_runnable = False
+ try:
+ if config_test is not None and len(config_test) > 0:
+ # possible criteria = ["installer", "scenario"]
+ # consider test criteria from config file
+ # compare towards CI env through CI en variable
+ for criterias in config_test:
+ for criteria_key, criteria_value in criterias.iteritems():
+ if re.search(
+ criteria_value,
+ test_execution_context[criteria_key]) is None:
+ is_runnable = False
+ except AttributeError:
+ is_runnable = False
# print is_runnable
self.isRunnable = is_runnable
def toString(self):
testcase = ("Name=" + self.name + ";Criteria=" +
str(self.criteria) + ";Project=" + self.project +
- ";Constraints=" + str(self.constraints) +
";IsRunnable" + str(self.isRunnable))
return testcase
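
Note: a standalone sketch of the new constraint check in checkRunnable(), assuming constraints arrive from the functest testcases YAML as a list of {CI_VAR: regex} dicts, where any non-matching entry makes the case non-runnable (the name is_runnable and the .items() spelling are illustrative; the module itself uses iteritems()):

    import re

    def is_runnable(constraints, installer, scenario, arch):
        context = {"INSTALLER_TYPE": installer,
                   "DEPLOY_SCENARIO": scenario,
                   "POD_ARCH": arch}
        try:
            for constraint in constraints or []:
                for key, regex in constraint.items():
                    if re.search(regex, context.get(key, "")) is None:
                        return False
        except AttributeError:
            # malformed entry (e.g. a bare string has no .items()):
            # declare the case non-runnable, as the patch does
            return False
        return True

    print(is_runnable([{"INSTALLER_TYPE": "fuel"},
                       {"DEPLOY_SCENARIO": "odl"}],
                      "fuel", "os-odl-nofeature-ha", "x86_64"))    # True
    print(is_runnable([{"POD_ARCH": "aarch64"}],
                      "apex", "os-nosdn-nofeature-ha", "x86_64"))  # False
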