From 5239734ebfeacec1c1b30741a75d6725a9a149d9 Mon Sep 17 00:00:00 2001
From: mrichomme
Date: Tue, 28 Aug 2018 08:18:06 +0200
Subject: Fix reporting pages for functest

- adapt existing framework to new testcases.yaml format (dependencies)
- simplify Tier management
- unblacklist juju_epc
- add new testcases

Change-Id: I085bff327f9eca856879616215dc6bae83435e7c
Signed-off-by: mrichomme
---
 reporting/css/default.css                        |   4 +-
 reporting/docker/reporting.sh                    |   5 --
 reporting/reporting/functest/reporting-status.py | 100 +++++++----------
 .../functest/template/index-status-tmpl.html     |  25 +++---
 reporting/reporting/functest/testCase.py         |  60 ++++++++-----
 reporting/reporting/reporting.yaml               |   4 +-
 6 files changed, 88 insertions(+), 110 deletions(-)

diff --git a/reporting/css/default.css b/reporting/css/default.css
index e32fa5f..686e886 100644
--- a/reporting/css/default.css
+++ b/reporting/css/default.css
@@ -26,6 +26,7 @@
     width: 100%;
     background-color: #0095a2
 }
+
 .panel-default > .panel-heading h4 {
     color: white;
 }
@@ -55,7 +56,7 @@ td{
     background-color: #0095a2;
 }
 
-h1 { 
+h1 {
     display: block;
     font-size: 2em;
     margin-top: 0.67em;
@@ -191,4 +192,3 @@ h2 {
     position:relative;
     top:1px;
 }
-
diff --git a/reporting/docker/reporting.sh b/reporting/docker/reporting.sh
index 8ca7eac..8008bf6 100755
--- a/reporting/docker/reporting.sh
+++ b/reporting/docker/reporting.sh
@@ -25,11 +25,6 @@ cp -Rf 3rd_party/html/* display
 cp -Rf img display
 cp -Rf js display
 
-for i in "${versions[@]}"
-do
-    cp -Rf html/functest.html display/$i/functest
-done
-
 # if nothing is precised run all the reporting generation
 # projet | option
 #   $1   |   $2
diff --git a/reporting/reporting/functest/reporting-status.py b/reporting/reporting/functest/reporting-status.py
index 44ab921..e36aede 100755
--- a/reporting/reporting/functest/reporting-status.py
+++ b/reporting/reporting/functest/reporting-status.py
@@ -41,6 +41,7 @@ blacklist = rp_utils.get_config('functest.blacklist')
 log_level = rp_utils.get_config('general.log.log_level')
 exclude_noha = rp_utils.get_config('functest.exclude_noha')
 exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
+tiers_for_scoring = {'healthcheck', 'smoke', 'vnf', 'features'}
 
 LOGGER.info("*******************************************")
 LOGGER.info("*                                         *")
@@ -56,10 +57,9 @@ LOGGER.info("*******************************************")
 # For all the versions
 for version in versions:
     testValid = []
-    otherTestCases = []
     # Retrieve test cases of Tier 1 (smoke)
     version_config = ""
-    if version != "master":
+    if (version != "master" and version != "latest"):
         version_config = "?h=stable/" + version
     functest_yaml_config = rp_utils.getFunctestConfig(version_config)
     config_tiers = functest_yaml_config.get("tiers")
@@ -71,25 +71,17 @@ for version in versions:
     # tricky thing for the API as some tests are Functest tests
     # other tests are declared directly in the feature projects
     for tier in config_tiers:
-        if tier['order'] >= 0 and tier['order'] < 2:
-            for case in tier['testcases']:
-                if case['case_name'] not in blacklist:
-                    testValid.append(tc.TestCase(case['case_name'],
-                                                 "functest",
-                                                 case['dependencies']))
-        elif tier['order'] == 2:
-            for case in tier['testcases']:
-                if case['case_name'] not in blacklist:
-                    otherTestCases.append(tc.TestCase(case['case_name'],
-                                                      case['case_name'],
-                                                      case['dependencies']))
-        elif tier['order'] > 2:
-            for case in tier['testcases']:
-                if case['case_name'] not in blacklist:
-                    otherTestCases.append(tc.TestCase(case['case_name'],
-                                                      "functest",
-                                                      case['dependencies']))
+        for case in tier['testcases']:
+            try:
+                dependencies = case['dependencies']
+            except KeyError:
+                dependencies = ""
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
+                                             "functest",
+                                             dependencies,
+                                             tier=tier['name']))
 
     LOGGER.debug("Functest reporting start")
 
     # For all the installers
@@ -158,10 +150,8 @@ for version in versions:
             # Check if test case is runnable / installer, scenario
             # for the test case used for Scenario validation
             try:
-                # 1) Manage the test cases for the scenario validation
-                # concretely Tiers 0-3
                 for test_case in testValid:
-                    test_case.checkRunnable(installer, s,
+                    test_case.checkRunnable(installer, s, architecture,
                                             test_case.getConstraints())
                     LOGGER.debug("testcase %s (%s) is %s",
                                  test_case.getDisplayName(),
@@ -172,7 +162,8 @@ for version in versions:
                         name = test_case.getName()
                         displayName = test_case.getDisplayName()
                         project = test_case.getProject()
-                        nb_test_runnable_for_this_scenario += 1
+                        if test_case.getTier() in tiers_for_scoring:
+                            nb_test_runnable_for_this_scenario += 1
                         LOGGER.info(" Searching results for case %s ",
                                     displayName)
                         if "fuel" in installer:
@@ -188,53 +179,18 @@ for version in versions:
                             LOGGER.info(" >>>> Test score = " + str(result))
                             test_case.setCriteria(result)
                             test_case.setIsRunnable(True)
-                            testCases2BeDisplayed.append(tc.TestCase(name,
-                                                                     project,
-                                                                     "",
-                                                                     result,
-                                                                     True,
-                                                                     1))
-                            scenario_score = scenario_score + result
-
-                # 2) Manage the test cases for the scenario qualification
-                # concretely Tiers > 3
-                for test_case in otherTestCases:
-                    test_case.checkRunnable(installer, s,
-                                            test_case.getConstraints())
-                    LOGGER.debug("testcase %s (%s) is %s",
-                                 test_case.getDisplayName(),
-                                 test_case.getName(),
-                                 test_case.isRunnable)
-                    time.sleep(1)
-                    if test_case.isRunnable:
-                        name = test_case.getName()
-                        displayName = test_case.getDisplayName()
-                        project = test_case.getProject()
-                        LOGGER.info(" Searching results for case %s ",
-                                    displayName)
-                        if "fuel" in installer:
-                            result = rp_utils.getCaseScoreFromBuildTag(
-                                name,
-                                s_result)
-                        else:
-                            result = rp_utils.getCaseScore(name, installer,
-                                                           s, version)
-                        # at least 1 result for the test
-                        if result > -1:
-                            test_case.setCriteria(result)
-                            test_case.setIsRunnable(True)
-                            testCases2BeDisplayed.append(tc.TestCase(
-                                name,
-                                project,
-                                "",
-                                result,
-                                True,
-                                4))
-                        else:
-                            LOGGER.debug("No results found")
+                            testCases2BeDisplayed.append(
+                                tc.TestCase(name,
+                                            project,
+                                            "",
+                                            result,
+                                            True,
+                                            tier=test_case.getTier()))
+                            if test_case.getTier() in tiers_for_scoring:
+                                scenario_score = scenario_score + result
 
                 items[s] = testCases2BeDisplayed
-            except Exception:  # pylint: disable=broad-except
+            except KeyError:
                 LOGGER.error("Error installer %s, version %s, scenario %s",
                              installer, version, s)
                 LOGGER.error("No data available: %s", sys.exc_info()[0])
@@ -262,6 +218,12 @@ for version in versions:
             else:
                 k_score = 2
 
+            # TODO for the scoring we should consider 3 tiers
+            # - Healthcheck
+            # - Smoke
+            # - Vnf
+            # components
+
             scenario_criteria = nb_test_runnable_for_this_scenario*k_score
 
             # score for reporting
diff --git a/reporting/reporting/functest/template/index-status-tmpl.html b/reporting/reporting/functest/template/index-status-tmpl.html
index 50fc648..48b5a2d 100644
--- a/reporting/reporting/functest/template/index-status-tmpl.html
+++ b/reporting/reporting/functest/template/index-status-tmpl.html
@@ -144,33 +144,32 @@ $(document).ready(function (){
+                    {% for tier in ['healthcheck', 'smoke', 'vnf', 'features'] -%}
+                    {{tier}}
                     {% for test in items[scenario] -%}
-                    {% if test.getCriteria() > -1 -%}
-                        {{test.getDisplayName() }}
+                    {% if test.getCriteria() > -1 and test.getTier() == tier -%}
+                        {{test.getDisplayName() }}
                     {%- endif %}
-                    {% if test.getTier() > 3 -%}
-                        *
-                    {%- endif %}
-
-                    {%- endfor %}
+                    {%- endfor %}
-                    {% for test in items[scenario] -%}
-                    {% if test.getCriteria() > 2 -%}
+                    {% for test in items[scenario] -%}
+                    {% if test.getCriteria() > 2 and test.getTier() == tier -%}
-                    {%- elif test.getCriteria() > 1 -%}
+                    {%- elif test.getCriteria() > 1 and test.getTier() == tier -%}
-                    {%- elif test.getCriteria() > 0 -%}
+                    {%- elif test.getCriteria() > 0 and test.getTier() == tier -%}
-                    {%- elif test.getCriteria() > -1 -%}
+                    {%- elif test.getCriteria() > -1 and test.getTier() == tier -%}
                     {%- endif %}
                     {%- endfor %}
+                    {%- endfor %}
                 {%- endfor %}
diff --git a/reporting/reporting/functest/testCase.py b/reporting/reporting/functest/testCase.py
index dff3f8c..5240533 100644
--- a/reporting/reporting/functest/testCase.py
+++ b/reporting/reporting/functest/testCase.py
@@ -43,10 +43,10 @@ class TestCase(object):
                                'functest-odl-sfc': 'SFC',
                                'onos_sfc': 'SFC',
                                'parser-basics': 'Parser',
-                               'connection_check': 'Health (connection)',
-                               'api_check': 'Health (api)',
+                               'connection_check': 'connectivity',
+                               'api_check': 'api',
                                'snaps_smoke': 'SNAPS',
-                               'snaps_health_check': 'Health (dhcp)',
+                               'snaps_health_check': 'dhcp',
                                'gluon_vping': 'Netready',
                                'fds': 'FDS',
                                'cloudify_ims': 'vIMS (Cloudify)',
@@ -68,7 +68,18 @@ class TestCase(object):
                                'barbican': 'barbican',
                                'juju_epc': 'vEPC (Juju)',
                                'shaker': 'shaker',
-                               'neutron_trunk': 'Neutron trunk'}
+                               'neutron_trunk': 'Neutron trunk',
+                               'tempest_scenario': 'tempest_scenario',
+                               'networking-bgpvpn': 'networking-bgpvpn',
+                               'networking-sfc': 'networking-sfc',
+                               'tempest_full': 'Tempest (full)',
+                               'cloudify': 'cloudify',
+                               'heat_ims': 'vIMS (Heat)',
+                               'vmtp': 'vmtp',
+                               'tempest_smoke': 'Tempest (smoke)',
+                               'neutron-tempest-plugin-api': 'Neutron API',
+                               'vgpu': 'vgpu',
+                               'stor4nfv_os': 'stor4nfv_os'}
         try:
             self.displayName = display_name_matrix[self.name]
         except:
@@ -80,22 +91,22 @@ class TestCase(object):
     def getProject(self):
         return self.project
 
-    def getConstraints(self):
-        return self.constraints
-
     def getCriteria(self):
         return self.criteria
 
     def getTier(self):
         return self.tier
 
+    def getConstraints(self):
+        return self.constraints
+
     def setCriteria(self, criteria):
         self.criteria = criteria
 
     def setIsRunnable(self, isRunnable):
         self.isRunnable = isRunnable
 
-    def checkRunnable(self, installer, scenario, config):
+    def checkRunnable(self, installer, scenario, arch, config):
         # Re-use Functest declaration
         # Retrieve Functest configuration file functest_config.yaml
         is_runnable = True
@@ -110,27 +121,36 @@ class TestCase(object):
 
         # Retrieve test constraints
        # Retrieve test execution param
-        test_execution_context = {"installer": installer,
-                                  "scenario": scenario}
+        test_execution_context = {"INSTALLER_TYPE": installer,
+                                  "DEPLOY_SCENARIO": scenario,
+                                  "POD_ARCH": arch}
+
+        # 3 types of constraints
+        # INSTALLER_TYPE
+        # DEPLOY_SCENARIO
+        # POD_ARCH
 
         # By default we assume that all the tests are always runnable...
         # if test_env not empty => dependencies to be checked
-        if config_test is not None and len(config_test) > 0:
-            # possible criteria = ["installer", "scenario"]
-            # consider test criteria from config file
-            # compare towards CI env through CI en variable
-            for criteria in config_test:
-                if re.search(config_test[criteria],
-                             test_execution_context[criteria]) is None:
-                    # print "Test "+ test + " cannot be run on the environment"
-                    is_runnable = False
+        try:
+            if config_test is not None and len(config_test) > 0:
+                # possible criteria: INSTALLER_TYPE, DEPLOY_SCENARIO, POD_ARCH
+                # consider test criteria from config file
+                # compare towards CI env through CI env variables
+                for criterias in config_test:
+                    for criteria_key, criteria_value in criterias.iteritems():
+                        if re.search(
+                                criteria_value,
+                                test_execution_context[criteria_key]) is None:
+                            is_runnable = False
+        except AttributeError:
+            is_runnable = False
         # print is_runnable
         self.isRunnable = is_runnable
 
     def toString(self):
         testcase = ("Name=" + self.name +
                     ";Criteria=" + str(self.criteria) +
                     ";Project=" + self.project +
-                    ";Constraints=" + str(self.constraints) +
                     ";IsRunnable" + str(self.isRunnable))
         return testcase
diff --git a/reporting/reporting/reporting.yaml b/reporting/reporting/reporting.yaml
index b2f7d07..ce27b90 100644
--- a/reporting/reporting/reporting.yaml
+++ b/reporting/reporting/reporting.yaml
@@ -36,9 +36,11 @@ testapi:
 functest:
   blacklist:
     - odl_netvirt
-    - juju_epc
     - tempest_full_parallel
+    - tempest_full
     - rally_full
+    - heat_ims
+    - tempest_scenario
   max_scenario_criteria: 50
   test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
   log_level: ERROR
-- 
cgit 1.2.3-korg
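
Illustrative sketch (not part of the patch): the core of the change is that each
testcases.yaml entry may now carry a "dependencies" list, which
TestCase.checkRunnable() matches as regular expressions against the CI context
keys INSTALLER_TYPE, DEPLOY_SCENARIO and POD_ARCH. The snippet below loosely
rewrites that matching loop; the sample "dependencies" value and the helper name
is_runnable are assumptions, only the list-of-single-key-mapping shape is
implied by criterias.iteritems() in the patch.

import re

# CI context as checkRunnable() builds it (values are examples).
ci_context = {"INSTALLER_TYPE": "fuel",
              "DEPLOY_SCENARIO": "os-nosdn-nofeature-ha",
              "POD_ARCH": "x86_64"}

# Assumed parsed form of a testcases.yaml "dependencies" block:
# a list of {constraint_key: regex} mappings (hypothetical sample).
dependencies = [{"DEPLOY_SCENARIO": "os-(nosdn|odl)-.*-ha"},
                {"POD_ARCH": "x86_64|aarch64"}]


def is_runnable(deps, context):
    """Loose rewrite of the matching loop in TestCase.checkRunnable()."""
    try:
        for dep in deps or []:
            for key, pattern in dep.items():
                # one non-matching constraint is enough to exclude the case
                if re.search(pattern, context.get(key, "")) is None:
                    return False
    except AttributeError:
        # malformed entry (e.g. a bare string instead of a mapping)
        return False
    return True


print(is_runnable(dependencies, ci_context))  # True for this sample context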
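
Illustrative sketch (not part of the patch): on the scoring side,
reporting-status.py now counts a runnable test and its result only when its tier
belongs to tiers_for_scoring. The numbers, the extra "benchmarking" tier and the
k_score value below are made up; only the filtering logic mirrors the patch.

tiers_for_scoring = {'healthcheck', 'smoke', 'vnf', 'features'}

# (case_name, tier, criteria) triples as collected per scenario (made up).
results = [("connection_check", "healthcheck", 3),
           ("tempest_smoke", "smoke", 2),
           ("cloudify_ims", "vnf", 3),
           ("vmtp", "benchmarking", 1)]      # ignored: tier not scored

nb_runnable = sum(1 for _, tier, _ in results if tier in tiers_for_scoring)
scenario_score = sum(c for _, tier, c in results if tier in tiers_for_scoring)

k_score = 3                                  # weight set elsewhere from the run history
scenario_criteria = nb_runnable * k_score    # maximum reachable score
score_rate = float(scenario_score) / scenario_criteria * 100

print("%d %d %.1f" % (nb_runnable, scenario_score, score_rate))  # 3 8 88.9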