Diffstat (limited to 'functest')
-rw-r--r--  functest/ci/testcases.yaml                             | 66
-rw-r--r--  functest/ci/tier_handler.py                            |  2
-rw-r--r--  functest/core/feature.py                               |  4
-rw-r--r--  functest/core/testcase.py                              | 22
-rw-r--r--  functest/tests/unit/core/test_feature.py               |  4
-rw-r--r--  functest/tests/unit/core/test_testcase.py              | 69
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py  |  2
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py       |  9
-rw-r--r--  functest/utils/functest_utils.py                       | 23
9 files changed, 127 insertions, 74 deletions
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index 39988306..8f2cc4bc 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -10,7 +10,7 @@ tiers:
             -
                 case_name: connection_check
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -30,7 +30,7 @@ tiers:
             -
                 case_name: api_check
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -49,7 +49,7 @@ tiers:
             -
                 case_name: snaps_health_check
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -73,7 +73,7 @@ tiers:
             -
                 case_name: vping_ssh
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -90,7 +90,7 @@ tiers:
             -
                 case_name: vping_userdata
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -106,7 +106,7 @@ tiers:
             -
                 case_name: tempest_smoke_serial
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -124,7 +124,7 @@ tiers:
             -
                 case_name: rally_sanity
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -140,7 +140,7 @@ tiers:
             -
                 case_name: refstack_defcore
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -156,7 +156,7 @@ tiers:
             -
                 case_name: odl
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: true
                 clean_flag: false
                 description: >-
@@ -177,7 +177,7 @@ tiers:
             -
                 case_name: odl_netvirt
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -200,7 +200,7 @@ tiers:
             -
                 case_name: fds
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -220,7 +220,7 @@ tiers:
             -
                 case_name: onos
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -237,7 +237,7 @@ tiers:
             -
                 case_name: snaps_smoke
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -267,7 +267,7 @@ tiers:
             -
                 case_name: promise
                 project_name: promise
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -284,7 +284,7 @@ tiers:
             -
                 case_name: doctor-notification
                 project_name: doctor
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -301,7 +301,7 @@ tiers:
             -
                 case_name: bgpvpn
                 project_name: sdnvpn
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -318,7 +318,7 @@ tiers:
             -
                 case_name: security_scan
                 project_name: securityscanning
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -335,7 +335,7 @@ tiers:
             -
                 case_name: copper
                 project_name: copper
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -352,7 +352,7 @@ tiers:
             -
                 case_name: multisite
                 project_name: multisite
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -366,7 +366,7 @@ tiers:
             -
                 case_name: functest-odl-sfc
                 project_name: sfc
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -382,7 +382,7 @@ tiers:
             -
                 case_name: onos_sfc
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: true
                 clean_flag: true
                 description: >-
@@ -396,7 +396,7 @@ tiers:
             -
                 case_name: parser-basics
                 project_name: parser
-                criteria: 'ret == 0'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -412,7 +412,7 @@ tiers:
             -
                 case_name: domino-multinode
                 project_name: domino
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -428,7 +428,7 @@ tiers:
             -
                 case_name: gluon_vping
                 project_name: netready
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -444,7 +444,7 @@ tiers:
             -
                 case_name: barometercollectd
                 project_name: barometer
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -468,7 +468,7 @@ tiers:
             -
                 case_name: tempest_full_parallel
                 project_name: functest
-                criteria: 'success_rate >= 80%'
+                criteria: 80
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -484,7 +484,7 @@ tiers:
             -
                 case_name: tempest_custom
                 project_name: functest
-                criteria: 'success_rate == 100%'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -502,7 +502,7 @@ tiers:
             -
                 case_name: rally_full
                 project_name: functest
-                criteria: 'success_rate >= 90%'
+                criteria: 90
                 blocking: false
                 clean_flag: false
                 description: >-
@@ -525,7 +525,7 @@ tiers:
             -
                 case_name: cloudify_ims
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -540,7 +540,7 @@ tiers:
 #            -
 #                case_name: aaa
 #                project_name: functest
-#                criteria: 'ret == 0'
+#                criteria: 100
 #                blocking: false
 #                clean_flag: true
 #                description: >-
@@ -554,7 +554,7 @@ tiers:
             -
                 case_name: orchestra_ims
                 project_name: functest
-                criteria: 'ret == 0'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -569,7 +569,7 @@ tiers:
             -
                 case_name: opera-vims
                 project_name: opera
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
@@ -584,7 +584,7 @@ tiers:
             -
                 case_name: vyos_vrouter
                 project_name: functest
-                criteria: 'status == "PASS"'
+                criteria: 100
                 blocking: false
                 clean_flag: true
                 description: >-
diff --git a/functest/ci/tier_handler.py b/functest/ci/tier_handler.py
index 6b4864b5..fe7372a3 100644
--- a/functest/ci/tier_handler.py
+++ b/functest/ci/tier_handler.py
@@ -158,7 +158,7 @@ class TestCase(object):
         for line in lines:
             out += ("| " + line.ljust(LINE_LENGTH - 7) + " |\n")
         out += ("| Criteria: " +
-                self.criteria.ljust(LINE_LENGTH - 14) + "|\n")
+                str(self.criteria).ljust(LINE_LENGTH - 14) + "|\n")
         out += ("| Dependencies:".ljust(LINE_LENGTH - 1) + "|\n")
         installer = self.dependency.get_installer()
         scenario = self.dependency.get_scenario()
diff --git a/functest/core/feature.py b/functest/core/feature.py
index 08500a26..d65f5a3c 100644
--- a/functest/core/feature.py
+++ b/functest/core/feature.py
@@ -74,11 +74,11 @@ class Feature(base.TestCase):
         """
         self.start_time = time.time()
         exit_code = base.TestCase.EX_RUN_ERROR
-        self.result = "FAIL"
+        self.result = 0
         try:
             if self.execute(**kwargs) == 0:
                 exit_code = base.TestCase.EX_OK
-                self.result = 'PASS'
+                self.result = 100
             ft_utils.logger_test_results(
                 self.project_name, self.case_name, self.result,
                 self.details)
diff --git a/functest/core/testcase.py b/functest/core/testcase.py
index b9dcbb2d..3f191b40 100644
--- a/functest/core/testcase.py
+++ b/functest/core/testcase.py
@@ -38,6 +38,7 @@ class TestCase(object):
         self.details = {}
         self.project_name = kwargs.get('project_name', 'functest')
         self.case_name = kwargs.get('case_name', '')
+        self.criteria = kwargs.get('criteria', 100)
         self.result = ""
         self.start_time = ""
         self.stop_time = ""
@@ -55,9 +56,19 @@ class TestCase(object):
             TestCase.EX_TESTCASE_FAILED otherwise.
         """
         try:
-            assert self.result
-            if self.result == 'PASS':
-                return TestCase.EX_OK
+            assert self.criteria
+            if isinstance(self.result, int) and isinstance(self.criteria, int):
+                if self.result >= self.criteria:
+                    return TestCase.EX_OK
+            else:
+                # Backward compatibility
+                # It must be removed as soon as TestCase subclasses
+                # stop setting result = 'PASS' or 'FAIL'.
+                # In this case criteria is unread.
+                self.logger.warning(
+                    "Please update result which must be an int!")
+                if self.result == 'PASS':
+                    return TestCase.EX_OK
         except AssertionError:
             self.logger.error("Please run test before checking the results")
         return TestCase.EX_TESTCASE_FAILED
@@ -110,12 +121,13 @@ class TestCase(object):
         try:
             assert self.project_name
             assert self.case_name
-            assert self.result
             assert self.start_time
             assert self.stop_time
+            pub_result = 'PASS' if self.check_result(
+                ) == TestCase.EX_OK else 'FAIL'
             if ft_utils.push_results_to_db(
                     self.project_name, self.case_name, self.start_time,
-                    self.stop_time, self.result, self.details):
+                    self.stop_time, pub_result, self.details):
                 self.logger.info("The results were successfully pushed to DB")
                 return TestCase.EX_OK
             else:
diff --git a/functest/tests/unit/core/test_feature.py b/functest/tests/unit/core/test_feature.py
index 97075223..993da5a0 100644
--- a/functest/tests/unit/core/test_feature.py
+++ b/functest/tests/unit/core/test_feature.py
@@ -35,9 +35,9 @@ class FeatureTestingBase(unittest.TestCase):
     def _test_run(self, status, mock_method=None):
         self.assertEqual(self.feature.run(cmd=self._cmd), status)
         if status == testcase.TestCase.EX_OK:
-            self.assertEqual(self.feature.result, 'PASS')
+            self.assertEqual(self.feature.result, 100)
         else:
-            self.assertEqual(self.feature.result, 'FAIL')
+            self.assertEqual(self.feature.result, 0)
         mock_method.assert_has_calls([mock.call(), mock.call()])
         self.assertEqual(self.feature.start_time, 1)
         self.assertEqual(self.feature.stop_time, 2)
diff --git a/functest/tests/unit/core/test_testcase.py b/functest/tests/unit/core/test_testcase.py
index b93f50d6..cc8446d8 100644
--- a/functest/tests/unit/core/test_testcase.py
+++ b/functest/tests/unit/core/test_testcase.py
@@ -28,13 +28,14 @@ class TestCaseTesting(unittest.TestCase):

     _case_name = "base"
     _project_name = "functest"
+    _published_result = "PASS"

     def setUp(self):
         self.test = testcase.TestCase(case_name=self._case_name,
                                       project_name=self._project_name)
         self.test.start_time = "1"
         self.test.stop_time = "2"
-        self.test.result = "PASS"
+        self.test.result = 100
         self.test.details = {"Hello": "World"}

     def test_run_unimplemented(self):
@@ -56,10 +57,6 @@ class TestCaseTesting(unittest.TestCase):
         self.test.case_name = None
         self._test_missing_attribute()

-    def test_missing_criteria(self):
-        self.test.result = None
-        self._test_missing_attribute()
-
     def test_missing_start_time(self):
         self.test.start_time = None
         self._test_missing_attribute()
@@ -76,7 +73,7 @@ class TestCaseTesting(unittest.TestCase):
                          testcase.TestCase.EX_OK)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.result, self.test.details)
+            self.test.stop_time, self._published_result, self.test.details)

     @mock.patch('functest.utils.functest_utils.push_results_to_db',
                 return_value=False)
@@ -85,7 +82,7 @@ class TestCaseTesting(unittest.TestCase):
                          testcase.TestCase.EX_PUSH_TO_DB_ERROR)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.result, self.test.details)
+            self.test.stop_time, self._published_result, self.test.details)

     @mock.patch('functest.utils.functest_utils.push_results_to_db',
                 return_value=True)
@@ -94,7 +91,33 @@ class TestCaseTesting(unittest.TestCase):
                          testcase.TestCase.EX_OK)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.result, self.test.details)
+            self.test.stop_time, self._published_result, self.test.details)
+
+    @mock.patch('functest.utils.functest_utils.push_results_to_db',
+                return_value=True)
+    def test_push_to_db_res_ko(self, mock_function=None):
+        self.test.result = 0
+        self.assertEqual(self.test.push_to_db(),
+                         testcase.TestCase.EX_OK)
+        mock_function.assert_called_once_with(
+            self._project_name, self._case_name, self.test.start_time,
+            self.test.stop_time, 'FAIL', self.test.details)
+
+    @mock.patch('functest.utils.functest_utils.push_results_to_db',
+                return_value=True)
+    def test_push_to_db_both_ko(self, mock_function=None):
+        self.test.result = 0
+        self.test.criteria = 0
+        self.assertEqual(self.test.push_to_db(),
+                         testcase.TestCase.EX_OK)
+        mock_function.assert_called_once_with(
+            self._project_name, self._case_name, self.test.start_time,
+            self.test.stop_time, 'FAIL', self.test.details)
+
+    def test_check_criteria_missing(self):
+        self.test.criteria = None
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_TESTCASE_FAILED)

     def test_check_result_missing(self):
         self.test.result = None
@@ -102,15 +125,43 @@
                          testcase.TestCase.EX_TESTCASE_FAILED)

     def test_check_result_failed(self):
-        self.test.result = 'FAILED'
+        # Backward compatibility
+        # It must be removed as soon as TestCase subclasses
+        # stop setting result = 'PASS' or 'FAIL'.
+        self.test.result = 'FAIL'
         self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_TESTCASE_FAILED)

     def test_check_result_pass(self):
+        # Backward compatibility
+        # It must be removed as soon as TestCase subclasses
+        # stop setting result = 'PASS' or 'FAIL'.
         self.test.result = 'PASS'
         self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_OK)

+    def test_check_result_lt(self):
+        self.test.result = 50
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_TESTCASE_FAILED)
+
+    def test_check_result_eq(self):
+        self.test.result = 100
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_OK)
+
+    def test_check_result_gt(self):
+        self.test.criteria = 50
+        self.test.result = 100
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_OK)
+
+    def test_check_result_zero(self):
+        self.test.criteria = 0
+        self.test.result = 0
+        self.assertEqual(self.test.check_result(),
+                         testcase.TestCase.EX_TESTCASE_FAILED)
+

 if __name__ == "__main__":
     unittest.main(verbosity=2)
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index 34031b40..e1653a40 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -105,7 +105,7 @@ class OSTempestTesting(unittest.TestCase):
         self._test_generate_test_list_mode_default('full')

     def test_parse_verifier_result_missing_verification_uuid(self):
-        self.tempestcommon.VERIFICATION_ID = ''
+        self.tempestcommon.VERIFICATION_ID = None
         with self.assertRaises(Exception):
             self.tempestcommon.parse_verifier_result()
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index 7ab8b455..573fcb70 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -41,9 +41,8 @@ class FunctestUtilsTesting(unittest.TestCase):
         self.status = 'test_status'
         self.details = 'test_details'
         self.db_url = 'test_db_url'
-        self.success_rate = 2.0
-        self.criteria = 'test_criteria==2.0'
-        self.result = 'PASS'
+        self.criteria = 50
+        self.result = 75
         self.start_date = 1482624000
         self.stop_date = 1482624000
         self.start_time = time.time()
@@ -567,7 +566,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                 as mock_criteria:
             mock_criteria.return_value = self.criteria
             resp = functest_utils.check_success_rate(self.case_name,
-                                                     self.success_rate)
+                                                     self.result)
             self.assertEqual(resp, 'PASS')

     def test_check_success_rate_failed(self):
@@ -575,7 +574,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                 as mock_criteria:
             mock_criteria.return_value = self.criteria
             resp = functest_utils.check_success_rate(self.case_name,
-                                                     3.0)
+                                                     0)
             self.assertEqual(resp, 'FAIL')

     # TODO: merge_dicts
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 6cebabff..7d993cbf 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -379,23 +379,14 @@ def get_functest_config(parameter):
     return get_parameter_from_yaml(parameter, yaml_)


-def check_success_rate(case_name, success_rate):
-    success_rate = float(success_rate)
+def check_success_rate(case_name, result):
+    # It should be removed as TestCase tests criteria
+    # and result.
+    logger.warning('check_success_rate will be removed soon')
     criteria = get_criteria_by_test(case_name)
-
-    def get_criteria_value(op):
-        return float(criteria.split(op)[1].rstrip('%'))
-
-    status = 'FAIL'
-    ops = ['==', '>=']
-    for op in ops:
-        if op in criteria:
-            c_value = get_criteria_value(op)
-            if eval("%s %s %s" % (success_rate, op, c_value)):
-                status = 'PASS'
-                break
-
-    return status
+    if type(criteria) == int and result >= criteria:
+        return 'PASS'
+    return 'FAIL'


 def merge_dicts(dict1, dict2):
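
A minimal usage sketch of the integer criteria/result semantics introduced by this change, assuming functest.core.testcase as patched above; the SampleTest subclass and its values are hypothetical and only illustrate how check_result() and push_to_db() behave:

    # Sketch only: SampleTest and its numbers are made up for illustration.
    from functest.core import testcase


    class SampleTest(testcase.TestCase):
        def run(self, **kwargs):
            # A real runner would compute a success percentage; 75 is arbitrary.
            self.result = 75
            return testcase.TestCase.EX_OK


    test = SampleTest(case_name='sample', project_name='functest', criteria=80)
    test.run()
    # result (75) < criteria (80), so check_result() reports a failure and
    # push_to_db() would publish 'FAIL' rather than the raw result value.
    assert test.check_result() == testcase.TestCase.EX_TESTCASE_FAILED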