author     Cédric Ollivier <cedric.ollivier@orange.com>    2017-04-23 08:59:43 +0200
committer  Cédric Ollivier <cedric.ollivier@orange.com>    2017-04-26 09:35:47 +0200
commit     e620488a6747318c40eb973c2607ae6d44e95b8f
tree       7654490079abb39af4a48b2edf249efc66cb9874
parent     9d4e6cb10fef5f0cef104861034340cda5e48a3e
Switch TestCase attribute criteria to result
It mainly avoids mixing input and output.
Criteria is now an input set in functest/ci/testcases.yaml, which must
then be passed as __init__() args (to be proposed in an additional
change).
It also renames the related TestCase method to check_result().
Change-Id: Ifc3c8e3ea6cde7e3edf7174bed4bf2bf0894e8e3
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
22 files changed, 64 insertions, 63 deletions
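For readers skimming the diff, here is a minimal sketch of the renamed hook as it reads in functest/core/testcase.py after this change. It is not the full Functest class: the exit-code values, the logging setup and the final return are assumptions filled in from the hunks below.

```python
import logging


class TestCase(object):
    """Minimal sketch of functest.core.testcase.TestCase after the rename."""

    EX_OK = 0                 # placeholder values; the real class defines more
    EX_TESTCASE_FAILED = 1

    logger = logging.getLogger(__name__)

    def __init__(self, **kwargs):
        self.case_name = kwargs.get('case_name', '')
        self.result = ""      # output of run(), formerly named criteria

    def check_result(self):
        """Return EX_OK only when run() stored 'PASS' in self.result."""
        try:
            assert self.result
            if self.result == 'PASS':
                return TestCase.EX_OK
        except AssertionError:
            self.logger.error("Please run test before checking the results")
        return TestCase.EX_TESTCASE_FAILED
```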
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 0ca73f36..e68901b8 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -155,7 +155,7 @@ def run_test(test, tier_name, testcases=None):
         if result == testcase.TestCase.EX_OK:
             if GlobalVariables.REPORT_FLAG:
                 test_case.push_to_db()
-            result = test_case.check_criteria()
+            result = test_case.check_result()
     except ImportError:
         logger.exception("Cannot import module {}".format(
             run_dict['module']))
diff --git a/functest/core/feature.py b/functest/core/feature.py
index 5f8a0873..08500a26 100644
--- a/functest/core/feature.py
+++ b/functest/core/feature.py
@@ -59,7 +59,7 @@ class Feature(base.TestCase):
         It sets the following attributes required to push the results
         to DB:

-            * criteria,
+            * result,
             * start_time,
             * stop_time.

@@ -74,15 +74,15 @@ class Feature(base.TestCase):
         """
         self.start_time = time.time()
         exit_code = base.TestCase.EX_RUN_ERROR
-        self.criteria = "FAIL"
+        self.result = "FAIL"
         try:
             if self.execute(**kwargs) == 0:
                 exit_code = base.TestCase.EX_OK
-                self.criteria = 'PASS'
+                self.result = 'PASS'
             ft_utils.logger_test_results(
                 self.project_name, self.case_name,
-                self.criteria, self.details)
-            self.logger.info("%s %s", self.project_name, self.criteria)
+                self.result, self.details)
+            self.logger.info("%s %s", self.project_name, self.result)
         except Exception:  # pylint: disable=broad-except
             self.logger.exception("%s FAILED", self.project_name)
         self.logger.info("Test result is stored in '%s'", self.result_file)
diff --git a/functest/core/pytest_suite_runner.py b/functest/core/pytest_suite_runner.py
index 775f0a66..8b5da05e 100644
--- a/functest/core/pytest_suite_runner.py
+++ b/functest/core/pytest_suite_runner.py
@@ -48,10 +48,10 @@ class PyTestSuiteRunner(base.TestCase):
         if ((result.errors and len(result.errors) > 0) or
                 (result.failures and len(result.failures) > 0)):
             self.logger.info("%s FAILED" % self.case_name)
-            self.criteria = 'FAIL'
+            self.result = 'FAIL'
         else:
             self.logger.info("%s OK" % self.case_name)
-            self.criteria = 'PASS'
+            self.result = 'PASS'

         self.details = {}
         return exit_code
diff --git a/functest/core/testcase.py b/functest/core/testcase.py
index 309842e3..b9dcbb2d 100644
--- a/functest/core/testcase.py
+++ b/functest/core/testcase.py
@@ -38,25 +38,25 @@ class TestCase(object):
         self.details = {}
         self.project_name = kwargs.get('project_name', 'functest')
         self.case_name = kwargs.get('case_name', '')
-        self.criteria = ""
+        self.result = ""
         self.start_time = ""
         self.stop_time = ""

-    def check_criteria(self):
-        """Interpret the results of the test case.
+    def check_result(self):
+        """Interpret the result of the test case.

-        It allows getting the results of TestCase. It completes run()
+        It allows getting the result of TestCase. It completes run()
         which only returns the execution status.

-        It can be overriden if checking criteria is not suitable.
+        It can be overriden if checking result is not suitable.

         Returns:
-            TestCase.EX_OK if criteria is 'PASS'.
+            TestCase.EX_OK if result is 'PASS'.
             TestCase.EX_TESTCASE_FAILED otherwise.
         """
         try:
-            assert self.criteria
-            if self.criteria == 'PASS':
+            assert self.result
+            if self.result == 'PASS':
                 return TestCase.EX_OK
         except AssertionError:
             self.logger.error("Please run test before checking the results")
@@ -74,7 +74,7 @@ class TestCase(object):
         The new implementation must set the following attributes to push
         the results to DB:

-            * criteria,
+            * result,
             * start_time,
             * stop_time.

@@ -99,7 +99,7 @@ class TestCase(object):

             * project_name,
             * case_name,
-            * criteria,
+            * result,
             * start_time,
             * stop_time.

@@ -110,12 +110,12 @@ class TestCase(object):
         try:
             assert self.project_name
             assert self.case_name
-            assert self.criteria
+            assert self.result
             assert self.start_time
             assert self.stop_time
             if ft_utils.push_results_to_db(
                     self.project_name, self.case_name, self.start_time,
-                    self.stop_time, self.criteria, self.details):
+                    self.stop_time, self.result, self.details):
                 self.logger.info("The results were successfully pushed to DB")
                 return TestCase.EX_OK
             else:
diff --git a/functest/core/vnf_base.py b/functest/core/vnf_base.py
index 2de28c12..fe4e427f 100644
--- a/functest/core/vnf_base.py
+++ b/functest/core/vnf_base.py
@@ -196,19 +196,19 @@ class VnfOnBoardingBase(base.TestCase):

     def parse_results(self):
         exit_code = self.EX_OK
-        self.criteria = "PASS"
+        self.result = "PASS"
         self.logger.info(self.details)
         # The 2 VNF steps must be OK to get a PASS result
         if (self.details['vnf']['status'] is not "PASS" or
                 self.details['test_vnf']['status'] is not "PASS"):
             exit_code = self.EX_RUN_ERROR
-            self.criteria = "FAIL"
+            self.result = "FAIL"
         return exit_code

     def log_results(self):
         ft_utils.logger_test_results(self.project_name,
                                      self.case_name,
-                                     self.criteria,
+                                     self.result,
                                      self.details)

     def step_failure(self, error_msg):
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index ad8745c7..e07e2a8d 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -500,12 +500,12 @@ class RallyBase(testcase.TestCase):
                             'nb tests': total_nb_tests,
                             'nb success': success_rate}})

-        self.criteria = ft_utils.check_success_rate(
+        self.result = ft_utils.check_success_rate(
             self.case_name, success_rate)
         self.details = payload

         logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.criteria))
+                    % (self.case_name, success_rate, self.result))

     def _clean_up(self):
         if self.volume_type:
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index c708a223..7aff251b 100755
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -133,10 +133,10 @@ class RefstackClient(testcase.TestCase):
         except Exception:
             success_rate = 0

-        self.criteria = ft_utils.check_success_rate(
+        self.result = ft_utils.check_success_rate(
             self.case_name, success_rate)
         logger.info("Testcase %s success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.criteria))
+                    % (self.case_name, success_rate, self.result))

     def run(self):
         '''used for functest command line,
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 8b175c25..8fa413a8 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -200,10 +200,10 @@ class TempestCommon(testcase.TestCase):
         except Exception:
             success_rate = 0

-        self.criteria = ft_utils.check_success_rate(
+        self.result = ft_utils.check_success_rate(
             self.case_name, success_rate)

         logger.info("Tempest %s success_rate is %s%%, is marked as %s"
-                    % (self.case_name, success_rate, self.criteria))
+                    % (self.case_name, success_rate, self.result))

     def run(self):
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 8f7cc1d2..8bf263eb 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -273,7 +273,7 @@ class VPingBase(testcase.TestCase):
         self.details = {'timestart': start_time,
                         'duration': duration,
                         'status': test_status}
-        self.criteria = test_status
+        self.result = test_status

     @staticmethod
     def pMsg(msg):
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index acd004b0..6f4acf6d 100755
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -100,7 +100,7 @@ class ODLTests(testcase.TestCase):
         result = robot.api.ExecutionResult(xml_file)
         visitor = ODLResultVisitor()
         result.visit(visitor)
-        self.criteria = result.suite.status
+        self.result = result.suite.status
         self.start_time = timestamp_to_secs(result.suite.starttime)
         self.stop_time = timestamp_to_secs(result.suite.endtime)
         self.details = {}
diff --git a/functest/opnfv_tests/sdn/onos/onos.py b/functest/opnfv_tests/sdn/onos/onos.py
index fe496c1f..cbe1b9eb 100644
--- a/functest/opnfv_tests/sdn/onos/onos.py
+++ b/functest/opnfv_tests/sdn/onos/onos.py
@@ -156,9 +156,9 @@ class Onos(OnosBase):
                     result['FUNCvirNetL3']['result'] == "Success"):
                 status = "PASS"
         except:
-            logger.error("Unable to set ONOS criteria")
+            logger.error("Unable to set ONOS result")

-        self.criteria = status
+        self.result = status
         self.details = result

     def _run(self):
diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py
index 6de65bcd..9c94cfb1 100755
--- a/functest/opnfv_tests/vnf/aaa/aaa.py
+++ b/functest/opnfv_tests/vnf/aaa/aaa.py
@@ -49,7 +49,7 @@ class AaaVnf(vnf_base.VnfOnBoardingBase):
     def main(self, **kwargs):
         self.logger.info("AAA VNF onboarding")
         self.execute()
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index e351e0d9..0e6d4797 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -277,7 +277,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
         self.logger.info("Cloudify IMS VNF onboarding test starting")
         self.execute()
         self.logger.info("Cloudify IMS VNF onboarding test executed")
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
index 7ca96ae1..a46f9d71 100644
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ b/functest/opnfv_tests/vnf/ims/opera_ims.py
@@ -119,7 +119,7 @@ class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase):
         self.logger.info("Start to run Opera vIMS VNF onboarding test")
         self.execute()
         self.logger.info("Opera vIMS VNF onboarding test finished")
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
index c95a17e2..351c5fbe 100755
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
@@ -484,7 +484,7 @@ class ImsVnf(vnf_base.VnfOnBoardingBase):
         self.logger.info("Orchestra IMS VNF onboarding test starting")
         self.execute()
         self.logger.info("Orchestra IMS VNF onboarding test executed")
-        if self.criteria is "PASS":
+        if self.result is "PASS":
             return self.EX_OK
         else:
             return self.EX_RUN_ERROR
diff --git a/functest/opnfv_tests/vnf/router/vyos_vrouter.py b/functest/opnfv_tests/vnf/router/vyos_vrouter.py
index e6d2284d..5654278d 100644
--- a/functest/opnfv_tests/vnf/router/vyos_vrouter.py
+++ b/functest/opnfv_tests/vnf/router/vyos_vrouter.py
@@ -29,6 +29,6 @@ class VrouterVnf(base.Feature):
         f.close()

     def log_results(self):
-        if self.criteria == 'PASS':
+        if self.result == 'PASS':
             self.set_result_details()
         super(VrouterVnf, self).log_results()
diff --git a/functest/tests/unit/core/test_feature.py b/functest/tests/unit/core/test_feature.py
index bd7197f0..97075223 100644
--- a/functest/tests/unit/core/test_feature.py
+++ b/functest/tests/unit/core/test_feature.py
@@ -35,9 +35,9 @@ class FeatureTestingBase(unittest.TestCase):
     def _test_run(self, status, mock_method=None):
         self.assertEqual(self.feature.run(cmd=self._cmd), status)
         if status == testcase.TestCase.EX_OK:
-            self.assertEqual(self.feature.criteria, 'PASS')
+            self.assertEqual(self.feature.result, 'PASS')
         else:
-            self.assertEqual(self.feature.criteria, 'FAIL')
+            self.assertEqual(self.feature.result, 'FAIL')
         mock_method.assert_has_calls([mock.call(), mock.call()])
         self.assertEqual(self.feature.start_time, 1)
         self.assertEqual(self.feature.stop_time, 2)
diff --git a/functest/tests/unit/core/test_testcase.py b/functest/tests/unit/core/test_testcase.py
index 4f3b25cc..b93f50d6 100644
--- a/functest/tests/unit/core/test_testcase.py
+++ b/functest/tests/unit/core/test_testcase.py
@@ -34,7 +34,7 @@ class TestCaseTesting(unittest.TestCase):
             project_name=self._project_name)
         self.test.start_time = "1"
         self.test.stop_time = "2"
-        self.test.criteria = "PASS"
+        self.test.result = "PASS"
         self.test.details = {"Hello": "World"}

     def test_run_unimplemented(self):
@@ -57,7 +57,7 @@
         self._test_missing_attribute()

     def test_missing_criteria(self):
-        self.test.criteria = None
+        self.test.result = None
         self._test_missing_attribute()

     def test_missing_start_time(self):
@@ -76,7 +76,7 @@
                          testcase.TestCase.EX_OK)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.criteria, self.test.details)
+            self.test.stop_time, self.test.result, self.test.details)

     @mock.patch('functest.utils.functest_utils.push_results_to_db',
                 return_value=False)
@@ -85,7 +85,7 @@
                          testcase.TestCase.EX_PUSH_TO_DB_ERROR)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.criteria, self.test.details)
+            self.test.stop_time, self.test.result, self.test.details)

     @mock.patch('functest.utils.functest_utils.push_results_to_db',
                 return_value=True)
@@ -94,21 +94,21 @@
                          testcase.TestCase.EX_OK)
         mock_function.assert_called_once_with(
             self._project_name, self._case_name, self.test.start_time,
-            self.test.stop_time, self.test.criteria, self.test.details)
+            self.test.stop_time, self.test.result, self.test.details)

-    def test_check_criteria_missing(self):
-        self.test.criteria = None
-        self.assertEqual(self.test.check_criteria(),
+    def test_check_result_missing(self):
+        self.test.result = None
+        self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_TESTCASE_FAILED)

-    def test_check_criteria_failed(self):
-        self.test.criteria = 'FAILED'
-        self.assertEqual(self.test.check_criteria(),
+    def test_check_result_failed(self):
+        self.test.result = 'FAILED'
+        self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_TESTCASE_FAILED)

-    def test_check_criteria_pass(self):
-        self.test.criteria = 'PASS'
-        self.assertEqual(self.test.check_criteria(),
+    def test_check_result_pass(self):
+        self.test.result = 'PASS'
+        self.assertEqual(self.test.check_result(),
                          testcase.TestCase.EX_OK)

diff --git a/functest/tests/unit/core/test_vnf_base.py b/functest/tests/unit/core/test_vnf_base.py
index 96706040..540cf610 100644
--- a/functest/tests/unit/core/test_vnf_base.py
+++ b/functest/tests/unit/core/test_vnf_base.py
@@ -23,7 +23,7 @@ class VnfBaseTesting(unittest.TestCase):
         self.test.project = "functest"
         self.test.start_time = "1"
         self.test.stop_time = "5"
-        self.test.criteria = ""
+        self.test.result = ""
         self.test.details = {"orchestrator": {"status": "PASS",
                                               "result": "",
                                               "duration": 20},
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index 55e100dd..80469346 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -123,7 +123,7 @@ class ODLParseResultTesting(ODLTesting):
         with mock.patch('robot.api.ExecutionResult',
                         return_value=mock.Mock(suite=suite)):
             self.test.parse_results()
-        self.assertEqual(self.test.criteria, config['status'])
+        self.assertEqual(self.test.result, config['status'])
         self.assertEqual(self.test.start_time,
                          timestamp_to_secs(config['starttime']))
         self.assertEqual(self.test.stop_time,
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index 22cadf0f..7ab8b455 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -43,6 +43,7 @@ class FunctestUtilsTesting(unittest.TestCase):
         self.db_url = 'test_db_url'
         self.success_rate = 2.0
         self.criteria = 'test_criteria==2.0'
+        self.result = 'PASS'
         self.start_date = 1482624000
         self.stop_date = 1482624000
         self.start_time = time.time()
@@ -279,7 +280,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                 as mock_logger_error:
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date, self.stop_date,
-                                              self.criteria, self.details)
+                                              self.result, self.details)
             mock_logger_error.assert_called_once_with("Please set env var: " +
                                                       str("\'" + env_var +
                                                           "\'"))
@@ -311,7 +312,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                              push_results_to_db(self.project, self.case_name,
                                                 self.start_date,
                                                 self.stop_date,
-                                                self.criteria, self.details))
+                                                self.result, self.details))
            mock_logger_error.assert_called_once_with(test_utils.
                                                      RegexMatch("Pushing "
                                                                 "Result to"
@@ -334,7 +335,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                             push_results_to_db(self.project, self.case_name,
                                                self.start_date,
                                                self.stop_date,
-                                               self.criteria, self.details))
+                                               self.result, self.details))
         self.assertTrue(mock_logger_error.called)

     def test_push_results_to_db_default(self):
@@ -349,7 +350,7 @@ class FunctestUtilsTesting(unittest.TestCase):
                            push_results_to_db(self.project, self.case_name,
                                               self.start_date,
                                               self.stop_date,
-                                              self.criteria, self.details))
+                                              self.result, self.details))
         readline = 0
         test_ip = ['10.1.23.4', '10.1.14.15', '10.1.16.15']
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 0d612412..6cebabff 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -192,7 +192,7 @@ def logger_test_results(project, case_name, status, details):

 @decorators.can_dump_request_to_file
 def push_results_to_db(project, case_name,
-                       start_date, stop_date, criteria, details):
+                       start_date, stop_date, result, details):
     """
     POST results to the Result target DB
     """
@@ -213,7 +213,7 @@ def push_results_to_db(project, case_name,

     params = {"project_name": project, "case_name": case_name,
               "pod_name": pod_name, "installer": installer,
-              "version": version, "scenario": scenario, "criteria": criteria,
+              "version": version, "scenario": scenario, "criteria": result,
               "build_tag": build_tag, "start_date": test_start,
               "stop_date": test_stop, "details": details}

@@ -248,7 +248,7 @@ def push_results_to_db(project, case_name,
                 'pod': pod_name,
                 'v': version,
                 's': scenario,
-                'c': criteria,
+                'c': result,
                 't': build_tag,
                 'd': details,
                 'error': e