-rw-r--r--  functest/core/pytest_suite_runner.py                  42
-rwxr-xr-x  functest/opnfv_tests/sdn/odl/odl.py                    2
-rw-r--r--  functest/tests/unit/core/test_pytest_suite_runner.py  52
3 files changed, 46 insertions(+), 50 deletions(-)
diff --git a/functest/core/pytest_suite_runner.py b/functest/core/pytest_suite_runner.py
index a6e47660..efcef7b6 100644
--- a/functest/core/pytest_suite_runner.py
+++ b/functest/core/pytest_suite_runner.py
@@ -7,6 +7,8 @@
# pylint: disable=missing-docstring
+from __future__ import division
+
import logging
import time
import unittest
@@ -46,32 +48,14 @@ class PyTestSuiteRunner(testcase.TestCase):
stream=stream, verbosity=2).run(self.suite)
self.logger.debug("\n\n%s", stream.getvalue())
self.stop_time = time.time()
-
- if result.errors:
- self.logger.error('Number of errors in test suite - ' +
- str(len(result.errors)))
- for test, message in result.errors:
- self.logger.error(str(test) + " ERROR with " + message)
-
- if result.failures:
- self.logger.error('Number of failures in test suite - ' +
- str(len(result.failures)))
- for test, message in result.failures:
- self.logger.error(str(test) + " FAILED with " + message)
-
- # a result can be PASS or FAIL
- # But in this case it means that the Execution was OK
- # we shall distinguish Execution Error from FAIL results
- # TestCase.EX_RUN_ERROR means that the test case was not run
- # not that it was run but the result was FAIL
- exit_code = testcase.TestCase.EX_OK
- if ((result.errors and len(result.errors) > 0) or
- (result.failures and len(result.failures) > 0)):
- self.logger.info("%s FAILED", self.case_name)
- self.result = 0
- else:
- self.logger.info("%s OK", self.case_name)
- self.result = 100
-
- self.details = {}
- return exit_code
+ self.details = {"failures": result.failures,
+ "errors": result.errors}
+ try:
+ self.result = 100 * (
+ (result.testsRun - (len(result.failures) +
+ len(result.errors))) /
+ result.testsRun)
+ return testcase.TestCase.EX_OK
+ except ZeroDivisionError:
+ self.logger.error("No test has been run")
+ return testcase.TestCase.EX_RUN_ERROR
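Note on the new scoring rule: run() now derives self.result as the percentage of tests that neither failed nor errored, and a ZeroDivisionError stands in for "no test was run" (mapped to EX_RUN_ERROR). A minimal standalone sketch of that rule, where FakeResult and score are illustrative names rather than functest code, and the __future__ import keeps the division true under Python 2:

from __future__ import division  # true division under Python 2


class FakeResult(object):
    """Illustrative stand-in for unittest.TestResult."""

    def __init__(self, tests_run, failures, errors):
        self.testsRun = tests_run
        self.failures = failures
        self.errors = errors


def score(result):
    """Pass rate in percent; ZeroDivisionError means no test was run."""
    return 100 * (
        (result.testsRun - (len(result.failures) + len(result.errors))) /
        result.testsRun)


assert score(FakeResult(50, [('test2', 'failure_msg1')],
                        [('test1', 'error_msg1')])) == 96
assert score(FakeResult(50, [], [])) == 100

Run as a script, both assertions hold: 48 of 50 tests passing yields 96 (the value the unit test below expects), and a clean run yields 100.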
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index 2f3dd74b..b2b0b77c 100755
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -108,7 +108,7 @@ class ODLTests(testcase.TestCase):
result.suite.statistics.critical.passed /
result.suite.statistics.critical.total)
except ZeroDivisionError:
- self.__logger.error("No test has been ran")
+ self.__logger.error("No test has been run")
self.start_time = timestamp_to_secs(result.suite.starttime)
self.stop_time = timestamp_to_secs(result.suite.endtime)
self.details = {}
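The odl.py hunk only rewords the log message; the surrounding code already applies the same zero-division guard to Robot Framework's critical statistics. A hedged sketch of that pattern, where robot_pass_rate is an illustrative helper and the attribute path assumes a robot.api result whose suite.statistics.critical exposes passed and total counters, as in Robot Framework 3.x:

from __future__ import division


def robot_pass_rate(result):
    # Same guard as in pytest_suite_runner.py, applied to Robot
    # Framework critical stats; the helper name is made up.
    stats = result.suite.statistics.critical
    try:
        return 100 * (stats.passed / stats.total)
    except ZeroDivisionError:
        return None  # caller logs "No test has been run"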
diff --git a/functest/tests/unit/core/test_pytest_suite_runner.py b/functest/tests/unit/core/test_pytest_suite_runner.py
index 07ac7906..4160df03 100644
--- a/functest/tests/unit/core/test_pytest_suite_runner.py
+++ b/functest/tests/unit/core/test_pytest_suite_runner.py
@@ -20,29 +20,41 @@ class PyTestSuiteRunnerTesting(unittest.TestCase):
def setUp(self):
self.psrunner = pytest_suite_runner.PyTestSuiteRunner()
- self.result = mock.Mock()
- attrs = {'errors': [('test1', 'error_msg1')],
- 'failures': [('test2', 'failure_msg1')]}
- self.result.configure_mock(**attrs)
-
- self.pass_results = mock.Mock()
- attrs = {'errors': None,
- 'failures': None}
- self.pass_results.configure_mock(**attrs)
-
- def test_run(self):
- self.psrunner.case_name = 'test_case_name'
- with mock.patch('functest.core.pytest_suite_runner.'
- 'unittest.TextTestRunner.run',
- return_value=self.result):
- self.assertEqual(self.psrunner.run(),
- testcase.TestCase.EX_OK)
+ def _test_run(self, result, status=testcase.TestCase.EX_OK):
with mock.patch('functest.core.pytest_suite_runner.'
'unittest.TextTestRunner.run',
- return_value=self.pass_results):
- self.assertEqual(self.psrunner.run(),
- testcase.TestCase.EX_OK)
+ return_value=result):
+ self.assertEqual(self.psrunner.run(), status)
+
+ def test_run_no_ut(self):
+ mock_result = mock.Mock(testsRun=0, errors=[], failures=[])
+ self._test_run(mock_result, testcase.TestCase.EX_RUN_ERROR)
+ self.assertEqual(self.psrunner.result, 0)
+ self.assertEqual(self.psrunner.details, {'errors': [], 'failures': []})
+ self.assertEqual(self.psrunner.is_successful(),
+ testcase.TestCase.EX_TESTCASE_FAILED)
+
+ def test_run_ko(self):
+ self.psrunner.criteria = 100
+ mock_result = mock.Mock(testsRun=50, errors=[('test1', 'error_msg1')],
+ failures=[('test2', 'failure_msg1')])
+ self._test_run(mock_result, testcase.TestCase.EX_OK)
+ self.assertEqual(self.psrunner.result, 96)
+ self.assertEqual(self.psrunner.details,
+ {'errors': [('test1', 'error_msg1')],
+ 'failures': [('test2', 'failure_msg1')]})
+ self.assertEqual(self.psrunner.is_successful(),
+ testcase.TestCase.EX_TESTCASE_FAILED)
+
+ def test_run_ok(self):
+ mock_result = mock.Mock(testsRun=50, errors=[],
+ failures=[])
+ self._test_run(mock_result)
+ self.assertEqual(self.psrunner.result, 100)
+ self.assertEqual(self.psrunner.details, {'errors': [], 'failures': []})
+ self.assertEqual(self.psrunner.is_successful(),
+ testcase.TestCase.EX_OK)
if __name__ == "__main__":
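The rewritten unit tests all reduce to one pattern: build a mock result object and patch unittest.TextTestRunner.run so run() receives it without executing any real test, then parameterize the expected exit status through a shared _test_run helper. A self-contained sketch of that pattern, where Runner and RunnerTest are toy stand-ins rather than the functest classes:

import unittest

import mock


class Runner(object):
    """Toy stand-in for PyTestSuiteRunner (illustration only)."""

    def run(self):
        result = unittest.TextTestRunner().run(unittest.TestSuite())
        return 1 if (result.errors or result.failures) else 0


class RunnerTest(unittest.TestCase):
    def _test_run(self, result, status=0):
        # Patch the runner so no real test is executed.
        with mock.patch('unittest.TextTestRunner.run',
                        return_value=result):
            self.assertEqual(Runner().run(), status)

    def test_run_ok(self):
        self._test_run(mock.Mock(testsRun=3, errors=[], failures=[]))

    def test_run_ko(self):
        self._test_run(
            mock.Mock(testsRun=3, errors=[('t', 'msg')], failures=[]), 1)


if __name__ == "__main__":
    unittest.main(verbosity=2)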