author    Jose Lausuch <jose.lausuch@ericsson.com>  2017-05-18 16:44:19 +0000
committer Gerrit Code Review <gerrit@opnfv.org>    2017-05-18 16:44:19 +0000
commit    6a655e830dd891d0b5afec601b4fb2461b1fad42 (patch)
tree      e5ad8ac9a0862e6ae389581aeb15d4d32947b40a
parent    54c02e2bd175a80a5659ddd4156c1166347efc93 (diff)
parent    87dcf37d2d658e83252aadfad468639842a30085 (diff)
Merge "Remove check_success_rate"
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py                       20
-rwxr-xr-x  functest/opnfv_tests/openstack/refstack_client/refstack_client.py  18
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py                   18
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py                   13
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py               18
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py                    16
-rw-r--r--  functest/utils/functest_utils.py                                    10

7 files changed, 34 insertions(+), 79 deletions(-)
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index f762383a..fbed0ce1 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -8,6 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+from __future__ import division
+
import json
import logging
import os
@@ -20,7 +22,6 @@ import yaml
from functest.core import testcase
from functest.utils.constants import CONST
-import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
logger = logging.getLogger(__name__)
@@ -480,11 +481,12 @@ class RallyBase(testcase.TestCase):
total_duration_str2 = "{0:<10}".format(total_duration_str)
total_nb_tests_str = "{0:<13}".format(total_nb_tests)
- if len(self.summary):
- success_rate = total_success / len(self.summary)
- else:
- success_rate = 100
- success_rate = "{:0.2f}".format(success_rate)
+ try:
+ self.result = total_success / len(self.summary)
+ except ZeroDivisionError:
+ self.result = 100
+
+ success_rate = "{:0.2f}".format(self.result)
success_rate_str = "{0:<10}".format(str(success_rate) + '%')
report += ("+===================+============"
"+===============+===========+")
@@ -500,12 +502,10 @@ class RallyBase(testcase.TestCase):
'nb tests': total_nb_tests,
'nb success': success_rate}})
- self.result = ft_utils.check_success_rate(
- self.case_name, success_rate)
self.details = payload
- logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.result))
+ logger.info("Rally '%s' success_rate is %s%%"
+ % (self.case_name, success_rate))
def _clean_up(self):
if self.volume_type:
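
[Editor's note] The rally.py hunk above swaps an explicit length check for an EAFP
try/except, and the new `from __future__ import division` makes `/` perform true
(float) division under Python 2. A minimal sketch of the pattern, assuming
illustrative names (`success_rate`, `summary`) rather than the module's real API:

    from __future__ import division  # true division on Python 2


    def success_rate(total_success, summary):
        # An empty summary raises ZeroDivisionError; treat that as 100,
        # mirroring the `else` branch the patch removes.
        try:
            result = total_success / len(summary)
        except ZeroDivisionError:
            result = 100
        return "{:0.2f}".format(result)


    print(success_rate(250, ['a', 'b', 'c']))  # 83.33
    print(success_rate(0, []))                 # 100.00

Without the `__future__` import, Python 2 would floor-divide the two integers and
silently report 83 instead of 83.33, which is why the import accompanies each of
the three production-code changes in this patch.
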
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index ebae4b86..2a2718dd 100755
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -5,6 +5,10 @@
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
+
+from __future__ import division
+
+
import argparse
import logging
import os
@@ -123,7 +127,11 @@ class RefstackClient(testcase.TestCase):
skipped_testcases += match + ", "
num_executed = int(num_tests) - int(num_skipped)
- success_rate = 100 * int(num_success) / int(num_executed)
+
+ try:
+ self.result = 100 * int(num_success) / int(num_executed)
+ except ZeroDivisionError:
+ logger.error("No test has been executed")
self.details = {"tests": int(num_tests),
"failures": int(num_failures),
@@ -131,12 +139,10 @@ class RefstackClient(testcase.TestCase):
"errors": failed_testcases,
"skipped": skipped_testcases}
except Exception:
- success_rate = 0
+ self.result = 0
- self.result = ft_utils.check_success_rate(
- self.case_name, success_rate)
- logger.info("Testcase %s success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.result))
+ logger.info("Testcase %s success_rate is %s%%"
+ % (self.case_name, self.result))
def run(self):
'''used for functest command line,
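
[Editor's note] In the refstack hunk the percentage is taken only over executed
(non-skipped) tests. Unlike the tempest change below, the ZeroDivisionError branch
here only logs, so `self.result` keeps whatever value it already had. A sketch of
the computation, where `compute_result` is a hypothetical standalone helper:

    from __future__ import division
    import logging

    logger = logging.getLogger(__name__)


    def compute_result(num_tests, num_skipped, num_success):
        # Skipped tests are excluded before the percentage is taken,
        # as in the refstack hunk above.
        num_executed = int(num_tests) - int(num_skipped)
        try:
            return 100 * int(num_success) / num_executed
        except ZeroDivisionError:
            logger.error("No test has been executed")
            return None  # the patch itself leaves self.result unset here
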
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 984e2a1b..a41d07c7 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -8,6 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+from __future__ import division
+
import logging
import os
import re
@@ -181,7 +183,13 @@ class TempestCommon(testcase.TestCase):
try:
num_executed = int(num_tests) - int(num_skipped)
- success_rate = 100 * int(num_success) / int(num_executed)
+ try:
+ self.result = 100 * int(num_success) / int(num_executed)
+ except ZeroDivisionError:
+ logger.error("No test has been executed")
+ self.result = 0
+ return
+
with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
"tempest.log"), 'r') as logfile:
output = logfile.read()
@@ -198,12 +206,10 @@ class TempestCommon(testcase.TestCase):
"errors": error_logs,
"skipped": skipped_testcase}
except Exception:
- success_rate = 0
+ self.result = 0
- self.result = ft_utils.check_success_rate(
- self.case_name, success_rate)
- logger.info("Tempest %s success_rate is %s%%, is marked as %s"
- % (self.case_name, success_rate, self.result))
+ logger.info("Tempest %s success_rate is %s%%"
+ % (self.case_name, self.result))
def run(self):
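
[Editor's note] The tempest change nests the division inside the pre-existing
broad `except Exception` handler, so the "nothing executed" case gets its own
message and early return instead of being lumped in with generic parse failures.
A sketch of that control flow, with `parse_result` as an illustrative name:

    from __future__ import division
    import logging

    logger = logging.getLogger(__name__)


    def parse_result(num_tests, num_skipped, num_success):
        try:
            num_executed = int(num_tests) - int(num_skipped)
            try:
                result = 100 * int(num_success) / num_executed
            except ZeroDivisionError:
                # Reported explicitly, never masked by the outer handler.
                logger.error("No test has been executed")
                return 0
            # ... log parsing and details assembly continue here ...
            return result
        except Exception:
            # Any other parsing failure still maps to 0, as before.
            return 0
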
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index fe25dfcf..c7828618 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -343,19 +343,6 @@ class OSRallyTesting(unittest.TestCase):
self.rally_base._run_tests()
self.rally_base._run_task.assert_any_call('test1')
- @mock.patch('functest.opnfv_tests.openstack.rally.rally.logger.info')
- def test_generate_report(self, mock_logger_info):
- summary = [{'test_name': 'test_name',
- 'overall_duration': 5,
- 'nb_tests': 3,
- 'success': 5}]
- self.rally_base.summary = summary
- with mock.patch('functest.opnfv_tests.openstack.rally.rally.'
- 'ft_utils.check_success_rate',
- return_value='criteria'):
- self.rally_base._generate_report()
- self.assertTrue(mock_logger_info.called)
-
def test_clean_up_default(self):
self.rally_base.volume_type = mock.Mock()
self.rally_base.cinder_client = mock.Mock()
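
[Editor's note] The deleted test existed mainly to mock `check_success_rate`. A
hypothetical follow-up test (not part of this patch) could assert the computed
result directly, assuming `_generate_report` sums the per-scenario 'success'
values before dividing by the summary length:

    @mock.patch('functest.opnfv_tests.openstack.rally.rally.logger.info')
    def test_generate_report_sets_result(self, mock_logger_info):
        # Hypothetical replacement, not in this change: nothing to mock
        # once the helper is gone. Assumes total_success is the sum of
        # the per-scenario 'success' values (5 here, over 1 entry).
        self.rally_base.summary = [{'test_name': 'test_name',
                                    'overall_duration': 5,
                                    'nb_tests': 3,
                                    'success': 5}]
        self.rally_base._generate_report()
        self.assertEqual(self.rally_base.result, 5)
        self.assertTrue(mock_logger_info.called)
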
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index e05e5dfa..bb75c9ed 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -151,24 +151,6 @@ class OSTempestTesting(unittest.TestCase):
assert_any_call("Starting Tempest test suite: '%s'."
% cmd_line)
- @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.logger.info')
- def test_parse_verifier_result_default(self, mock_logger_info):
- self.tempestcommon.VERIFICATION_ID = 'test_uuid'
- self.tempestcommon.case_name = 'test_case_name'
- stdout = ['Testscount||2', 'Success||2', 'Skipped||0', 'Failures||0']
- with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'subprocess.Popen') as mock_popen, \
- mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'ft_utils.check_success_rate') as mock_method, \
- mock.patch('__builtin__.open', mock.mock_open()):
- mock_stdout = mock.Mock()
- attrs = {'stdout': stdout}
- mock_stdout.configure_mock(**attrs)
- mock_popen.return_value = mock_stdout
-
- self.tempestcommon.parse_verifier_result()
- mock_method.assert_any_call('test_case_name', 100)
-
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'os.path.exists', return_value=False)
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.os.makedirs',
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index d48e06f9..57e0c465 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -563,22 +563,6 @@ class FunctestUtilsTesting(unittest.TestCase):
assert_called_once_with(self.parameter,
self.config_yaml)
- def test_check_success_rate_default(self):
- with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
- as mock_criteria:
- mock_criteria.return_value = self.criteria
- resp = functest_utils.check_success_rate(self.case_name,
- self.result)
- self.assertEqual(resp, 100)
-
- def test_check_success_rate_failed(self):
- with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
- as mock_criteria:
- mock_criteria.return_value = self.criteria
- resp = functest_utils.check_success_rate(self.case_name,
- 0)
- self.assertEqual(resp, 0)
-
# TODO: merge_dicts
def test_get_testcases_file_dir(self):
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 744258b3..bf30f56e 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -379,16 +379,6 @@ def get_functest_config(parameter):
return get_parameter_from_yaml(parameter, yaml_)
-def check_success_rate(case_name, result):
- # It should be removed as TestCase tests criteria
- # and result.
- logger.warning('check_success_rate will be removed soon')
- criteria = get_criteria_by_test(case_name)
- if type(criteria) == int and result >= criteria:
- return 100
- return 0
-
-
def merge_dicts(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
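
[Editor's note] The deleted helper compared a case's success rate against a
per-case criteria fetched at call time; its own comment says the TestCase class
should own that comparison, which is why each caller now stores the rate in
`self.result`. An assumed-shape sketch of where the logic lands (the real
functest.core.testcase API may differ):

    class TestCase(object):
        # Assumed shape only: criteria and result live on the object,
        # so no lookup helper is needed at report time.
        def __init__(self, case_name='', criteria=100):
            self.case_name = case_name
            self.criteria = criteria
            self.result = 0

        def is_successful(self):
            # The comparison check_success_rate used to perform.
            return self.result >= self.criteria
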