-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py      | 16 +++++++++++++++-
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py  | 25 +++++++++++++++++++++----
2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index cd84657b5..f4c968faa 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -433,10 +433,13 @@ class RallyBase(singlevm.VmReady2):
         nb_tests = 0
         nb_success = 0
         overall_duration = 0.0
+        success = []
+        failures = []
 
         rally_report = json.loads(json_raw)
         for task in rally_report.get('tasks'):
             for subtask in task.get('subtasks'):
+                has_errors = False
                 for workload in subtask.get('workloads'):
                     if workload.get('full_duration'):
                         overall_duration += workload.get('full_duration')
@@ -447,11 +450,20 @@ class RallyBase(singlevm.VmReady2):
                     for result in workload.get('data'):
                         if not result.get('error'):
                             nb_success += 1
+                        else:
+                            has_errors = True
+
+                if has_errors:
+                    failures.append(subtask['title'])
+                else:
+                    success.append(subtask['title'])
 
         scenario_summary = {'test_name': test_name,
                             'overall_duration': overall_duration,
                             'nb_tests': nb_tests,
                             'nb_success': nb_success,
+                            'success': success,
+                            'failures': failures,
                             'task_status': self.task_succeed(json_raw)}
         self.summary.append(scenario_summary)
 
@@ -548,7 +560,9 @@ class RallyBase(singlevm.VmReady2):
             payload.append({'module': item['test_name'],
                             'details': {'duration': item['overall_duration'],
                                         'nb tests': item['nb_tests'],
-                                        'success': success_str}})
+                                        'success rate': success_str,
+                                        'success': item['success'],
+                                        'failures': item['failures']}})
 
         total_duration_str = time.strftime("%H:%M:%S",
                                            time.gmtime(total_duration))
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 7d21e0deb..1bdb7c422 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -360,14 +360,31 @@ class OSRallyTesting(unittest.TestCase):
         mock_prep_env.assert_called()
 
     def test_append_summary(self):
-        text = '{"tasks": [{"subtasks": [{"workloads": [{"full_duration": ' \
-               '1.23,"data": [{"error": []}]}]},{"workloads": ' \
-               '[{"full_duration": 2.78, "data": [{"error": ["err"]}]}]}]}]}'
-        self.rally_base._append_summary(text, "foo_test")
+        json_dict = {
+            'tasks': [{
+                'subtasks': [{
+                    'title': 'sub_task',
+                    'workloads': [{
+                        'full_duration': 1.23,
+                        'data': [{
+                            'error': []
+                        }]
+                    }, {
+                        'full_duration': 2.78,
+                        'data': [{
+                            'error': ['err']
+                        }]
+                    }]
+                }]
+            }]
+        }
+        self.rally_base._append_summary(json.dumps(json_dict), "foo_test")
         self.assertEqual(self.rally_base.summary[0]['test_name'], "foo_test")
         self.assertEqual(self.rally_base.summary[0]['overall_duration'], 4.01)
         self.assertEqual(self.rally_base.summary[0]['nb_tests'], 2)
         self.assertEqual(self.rally_base.summary[0]['nb_success'], 1)
+        self.assertEqual(self.rally_base.summary[0]['success'], [])
+        self.assertEqual(self.rally_base.summary[0]['failures'], ['sub_task'])
 
     def test_is_successful_false(self):
         with mock.patch('six.moves.builtins.super') as mock_super:
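
For reference, below is a minimal standalone sketch of the per-subtask bookkeeping added by this change, runnable outside functest. The function name summarize_report is hypothetical, and the nb_tests counting lines are assumed to mirror the unchanged code between the two hunks; feeding it the same report used in test_append_summary should yield success == [] and failures == ['sub_task'].

import json

def summarize_report(json_raw, test_name):
    # Hypothetical helper reproducing the aggregation logic of
    # RallyBase._append_summary after this patch.
    nb_tests = 0
    nb_success = 0
    overall_duration = 0.0
    success = []
    failures = []
    rally_report = json.loads(json_raw)
    for task in rally_report.get('tasks'):
        for subtask in task.get('subtasks'):
            # A subtask counts as failed as soon as one of its workload
            # results carries an error.
            has_errors = False
            for workload in subtask.get('workloads'):
                if workload.get('full_duration'):
                    overall_duration += workload.get('full_duration')
                if workload.get('data'):
                    # Assumed to match the untouched lines between the hunks.
                    nb_tests += len(workload.get('data'))
                for result in workload.get('data'):
                    if not result.get('error'):
                        nb_success += 1
                    else:
                        has_errors = True
            if has_errors:
                failures.append(subtask['title'])
            else:
                success.append(subtask['title'])
    return {'test_name': test_name,
            'overall_duration': overall_duration,
            'nb_tests': nb_tests,
            'nb_success': nb_success,
            'success': success,
            'failures': failures}

report = {'tasks': [{'subtasks': [{
    'title': 'sub_task',
    'workloads': [
        {'full_duration': 1.23, 'data': [{'error': []}]},
        {'full_duration': 2.78, 'data': [{'error': ['err']}]}]}]}]}
print(summarize_report(json.dumps(report), 'foo_test'))
# failures == ['sub_task'] because one of the two workloads reported an error.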