author | Ross Brattain <ross.b.brattain@intel.com> | 2018-04-26 14:11:27 +0000
---|---|---
committer | Gerrit Code Review <gerrit@opnfv.org> | 2018-04-26 14:11:28 +0000
commit | b338d3091bb0beb89d4ad9f7c144f43a31a19a74 (patch) |
tree | c62e20c3a500e756fc56aab8a4410727eac5344a |
parent | fd54ab5c79b16cdc1765517057b8eb0c988f60c8 (diff) |
parent | dd87e15c9c4f0aff3577c1bcd68bd86dd2d64898 (diff) |
Merge "Get HA test case results on failure"
5 files changed, 55 insertions, 75 deletions
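The task.py half of the change can be read as follows: a non-zero runner status no longer raises RuntimeError out of _run(); the failure is logged, remembered in a task_success flag, and returned alongside the results, so start() can mark the test case FAIL while still recording the data it collected. The sketch below is illustrative only; run_scenarios() and record_testcase() are hypothetical stand-ins for Task._run() and Task.start(), not Yardstick code.

    import logging

    LOG = logging.getLogger(__name__)


    def run_scenarios(scenarios):
        """Run every scenario; collect results plus an overall success flag."""
        task_success = True
        result = []
        for scenario in scenarios:
            status, data = scenario()  # stand-in for runner_join() plus runner output
            result.append(data)
            if status != 0:
                # Log and keep going instead of raising, so partial results survive.
                LOG.error("runner status %s", status)
                task_success = False
        return task_success, result


    def record_testcase(case_name, scenarios, testcases):
        """Caller side, mirroring Task.start(): PASS only if every runner succeeded."""
        success, data = run_scenarios(scenarios)
        criteria = 'PASS' if success else 'FAIL'
        testcases[case_name] = {'criteria': criteria, 'tc_data': data}
        return testcases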
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 955b8cae2..697cc007f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -112,9 +112,9 @@ class Task(object):  # pragma: no cover
                 continue

             try:
-                data = self._run(tasks[i]['scenarios'],
-                                 tasks[i]['run_in_parallel'],
-                                 output_config)
+                success, data = self._run(tasks[i]['scenarios'],
+                                          tasks[i]['run_in_parallel'],
+                                          output_config)
             except KeyboardInterrupt:
                 raise
             except Exception:  # pylint: disable=broad-except
@@ -123,9 +123,15 @@ class Task(object):  # pragma: no cover
                 testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
                                                     'tc_data': []}
             else:
-                LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
-                testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
-                                                    'tc_data': data}
+                if success:
+                    LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+                    testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+                                                        'tc_data': data}
+                else:
+                    LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+                              exc_info=True)
+                    testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+                                                        'tc_data': data}

             if args.keep_deploy:
                 # keep deployment, forget about stack
@@ -240,6 +246,7 @@ class Task(object):  # pragma: no cover

         background_runners = []

+        task_success = True
         result = []
         # Start all background scenarios
         for scenario in filter(_is_background_scenario, scenarios):
@@ -258,8 +265,8 @@ class Task(object):  # pragma: no cover
             for runner in runners:
                 status = runner_join(runner, background_runners, self.outputs, result)
                 if status != 0:
-                    raise RuntimeError(
-                        "{0} runner status {1}".format(runner.__execution_type__, status))
+                    LOG.error("%s runner status %s", runner.__execution_type__, status)
+                    task_success = False
                 LOG.info("Runner ended")
         else:
             # run serially
@@ -271,8 +278,8 @@ class Task(object):  # pragma: no cover
                         LOG.error('Scenario NO.%s: "%s" ERROR!',
                                   scenarios.index(scenario) + 1,
                                   scenario.get('type'))
-                        raise RuntimeError(
-                            "{0} runner status {1}".format(runner.__execution_type__, status))
+                        LOG.error("%s runner status %s", runner.__execution_type__, status)
+                        task_success = False
                     LOG.info("Runner ended")

         # Abort background runners
@@ -289,7 +296,7 @@ class Task(object):  # pragma: no cover
             base_runner.Runner.release(runner)

             print("Background task ended")
-        return result
+        return task_success, result

     def atexit_handler(self):
         """handler for process termination"""
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 9ac55471d..1fadd2532 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -26,7 +26,6 @@ class ScenarioGeneral(base.Scenario):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.intermediate_variables = {}
-        self.pass_flag = True

     def setup(self):
         self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -47,7 +46,7 @@
                     step['actionType'], step['actionKey'])
                 if actionRollbacker:
                     self.director.executionSteps.append(actionRollbacker)
-            except Exception:
+            except Exception:  # pylint: disable=broad-except
                 LOG.exception("Exception")
                 LOG.debug(
                     "\033[91m exception when running step: %s .... \033[0m",
@@ -59,31 +58,16 @@ class ScenarioGeneral(base.Scenario):
         self.director.stopMonitors()

         verify_result = self.director.verify()
-
-        self.director.store_result(result)
-
         for k, v in self.director.data.items():
             if v == 0:
                 result['sla_pass'] = 0
                 verify_result = False
-                self.pass_flag = False
-                LOG.info(
-                    "\033[92m The service process not found in the host \
-envrioment, the HA test case NOT pass")
+                LOG.info("\033[92m The service process (%s) not found in the host environment", k)

-        if verify_result:
-            result['sla_pass'] = 1
-            LOG.info(
-                "\033[92m Congratulations, "
-                "the HA test case PASS! \033[0m")
-        else:
-            result['sla_pass'] = 0
-            self.pass_flag = False
-            LOG.info(
-                "\033[91m Aoh, the HA test case FAIL,"
-                "please check the detail debug information! \033[0m")
+        result['sla_pass'] = 1 if verify_result else 0
+        self.director.store_result(result)
+
+        assert verify_result is True, "The HA test case NOT passed"

     def teardown(self):
         self.director.knockoff()
-
-        assert self.pass_flag, "The HA test case NOT passed"
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 6d0d812af..dcd0fe598 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,7 +29,6 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
-        self.pass_flag = True

     def setup(self):
         """scenario setup"""
@@ -73,18 +72,12 @@
         sla_pass = self.monitorMgr.verify_SLA()
         for k, v in self.data.items():
             if v == 0:
-                result['sla_pass'] = 0
-                self.pass_flag = False
-                LOG.info("The service process not found in the host envrioment, \
-the HA test case NOT pass")
-                return
+                sla_pass = False
+                LOG.info("The service process (%s) not found in the host envrioment", k)
+
+        result['sla_pass'] = 1 if sla_pass else 0
         self.monitorMgr.store_result(result)

-        if sla_pass:
-            result['sla_pass'] = 1
-            LOG.info("The HA test case PASS the SLA")
-        else:
-            result['sla_pass'] = 0
-            self.pass_flag = False
+        assert sla_pass is True, "The HA test case NOT pass the SLA"

         return
@@ -94,8 +87,6 @@ the HA test case NOT pass")
         for attacker in self.attackers:
             attacker.recover()

-        assert self.pass_flag, "The HA test case NOT passed"
-

 def _test():  # pragma: no cover
     """internal test function"""
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index 45840d569..d1172d5a6 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -14,7 +14,8 @@ from yardstick.benchmark.scenarios.availability import scenario_general


 class ScenarioGeneralTestCase(unittest.TestCase):
-    def setUp(self):
+    @mock.patch.object(scenario_general, 'Director')
+    def setUp(self, *args):
         self.scenario_cfg = {
             'type': "general_scenario",
             'options': {
@@ -36,32 +37,28 @@
             }
         }
         self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
-
-        self._mock_director = mock.patch.object(scenario_general, 'Director')
-        self.mock_director = self._mock_director.start()
-        self.addCleanup(self._stop_mock)
-
-    def _stop_mock(self):
-        self._mock_director.stop()
+        self.instance.setup()
+        self.instance.director.verify.return_value = True

     def test_scenario_general_all_successful(self):
-        self.instance.setup()
-        self.instance.run({})
+
+        ret = {}
+        self.instance.run(ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 1)

     def test_scenario_general_exception(self):
-        mock_obj = mock.Mock()
-        mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
-        self.instance.director = mock_obj
+        self.instance.director.createActionPlayer.side_effect = KeyError('Wrong')
         self.instance.director.data = {}
-        self.instance.run({})
+        ret = {}
+        self.instance.run(ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 1)

     def test_scenario_general_case_fail(self):
-        mock_obj = mock.Mock()
-        mock_obj.verify.return_value = False
-        self.instance.director = mock_obj
+        self.instance.director.verify.return_value = False
         self.instance.director.data = {}
-        self.instance.run({})
-        self.instance.pass_flag = True
+        ret = {}
+        self.assertRaises(AssertionError, self.instance.run, ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 6bb3ec63b..dd656fbd5 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -60,15 +60,16 @@ class ServicehaTestCase(unittest.TestCase):
         p.setup()
         self.assertTrue(p.setup_done)

-    # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
-    #     p = serviceha.ServiceHA(self.args, self.ctx)
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_run_sla_error(self, mock_monitor, *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)

-    #     p.setup()
-    #     self.assertEqual(p.setup_done, True)
+        p.setup()
+        self.assertEqual(p.setup_done, True)

-    #     result = {}
-    #     result["outage_time"] = 10
-    #     mock_monitor.Monitor().get_result.return_value = result
+        mock_monitor.MonitorMgr().verify_SLA.return_value = False

-    #     ret = {}
-    #     self.assertRaises(AssertionError, p.run, ret)
+        ret = {}
+        self.assertRaises(AssertionError, p.run, ret)
+        self.assertEqual(ret['sla_pass'], 0)
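The availability scenarios follow the same idea: ServiceHA.run() and ScenarioGeneral.run() now write sla_pass into the result dict and store it before failing, and the assertion that used to live in teardown() (guarded by pass_flag) moves into run() itself. A minimal sketch of that "store first, assert last" pattern is shown below; ha_scenario_run() and the 'nova-api' key are hypothetical stand-ins, not the real scenario API.

    def ha_scenario_run(result, sla_pass, process_counts):
        """Record the SLA outcome in `result`, then signal failure via AssertionError."""
        for count in process_counts.values():
            if count == 0:
                # A monitored service process disappeared from the host.
                sla_pass = False
        result['sla_pass'] = 1 if sla_pass else 0
        # Asserting only after the result is stored keeps the data usable for
        # callers even when the HA test case fails.
        assert sla_pass is True, "The HA test case NOT pass the SLA"


    # Caller-side handling, mirroring the updated unit tests.
    result = {}
    try:
        ha_scenario_run(result, sla_pass=True, process_counts={'nova-api': 0})
    except AssertionError:
        pass
    assert result['sla_pass'] == 0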