From 736f3fa5d52345d6fe5174b83de043f779fa0600 Mon Sep 17 00:00:00 2001
From: rexlee8776
Date: Tue, 19 Jun 2018 10:40:20 +0000
Subject: Bugfix: HA kill process recovery has a conflict

This happens on the Nokia SUT when running in the Plugfest. The problem
occurs when start_process begins to recover the killed process (such as
nova-api) while the SUT's self-curing mechanism has already recovered it,
and the two recoveries conflict and cause failures. The recovery step of
the HA attack-recover should therefore only run when it is actually
needed, i.e. when the SLA check has failed.

JIRA: YARDSTICK-1222

Change-Id: I1acb5a7d59d6fe4e0de0b0c5942fa89e051dd1ff
Signed-off-by: rexlee8776
---
 .../benchmark/scenarios/availability/serviceha.py | 19 +++++++++++--------
 .../scenarios/availability/test_serviceha.py      | 22 ++++++++++++++++++++++
 2 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 76721e38c..7f976fdbc 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
+        self.sla_pass = False
 
     def setup(self):
         """scenario setup"""
@@ -69,26 +70,28 @@ class ServiceHA(base.Scenario):
         self.monitorMgr.wait_monitors()
         LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
-        sla_pass = self.monitorMgr.verify_SLA()
+        self.sla_pass = self.monitorMgr.verify_SLA()
         service_not_found = False
         for k, v in self.data.items():
             if v == 0:
-                sla_pass = False
+                self.sla_pass = False
                 service_not_found = True
                 LOG.info("The service process (%s) not found in the host envrioment", k)
 
-        result['sla_pass'] = 1 if sla_pass else 0
+        result['sla_pass'] = 1 if self.sla_pass else 0
         self.monitorMgr.store_result(result)
 
         self.verify_SLA(
-            sla_pass, ("a service process was not found in the host "
-                       "environment" if service_not_found
-                       else "MonitorMgr.verify_SLA() failed"))
+            self.sla_pass, ("a service process was not found in the host "
+                            "environment" if service_not_found
+                            else "MonitorMgr.verify_SLA() failed"))
 
     def teardown(self):
         """scenario teardown"""
-        for attacker in self.attackers:
-            attacker.recover()
+        # only recover when sla not pass
+        if not self.sla_pass:
+            for attacker in self.attackers:
+                attacker.recover()
 
 
 def _test():  # pragma: no cover
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index cf1e76d7a..ec0e5973c 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -43,6 +43,13 @@ class ServicehaTestCase(unittest.TestCase):
         }
         sla = {"outage_time": 5}
         self.args = {"options": options, "sla": sla}
+        self.test__serviceha = serviceha.ServiceHA(self.args, self.ctx)
+
+    def test___init__(self):
+
+        self.assertEqual(self.test__serviceha.data, {})
+        self.assertFalse(self.test__serviceha.setup_done)
+        self.assertFalse(self.test__serviceha.sla_pass)
 
     # NOTE(elfoley): This should be split into test_setup and test_run
     # NOTE(elfoley): This should explicitly test outcomes and states
@@ -90,3 +97,18 @@ class ServicehaTestCase(unittest.TestCase):
         ret = {}
         self.assertRaises(y_exc.SLAValidationError, p.run, ret)
         self.assertEqual(ret['sla_pass'], 0)
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_no_teardown_when_sla_pass(self, mock_monitor,
+                                                  *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+        p.setup()
+        self.assertTrue(p.setup_done)
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+        ret = {}
+        p.run(ret)
+        attacker = mock.Mock()
+        p.attackers = [attacker]
+        p.teardown()
+        attacker.recover.assert_not_called()
-- 
cgit 1.2.3-korg
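
Not part of the patch itself, but for readers who want to exercise the intended
teardown behaviour in isolation, here is a minimal, runnable Python sketch.
GuardedTeardown and its constructor arguments are hypothetical stand-ins for
ServiceHA (which normally fills attackers in setup() and sets sla_pass in
run()); only the "if not self.sla_pass" guard mirrors the code added above.

# sketch_guarded_teardown.py - illustrative sketch only, not part of the patch
from unittest import mock


class GuardedTeardown(object):
    """Hypothetical stand-in for ServiceHA's teardown logic."""

    def __init__(self, attackers, sla_pass):
        self.attackers = attackers      # normally populated by setup()
        self.sla_pass = sla_pass        # normally set by run()

    def teardown(self):
        # only recover when sla not pass (the guard introduced by this patch)
        if not self.sla_pass:
            for attacker in self.attackers:
                attacker.recover()


if __name__ == '__main__':
    # SLA passed: the self-cured process must not be recovered a second time.
    attacker = mock.Mock()
    GuardedTeardown([attacker], sla_pass=True).teardown()
    attacker.recover.assert_not_called()

    # SLA failed: recovery still runs for every attacker.
    attacker = mock.Mock()
    GuardedTeardown([attacker], sla_pass=False).teardown()
    attacker.recover.assert_called_once_with()
    print("guard behaves as expected")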