author     rexlee8776 <limingjiang@huawei.com>       2018-06-19 10:40:20 +0000
committer  Emma Foley <emma.l.foley@intel.com>       2018-06-26 16:38:19 +0100
commit     c9a9dcae22045cb0392f8445d9d4e00753f934b5
tree       7e2a6b89f64ea0425a3aec0ca354cd8896f54de8
parent     18657d0ffc9cbc46a17f1a15cc8e884b1124ebbc
Bugfix: HA kill process recovery has a conflict
This was observed on the Nokia SUT while running in the Plugfest. The problem
occurs when start_process begins to recover the killed process (e.g. nova-api)
after the SUT's self-healing mechanism has already recovered it; the two
recovery paths conflict and cause failures.
The recovery step of the HA attack-recover flow should therefore be improved
to recover the service only when it actually needs to, i.e. when the SLA check
has failed (see the sketch after the commit trailers below).
JIRA: YARDSTICK-1222
Change-Id: I1acb5a7d59d6fe4e0de0b0c5942fa89e051dd1ff
Signed-off-by: rexlee8776 <limingjiang@huawei.com>
(cherry picked from commit 736f3fa5d52345d6fe5174b83de043f779fa0600)
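In essence, the patch stores the SLA verdict on the scenario object and makes
teardown() recover the attacked service only when the SLA failed, so the manual
recovery no longer races with the SUT's self-healing. A minimal, self-contained
sketch of that pattern (class and parameter names here are simplified,
illustrative stand-ins, not Yardstick classes; the authoritative change is the
serviceha.py diff below):

    # Sketch of the guarded-recovery pattern introduced by this patch.
    class FakeAttacker(object):
        """Illustrative stand-in for a Yardstick attacker."""
        def __init__(self):
            self.recovered = False

        def recover(self):
            self.recovered = True


    class HAScenarioSketch(object):
        def __init__(self, attackers):
            self.attackers = attackers
            self.sla_pass = False  # persisted so teardown() can consult it

        def run(self, sla_ok):
            # In the real scenario this verdict comes from
            # monitorMgr.verify_SLA() plus the process-count checks.
            self.sla_pass = sla_ok

        def teardown(self):
            # Recover only on SLA failure: if the SUT's self-healing already
            # restored the service, a second recovery could conflict with it.
            if not self.sla_pass:
                for attacker in self.attackers:
                    attacker.recover()


    if __name__ == "__main__":
        attacker = FakeAttacker()
        scenario = HAScenarioSketch([attacker])
        scenario.run(sla_ok=True)   # SLA passed: the service self-healed
        scenario.teardown()
        assert not attacker.recovered  # no manual recovery was attempted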
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py                  15
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py  22
2 files changed, 31 insertions(+), 6 deletions(-)
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index b6e840143..ee2eeb007 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
+        self.sla_pass = False
 
     def setup(self):
         """scenario setup"""
@@ -69,23 +70,25 @@ class ServiceHA(base.Scenario):
         self.monitorMgr.wait_monitors()
         LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
-        sla_pass = self.monitorMgr.verify_SLA()
+        self.sla_pass = self.monitorMgr.verify_SLA()
 
         for k, v in self.data.items():
             if v == 0:
-                sla_pass = False
+                self.sla_pass = False
                 LOG.info("The service process (%s) not found in the host environment", k)
 
-        result['sla_pass'] = 1 if sla_pass else 0
+        result['sla_pass'] = 1 if self.sla_pass else 0
         self.monitorMgr.store_result(result)
 
-        assert sla_pass is True, "The HA test case NOT pass the SLA"
+        assert self.sla_pass is True, "The HA test case NOT pass the SLA"
 
         return
 
     def teardown(self):
         """scenario teardown"""
-        for attacker in self.attackers:
-            attacker.recover()
+        # only recover when sla not pass
+        if not self.sla_pass:
+            for attacker in self.attackers:
+                attacker.recover()
 
 
 def _test():    # pragma: no cover
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index dd656fbd5..5c9bda5e1 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -42,6 +42,13 @@ class ServicehaTestCase(unittest.TestCase):
         }
         sla = {"outage_time": 5}
         self.args = {"options": options, "sla": sla}
+        self.test__serviceha = serviceha.ServiceHA(self.args, self.ctx)
+
+    def test___init__(self):
+
+        self.assertEqual(self.test__serviceha.data, {})
+        self.assertFalse(self.test__serviceha.setup_done)
+        self.assertFalse(self.test__serviceha.sla_pass)
 
     # NOTE(elfoley): This should be split into test_setup and test_run
     # NOTE(elfoley): This should explicitly test outcomes and states
@@ -73,3 +80,18 @@ class ServicehaTestCase(unittest.TestCase):
         ret = {}
         self.assertRaises(AssertionError, p.run, ret)
         self.assertEqual(ret['sla_pass'], 0)
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_no_teardown_when_sla_pass(self, mock_monitor,
+                                                  *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+        p.setup()
+        self.assertTrue(p.setup_done)
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+        ret = {}
+        p.run(ret)
+        attacker = mock.Mock()
+        p.attackers = [attacker]
+        p.teardown()
+        attacker.recover.assert_not_called()
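To exercise the new unit tests locally, the standard library runner should
suffice (assuming a Yardstick development checkout with its test dependencies
installed; the project normally drives these tests through tox):

    python -m unittest yardstick.tests.unit.benchmark.scenarios.availability.test_serviceha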