From f41d7301d4c736fb8b3e734cad72107d9154e59f Mon Sep 17 00:00:00 2001 From: rexlee8776 Date: Sat, 26 Aug 2017 01:37:10 +0000 Subject: Bugfix: ha test case criteria pass when sla not pass ha test cases didn't store monitor info and report fail when sla didn't pass Change-Id: I0e5637e37a66e1bf03b47fe09d17e0a1acfa11c1 Signed-off-by: rexlee8776 --- .../scenarios/availability/test_basemonitor.py | 45 ++++++++++++++++++---- .../benchmark/scenarios/availability/director.py | 5 +++ .../scenarios/availability/monitor/basemonitor.py | 15 +++++++- .../availability/monitor/monitor_multi.py | 3 +- .../scenarios/availability/scenario_general.py | 2 + .../benchmark/scenarios/availability/serviceha.py | 1 + 6 files changed, 60 insertions(+), 11 deletions(-) diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py index 3b7e07376..92ae8aa88 100644 --- a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py +++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py @@ -25,13 +25,32 @@ from yardstick.benchmark.scenarios.availability.monitor import basemonitor class MonitorMgrTestCase(unittest.TestCase): def setUp(self): - config = { - 'monitor_type': 'openstack-api', - 'key': 'service-status' - } - - self.monitor_configs = [] - self.monitor_configs.append(config) + self.monitor_configs = [ + { + "monitor_type": "openstack-cmd", + "command_name": "openstack router list", + "monitor_time": 10, + "monitor_number": 3, + "sla": { + "max_outage_time": 5 + } + }, + { + "monitor_type": "process", + "process_name": "neutron-server", + "host": "node1", + "monitor_time": 20, + "monitor_number": 3, + "sla": { + "max_recover_time": 20 + } + } + ] + self.MonitorMgr = basemonitor.MonitorMgr([]) + self.MonitorMgr.init_monitors(self.monitor_configs, None) + self.monitor_list = self.MonitorMgr._monitor_list + for mo in self.monitor_list: + mo._result = {"outage_time": 10} def 
test__MonitorMgr_setup_successful(self, mock_monitor): instance = basemonitor.MonitorMgr({"nova-api": 10}) @@ -44,7 +63,13 @@ class MonitorMgrTestCase(unittest.TestCase): def test_MonitorMgr_getitem(self, mock_monitor): monitorMgr = basemonitor.MonitorMgr({"nova-api": 10}) monitorMgr.init_monitors(self.monitor_configs, None) - monitorIns = monitorMgr['service-status'] + + def test_store_result(self, mock_monitor): + expect = {'process_neutron-server_outage_time': 10, + 'openstack-router-list_outage_time': 10} + result = {} + self.MonitorMgr.store_result(result) + self.assertDictEqual(result, expect) class BaseMonitorTestCase(unittest.TestCase): @@ -94,3 +119,7 @@ class BaseMonitorTestCase(unittest.TestCase): except Exception: pass self.assertIsNone(cls) + + +if __name__ == "__main__": + unittest.main() diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py index c9187c34d..f152af090 100644 --- a/yardstick/benchmark/scenarios/availability/director.py +++ b/yardstick/benchmark/scenarios/availability/director.py @@ -111,3 +111,8 @@ class Director(object): while self.executionSteps: singleStep = self.executionSteps.pop() singleStep.rollback() + + def store_result(self, result): + LOG.debug("store result ....") + if hasattr(self, 'monitorMgr'): + self.monitorMgr.store_result(result) diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py index 0027925d6..a6c1a28bd 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py +++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py @@ -44,7 +44,11 @@ class MonitorMgr(object): monitor_ins = monitor_cls(monitor_cfg, context, self.monitor_mgr_data) if "key" in monitor_cfg: - monitor_ins.key = monitor_cfg["key"] + monitor_ins.tag = monitor_ins.key = monitor_cfg["key"] + elif monitor_type == "openstack-cmd": + monitor_ins.tag = 
monitor_cfg["command_name"].replace(" ", "-") + elif monitor_type == "process": + monitor_ins.tag = monitor_type + "_" + monitor_cfg["process_name"] self._monitor_list.append(monitor_ins) def __getitem__(self, item): @@ -67,6 +71,12 @@ class MonitorMgr(object): sla_pass = sla_pass & monitor.verify_SLA() return sla_pass + def store_result(self, result): + for monitor in self._monitor_list: + monitor_result = monitor.get_result() + for k, v in monitor_result.items(): + result[monitor.tag + "_" + k] = v + class BaseMonitor(multiprocessing.Process): """docstring for BaseMonitor""" @@ -83,6 +93,7 @@ class BaseMonitor(multiprocessing.Process): self._event = multiprocessing.Event() self.monitor_data = data self.setup_done = False + self.tag = "" @staticmethod def get_monitor_cls(monitor_type): @@ -164,5 +175,5 @@ class BaseMonitor(multiprocessing.Process): def verify_SLA(self): pass - def result(self): + def get_result(self): return self._result diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py index d7d1545da..dce69f45f 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py +++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py @@ -47,7 +47,7 @@ class MultiMonitor(basemonitor.BaseMonitor): last_outage = 0 for monitor in self.monitors: - monitor_result = monitor.result() + monitor_result = monitor.get_result() monitor_first_outage = monitor_result.get('first_outage', 0) monitor_last_outage = monitor_result.get('last_outage', 0) @@ -71,6 +71,7 @@ class MultiMonitor(basemonitor.BaseMonitor): max_outage_time = self._config["sla"]["max_recover_time"] else: raise RuntimeError("monitor max_outage_time config is not found") + self._result = {"outage_time": outage_time} if outage_time > max_outage_time: LOG.error("SLA failure: %f > %f", outage_time, max_outage_time) diff --git 
a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py index c7ed1d6ec..9ac55471d 100644 --- a/yardstick/benchmark/scenarios/availability/scenario_general.py +++ b/yardstick/benchmark/scenarios/availability/scenario_general.py @@ -60,6 +60,8 @@ class ScenarioGeneral(base.Scenario): verify_result = self.director.verify() + self.director.store_result(result) + for k, v in self.director.data.items(): if v == 0: result['sla_pass'] = 0 diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py index d0f5e9e4d..6d0d812af 100755 --- a/yardstick/benchmark/scenarios/availability/serviceha.py +++ b/yardstick/benchmark/scenarios/availability/serviceha.py @@ -78,6 +78,7 @@ class ServiceHA(base.Scenario): LOG.info("The service process not found in the host envrioment, \ the HA test case NOT pass") return + self.monitorMgr.store_result(result) if sla_pass: result['sla_pass'] = 1 LOG.info("The HA test case PASS the SLA") -- cgit 1.2.3-korg