From 9a372b8adf618981e4fbfb8d0cb3a48674c65d92 Mon Sep 17 00:00:00 2001
From: JingLu5
Date: Thu, 25 May 2017 08:48:15 +0000
Subject: Improve monitor_process pass criteria

JIRA: YARDSTICK-660

The pass criterion of monitor_func() in monitor_process.py is currently
whether at least one process of the specified controller-node service has
recovered. In reality, it is more reasonable to check whether the processes
have recovered to their original count. This patch improves that criterion.

Change-Id: I950ce2a89555801b96092735b0d670e892049927
Signed-off-by: JingLu5
(cherry picked from commit 37921fcd232cd2fbba9f45ef9fa5d8c912f54af6)
---
 .../benchmark/scenarios/availability/test_attacker_process.py |  4 ++--
 .../unit/benchmark/scenarios/availability/test_basemonitor.py | 10 +++++-----
 .../benchmark/scenarios/availability/test_monitor_command.py  |  6 +++---
 .../benchmark/scenarios/availability/test_monitor_general.py  |  6 +++---
 .../benchmark/scenarios/availability/test_monitor_multi.py    |  4 ++--
 .../benchmark/scenarios/availability/test_monitor_process.py  |  4 ++--
 .../scenarios/availability/attacker/attacker_process.py       |  5 ++---
 .../benchmark/scenarios/availability/attacker/baseattacker.py |  4 ++++
 yardstick/benchmark/scenarios/availability/director.py        |  9 ++++++---
 .../benchmark/scenarios/availability/monitor/basemonitor.py   |  9 ++++++---
 .../benchmark/scenarios/availability/monitor/monitor_multi.py |  8 +++++---
 .../scenarios/availability/monitor/monitor_process.py         |  7 +++++--
 yardstick/benchmark/scenarios/availability/serviceha.py       | 10 +++++++++-
 13 files changed, 54 insertions(+), 32 deletions(-)

diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
index eec512a58..0a8e8322a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
@@ -41,7 +41,7 @@ class AttackerServiceTestCase(unittest.TestCase):
         cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
         ins = cls(self.attacker_cfg, self.context)
 
-        mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
+        mock_ssh.SSH.from_node().execute.return_value = (0, "10", '')
         ins.setup()
         ins.inject_fault()
         ins.recover()
@@ -51,5 +51,5 @@ class AttackerServiceTestCase(unittest.TestCase):
         cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
         ins = cls(self.attacker_cfg, self.context)
 
-        mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
+        mock_ssh.SSH.from_node().execute.return_value = (0, None, '')
         ins.setup()
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index 7030c7849..3b7e07376 100644
--- a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -34,7 +34,7 @@ class MonitorMgrTestCase(unittest.TestCase):
             self.monitor_configs.append(config)
 
     def test__MonitorMgr_setup_successful(self, mock_monitor):
-        instance = basemonitor.MonitorMgr()
+        instance = basemonitor.MonitorMgr({"nova-api": 10})
         instance.init_monitors(self.monitor_configs, None)
         instance.start_monitors()
         instance.wait_monitors()
@@ -42,7 +42,7 @@ class MonitorMgrTestCase(unittest.TestCase):
         ret = instance.verify_SLA()
 
     def test_MonitorMgr_getitem(self, mock_monitor):
-        monitorMgr = basemonitor.MonitorMgr()
+        monitorMgr = basemonitor.MonitorMgr({"nova-api": 10})
         monitorMgr.init_monitors(self.monitor_configs, None)
 
         monitorIns = monitorMgr['service-status']
@@ -67,12 +67,12 @@ class BaseMonitorTestCase(unittest.TestCase):
         }
 
     def test__basemonitor_start_wait_successful(self):
-        ins = basemonitor.BaseMonitor(self.monitor_cfg, None)
+        ins = basemonitor.BaseMonitor(self.monitor_cfg, None, {"nova-api": 10})
         ins.start_monitor()
         ins.wait_monitor()
 
     def test__basemonitor_all_successful(self):
-        ins = self.MonitorSimple(self.monitor_cfg, None)
+        ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
         ins.setup()
         ins.run()
         ins.verify_SLA()
@@ -81,7 +81,7 @@ class BaseMonitorTestCase(unittest.TestCase):
         'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
         '.multiprocessing')
     def test__basemonitor_func_false(self, mock_multiprocess):
-        ins = self.MonitorSimple(self.monitor_cfg, None)
+        ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
         ins.setup()
         mock_multiprocess.Event().is_set.return_value = False
         ins.run()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
index c179bbfaf..2ed4be731 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -59,7 +59,7 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
 
     def test__monitor_command_monitor_func_successful(self, mock_subprocess):
-        instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+        instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
         instance.setup()
         mock_subprocess.check_output.return_value = (0, 'unittest')
         ret = instance.monitor_func()
@@ -69,7 +69,7 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
 
     def test__monitor_command_monitor_func_failure(self, mock_subprocess):
         mock_subprocess.check_output.return_value = (1, 'unittest')
-        instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+        instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
         instance.setup()
         mock_subprocess.check_output.side_effect = RuntimeError
         ret = instance.monitor_func()
@@ -85,7 +85,7 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
 
         self.config["host"] = "node1"
         instance = monitor_command.MonitorOpenstackCmd(
-            self.config, self.context)
+            self.config, self.context, {"nova-api": 10})
         instance.setup()
         mock_ssh.SSH.from_node().execute.return_value = (0, "0", '')
         ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
index 169b630bf..c14f073ec 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
@@ -50,7 +50,7 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
         }
 
     def test__monitor_general_all_successful(self, mock_open, mock_ssh):
-        ins = monitor_general.GeneralMonitor(self.monitor_cfg, self.context)
+        ins = monitor_general.GeneralMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
         ins.setup()
 
         mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
@@ -61,7 +61,7 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
     def test__monitor_general_all_successful_noparam(self, mock_open,
                                                      mock_ssh):
         ins = monitor_general.GeneralMonitor(
-            self.monitor_cfg_noparam, self.context)
+            self.monitor_cfg_noparam, self.context, {"nova-api": 10})
         ins.setup()
 
         mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
@@ -71,7 +71,7 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
 
     def test__monitor_general_failure(self, mock_open, mock_ssh):
         ins = monitor_general.GeneralMonitor(
-            self.monitor_cfg_noparam, self.context)
+            self.monitor_cfg_noparam, self.context, {"nova-api": 10})
         ins.setup()
 
         mock_ssh.SSH.from_node().execute.return_value = (1, "error", 'error')
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
index 5719f286a..f8d12bd29 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
@@ -42,7 +42,7 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
         }
 
     def test__monitor_multi_all_successful(self, mock_open, mock_ssh):
-        ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context)
+        ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
 
         mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
 
@@ -51,7 +51,7 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
         ins.verify_SLA()
 
     def test__monitor_multi_all_fail(self, mock_open, mock_ssh):
-        ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context)
+        ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
 
         mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
 
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
index 8c267e413..41ce5445e 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -40,7 +40,7 @@ class MonitorProcessTestCase(unittest.TestCase):
 
     def test__monitor_process_all_successful(self, mock_ssh):
 
-        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context, {"nova-api": 10})
 
         mock_ssh.SSH.from_node().execute.return_value = (0, "1", '')
         ins.setup()
@@ -50,7 +50,7 @@ class MonitorProcessTestCase(unittest.TestCase):
 
     def test__monitor_process_down_failuer(self, mock_ssh):
 
-        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context, {"nova-api": 10})
 
         mock_ssh.SSH.from_node().execute.return_value = (0, "0", '')
         ins.setup()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index bff4a6dc3..e0e6cf3bf 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -38,8 +38,7 @@ class ProcessAttacker(BaseAttacker):
         self.recovery_script = self.get_script_fullpath(
             self.fault_cfg['recovery_script'])
 
-        if self.check():
-            self.setup_done = True
+        self.data[self.service_name] = self.check()
 
     def check(self):
         with open(self.check_script, "r") as stdin_file:
@@ -49,7 +48,7 @@ class ProcessAttacker(BaseAttacker):
 
         if stdout:
             LOG.info("check the envrioment success!")
-            return True
+            return int(stdout.strip('\n'))
         else:
             LOG.error(
                 "the host envrioment is error, stdout:%s, stderr:%s",
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index ca2324055..7b3d8b0be 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -25,6 +25,7 @@ class AttackerMgr(object):
 
     def __init__(self):
         self._attacker_list = []
+        self.data = {}
 
     def init_attackers(self, attacker_cfgs, context):
         LOG.debug("attackerMgr confg: %s", attacker_cfgs)
@@ -35,6 +36,8 @@ class AttackerMgr(object):
             attacker_ins.key = cfg['key']
             attacker_ins.setup()
             self._attacker_list.append(attacker_ins)
+            self.data = dict(self.data.items() + attacker_ins.data.items())
+        return self.data
 
     def __getitem__(self, item):
         for obj in self._attacker_list:
@@ -57,6 +60,7 @@ class BaseAttacker(object):
 
         self._config = config
         self._context = context
+        self.data = {}
         self.setup_done = False
 
     @staticmethod
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 76fcc0e7f..e0d05ebf5 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Director(object):
     """ Director is used to direct a test scenaio
-    including the creation of action players, test result verification 
+    including the creation of action players, test result verification
       and rollback of actions.
     """
@@ -33,6 +33,7 @@ class Director(object):
         # A stack store Rollbacker that will be called after
         # all actionplayers finish.
         self.executionSteps = []
+        self.data = {}
 
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
@@ -42,12 +43,14 @@ class Director(object):
             LOG.debug("start init attackers...")
             attacker_cfgs = self.scenario_cfg["options"]["attackers"]
             self.attackerMgr = baseattacker.AttackerMgr()
-            self.attackerMgr.init_attackers(attacker_cfgs, nodes)
+            self.data = self.attackerMgr.init_attackers(attacker_cfgs,
+                                                        nodes)
+
         # setup monitors
         if "monitors" in self.scenario_cfg["options"]:
             LOG.debug("start init monitors...")
             monitor_cfgs = self.scenario_cfg["options"]["monitors"]
-            self.monitorMgr = basemonitor.MonitorMgr()
+            self.monitorMgr = basemonitor.MonitorMgr(self.data)
             self.monitorMgr.init_monitors(monitor_cfgs, nodes)
         # setup operations
         if "operations" in self.scenario_cfg["options"]:
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index a0fc5965b..ba3370003 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -25,8 +25,9 @@ monitor_conf_path = pkg_resources.resource_filename(
 class MonitorMgr(object):
     """docstring for MonitorMgr"""
 
-    def __init__(self):
+    def __init__(self, data):
         self._monitor_list = []
+        self.monitor_mgr_data = data
 
     def init_monitors(self, monitor_cfgs, context):
         LOG.debug("monitorMgr config: %s", monitor_cfgs)
@@ -39,7 +40,8 @@ class MonitorMgr(object):
 
             if monitor_number > 1:
                 monitor_cls = BaseMonitor.get_monitor_cls("multi-monitor")
-            monitor_ins = monitor_cls(monitor_cfg, context)
+            monitor_ins = monitor_cls(monitor_cfg, context,
+                                      self.monitor_mgr_data)
             if "key" in monitor_cfg:
                 monitor_ins.key = monitor_cfg["key"]
             self._monitor_list.append(monitor_ins)
@@ -69,7 +71,7 @@ class BaseMonitor(multiprocessing.Process):
     """docstring for BaseMonitor"""
     monitor_cfgs = {}
 
-    def __init__(self, config, context):
+    def __init__(self, config, context, data):
         if not BaseMonitor.monitor_cfgs:
             with open(monitor_conf_path) as stream:
                 BaseMonitor.monitor_cfgs = yaml.load(stream)
@@ -78,6 +80,7 @@ class BaseMonitor(multiprocessing.Process):
         self._context = context
         self._queue = multiprocessing.Queue()
         self._event = multiprocessing.Event()
+        self.monitor_data = data
         self.setup_done = False
 
     @staticmethod
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index 8df2ea282..3386c5a1c 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -20,16 +20,18 @@ class MultiMonitor(basemonitor.BaseMonitor):
 
     __monitor_type__ = "multi-monitor"
 
-    def __init__(self, config, context):
-        super(MultiMonitor, self).__init__(config, context)
+    def __init__(self, config, context, data):
+        super(MultiMonitor, self).__init__(config, context, data)
 
         self.monitors = []
+        self.monitor_data = data
         monitor_type = self._config["monitor_type"]
         monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
 
         monitor_number = self._config.get("monitor_number", 1)
         for i in range(monitor_number):
-            monitor_ins = monitor_cls(self._config, self._context)
+            monitor_ins = monitor_cls(self._config, self._context,
+                                      self.monitor_data)
             self.monitors.append(monitor_ins)
 
     def start_monitor(self):
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index 31526b011..b0f6f8e9d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -35,10 +35,13 @@ class MonitorProcess(basemonitor.BaseMonitor):
         exit_status, stdout, stderr = self.connection.execute(
             "sudo /bin/sh -s {0}".format(self.process_name),
             stdin=stdin_file)
-        if not stdout or int(stdout) <= 0:
-            LOG.info("the process (%s) is not running!", self.process_name)
+
+        if not stdout or int(stdout) < self.monitor_data[self.process_name]:
+            LOG.info("the (%s) processes are in recovery!", self.process_name)
             return False
 
+        LOG.info("the (%s) processes have been fully recovered!",
+                 self.process_name)
         return True
 
     def verify_SLA(self):
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 69727de2b..2e829714d 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -28,6 +28,7 @@ class ServiceHA(base.Scenario):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.setup_done = False
+        self.data = {}
 
     def setup(self):
         """scenario setup"""
@@ -44,10 +45,11 @@ class ServiceHA(base.Scenario):
             attacker_ins = attacker_cls(attacker_cfg, nodes)
             attacker_ins.setup()
             self.attackers.append(attacker_ins)
+            self.data = dict(self.data.items() + attacker_ins.data.items())
 
         monitor_cfgs = self.scenario_cfg["options"]["monitors"]
 
-        self.monitorMgr = basemonitor.MonitorMgr()
+        self.monitorMgr = basemonitor.MonitorMgr(self.data)
         self.monitorMgr.init_monitors(monitor_cfgs, nodes)
 
         self.setup_done = True
@@ -68,6 +70,12 @@ class ServiceHA(base.Scenario):
         LOG.info("HA monitor stop!")
 
         sla_pass = self.monitorMgr.verify_SLA()
+        for k, v in self.data.items():
+            if self.data[k] == 0:
+                result['sla_pass'] = 0
+                LOG.info("The service process not found in the host envrioment, \
+the HA test case NOT pass")
+                return
         if sla_pass:
             result['sla_pass'] = 1
LOG.info("The HA test case PASS the SLA") -- cgit 1.2.3-korg