-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml                       |  4
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml                       |  4
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml                       |  4
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml                       |  1
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml                       |  2
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml                       |  5
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_process.py   |  4
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_basemonitor.py        | 10
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_command.py    |  6
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_general.py    |  6
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_multi.py      | 16
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_process.py    |  4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py |  5
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py    |  4
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py                 |  9
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py      |  9
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py    |  8
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py  |  7
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py                | 10
25 files changed, 92 insertions(+), 38 deletions(-)
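
At a glance, this change threads the baseline process count measured by the process attacker through to the monitors: ProcessAttacker.check() now returns the number of matching processes instead of a plain True, AttackerMgr and the Director collect those counts into a data dict keyed by service name, and MonitorProcess.monitor_func() only reports recovery once the current count is back at that baseline. Below is a minimal, hypothetical sketch of that data flow, with stand-in names rather than the actual Yardstick classes:

# Minimal sketch of the data flow introduced by this commit, using
# hypothetical stand-ins for ProcessAttacker / MonitorMgr / MonitorProcess.

def check_process_count(service_name, running_processes):
    # Stand-in for ProcessAttacker.check(): return the process count
    # instead of a bare True/False.
    return len([p for p in running_processes if p == service_name])


class ProcessRecoveryMonitor(object):
    # Stand-in for MonitorProcess: compare the current count against the
    # baseline recorded before the fault was injected.

    def __init__(self, process_name, baseline_counts):
        self.process_name = process_name
        self.baseline = baseline_counts  # e.g. {"nova-api": 10}

    def monitor_func(self, current_count):
        # Recovery is only complete once every instance is back.
        return current_count >= self.baseline[self.process_name]


running = ["nova-api"] * 10
baseline = {"nova-api": check_process_count("nova-api", running)}
monitor = ProcessRecoveryMonitor("nova-api", baseline)
print(monitor.monitor_func(4))    # False: still recovering
print(monitor.monitor_func(10))   # True: fully recovered
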
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
index 78fa7abca..5d3057dc3 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
@@ -28,12 +28,14 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "openstack server list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
- monitor_type: "process"
process_name: "nova-api"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml
index ec3d1c8d1..443395873 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml
@@ -26,15 +26,19 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "openstack image list"
monitor_time: 10
+ monitor_number: 3
- monitor_type: "openstack-cmd"
command_name: "openstack router list"
monitor_time: 10
+ monitor_number: 3
- monitor_type: "openstack-cmd"
command_name: "openstack stack list"
monitor_time: 10
+ monitor_number: 3
- monitor_type: "openstack-cmd"
command_name: "openstack volume list"
monitor_time: 10
+ monitor_number: 3
nodes:
node1: node1.LF
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
index cad6c591c..d851d5a71 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc045.yaml
@@ -27,12 +27,14 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "openstack router list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
- monitor_type: "process"
process_name: "neutron-server"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
index 38b475451..ad81a14ab 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc046.yaml
@@ -27,12 +27,14 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "openstack user list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
- monitor_type: "process"
process_name: "keystone"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml
index 53df7c64b..cc990363f 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc047.yaml
@@ -27,12 +27,14 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "openstack image list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
- monitor_type: "process"
process_name: "glance-api"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml
index 7457f9392..237f845be 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc048.yaml
@@ -27,12 +27,14 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "openstack volume list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
- monitor_type: "process"
process_name: "cinder-api"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml
index e55345ae2..ff3ac7529 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc049.yaml
@@ -27,12 +27,14 @@ scenarios:
- monitor_type: "openstack-cmd"
command_name: "swift stat"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
- monitor_type: "process"
process_name: "swift-proxy"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
index 1296c0592..0611fd2b3 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
@@ -70,6 +70,7 @@ scenarios:
key: "nova-image-list"
command_name: "openstack image list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -78,6 +79,7 @@ scenarios:
key: "neutron-router-list"
command_name: "openstack router list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -86,6 +88,7 @@ scenarios:
key: "heat-stack-list"
command_name: "openstack stack list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -94,6 +97,7 @@ scenarios:
key: "cinder-list"
command_name: "openstack volume list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
index cf2b51548..3921140f5 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc051.yaml
@@ -32,6 +32,7 @@ scenarios:
key: "nova-image-list"
command_name: "openstack image list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -40,6 +41,7 @@ scenarios:
key: "neutron-router-list"
command_name: "openstack router list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -48,6 +50,7 @@ scenarios:
key: "heat-stack-list"
command_name: "openstack stack list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -56,6 +59,7 @@ scenarios:
key: "cinder-list"
command_name: "openstack volume list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
index 4254e79b6..5907eb2b3 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
@@ -32,6 +32,7 @@ scenarios:
key: "nova-flavor-list"
command_name: "openstack flavor list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
index cc5b5cd3e..9e05cc895 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml
@@ -33,6 +33,7 @@ scenarios:
process_name: "haproxy"
host: node1
monitor_time: 20
+ monitor_number: 3
sla:
max_recover_time: 20
@@ -41,6 +42,7 @@ scenarios:
key: "list-images"
command_name: "openstack image list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
index b9bb889a1..6dd17f8b5 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml
@@ -35,6 +35,7 @@ scenarios:
key: "list-images"
command_name: "openstack image list"
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
@@ -44,6 +45,7 @@ scenarios:
key: "vip-mgmt-status"
host: node2
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
parameter:
@@ -55,6 +57,7 @@ scenarios:
key: "vip-routerp-status"
host: node2
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
parameter:
@@ -66,6 +69,7 @@ scenarios:
key: "vip-router-status"
host: node2
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
parameter:
@@ -77,6 +81,7 @@ scenarios:
key: "vip-pub"
host: node2
monitor_time: 10
+ monitor_number: 3
sla:
max_outage_time: 5
parameter:
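
The monitor_number: 3 lines added to the YAML test cases above set how many copies of each monitor run in parallel. As the basemonitor.py hunk further down shows, MonitorMgr instantiates the "multi-monitor" wrapper whenever this value is greater than 1. A simplified, hypothetical sketch of that selection step:

# Simplified sketch of how monitor_number drives monitor selection;
# the real logic lives in MonitorMgr.init_monitors (see the
# basemonitor.py hunk below).

def pick_monitor_type(monitor_cfg):
    # Return the monitor type to instantiate for one config block.
    monitor_number = monitor_cfg.get("monitor_number", 1)
    if monitor_number > 1:
        # Several copies requested: wrap them in the multi-monitor.
        return "multi-monitor"
    return monitor_cfg["monitor_type"]


example_cfg = {
    "monitor_type": "openstack-cmd",
    "command_name": "openstack server list",
    "monitor_time": 10,
    "monitor_number": 3,
}
print(pick_monitor_type(example_cfg))  # multi-monitor
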
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
index eec512a58..0a8e8322a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
@@ -41,7 +41,7 @@ class AttackerServiceTestCase(unittest.TestCase):
cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
ins = cls(self.attacker_cfg, self.context)
- mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
+ mock_ssh.SSH.from_node().execute.return_value = (0, "10", '')
ins.setup()
ins.inject_fault()
ins.recover()
@@ -51,5 +51,5 @@ class AttackerServiceTestCase(unittest.TestCase):
cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
ins = cls(self.attacker_cfg, self.context)
- mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
+ mock_ssh.SSH.from_node().execute.return_value = (0, None, '')
ins.setup()
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index 7030c7849..3b7e07376 100644
--- a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -34,7 +34,7 @@ class MonitorMgrTestCase(unittest.TestCase):
self.monitor_configs.append(config)
def test__MonitorMgr_setup_successful(self, mock_monitor):
- instance = basemonitor.MonitorMgr()
+ instance = basemonitor.MonitorMgr({"nova-api": 10})
instance.init_monitors(self.monitor_configs, None)
instance.start_monitors()
instance.wait_monitors()
@@ -42,7 +42,7 @@ class MonitorMgrTestCase(unittest.TestCase):
ret = instance.verify_SLA()
def test_MonitorMgr_getitem(self, mock_monitor):
- monitorMgr = basemonitor.MonitorMgr()
+ monitorMgr = basemonitor.MonitorMgr({"nova-api": 10})
monitorMgr.init_monitors(self.monitor_configs, None)
monitorIns = monitorMgr['service-status']
@@ -67,12 +67,12 @@ class BaseMonitorTestCase(unittest.TestCase):
}
def test__basemonitor_start_wait_successful(self):
- ins = basemonitor.BaseMonitor(self.monitor_cfg, None)
+ ins = basemonitor.BaseMonitor(self.monitor_cfg, None, {"nova-api": 10})
ins.start_monitor()
ins.wait_monitor()
def test__basemonitor_all_successful(self):
- ins = self.MonitorSimple(self.monitor_cfg, None)
+ ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
ins.setup()
ins.run()
ins.verify_SLA()
@@ -81,7 +81,7 @@ class BaseMonitorTestCase(unittest.TestCase):
'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
'.multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
- ins = self.MonitorSimple(self.monitor_cfg, None)
+ ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
ins.setup()
mock_multiprocess.Event().is_set.return_value = False
ins.run()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
index c179bbfaf..2ed4be731 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -59,7 +59,7 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
def test__monitor_command_monitor_func_successful(self, mock_subprocess):
- instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
mock_subprocess.check_output.return_value = (0, 'unittest')
ret = instance.monitor_func()
@@ -69,7 +69,7 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
def test__monitor_command_monitor_func_failure(self, mock_subprocess):
mock_subprocess.check_output.return_value = (1, 'unittest')
- instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
mock_subprocess.check_output.side_effect = RuntimeError
ret = instance.monitor_func()
@@ -85,7 +85,7 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
self.config["host"] = "node1"
instance = monitor_command.MonitorOpenstackCmd(
- self.config, self.context)
+ self.config, self.context, {"nova-api": 10})
instance.setup()
mock_ssh.SSH.from_node().execute.return_value = (0, "0", '')
ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
index 169b630bf..c14f073ec 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
@@ -50,7 +50,7 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
}
def test__monitor_general_all_successful(self, mock_open, mock_ssh):
- ins = monitor_general.GeneralMonitor(self.monitor_cfg, self.context)
+ ins = monitor_general.GeneralMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
ins.setup()
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
@@ -61,7 +61,7 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
def test__monitor_general_all_successful_noparam(self, mock_open,
mock_ssh):
ins = monitor_general.GeneralMonitor(
- self.monitor_cfg_noparam, self.context)
+ self.monitor_cfg_noparam, self.context, {"nova-api": 10})
ins.setup()
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
@@ -71,7 +71,7 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
def test__monitor_general_failure(self, mock_open, mock_ssh):
ins = monitor_general.GeneralMonitor(
- self.monitor_cfg_noparam, self.context)
+ self.monitor_cfg_noparam, self.context, {"nova-api": 10})
ins.setup()
mock_ssh.SSH.from_node().execute.return_value = (1, "error", 'error')
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
index 9539f27d0..f8d12bd29 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
@@ -42,20 +42,20 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
}
def test__monitor_multi_all_successful(self, mock_open, mock_ssh):
- ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context);
+ ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
- mock_ssh.SSH().execute.return_value = (0, "running", '')
+ mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
- ins.start_monitor();
- ins.wait_monitor();
+ ins.start_monitor()
+ ins.wait_monitor()
ins.verify_SLA()
def test__monitor_multi_all_fail(self, mock_open, mock_ssh):
- ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context);
+ ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
- mock_ssh.SSH().execute.return_value = (1, "failed", '')
+ mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
- ins.start_monitor();
- ins.wait_monitor();
+ ins.start_monitor()
+ ins.wait_monitor()
ins.verify_SLA()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
index 8c267e413..41ce5445e 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -40,7 +40,7 @@ class MonitorProcessTestCase(unittest.TestCase):
def test__monitor_process_all_successful(self, mock_ssh):
- ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+ ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context, {"nova-api": 10})
mock_ssh.SSH.from_node().execute.return_value = (0, "1", '')
ins.setup()
@@ -50,7 +50,7 @@ class MonitorProcessTestCase(unittest.TestCase):
def test__monitor_process_down_failuer(self, mock_ssh):
- ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+ ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context, {"nova-api": 10})
mock_ssh.SSH.from_node().execute.return_value = (0, "0", '')
ins.setup()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index bff4a6dc3..e0e6cf3bf 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -38,8 +38,7 @@ class ProcessAttacker(BaseAttacker):
self.recovery_script = self.get_script_fullpath(
self.fault_cfg['recovery_script'])
- if self.check():
- self.setup_done = True
+ self.data[self.service_name] = self.check()
def check(self):
with open(self.check_script, "r") as stdin_file:
@@ -49,7 +48,7 @@ class ProcessAttacker(BaseAttacker):
if stdout:
LOG.info("check the envrioment success!")
- return True
+ return int(stdout.strip('\n'))
else:
LOG.error(
"the host envrioment is error, stdout:%s, stderr:%s",
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index ca2324055..7b3d8b0be 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -25,6 +25,7 @@ class AttackerMgr(object):
def __init__(self):
self._attacker_list = []
+ self.data = {}
def init_attackers(self, attacker_cfgs, context):
LOG.debug("attackerMgr confg: %s", attacker_cfgs)
@@ -35,6 +36,8 @@ class AttackerMgr(object):
attacker_ins.key = cfg['key']
attacker_ins.setup()
self._attacker_list.append(attacker_ins)
+ self.data = dict(self.data.items() + attacker_ins.data.items())
+ return self.data
def __getitem__(self, item):
for obj in self._attacker_list:
@@ -57,6 +60,7 @@ class BaseAttacker(object):
self._config = config
self._context = context
+ self.data = {}
self.setup_done = False
@staticmethod
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 76fcc0e7f..e0d05ebf5 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
class Director(object):
"""
Director is used to direct a test scenaio
- including the creation of action players, test result verification
+ including the creation of action players, test result verification
and rollback of actions.
"""
@@ -33,6 +33,7 @@ class Director(object):
# A stack store Rollbacker that will be called after
# all actionplayers finish.
self.executionSteps = []
+ self.data = {}
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
@@ -42,12 +43,14 @@ class Director(object):
LOG.debug("start init attackers...")
attacker_cfgs = self.scenario_cfg["options"]["attackers"]
self.attackerMgr = baseattacker.AttackerMgr()
- self.attackerMgr.init_attackers(attacker_cfgs, nodes)
+ self.data = self.attackerMgr.init_attackers(attacker_cfgs,
+ nodes)
+
# setup monitors
if "monitors" in self.scenario_cfg["options"]:
LOG.debug("start init monitors...")
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
- self.monitorMgr = basemonitor.MonitorMgr()
+ self.monitorMgr = basemonitor.MonitorMgr(self.data)
self.monitorMgr.init_monitors(monitor_cfgs, nodes)
# setup operations
if "operations" in self.scenario_cfg["options"]:
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index a0fc5965b..ba3370003 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -25,8 +25,9 @@ monitor_conf_path = pkg_resources.resource_filename(
class MonitorMgr(object):
"""docstring for MonitorMgr"""
- def __init__(self):
+ def __init__(self, data):
self._monitor_list = []
+ self.monitor_mgr_data = data
def init_monitors(self, monitor_cfgs, context):
LOG.debug("monitorMgr config: %s", monitor_cfgs)
@@ -39,7 +40,8 @@ class MonitorMgr(object):
if monitor_number > 1:
monitor_cls = BaseMonitor.get_monitor_cls("multi-monitor")
- monitor_ins = monitor_cls(monitor_cfg, context)
+ monitor_ins = monitor_cls(monitor_cfg, context,
+ self.monitor_mgr_data)
if "key" in monitor_cfg:
monitor_ins.key = monitor_cfg["key"]
self._monitor_list.append(monitor_ins)
@@ -69,7 +71,7 @@ class BaseMonitor(multiprocessing.Process):
"""docstring for BaseMonitor"""
monitor_cfgs = {}
- def __init__(self, config, context):
+ def __init__(self, config, context, data):
if not BaseMonitor.monitor_cfgs:
with open(monitor_conf_path) as stream:
BaseMonitor.monitor_cfgs = yaml.load(stream)
@@ -78,6 +80,7 @@ class BaseMonitor(multiprocessing.Process):
self._context = context
self._queue = multiprocessing.Queue()
self._event = multiprocessing.Event()
+ self.monitor_data = data
self.setup_done = False
@staticmethod
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index 0bd8e6d37..d7d1545da 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -20,16 +20,18 @@ class MultiMonitor(basemonitor.BaseMonitor):
__monitor_type__ = "multi-monitor"
- def __init__(self, config, context):
- super(MultiMonitor, self).__init__(config, context)
+ def __init__(self, config, context, data):
+ super(MultiMonitor, self).__init__(config, context, data)
self.monitors = []
+ self.monitor_data = data
monitor_type = self._config["monitor_type"]
monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
monitor_number = self._config.get("monitor_number", 1)
for i in range(monitor_number):
- monitor_ins = monitor_cls(self._config, self._context)
+ monitor_ins = monitor_cls(self._config, self._context,
+ self.monitor_data)
self.monitors.append(monitor_ins)
def start_monitor(self):
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index 31526b011..b0f6f8e9d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -35,10 +35,13 @@ class MonitorProcess(basemonitor.BaseMonitor):
exit_status, stdout, stderr = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.process_name),
stdin=stdin_file)
- if not stdout or int(stdout) <= 0:
- LOG.info("the process (%s) is not running!", self.process_name)
+
+ if not stdout or int(stdout) < self.monitor_data[self.process_name]:
+ LOG.info("the (%s) processes are in recovery!", self.process_name)
return False
+ LOG.info("the (%s) processes have been fully recovered!",
+ self.process_name)
return True
def verify_SLA(self):
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 69727de2b..2e829714d 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -28,6 +28,7 @@ class ServiceHA(base.Scenario):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.setup_done = False
+ self.data = {}
def setup(self):
"""scenario setup"""
@@ -44,10 +45,11 @@ class ServiceHA(base.Scenario):
attacker_ins = attacker_cls(attacker_cfg, nodes)
attacker_ins.setup()
self.attackers.append(attacker_ins)
+ self.data = dict(self.data.items() + attacker_ins.data.items())
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
- self.monitorMgr = basemonitor.MonitorMgr()
+ self.monitorMgr = basemonitor.MonitorMgr(self.data)
self.monitorMgr.init_monitors(monitor_cfgs, nodes)
self.setup_done = True
@@ -68,6 +70,12 @@ class ServiceHA(base.Scenario):
LOG.info("HA monitor stop!")
sla_pass = self.monitorMgr.verify_SLA()
+ for k, v in self.data.items():
+ if self.data[k] == 0:
+ result['sla_pass'] = 0
+ LOG.info("The service process not found in the host envrioment, \
+the HA test case NOT pass")
+ return
if sla_pass:
result['sla_pass'] = 1
LOG.info("The HA test case PASS the SLA")