author:    wym_libra <yimin.wang@huawei.com>  2015-12-31 20:02:09 +0800
committer: qi liang <liangqi1@huawei.com>  2016-01-10 03:23:40 +0000
commit:    7f3cc74f9443552631956e2fe61d31bc97106ef5 (patch)
tree:      c483de22d9abbf14f7ebf932170f666d89e6035a
parent:    93e5a8fefd2574de339d2f1ae2041b9d233bbc7b (diff)
The second HA test case - shutdown controller
1) add "attacker_baremetal.py" for fault injection 2) modify the monitor to excute on remote node after ssh connection 3) move all shell scripts together JIRA: YARDSTICK-182 Change-Id: Ibb9dc908224ddb8b99a0140b75c1a046503f6dfb Signed-off-by: wym_libra <yimin.wang@huawei.com> (cherry picked from commit 4f4edd840823ff6a0151e3f5220241183e27e560)
-rwxr-xr-x  samples/ha-baremetal.yaml  45
-rwxr-xr-x  samples/ha-service.yaml  42
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py  77
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_command.py  39
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_process.py  4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py  129
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml  9
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py  2
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker_conf.yaml  13
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash  27
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash  20
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash (renamed from yardstick/benchmark/scenarios/availability/monitor/script_tools/check_service.bash)  4
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash (renamed from yardstick/benchmark/scenarios/availability/attacker/scripts/check_service.bash)  0
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash  18
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash  21
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash (renamed from yardstick/benchmark/scenarios/availability/attacker/scripts/start_service.bash)  0
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash (renamed from yardstick/benchmark/scenarios/availability/attacker/scripts/stop_service.bash)  0
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py  62
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py  39
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_conf.yaml  9
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py  19
21 files changed, 496 insertions, 83 deletions
diff --git a/samples/ha-baremetal.yaml b/samples/ha-baremetal.yaml
new file mode 100755
index 000000000..9f9baf50c
--- /dev/null
+++ b/samples/ha-baremetal.yaml
@@ -0,0 +1,45 @@
+---
+# Sample test case for ha
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "bare-metal-down"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "openstack-cmd"
+ command_name: "heat stack-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "openstack-cmd"
+ command_name: "neutron router-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/samples/ha-service.yaml b/samples/ha-service.yaml
new file mode 100755
index 000000000..e624f531e
--- /dev/null
+++ b/samples/ha-service.yaml
@@ -0,0 +1,42 @@
+---
+# Sample test case for ha
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "nova-api"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "nova-api"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
new file mode 100644
index 000000000..340f94cb0
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+ def test__fun_execute_shell_command_successful(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ self.assertEqual(exitcode, 0)
+
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ self.assertEqual(exitcode, -1)
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
+class AttackerBaremetalTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.attacker_cfg = {
+ 'fault_type': 'bare-metal-down',
+ 'host': 'node1',
+ }
+
+ def test__attacker_baremetal_all_successful(self, mock_ssh):
+
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.inject_fault()
+ ins.recover()
+
+ def test__attacker_baremetal_check_failure(self, mock_ssh):
+
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "error check", '')
+ ins.setup()
+
+ def test__attacker_baremetal_recover_successful(self, mock_ssh):
+
+ self.attacker_cfg["jump_host"] = 'node1'
+ self.context["node1"]["pwd"] = "123456"
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.recover()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
index 31e309714..c8cda7dc7 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -17,7 +17,7 @@ import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_command
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
-class MonitorOpenstackCmdTestCase(unittest.TestCase):
+class ExecuteShellTestCase(unittest.TestCase):
def test__fun_execute_shell_command_successful(self, mock_subprocess):
cmd = "env"
@@ -31,16 +31,28 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
exitcode, output = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
- def test__monitor_command_monitor_func_successful(self, mock_subprocess):
- config = {
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class MonitorOpenstackCmdTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.config = {
'monitor_type': 'openstack-api',
'command_name': 'nova image-list',
'monitor_time': 1,
'sla': {'max_outage_time': 5}
}
- instance = monitor_command.MonitorOpenstackCmd(config, None)
+ def test__monitor_command_monitor_func_successful(self, mock_subprocess):
+
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance.setup()
mock_subprocess.check_output.return_value = (0, 'unittest')
ret = instance.monitor_func()
self.assertEqual(ret, True)
@@ -49,16 +61,19 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
def test__monitor_command_monitor_func_failure(self, mock_subprocess):
mock_subprocess.check_output.return_value = (1, 'unittest')
- config = {
- 'monitor_type': 'openstack-api',
- 'command_name': 'nova image-list',
- 'monitor_time': 1,
- 'sla': {'max_outage_time': 5}
- }
- instance = monitor_command.MonitorOpenstackCmd(config, None)
-
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance.setup()
mock_subprocess.check_output.side_effect = RuntimeError
ret = instance.monitor_func()
self.assertEqual(ret, False)
instance._result = {"outage_time": 10}
instance.verify_SLA()
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh')
+ def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess):
+
+ self.config["host"] = "node1"
+ instance = monitor_command.MonitorOpenstackCmd(self.config, self.context)
+ instance.setup()
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
index f983136d2..dda104b4e 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -38,7 +38,7 @@ class MonitorProcessTestCase(unittest.TestCase):
ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
- mock_ssh.SSH().execute.return_value = (0, "running", '')
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
ins.setup()
ins.monitor_func()
ins._result = {"outage_time": 0}
@@ -48,7 +48,7 @@ class MonitorProcessTestCase(unittest.TestCase):
ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
- mock_ssh.SSH().execute.return_value = (0, "stop", '')
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
ins.setup()
ins.monitor_func()
ins._result = {"outage_time": 10}
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
new file mode 100644
index 000000000..b35869d07
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -0,0 +1,129 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import traceback
+import subprocess
+import yardstick.ssh as ssh
+from baseattacker import BaseAttacker
+
+LOG = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command, stdin=None):
+ '''execute shell script with error handling'''
+ exitcode = 0
+ output = []
+ try:
+ output = subprocess.check_output(command, stdin=stdin, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ LOG.error("exec command '%s' error:\n " % command)
+ LOG.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+class BaremetalAttacker(BaseAttacker):
+
+ __attacker_type__ = 'bare-metal-down'
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+ self.host_ip = ip
+
+ self.ipmi_ip = host.get("ipmi_ip", None)
+ self.ipmi_user = host.get("ipmi_user", "root")
+ self.ipmi_pwd = host.get("ipmi_pwd", None)
+
+ self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
+ self.check_script = self.get_script_fullpath(
+ self.fault_cfg['check_script'])
+ self.recovery_script = self.get_script_fullpath(
+ self.fault_cfg['recovery_script'])
+
+ if self.check():
+ self.setup_done = True
+
+ def check(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0} -W 10".format(self.host_ip),
+ stdin=open(self.check_script, "r"))
+
+ LOG.debug("check ret: %s out:%s err:%s" %
+ (exit_status, stdout, stderr))
+ if not stdout or "running" not in stdout:
+ LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+ return False
+
+ return True
+
+ def inject_fault(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "shutdown -h now")
+ LOG.debug("inject fault ret: %s out:%s err:%s" %
+ (exit_status, stdout, stderr))
+ if not exit_status:
+ LOG.info("inject fault success")
+
+ def recover(self):
+ jump_host_name = self._config.get("jump_host", None)
+ self.jump_connection = None
+ if jump_host_name is not None:
+ host = self._context.get(jump_host_name, None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ pwd = host.get("pwd", None)
+
+ LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+ self.jump_connection = ssh.SSH(user, ip, password=pwd)
+ self.jump_connection.wait(timeout=600)
+ LOG.debug("ssh jump host success!")
+
+ if self.jump_connection is not None:
+ exit_status, stdout, stderr = self.jump_connection.execute(
+ "/bin/bash -s {0} {1} {2} {3}".format(
+ self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
+ stdin=open(self.recovery_script, "r"))
+ else:
+ exit_status, stdout = _execute_shell_command(
+ "/bin/bash -s {0} {1} {2} {3}".format(
+ self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
+ stdin=open(self.recovery_script, "r"))
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ attacker_cfg = {
+ 'fault_type': 'bare-metal-down',
+ 'host': 'node1',
+ }
+ ins = BaremetalAttacker(attacker_cfg, context)
+ ins.setup()
+ ins.inject_fault()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml
deleted file mode 100644
index 44f06038b..000000000
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# sample config file for ha test
-#
-schema: "yardstick:task:0.1"
-
-kill-process:
- inject_script: scripts/stop_service.bash
- recovery_script: scripts/start_service.bash
- check_script: scripts/check_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index ddaf09969..a1c6999e5 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -16,7 +16,7 @@ import yardstick.common.utils as utils
LOG = logging.getLogger(__name__)
attacker_conf_path = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability.attacker",
+ "yardstick.benchmark.scenarios.availability",
"attacker_conf.yaml")
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
new file mode 100644
index 000000000..3f6c2aa8f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -0,0 +1,13 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+kill-process:
+ check_script: ha_tools/check_process_python.bash
+ inject_script: ha_tools/fault_process_kill.bash
+ recovery_script: ha_tools/start_service.bash
+
+bare-metal-down:
+ check_script: ha_tools/check_host_ping.bash
+ recovery_script: ha_tools/ipmi_power.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash
new file mode 100755
index 000000000..0f160e2a8
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check whether the host is running
+
+set -e
+
+host_ip=$1
+shift
+options="$@"
+
+# count ICMP echo replies; a zero count means the host did not answer
+status=$(ping -c 1 $options $host_ip | grep ttl | wc -l)
+
+if [ "$status" -eq 0 ]; then
+ exit 1
+else
+ echo "running"
+fi
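Illustrative stand-alone run of the check script above (hedged example; the address is the placeholder used in the unit tests, and "-W 10" mirrors the options attacker_baremetal.py passes over ssh):

    # prints "running" when the host answers a single ping, exits 1 otherwise
    bash check_host_ping.bash 10.20.0.5 -W 10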
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash
new file mode 100755
index 000000000..83d7e36c1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of an OpenStack command
+
+set -e
+
+cmd=$1
+
+source /root/openrc
+
+exec $cmd
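Illustrative stand-alone run (hedged example; assumes /root/openrc on the node holds valid OpenStack credentials):

    # exits non-zero when the API behind the command is unreachable
    bash check_openstack_cmd.bash "nova image-list"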
diff --git a/yardstick/benchmark/scenarios/availability/monitor/script_tools/check_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash
index cc898a859..88baed7d9 100755
--- a/yardstick/benchmark/scenarios/availability/monitor/script_tools/check_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash
@@ -13,6 +13,6 @@
set -e
-service_name=$1
+process_name=$1
-service $service_name status
+ps aux | grep -e .*python.*$process_name.* | grep -v grep | wc -l
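Illustrative run of the renamed check (hedged example; the monitor only tests whether the printed count is greater than zero):

    # prints the number of python processes whose command line mentions nova-api
    bash check_process_python.bash nova-api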
diff --git a/yardstick/benchmark/scenarios/availability/attacker/scripts/check_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash
index cc898a859..cc898a859 100755
--- a/yardstick/benchmark/scenarios/availability/attacker/scripts/check_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
new file mode 100755
index 000000000..d0e2f1683
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop a process by its process name
+
+set -e
+
+process_name=$1
+
+killall -9 $process_name
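Illustrative run (hedged example; killall -9 sends SIGKILL to every process with this name):

    bash fault_process_kill.bash nova-api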
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash b/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash
new file mode 100755
index 000000000..ea621facd
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Power a node on/off/reset through IPMI (used to recover the shut-down host)
+
+set -e
+
+ipmi_ip=$1
+ipmi_user=$2
+ipmi_pwd=$3
+
+action=$4
+ipmitool -I lanplus -H $ipmi_ip -U $ipmi_user -P $ipmi_pwd power $action
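Illustrative run (hedged example; the BMC address and credentials are the placeholders from the unit tests, and the last argument is any ipmitool chassis power action such as on, off or reset):

    # power the shut-down controller back on through its BMC
    bash ipmi_power.bash 10.20.0.5 root 123456 on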
diff --git a/yardstick/benchmark/scenarios/availability/attacker/scripts/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
index c1bf8b7eb..c1bf8b7eb 100755
--- a/yardstick/benchmark/scenarios/availability/attacker/scripts/start_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/attacker/scripts/stop_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash
index a8901784e..a8901784e 100755
--- a/yardstick/benchmark/scenarios/availability/attacker/scripts/stop_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index 25990ac8c..983c3a3ac 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -16,10 +16,40 @@ import yardstick.common.utils as utils
LOG = logging.getLogger(__name__)
monitor_conf_path = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability.monitor",
+ "yardstick.benchmark.scenarios.availability",
"monitor_conf.yaml")
+class MonitorMgr(object):
+ """docstring for MonitorMgr"""
+ def __init__(self):
+ self._monitor_list = []
+
+ def init_monitors(self, monitor_cfgs, context):
+ LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+
+ for monitor_cfg in monitor_cfgs:
+ monitor_type = monitor_cfg["monitor_type"]
+ monitor_cls = BaseMonitor.get_monitor_cls(monitor_type)
+ monitor_ins = monitor_cls(monitor_cfg, context)
+
+ self._monitor_list.append(monitor_ins)
+
+ def start_monitors(self):
+ for _monitor_instance in self._monitor_list:
+ _monitor_instance.start_monitor()
+
+ def wait_monitors(self):
+ for monitor in self._monitor_list:
+ monitor.wait_monitor()
+
+ def verify_SLA(self):
+ sla_pass = True
+ for monitor in self._monitor_list:
+ sla_pass = sla_pass & monitor.verify_SLA()
+ return sla_pass
+
+
class BaseMonitor(multiprocessing.Process):
"""docstring for BaseMonitor"""
@@ -108,33 +138,3 @@ class BaseMonitor(multiprocessing.Process):
def verify_SLA(self):
pass
-
-
-class MonitorMgr(object):
- """docstring for MonitorMgr"""
- def __init__(self):
- self._monitor_list = []
-
- def init_monitors(self, monitor_cfgs, context):
- LOG.debug("monitorMgr config: %s" % monitor_cfgs)
-
- for monitor_cfg in monitor_cfgs:
- monitor_type = monitor_cfg["monitor_type"]
- monitor_cls = BaseMonitor.get_monitor_cls(monitor_type)
- monitor_ins = monitor_cls(monitor_cfg, context)
-
- self._monitor_list.append(monitor_ins)
-
- def start_monitors(self):
- for _monotor_instace in self._monitor_list:
- _monotor_instace.start_monitor()
-
- def wait_monitors(self):
- for monitor in self._monitor_list:
- monitor.wait_monitor()
-
- def verify_SLA(self):
- sla_pass = True
- for monitor in self._monitor_list:
- sla_pass = sla_pass & monitor.verify_SLA()
- return sla_pass
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index 232340aca..c285024e1 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -9,7 +9,7 @@
import logging
import subprocess
import traceback
-
+import yardstick.ssh as ssh
import basemonitor as basemonitor
LOG = logging.getLogger(__name__)
@@ -35,9 +35,35 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
__monitor_type__ = "openstack-cmd"
+ def setup(self):
+ self.connection = None
+ node_name = self._config.get("host", None)
+ if node_name:
+ host = self._context[node_name]
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.check_script = self.get_script_fullpath(
+ "ha_tools/check_openstack_cmd.bash")
+
+ self.cmd = self._config["command_name"]
+
def monitor_func(self):
- cmd = self._config["command_name"]
- exit_status, stdout = _execute_shell_command(cmd)
+ exit_status = 0
+ if self.connection:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/bash -s '{0}'".format(self.cmd),
+ stdin=open(self.check_script, "r"))
+
+ LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
+ (exit_status, stdout, stderr))
+ else:
+ exit_status, stdout = _execute_shell_command(self.cmd)
if exit_status:
return False
return True
@@ -56,7 +82,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
def _test(): # pragma: no cover
host = {
- "ip": "10.20.0.5",
+ "ip": "192.168.235.22",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
}
@@ -66,7 +92,8 @@ def _test(): # pragma: no cover
'monitor_type': 'openstack-cmd',
'command_name': 'nova image-list',
'monitor_time': 1,
- 'SLA': {'max_outage_time': 5}
+ 'host': 'node1',
+ 'sla': {'max_outage_time': 5}
}
monitor_configs.append(config)
@@ -74,7 +101,7 @@ def _test(): # pragma: no cover
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
- p.verify()
+ p.verify_SLA()
if __name__ == '__main__': # pragma: no cover
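With a "host" configured, the monitor no longer runs the command locally; it ships check_openstack_cmd.bash over ssh with the command as its argument. A roughly equivalent manual form (hedged example; the node address is a placeholder):

    ssh root@10.20.0.5 "/bin/bash -s 'nova image-list'" < ha_tools/check_openstack_cmd.bash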
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_conf.yaml b/yardstick/benchmark/scenarios/availability/monitor/monitor_conf.yaml
deleted file mode 100644
index 44f06038b..000000000
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_conf.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# sample config file for ha test
-#
-schema: "yardstick:task:0.1"
-
-kill-process:
- inject_script: scripts/stop_service.bash
- recovery_script: scripts/start_service.bash
- check_script: scripts/check_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index 64e12f1dd..53a6d8e4d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -29,24 +29,21 @@ class MonitorProcess(basemonitor.BaseMonitor):
self.connection.wait(timeout=600)
LOG.debug("ssh host success!")
self.check_script = self.get_script_fullpath(
- "script_tools/check_service.bash")
+ "ha_tools/check_process_python.bash")
self.process_name = self._config["process_name"]
def monitor_func(self):
exit_status, stdout, stderr = self.connection.execute(
"/bin/sh -s {0}".format(self.process_name),
stdin=open(self.check_script, "r"))
+ if not stdout or int(stdout) <= 0:
+ LOG.info("the process (%s) is not running!" % self.process_name)
+ return False
- if stdout and "running" in stdout:
- LOG.info("check the envrioment success!")
- return True
- else:
- LOG.error(
- "the host envrioment is error, stdout:%s, stderr:%s" %
- (stdout, stderr))
- return False
+ return True
def verify_SLA(self):
+ LOG.debug("the _result:%s" % self._result)
outage_time = self._result.get('outage_time', None)
max_outage_time = self._config["sla"]["max_recover_time"]
if outage_time > max_outage_time:
@@ -69,7 +66,7 @@ def _test(): # pragma: no cover
'process_name': 'nova-api',
'host': "node1",
'monitor_time': 1,
- 'SLA': {'max_recover_time': 5}
+ 'sla': {'max_recover_time': 5}
}
monitor_configs.append(config)
@@ -77,7 +74,7 @@ def _test(): # pragma: no cover
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
- p.verify()
+ p.verify_SLA()
if __name__ == '__main__': # pragma: no cover