author     wym_libra <yimin.wang@huawei.com>    2015-10-26 07:54:31 +0000
committer  wym_libra <yimin.wang@huawei.com>    2015-11-13 06:59:51 +0000
commit     6a5d8a6d58ab501184313eda84820294ff3597e7 (patch)
tree       2fa59c5f95776db6c9461816ba293547c247eed7
parent     eb8320b2e924e22c20af49a0d37bee12417ede95 (diff)
An initial HA test case

1) stop an OpenStack service, 2) monitor the corresponding API and check its availability, 3) recover the OpenStack service.

JIRA: YARDSTICK-149
Change-Id: Id7b77d2f5c71844729c04f37442c8cfaa270ab12
Signed-off-by: wym_libra <yimin.wang@huawei.com>
-rw-r--r--  etc/yardstick/nodes/fuel_virtual/id_rsa                                   27
-rw-r--r--  etc/yardstick/nodes/fuel_virtual/pod.yaml                                 43
-rwxr-xr-x  samples/serviceha.yaml                                                    29
-rwxr-xr-x  setup.py                                                                   1
-rw-r--r--  tests/unit/benchmark/scenarios/availability/__init__.py                    0
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor.py               83
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_serviceha.py            153
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/__init__.py                     0
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash    18
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml          12
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash    18
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash     21
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/monitor.py                    114
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py                  193
14 files changed, 712 insertions, 0 deletions
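
For orientation before reading the individual files: the flow described in the commit message maps onto the new code roughly as sketched below. This is a condensed illustration of ServiceHA.run() and Monitor (both added in full further down), not part of the commit itself; the function name and arguments are chosen for the sketch only.

# Sketch (not in the commit): how the pieces below fit together.
# 'connection' is assumed to be a yardstick.ssh.SSH session to a controller,
# 'mon' a Monitor instance from monitor.py, 'sla' the task's sla dict.
import time

def run_ha_scenario(connection, mon, fault_script, service_name, fault_time, sla):
    mon.start()                                     # start polling the API (monitor.py)
    with open(fault_script) as script:              # inject the fault, e.g. stop_service.bash
        status, _, stderr = connection.execute(
            "/bin/sh -s {0}".format(service_name), stdin=script)
    if status:
        mon.stop()
        raise RuntimeError(stderr)
    time.sleep(fault_time)                          # keep the fault in place for fault_time seconds
    mon.stop()
    outage_time = mon.get_result()["outage_time"]
    assert outage_time <= sla["outage_time"]        # SLA check, as in ServiceHA.run()
    return outage_time
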
diff --git a/etc/yardstick/nodes/fuel_virtual/id_rsa b/etc/yardstick/nodes/fuel_virtual/id_rsa
new file mode 100644
index 000000000..35ac1b5fe
--- /dev/null
+++ b/etc/yardstick/nodes/fuel_virtual/id_rsa
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA24CCFpmsaPUt9KJsaER/R4IyYvOd0iOXhFuOUCl4nJvBnlXu
+D8Pzombgz6bZcHx96ukgmOKq/Bf0tPA4fN733fw/Jjb4t6O4HFVpcBZykcgdnB56
+pqwN108pQZCq8R3EiKU3BgL2nWi9YP94JxsbD8I6vcQVbG7SMeJ0YpQzNYyJ1ig9
+fjff7ROuOc+XVZhG7UtbCz7adGS2/FfGgXz49mLS98pNLMOAUSUtoBog0CveotXM
+rWT9OOOpTTihWFspVU1cnT1LGJ+MYVRX2uF7sZASsglwA0kzjlf1nzQ8fGXC3o7D
+kFPdM1jtHWf4ah4DSb2e/LnCrwqBgmMyKpV+AQIBIwKCAQEA1TsCB1N0SLOo/EYC
+6PIVPia0mqODXmu3wmeRj7NB9zg4be0TJUICncMGRg/L6Z2Bol7PNW56NrgvizKA
+BEZP3vUKJR91RK2riT0HVvE8GJaDKfG4+a5z2HjI/dz91EjNjA41c43ZoDncihy9
+3NiAsDkF3Opd9E5lyg8vOzDhSfRwCg2u2HMiGy9/yB+V8x8QJ2vjDzXI1Nn40jAa
+azw94wIdkHCrXRbt3z5zNMNRTQtGUD1uUSsTGO2AH4LY/Dtq0kAZw2f4dgfZS8f3
+FGIa2o3wpJfgZWmHS+jWBg1ADZTr45Ur2BpKIo/GcjeTdf8DkNQ2/hy0x3JGBVBE
+e5HvYwKBgQDzb5ApXZu7gOm5XOCWC0cNiqQT7VUHQMdc9i7waV862kH7+Jn1nPBj
+QsJQhGVrB/vP3PhkGbRHp86QBJiwTpY4m9YQFfvFH47a2NWrlafNyvQ5y37OIsD0
+ib8vYRXQrBFnrkBn+wCbDNTy04v3hbPayN5OC7EwT1g7PbexIpYH7wKBgQDm1Lcr
+bvhyQR61FnQjKmX0DOVk6Jve0Pk0PNkzXm7JsnF8U+mJClmgqJOEoaBaj3dGZDTC
+TzGLUcZCkZk+2PEv8BcyAd4GNETZYCHEgxDsnDbotZPvlkFCzQr4+6delSv+8zTM
+kTgyEf8IW8/4Jy7wIH1UkRgyoXFYin6F1BZpDwKBgQDekeLj/dA2ZzwXMFhOqzml
++xmr0azTbm0hyyOZ+fCq1i2zLG+BeYtTcDyhYxrlg6RmRl9xdpYy4pD4s77NFKaa
+KBQr9tePp9MRO0cDRv/SGKTHIHPvqr8LdqArUXMH7cbFMZn4qvk9TY9+7UzFDIcu
+boIbeGd8oFCrMRz5uTi2ywKBgQCXsFscisCFmIHktvvcmDRejCG3VwdX6Gk/lbNN
+pHSwbfLOC0Gx05n73H4ylhjrDdIJr5Bibo5FnCMzDzjRhz9opRaOk4NF59V452al
+tTcB4v+C+vrQpJFJJ6gf9dRiucx0Vq2rAFgg50ExYOfAVENqmQHnHYTuElHMeESD
+1IPBYQKBgQDUEnv0htzJjJef2080CMN4RG+Sq2PHBkFlwe6fL52+AVv/zyCqKqy5
+TXRV1tHnMTII68+/NobwG+UC8dCBQSFmbLEqHcLF1+PxtOiSkRotU8rBZAafuMCS
+ajH+CdGMwUkMvsPL2PnnX/A6w0PJZM/arpou9qOI1bzxQuL7h43zzw==
+-----END RSA PRIVATE KEY-----
diff --git a/etc/yardstick/nodes/fuel_virtual/pod.yaml b/etc/yardstick/nodes/fuel_virtual/pod.yaml
new file mode 100644
index 000000000..8a7f436b9
--- /dev/null
+++ b/etc/yardstick/nodes/fuel_virtual/pod.yaml
@@ -0,0 +1,43 @@
+---
+# sample config file about the POD information, including the
+# name/IP/user/ssh key of Bare Metal and Controllers/Computes
+#
+# The options of this config file include:
+# name: the name of this node
+# role: node's role, supported roles: Master/Controller/Compute/BareMetal
+# ip: the node's IP address
+# user: the username for login
+# key_filename: the path of the private key file for login
+
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: 10.20.0.3
+ user: root
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node2
+ role: Controller
+ ip: 10.20.0.4
+ user: root
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node3
+ role: Controller
+ ip: 10.20.0.5
+ user: root
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node4
+ role: Compute
+ ip: 10.20.0.6
+ user: root
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node5
+ role: Compute
+ ip: 10.20.0.7
+ user: root
+ key_filename: /root/.ssh/id_rsa
+
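
The Node context consumes a pod.yaml of exactly this shape. As a quick illustration of what the scenario ends up with per node, here is a minimal sketch of loading the file with PyYAML; the actual parsing is done by Yardstick's Node context, not by code in this commit.

# Sketch (not in the commit): read the pod.yaml above and index the nodes.
import yaml

with open("etc/yardstick/nodes/fuel_virtual/pod.yaml") as f:
    pod = yaml.safe_load(f)

nodes = {node["name"]: node for node in pod["nodes"]}
controllers = [n for n in nodes.values() if n["role"] == "Controller"]
print(controllers[0]["ip"])   # one of 10.20.0.3/4/5
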
diff --git a/samples/serviceha.yaml b/samples/serviceha.yaml
new file mode 100755
index 000000000..424732189
--- /dev/null
+++ b/samples/serviceha.yaml
@@ -0,0 +1,29 @@
+---
+# Sample test case for ha
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ component: "nova-api"
+ fault_type: "stop-service"
+ fault_time: 5
+
+ host: node1.LF
+
+ runner:
+ type: Duration
+ duration: 6
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
+
+
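
When this task runs, the framework passes the options and sla sections to the scenario as plain dicts, and the host reference is resolved through the Node context. Below is a minimal sketch of the equivalent direct call, mirroring the unit tests and the commented-out _test() helper at the end of serviceha.py; the host values are illustrative placeholders.

# Sketch (not in the commit): drive the scenario directly with the same
# options/sla as samples/serviceha.yaml; the host entry is a placeholder.
from yardstick.benchmark.scenarios.availability import serviceha

scenario_cfg = {
    "options": {"component": "nova-api",
                "fault_type": "stop-service",
                "fault_time": 5},
    "sla": {"outage_time": 5, "action": "monitor"},
}
context_cfg = {
    "host": {"ip": "10.20.0.3", "user": "root",
             "key_filename": "/root/.ssh/id_rsa"},
}

s = serviceha.ServiceHA(scenario_cfg, context_cfg)
s.setup()
result = {}
s.run(result)        # fills result["outage_time"]
s.teardown()
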
diff --git a/setup.py b/setup.py
index b8a6fe987..a8e9ccd86 100755
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,7 @@ setup(
include_package_data=True,
package_data={
'yardstick': [
+ 'benchmark/scenarios/availability/ha_tools/*.bash',
'benchmark/scenarios/compute/*.bash',
'benchmark/scenarios/networking/*.bash',
'benchmark/scenarios/storage/*.bash',
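
Listing the ha_tools scripts in package_data is what makes them resolvable from an installed package; serviceha.py locates them with pkg_resources. A short sketch of that lookup (the script name is one of the files added in this commit):

# Sketch (not in the commit): resolve a helper script shipped as package data.
import pkg_resources

script = pkg_resources.resource_filename(
    "yardstick.benchmark.scenarios.availability",
    "ha_tools/check_service.bash")
print(script)   # absolute path inside the installed yardstick package
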
diff --git a/tests/unit/benchmark/scenarios/availability/__init__.py b/tests/unit/benchmark/scenarios/availability/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/__init__.py
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor.py b/tests/unit/benchmark/scenarios/availability/test_monitor.py
new file mode 100644
index 000000000..793871ca3
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability import monitor
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.subprocess')
+class MonitorTestCase(unittest.TestCase):
+
+ def test__fun_execute_shell_command_successful(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, output = monitor._execute_shell_command(cmd)
+ self.assertEqual(exitcode, 0)
+
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, output = monitor._execute_shell_command(cmd)
+ self.assertEqual(exitcode, -1)
+
+ def test__fun_monitor_process_successful(self, mock_subprocess):
+ config = {
+ 'monitor_cmd':'env',
+ 'duration':0
+ }
+ mock_queue = mock.Mock()
+ mock_event = mock.Mock()
+
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ monitor._monitor_process(config, mock_queue, mock_event)
+
+ def test__fun_monitor_process_fail_cmd_execute_error(self, mock_subprocess):
+ config = {
+ 'monitor_cmd':'env',
+ 'duration':0
+ }
+ mock_queue = mock.Mock()
+ mock_event = mock.Mock()
+
+ mock_subprocess.check_output.side_effect = RuntimeError
+ monitor._monitor_process(config, mock_queue, mock_event)
+
+ def test__fun_monitor_process_fail_no_monitor_cmd(self, mock_subprocess):
+ config = {
+ 'duration':0
+ }
+ mock_queue = mock.Mock()
+ mock_event = mock.Mock()
+
+ mock_subprocess.check_output.return_value = (-1, 'unittest')
+ monitor._monitor_process(config, mock_queue, mock_event)
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.multiprocessing')
+ def test_monitor_all_successful(self, mock_multip, mock_subprocess):
+ config = {
+ 'monitor_cmd':'env',
+ 'duration':0
+ }
+ p = monitor.Monitor()
+ p.setup(config)
+ mock_multip.Queue().get.return_value = 'started'
+ p.start()
+
+ result = "monitor unitest"
+ mock_multip.Queue().get.return_value = result
+ p.stop()
+
+ ret = p.get_result()
+
+ self.assertEqual(result, ret)
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
new file mode 100644
index 000000000..861bacdc9
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.serviceha
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability import serviceha
+
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.ssh')
+class ServicehaTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.args = {
+ 'options':{
+ 'component':'nova-api',
+ 'fault_type':'stop-service',
+ 'fault_time':0
+ },
+ 'sla':{
+ 'outage_time':'2'
+ }
+ }
+ self.ctx = {
+ 'host': {
+ 'ip': '10.20.0.3',
+ 'user': 'cirros',
+ 'key_filename': 'mykey.key'
+ }
+ }
+
+ def test__serviceha_setup_successful(self, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+
+ self.assertEqual(p.setup_done, True)
+
+ def test__serviceha_setup_fail_service(self, mock_ssh):
+
+ self.args['options']['component'] = 'error'
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+
+ self.assertEqual(p.setup_done, False)
+
+ def test__serviceha_setup_fail_fault_type(self, mock_ssh):
+
+ self.args['options']['fault_type'] = 'error'
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+
+ self.assertEqual(p.setup_done, False)
+
+ def test__serviceha_setup_fail_check(self, mock_ssh):
+
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'error', '')
+ p.setup()
+
+ self.assertEqual(p.setup_done, False)
+
+ def test__serviceha_setup_fail_script(self, mock_ssh):
+
+ p = serviceha.ServiceHA(self.args, self.ctx)
+
+ mock_ssh.SSH().execute.return_value = (-1, 'false', '')
+
+ self.assertRaises(RuntimeError, p.setup)
+ self.assertEqual(p.setup_done, False)
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
+ def test__serviceha_run_successful(self, mock_monitor, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+
+ monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0}
+ mock_monitor.Monitor().get_result.return_value = monitor_result
+
+ p.connection = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, 'success', '')
+
+ result = {}
+ p.run(result)
+ self.assertEqual(result,{ 'outage_time': 0})
+
+ def test__serviceha_run_fail_nosetup(self, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ p.run(None)
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
+ def test__serviceha_run_fail_script(self, mock_monitor, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+
+ monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0}
+ mock_monitor.Monitor().get_result.return_value = monitor_result
+
+ p.connection = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (-1, 'error', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, p.run, result)
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
+ def test__serviceha_run_fail_sla(self, mock_monitor, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+
+ monitor_result = {'total_time': 10, 'outage_time': 5, 'total_count': 16, 'outage_count': 0}
+ mock_monitor.Monitor().get_result.return_value = monitor_result
+
+ p.connection = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, 'success', '')
+
+ result = {}
+ self.assertRaises(AssertionError, p.run, result)
+
+ def test__serviceha_teardown_successful(self, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+ p.need_teardown = True
+
+ mock_ssh.SSH().execute.return_value = (0, 'success', '')
+ p.teardown()
+
+ self.assertEqual(p.need_teardown, False)
+
+ def test__serviceha_teardown_fail_script(self, mock_ssh):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, 'running', '')
+ p.setup()
+ p.need_teardown = True
+
+ mock_ssh.SSH().execute.return_value = (-1, 'false', '')
+
+ self.assertRaises(RuntimeError, p.teardown)
+
diff --git a/yardstick/benchmark/scenarios/availability/__init__.py b/yardstick/benchmark/scenarios/availability/__init__.py
new file mode 100755
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash
new file mode 100755
index 000000000..cc898a859
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of a service
+
+set -e
+
+service_name=$1
+
+service $service_name status
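
The scenario does not copy this script to the target node; it streams it to /bin/sh over SSH and passes the service name as $1, as ServiceHA.setup() does. A minimal sketch of that remote check follows; the host details are placeholders and ssh.SSH is Yardstick's SSH wrapper.

# Sketch (not in the commit): run check_service.bash remotely without copying it.
# The IP/user/key values are placeholders.
import yardstick.ssh as ssh

conn = ssh.SSH("root", "10.20.0.3", key_filename="/root/.ssh/id_rsa")
conn.wait(timeout=600)

with open("check_service.bash") as script:
    status, stdout, stderr = conn.execute("/bin/sh -s nova-api", stdin=script)

# "running" in stdout means the service is up; a non-zero status means the
# check itself failed (see ServiceHA.setup()).
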
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml b/yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml
new file mode 100644
index 000000000..67e56eb4f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml
@@ -0,0 +1,12 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+nova-api:
+-
+ type: stop-service
+ inject_script: ha_tools/stop_service.bash
+ recovery_script: ha_tools/start_service.bash
+ check_script: ha_tools/check_service.bash
+ monitor_cmd: nova image-list
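
ServiceHA.setup() selects one entry from this file by component name and fault type; a condensed sketch of that lookup (using yaml.safe_load here, while the committed code uses yaml.load):

# Sketch (not in the commit): pick the fault entry for nova-api / stop-service.
import yaml

with open("yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml") as stream:
    ha_cfg = yaml.safe_load(stream)

faults = ha_cfg.get("nova-api", [])
fault_cfg = next((f for f in faults if f["type"] == "stop-service"), None)
if fault_cfg is None:
    raise ValueError("unsupported fault type")
print(fault_cfg["inject_script"])   # ha_tools/stop_service.bash
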
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
new file mode 100755
index 000000000..c1bf8b7eb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Start a service and check the service is started
+
+set -e
+
+service_name=$1
+
+service $service_name start
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash
new file mode 100755
index 000000000..a8901784e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop a service and check the service is stopped
+
+set -e
+
+service_name=$1
+
+service $service_name stop
+
+# TODO
+# check the service status
diff --git a/yardstick/benchmark/scenarios/availability/monitor.py b/yardstick/benchmark/scenarios/availability/monitor.py
new file mode 100755
index 000000000..3193d3304
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor.py
@@ -0,0 +1,114 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import multiprocessing
+import subprocess
+import traceback
+import time
+
+LOG = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command):
+ '''execute shell script with error handling'''
+ exitcode = 0
+ output = []
+ try:
+ output = subprocess.check_output(command, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ LOG.error("exec command '%s' error:\n " % command)
+ LOG.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+def _monitor_process(config, queue, event):
+
+ total_time = 0
+ outage_time = 0
+ total_count = 0
+ outage_count = 0
+ first_outage = 0
+ last_outage = 0
+
+ wait_time = config.get("duration", 0)
+ cmd = config.get("monitor_cmd", None)
+ if cmd is None:
+ LOG.error("There are no monitor cmd!")
+ return
+
+ queue.put("started")
+
+ begin_time = time.time()
+ while True:
+
+ total_count = total_count + 1
+
+ one_check_begin_time = time.time()
+ exit_status, stdout = _execute_shell_command(cmd)
+ one_check_end_time = time.time()
+
+ LOG.info("the exit_status:%s stdout:%s" % (exit_status, stdout))
+ if exit_status:
+ outage_count = outage_count + 1
+
+ outage_time = outage_time + (
+ one_check_end_time - one_check_begin_time)
+
+ if not first_outage:
+ first_outage = one_check_begin_time
+
+ last_outage = one_check_end_time
+
+ if event.is_set():
+ LOG.debug("the monitor process stop")
+ break
+
+ if wait_time > 0:
+ time.sleep(wait_time)
+
+ end_time = time.time()
+ total_time = end_time - begin_time
+
+ queue.put({"total_time": total_time,
+ "outage_time": last_outage-first_outage,
+ "total_count": total_count,
+ "outage_count": outage_count})
+
+
+class Monitor:
+
+ def __init__(self):
+ self._result = []
+ self._monitor_process = []
+
+ def setup(self, config):
+ self._config = config
+
+ def start(self):
+ self._queue = multiprocessing.Queue()
+ self._event = multiprocessing.Event()
+ self._monitor_process = multiprocessing.Process(
+ target=_monitor_process, name="Monitor",
+ args=(self._config, self._queue, self._event))
+
+ self._monitor_process.start()
+ ret = self._queue.get()
+ if ret == "started":
+ LOG.debug("monitor process started!")
+
+ def stop(self):
+ self._event.set()
+ self._result = self._queue.get()
+ LOG.debug("stop the monitor process. the result:%s" % self._result)
+
+ def get_result(self):
+ return self._result
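
A minimal usage sketch of the Monitor class, mirroring how ServiceHA.run() drives it; the monitor_cmd value matches ha_conf.yaml and is executed on the local machine via subprocess.

# Sketch (not in the commit): drive the Monitor around a fault injection.
from yardstick.benchmark.scenarios.availability import monitor

m = monitor.Monitor()
m.setup({"monitor_cmd": "nova image-list", "duration": 0})
m.start()              # spawns the polling process and waits for "started"
# ... inject the fault and wait for fault_time here ...
m.stop()               # signals the process and collects its summary
print(m.get_result())  # {'total_time': ..., 'outage_time': ..., 'total_count': ..., 'outage_count': ...}
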
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
new file mode 100755
index 000000000..3e03e1da5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -0,0 +1,193 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import time
+import yaml
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+from yardstick.benchmark.scenarios.availability import monitor
+
+LOG = logging.getLogger(__name__)
+
+
+class ServiceHA(base.Scenario):
+ """TODO: docstring of ServiceHA
+ """
+ __scenario_type__ = "ServiceHA"
+
+ HA_CONF = "ha_tools/ha_conf.yaml"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.service_name = scenario_cfg["options"]["component"]
+ self.fault_type = scenario_cfg["options"]["fault_type"]
+ self.fault_time = scenario_cfg["options"].get("fault_time", 0)
+ self.fault_cfg = None
+ self.setup_done = False
+ self.need_teardown = False
+
+ def setup(self):
+ '''scenario setup'''
+ self.ha_conf_file = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ ServiceHA.HA_CONF)
+ ha_cfg = []
+ with open(self.ha_conf_file) as stream:
+ ha_cfg = yaml.load(stream)
+ LOG.debug("ha_cfg content:%s" % ha_cfg)
+
+ # check the ha_conf contains the service defined in test cases yaml
+ service_cfg = ha_cfg.get(self.service_name, None)
+ if not service_cfg:
+ LOG.error(
+ "The component %s can not be supported!" % self.service_name)
+ return
+
+ for fault in service_cfg:
+ if fault["type"] == self.fault_type:
+ self.fault_cfg = fault
+ break
+ if not self.fault_cfg:
+ LOG.error(
+ "The fualt_type %s can not be supproted!" % self.fault_type)
+ return
+ LOG.debug("the fault_cfg :%s" % self.fault_cfg)
+
+ self.fault_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ self.fault_cfg["inject_script"])
+ self.recovery_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ self.fault_cfg["recovery_script"])
+ self.check_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ self.fault_cfg["check_script"])
+
+ host = self.context_cfg.get("host", None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+ LOG.info("The host: %s the service: %s" % (ip, self.service_name))
+ LOG.debug("The params, host:%s fault_cfg:%s" % (host, self.fault_cfg))
+
+ LOG.debug(
+ "ssh connection ip:%s, user:%s, key_file:%s",
+ ip, user, key_filename)
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+        # check the host environment
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0}".format(self.service_name),
+ stdin=open(self.check_script, "r"))
+ LOG.info(
+ "the exit_status:%s stdout:%s stderr:%s" %
+ (exit_status, stdout, stderr))
+ if exit_status:
+ raise RuntimeError(stderr)
+
+ if stdout and "running" in stdout:
+ LOG.info("check the envrioment success!")
+ else:
+ LOG.error(
+ "the host envrioment is error, stdout:%s, stderr:%s" %
+ (stdout, stderr))
+ return
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+ if not self.setup_done:
+ LOG.error("The setup not finished!")
+ return
+
+ monitorInstance = monitor.Monitor()
+ monitorInstance.setup(self.fault_cfg)
+ monitorInstance.start()
+ LOG.info("monitor start!")
+
+ LOG.info("Inject fault!")
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0}".format(self.service_name),
+ stdin=open(self.fault_script, "r"))
+
+ if exit_status != 0:
+ monitorInstance.stop()
+ raise RuntimeError(stderr)
+
+ self.need_teardown = True
+ time.sleep(self.fault_time)
+
+ monitorInstance.stop()
+ LOG.info("monitor stop!")
+
+ ret = monitorInstance.get_result()
+ LOG.info("The monitor result:%s" % ret)
+ outage_time = ret.get("outage_time")
+ result["outage_time"] = outage_time
+ LOG.info("the result:%s" % result)
+
+ if "sla" in self.scenario_cfg:
+ sla_outage_time = int(self.scenario_cfg["sla"]["outage_time"])
+ assert outage_time <= sla_outage_time, "outage_time %f > sla:outage_time(%f)" % \
+ (outage_time, sla_outage_time)
+
+ return
+
+ def teardown(self):
+ '''scenario teardown'''
+ LOG.info("recory the everiment!")
+
+ if self.need_teardown:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0} ".format(self.service_name),
+ stdin=open(self.recovery_script, "r"))
+
+ if exit_status:
+ raise RuntimeError(stderr)
+ else:
+ self.need_teardown = False
+
+"""
+def _test():
+ '''internal test function'''
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ ctx = {"host": host}
+
+ logger = logging.getLogger("yardstick")
+ logger.setLevel(logging.DEBUG)
+
+ options = {
+ "component": "nova-api",
+ "fault_type": "stop-service"
+ }
+ sla = {"outage_time": 5}
+ args = {"options": options, "sla": sla}
+
+ print "create instance"
+ terstInstance = ServiceHA(args, ctx)
+
+ terstInstance.setup()
+ result = {}
+ terstInstance.run(result)
+ print result
+
+ terstInstance.teardown()
+
+if __name__ == '__main__':
+ _test()
+"""