Diffstat (limited to 'tests/unit/benchmark/scenarios/availability')
6 files changed, 300 insertions, 90 deletions
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
new file mode 100644
index 000000000..340f94cb0
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+    def test__fun_execute_shell_command_successful(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.return_value = (0, 'unittest')
+        exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+        self.assertEqual(exitcode, 0)
+
+    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.side_effect = RuntimeError
+        exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+        self.assertEqual(exitcode, -1)
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
+class AttackerBaremetalTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ipmi_ip": "10.20.0.5",
+            "ipmi_user": "root",
+            "ipmi_pwd": "123456",
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.attacker_cfg = {
+            'fault_type': 'bear-metal-down',
+            'host': 'node1',
+        }
+
+    def test__attacker_baremetal_all_successful(self, mock_ssh):
+
+        ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "running", '')
+        ins.setup()
+        ins.inject_fault()
+        ins.recover()
+
+    def test__attacker_baremetal_check_failuer(self, mock_ssh):
+
+        ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+        mock_ssh.SSH().execute.return_value = (0, "error check", '')
+        ins.setup()
+
+    def test__attacker_baremetal_recover_successful(self, mock_ssh):
+
+        self.attacker_cfg["jump_host"] = 'node1'
+        self.context["node1"]["pwd"] = "123456"
+        ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "running", '')
+        ins.setup()
+        ins.recover()
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
new file mode 100644
index 000000000..13295273b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.BaseMonitor')
+class MonitorMgrTestCase(unittest.TestCase):
+
+    def setUp(self):
+        config = {
+            'monitor_type': 'openstack-api',
+        }
+
+        self.monitor_configs = []
+        self.monitor_configs.append(config)
+
+    def test__MonitorMgr_setup_successful(self, mock_monitor):
+        instance = basemonitor.MonitorMgr()
+        instance.init_monitors(self.monitor_configs, None)
+        instance.start_monitors()
+        instance.wait_monitors()
+
+        ret = instance.verify_SLA()
+
+class BaseMonitorTestCase(unittest.TestCase):
+
+    class MonitorSimple(basemonitor.BaseMonitor):
+        __monitor_type__ = "MonitorForTest"
+        def setup(self):
+            self.monitor_result = False
+
+        def monitor_func(self):
+            return self.monitor_result
+
+    def setUp(self):
+        self.monitor_cfg = {
+            'monitor_type': 'MonitorForTest',
+            'command_name': 'nova image-list',
+            'monitor_time': 0.01,
+            'sla': {'max_outage_time': 5}
+        }
+
+    def test__basemonitor_start_wait_successful(self):
+        ins = basemonitor.BaseMonitor(self.monitor_cfg, None)
+        ins.start_monitor()
+        ins.wait_monitor()
+
+
+    def test__basemonitor_all_successful(self):
+        ins = self.MonitorSimple(self.monitor_cfg, None)
+        ins.setup()
+        ins.run()
+        ins.verify_SLA()
+
+    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.multiprocessing')
+    def test__basemonitor_func_false(self, mock_multiprocess):
+        ins = self.MonitorSimple(self.monitor_cfg, None)
+        ins.setup()
+        mock_multiprocess.Event().is_set.return_value = False
+        ins.run()
+        ins.verify_SLA()
+
+    def test__basemonitor_getmonitorcls_successfule(self):
+        cls = None
+        try:
+            cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
+        except Exception:
+            pass
+        self.assertIsNone(cls)
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor.py b/tests/unit/benchmark/scenarios/availability/test_monitor.py
deleted file mode 100644
index 793871ca3..000000000
--- a/tests/unit/benchmark/scenarios/availability/test_monitor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Unittest for yardstick.benchmark.scenarios.availability.monitor
-
-import mock
-import unittest
-
-from yardstick.benchmark.scenarios.availability import monitor
-
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.subprocess')
-class MonitorTestCase(unittest.TestCase):
-
-    def test__fun_execute_shell_command_successful(self, mock_subprocess):
-        cmd = "env"
-        mock_subprocess.check_output.return_value = (0, 'unittest')
-        exitcode, output = monitor._execute_shell_command(cmd)
-        self.assertEqual(exitcode, 0)
-
-    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
-        cmd = "env"
-        mock_subprocess.check_output.side_effect = RuntimeError
-        exitcode, output = monitor._execute_shell_command(cmd)
-        self.assertEqual(exitcode, -1)
-
-    def test__fun_monitor_process_successful(self, mock_subprocess):
-        config = {
-            'monitor_cmd':'env',
-            'duration':0
-        }
-        mock_queue = mock.Mock()
-        mock_event = mock.Mock()
-
-        mock_subprocess.check_output.return_value = (0, 'unittest')
-        monitor._monitor_process(config, mock_queue, mock_event)
-
-    def test__fun_monitor_process_fail_cmd_execute_error(self, mock_subprocess):
-        config = {
-            'monitor_cmd':'env',
-            'duration':0
-        }
-        mock_queue = mock.Mock()
-        mock_event = mock.Mock()
-
-        mock_subprocess.check_output.side_effect = RuntimeError
-        monitor._monitor_process(config, mock_queue, mock_event)
-
-    def test__fun_monitor_process_fail_no_monitor_cmd(self, mock_subprocess):
-        config = {
-            'duration':0
-        }
-        mock_queue = mock.Mock()
-        mock_event = mock.Mock()
-
-        mock_subprocess.check_output.return_value = (-1, 'unittest')
-        monitor._monitor_process(config, mock_queue, mock_event)
-
-    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.multiprocessing')
-    def test_monitor_all_successful(self, mock_multip, mock_subprocess):
-        config = {
-            'monitor_cmd':'env',
-            'duration':0
-        }
-        p = monitor.Monitor()
-        p.setup(config)
-        mock_multip.Queue().get.return_value = 'started'
-        p.start()
-
-        result = "monitor unitest"
-        mock_multip.Queue().get.return_value = result
-        p.stop()
-
-        ret = p.get_result()
-
-        self.assertEqual(result, ret)
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
new file mode 100644
index 000000000..c8cda7dc7
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_command
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+    def test__fun_execute_shell_command_successful(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.return_value = (0, 'unittest')
+        exitcode, output = monitor_command._execute_shell_command(cmd)
+        self.assertEqual(exitcode, 0)
+
+    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.side_effect = RuntimeError
+        exitcode, output = monitor_command._execute_shell_command(cmd)
+        self.assertEqual(exitcode, -1)
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class MonitorOpenstackCmdTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.config = {
+            'monitor_type': 'openstack-api',
+            'command_name': 'nova image-list',
+            'monitor_time': 1,
+            'sla': {'max_outage_time': 5}
+        }
+
+
+    def test__monitor_command_monitor_func_successful(self, mock_subprocess):
+
+        instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+        instance.setup()
+        mock_subprocess.check_output.return_value = (0, 'unittest')
+        ret = instance.monitor_func()
+        self.assertEqual(ret, True)
+        instance._result = {"outage_time": 0}
+        instance.verify_SLA()
+
+    def test__monitor_command_monitor_func_failure(self, mock_subprocess):
+        mock_subprocess.check_output.return_value = (1, 'unittest')
+        instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+        instance.setup()
+        mock_subprocess.check_output.side_effect = RuntimeError
+        ret = instance.monitor_func()
+        self.assertEqual(ret, False)
+        instance._result = {"outage_time": 10}
+        instance.verify_SLA()
+
+    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh')
+    def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess):
+
+        self.config["host"] = "node1"
+        instance = monitor_command.MonitorOpenstackCmd(self.config, self.context)
+        instance.setup()
+        mock_ssh.SSH().execute.return_value = (0, "0", '')
+        ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
new file mode 100644
index 000000000..dda104b4e
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_process
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_process
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh')
+class MonitorProcessTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.monitor_cfg = {
+            'monitor_type': 'process',
+            'process_name': 'nova-api',
+            'host': "node1",
+            'monitor_time': 1,
+            'sla': {'max_recover_time': 5}
+        }
+
+    def test__monitor_process_all_successful(self, mock_ssh):
+
+        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "1", '')
+        ins.setup()
+        ins.monitor_func()
+        ins._result = {"outage_time": 0}
+        ins.verify_SLA()
+
+    def test__monitor_process_down_failuer(self, mock_ssh):
+
+        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "0", '')
+        ins.setup()
+        ins.monitor_func()
+        ins._result = {"outage_time": 10}
+        ins.verify_SLA()
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 32adf3208..6e58b6e7a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -16,7 +16,7 @@ import unittest
 
 from yardstick.benchmark.scenarios.availability import serviceha
 
-@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
 @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
 class ServicehaTestCase(unittest.TestCase):
 
@@ -53,15 +53,11 @@ class ServicehaTestCase(unittest.TestCase):
         p.setup()
         self.assertEqual(p.setup_done, True)
-
-        result = {}
-        result["outage_time"] = 0
-        mock_monitor.Monitor().get_result.return_value = result
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
 
         ret = {}
         p.run(ret)
-        self.assertEqual(ret, result)
 
         p.teardown()
-
+"""
    def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
        p = serviceha.ServiceHA(self.args, self.ctx)
@@ -74,3 +70,4 @@ class ServicehaTestCase(unittest.TestCase):
 
         ret = {}
         self.assertRaises(AssertionError, p.run, ret)
+"""
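For orientation, the new basemonitor tests above all drive the same monitor lifecycle: build a list of monitor configs, hand them to MonitorMgr, start and wait for the monitor processes, then check the SLA. The sketch below is a minimal illustration of that call sequence, assuming the MonitorMgr API exactly as exercised in MonitorMgrTestCase (init_monitors/start_monitors/wait_monitors/verify_SLA); the config values are placeholders copied from the test fixtures, not a recommended setup, and the snippet is not part of this change.

# Minimal sketch of the monitor lifecycle exercised by the new unit tests.
# Assumes the MonitorMgr/BaseMonitor interface as used in test_basemonitor.py;
# config values mirror the test fixtures and are illustrative only.
from yardstick.benchmark.scenarios.availability.monitor import basemonitor

monitor_configs = [{
    'monitor_type': 'openstack-api',    # resolved to a monitor class via BaseMonitor.get_monitor_cls()
    'command_name': 'nova image-list',  # command polled while a fault is injected
    'monitor_time': 1,                  # seconds to keep polling
    'sla': {'max_outage_time': 5},
}]

mgr = basemonitor.MonitorMgr()
mgr.init_monitors(monitor_configs, None)  # None: no node context, as in the unit test
mgr.start_monitors()                      # each monitor runs in its own process
mgr.wait_monitors()
sla_pass = mgr.verify_SLA()               # aggregated SLA verdict across all monitors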