Diffstat (limited to 'tests/unit/benchmark/scenarios')
11 files changed, 170 insertions, 23 deletions
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_general.py b/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
index d6488a9a7..aa2e0cc4d 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
@@ -32,7 +32,8 @@ class GeneralAttackerServiceTestCase(unittest.TestCase):
             'fault_type': 'general-attacker',
             'action_parameter':{'process_name':'nova_api'},
             'rollback_parameter':{'process_name':'nova_api'},
-            'key':'stop_service',
+            'key':'stop-service',
+            'attack_key':'stop-service',
             'host': 'node1',
         }
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index 140841075..a20cf8187 100644
--- a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -23,7 +23,7 @@ class MonitorMgrTestCase(unittest.TestCase):
     def setUp(self):
         config = {
             'monitor_type': 'openstack-api',
-            'key' : 'service_status'
+            'key' : 'service-status'
         }
         self.monitor_configs = []
@@ -40,7 +40,7 @@ class MonitorMgrTestCase(unittest.TestCase):
     def test_MonitorMgr_getitem(self, mock_monitor):
         monitorMgr = basemonitor.MonitorMgr()
         monitorMgr.init_monitors(self.monitor_configs, None)
-        monitorIns = monitorMgr['service_status']
+        monitorIns = monitorMgr['service-status']
 
 
 class BaseMonitorTestCase(unittest.TestCase):
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseoperation.py b/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
index 8c341913f..d85f1e19f 100644
--- a/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
+++ b/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
@@ -22,7 +22,7 @@ class OperationMgrTestCase(unittest.TestCase):
     def setUp(self):
         config = {
             'operation_type': 'general-operation',
-            'key' : 'service_status'
+            'key' : 'service-status'
         }
         self.operation_configs = []
@@ -31,7 +31,7 @@
     def test_all_successful(self, mock_operation):
         mgr_ins = baseoperation.OperationMgr()
         mgr_ins.init_operations(self.operation_configs, None)
-        operation_ins = mgr_ins["service_status"]
+        operation_ins = mgr_ins["service-status"]
         mgr_ins.rollback()
 
     def test_getitem_fail(self, mock_operation):
@@ -59,7 +59,7 @@ class BaseOperationTestCase(unittest.TestCase):
     def setUp(self):
         self.config = {
             'operation_type': 'general-operation',
-            'key' : 'service_status'
+            'key' : 'service-status'
         }
 
     def test_all_successful(self):
diff --git a/tests/unit/benchmark/scenarios/availability/test_director.py b/tests/unit/benchmark/scenarios/availability/test_director.py
index 887ddd631..06116725d 100644
--- a/tests/unit/benchmark/scenarios/availability/test_director.py
+++ b/tests/unit/benchmark/scenarios/availability/test_director.py
@@ -33,16 +33,16 @@ class DirectorTestCase(unittest.TestCase):
                 'key': "kill-process"}],
             'monitors': [{
                 'monitor_type': "general-monitor",
-                'key': "service_status"}],
+                'key': "service-status"}],
             'operations': [{
                 'operation_type': 'general-operation',
-                'key' : 'service_status'}],
+                'key' : 'service-status'}],
             'resultCheckers': [{
                 'checker_type': 'general-result-checker',
                 'key' : 'process-checker',}],
             'steps':[
                 {
-                    'actionKey': "service_status",
+                    'actionKey': "service-status",
                     'actionType': "operation",
                     'index': 1},
                 {
@@ -54,7 +54,7 @@ class DirectorTestCase(unittest.TestCase):
                     'actionType': "resultchecker",
                     'index': 3},
                 {
-                    'actionKey': "service_status",
+                    'actionKey': "service-status",
                     'actionType': "monitor",
                     'index': 4},
             ]
@@ -69,12 +69,12 @@ class DirectorTestCase(unittest.TestCase):
     def test_director_all_successful(self, mock_checer, mock_opertion,
                                      mock_attacker, mock_monitor):
         ins = Director(self.scenario_cfg, self.ctx)
-        opertion_action = ins.createActionPlayer("operation", "service_status")
+        opertion_action = ins.createActionPlayer("operation", "service-status")
         attacker_action = ins.createActionPlayer("attacker", "kill-process")
         checker_action = ins.createActionPlayer("resultchecker", "process-checker")
-        monitor_action = ins.createActionPlayer("monitor", "service_status")
+        monitor_action = ins.createActionPlayer("monitor", "service-status")
 
-        opertion_rollback = ins.createActionRollbacker("operation", "service_status")
+        opertion_rollback = ins.createActionRollbacker("operation", "service-status")
         attacker_rollback = ins.createActionRollbacker("attacker", "kill-process")
         ins.executionSteps.append(opertion_rollback)
         ins.executionSteps.append(attacker_rollback)
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
index 85487a574..de7d26cbf 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
@@ -31,18 +31,20 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
         self.context = {"node1": host}
         self.monitor_cfg = {
             'monitor_type': 'general-monitor',
-            'key': 'service_status',
+            'key': 'service-status',
+            'monitor_key': 'service-status',
             'host': 'node1',
             'monitor_time': 3,
             'parameter': {'serviceName': 'haproxy'},
-            'sla': {'max_recover_time': 1}
+            'sla': {'max_outage_time': 1}
         }
         self.monitor_cfg_noparam = {
             'monitor_type': 'general-monitor',
-            'key': 'service_status',
+            'key': 'service-status',
+            'monitor_key': 'service-status',
             'host': 'node1',
             'monitor_time': 3,
-            'sla': {'max_recover_time': 1}
+            'sla': {'max_outage_time': 1}
         }
 
     def test__monitor_general_all_successful(self, mock_open, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/availability/test_operation_general.py b/tests/unit/benchmark/scenarios/availability/test_operation_general.py
index 6713733a8..26cd3f7c4 100644
--- a/tests/unit/benchmark/scenarios/availability/test_operation_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_operation_general.py
@@ -34,11 +34,13 @@ class GeneralOperaionTestCase(unittest.TestCase):
             'action_parameter': {'ins_cup': 2},
             'rollback_parameter': {'ins_id': 'id123456'},
             'key': 'nova-create-instance',
+            'operation_key': 'nova-create-instance',
             'host': 'node1',
         }
         self.operation_cfg_noparam = {
             'operation_type': 'general-operation',
             'key': 'nova-create-instance',
+            'operation_key': 'nova-create-instance',
             'host': 'node1',
         }
diff --git a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
index 88a9b9d20..bbadf0ac3 100644
--- a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
@@ -38,6 +38,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
             'condition' : 'eq',
             'expectedValue' : 1,
             'key' : 'process-checker',
+            'checker_key' : 'process-checker',
             'host': 'node1'
         }
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index c17edea45..bab9d62f1 100644
--- a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -29,14 +29,14 @@ class ScenarioGeneralTestCase(unittest.TestCase):
                 'key': "kill-process"}],
             'monitors': [{
                 'monitor_type': "general-monitor",
-                'key': "service_status"}],
+                'key': "service-status"}],
             'steps':[
                 {
                     'actionKey': "kill-process",
                     'actionType': "attacker",
                     'index': 1},
                 {
-                    'actionKey': "service_status",
+                    'actionKey': "service-status",
                     'actionType': "monitor",
                     'index': 2}]
         }
@@ -62,4 +62,4 @@ class ScenarioGeneralTestCase(unittest.TestCase):
         mock_obj.verify.return_value = False
         ins.director = mock_obj
         ins.run(None)
-        ins.teardown()
\ No newline at end of file
+        ins.teardown()
diff --git a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py b/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
index 5745b7ec9..660bb3391 100644
--- a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
+++ b/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
@@ -29,7 +29,7 @@ class ComputeCapacityTestCase(unittest.TestCase):
     def setUp(self):
         self.ctx = {
             'nodes': {
-                'host1': {
+                'host': {
                     'ip': '172.16.0.137',
                     'user': 'cirros',
                     'key_filename': "mykey.key",
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf_node.py b/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
new file mode 100755
index 000000000..1c39b292b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for
+# yardstick.benchmark.scenarios.networking.netperf_node.NetperfNode
+
+import mock
+import unittest
+import os
+import json
+
+from yardstick.benchmark.scenarios.networking import netperf_node
+
+
+@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
+class NetperfNodeTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': {
+                'ip': '192.168.10.10',
+                'user': 'root',
+                'password': 'root'
+            },
+            'target': {
+                'ip': '192.168.10.11',
+                'user': 'root',
+                'password': 'root'
+            }
+        }
+
+    def test_netperf_node_successful_setup(self, mock_ssh):
+
+        p = netperf_node.NetperfNode({}, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+
+        p.setup()
+        self.assertIsNotNone(p.server)
+        self.assertIsNotNone(p.client)
+        self.assertEqual(p.setup_done, True)
+
+    def test_netperf_node_successful_no_sla(self, mock_ssh):
+
+        options = {}
+        args = {'options': options}
+        result = {}
+
+        p = netperf_node.NetperfNode(args, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        sample_output = self._read_sample_output()
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        expected_result = json.loads(sample_output)
+        p.run(result)
+        self.assertEqual(result, expected_result)
+
+    def test_netperf_node_successful_sla(self, mock_ssh):
+
+        options = {}
+        args = {
+            'options': options,
+            'sla': {'mean_latency': 100}
+        }
+        result = {}
+
+        p = netperf_node.NetperfNode(args, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        sample_output = self._read_sample_output()
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        expected_result = json.loads(sample_output)
+        p.run(result)
+        self.assertEqual(result, expected_result)
+
+    def test_netperf_node_unsuccessful_sla(self, mock_ssh):
+
+        options = {}
+        args = {
+            'options': options,
+            'sla': {'mean_latency': 5}
+        }
+        result = {}
+
+        p = netperf_node.NetperfNode(args, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        sample_output = self._read_sample_output()
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, p.run, result)
+
+    def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
+
+        options = {}
+        args = {'options': options}
+        result = {}
+
+        p = netperf_node.NetperfNode(args, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, p.run, result)
+
+    def _read_sample_output(self):
+        curr_path = os.path.dirname(os.path.abspath(__file__))
+        output = os.path.join(curr_path, 'netperf_sample_output.json')
+        with open(output) as f:
+            sample_output = f.read()
+        return sample_output
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_sfc.py b/tests/unit/benchmark/scenarios/networking/test_sfc.py
index 2d7990e59..618efc32e 100644
--- a/tests/unit/benchmark/scenarios/networking/test_sfc.py
+++ b/tests/unit/benchmark/scenarios/networking/test_sfc.py
@@ -45,8 +45,22 @@ class SfcTestCase(unittest.TestCase):
     def test_run_for_success(self, mock_subprocess, mock_openstack, mock_ssh):
         # Mock a successfull SSH in Sfc.setup() and Sfc.run()
         mock_ssh.SSH().execute.return_value = (0, '100', '')
-        mock_openstack.return_value = "127.0.0.1"
-        mock_subprocess.return_value = 'mocked!'
+        mock_openstack.get_an_IP.return_value = "127.0.0.1"
+        mock_subprocess.call.return_value = 'mocked!'
+
+        result = {}
+        self.sfc.setup()
+        self.sfc.run(result)
+        self.sfc.teardown()
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.sfc.ssh')
+    @mock.patch('yardstick.benchmark.scenarios.networking.sfc.sfc_openstack')
+    @mock.patch('yardstick.benchmark.scenarios.networking.sfc.subprocess')
+    def test2_run_for_success(self, mock_subprocess, mock_openstack, mock_ssh):
+        # Mock a successfull SSH in Sfc.setup() and Sfc.run()
+        mock_ssh.SSH().execute.return_value = (0, 'vxlan_tool.py', 'succeeded timed out')
+        mock_openstack.get_an_IP.return_value = "127.0.0.1"
+        mock_subprocess.call.return_value = 'mocked!'
 
         result = {}
         self.sfc.setup()