Diffstat (limited to 'tests/unit')
22 files changed, 1231 insertions, 283 deletions
diff --git a/tests/unit/benchmark/contexts/test_dummy.py b/tests/unit/benchmark/contexts/test_dummy.py
new file mode 100644
index 000000000..5214e6630
--- /dev/null
+++ b/tests/unit/benchmark/contexts/test_dummy.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.contexts.dummy
+
+import unittest
+
+from yardstick.benchmark.contexts import dummy
+
+
+class DummyContextTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.test_context = dummy.DummyContext()
+
+    def test__get_server(self):
+        self.test_context.init(None)
+        self.test_context.deploy()
+
+        result = self.test_context._get_server(None)
+        self.assertEqual(result, None)
+
+        self.test_context.undeploy()
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
new file mode 100644
index 000000000..340f94cb0
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+    def test__fun_execute_shell_command_successful(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.return_value = (0, 'unittest')
+        exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+        self.assertEqual(exitcode, 0)
+
+    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.side_effect = RuntimeError
+        exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+        self.assertEqual(exitcode, -1)
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
+class AttackerBaremetalTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ipmi_ip": "10.20.0.5",
+            "ipmi_user": "root",
+            "ipmi_pwd": "123456",
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.attacker_cfg = {
+            'fault_type': 'bear-metal-down',
+            'host': 'node1',
+        }
+
+    def test__attacker_baremetal_all_successful(self, mock_ssh):
+
+        ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "running", '')
+        ins.setup()
+        ins.inject_fault()
+        ins.recover()
+
+    def test__attacker_baremetal_check_failure(self, mock_ssh):
+
+        ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+        mock_ssh.SSH().execute.return_value = (0, "error check", '')
+        ins.setup()
+
+    def test__attacker_baremetal_recover_successful(self, mock_ssh):
+
+        self.attacker_cfg["jump_host"] = 'node1'
+        self.context["node1"]["pwd"] = "123456"
+        ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "running", '')
+        ins.setup()
+        ins.recover()
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
new file mode 100644
index 000000000..eb0cce70d
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_process
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_process.ssh')
+class AttackerServiceTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.attacker_cfg = {
+            'fault_type': 'kill-process',
+            'process_name': 'nova-api',
+            'host': 'node1',
+        }
+
+    def test__attacker_service_all_successful(self, mock_ssh):
+
+        cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
+        ins = cls(self.attacker_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "running", '')
+        ins.setup()
+        ins.inject_fault()
+        ins.recover()
+
+    def test__attacker_service_check_failure(self, mock_ssh):
+
+        cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
+        ins = cls(self.attacker_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "error check", '')
+        ins.setup()
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
new file mode 100644
index 000000000..13295273b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.basemonitor
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.BaseMonitor')
+class MonitorMgrTestCase(unittest.TestCase):
+
+    def setUp(self):
+        config = {
+            'monitor_type': 'openstack-api',
+        }
+
+        self.monitor_configs = []
+        self.monitor_configs.append(config)
+
+    def test__MonitorMgr_setup_successful(self, mock_monitor):
+        instance = basemonitor.MonitorMgr()
+        instance.init_monitors(self.monitor_configs, None)
+        instance.start_monitors()
+        instance.wait_monitors()
+
+        ret = instance.verify_SLA()
+
+class BaseMonitorTestCase(unittest.TestCase):
+
+    class MonitorSimple(basemonitor.BaseMonitor):
+        __monitor_type__ = "MonitorForTest"
+
+        def setup(self):
+            self.monitor_result = False
+
+        def monitor_func(self):
+            return self.monitor_result
+
+    def setUp(self):
+        self.monitor_cfg = {
+            'monitor_type': 'MonitorForTest',
+            'command_name': 'nova image-list',
+            'monitor_time': 0.01,
+            'sla': {'max_outage_time': 5}
+        }
+
+    def test__basemonitor_start_wait_successful(self):
+        ins = basemonitor.BaseMonitor(self.monitor_cfg, None)
+        ins.start_monitor()
+        ins.wait_monitor()
+
+    def test__basemonitor_all_successful(self):
+        ins = self.MonitorSimple(self.monitor_cfg, None)
+        ins.setup()
+        ins.run()
+        ins.verify_SLA()
+
+    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.multiprocessing')
+    def test__basemonitor_func_false(self, mock_multiprocess):
+        ins = self.MonitorSimple(self.monitor_cfg, None)
+        ins.setup()
+        mock_multiprocess.Event().is_set.return_value = False
+        ins.run()
+        ins.verify_SLA()
+
+    def test__basemonitor_getmonitorcls_successful(self):
+        cls = None
+        try:
+            cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
+        except Exception:
+            pass
+        self.assertIsNone(cls)
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor.py b/tests/unit/benchmark/scenarios/availability/test_monitor.py
deleted file mode 100644
index 793871ca3..000000000
--- a/tests/unit/benchmark/scenarios/availability/test_monitor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Unittest for yardstick.benchmark.scenarios.availability.monitor
-
-import mock
-import unittest
-
-from yardstick.benchmark.scenarios.availability import monitor
-
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.subprocess')
-class MonitorTestCase(unittest.TestCase):
-
-    def test__fun_execute_shell_command_successful(self, mock_subprocess):
-        cmd = "env"
-        mock_subprocess.check_output.return_value = (0, 'unittest')
-        exitcode, output = monitor._execute_shell_command(cmd)
-        self.assertEqual(exitcode, 0)
-
-    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
-        cmd = "env"
-        mock_subprocess.check_output.side_effect = RuntimeError
-        exitcode, output = monitor._execute_shell_command(cmd)
-        self.assertEqual(exitcode, -1)
-
-    def test__fun_monitor_process_successful(self, mock_subprocess):
-        config = {
-            'monitor_cmd':'env',
-            'duration':0
-        }
-        mock_queue = mock.Mock()
-        mock_event = mock.Mock()
-
-        mock_subprocess.check_output.return_value = (0, 'unittest')
-        monitor._monitor_process(config, mock_queue, mock_event)
-
-    def test__fun_monitor_process_fail_cmd_execute_error(self, mock_subprocess):
-        config = {
-            'monitor_cmd':'env',
-            'duration':0
-        }
-        mock_queue = mock.Mock()
-        mock_event = mock.Mock()
-
-        mock_subprocess.check_output.side_effect = RuntimeError
-        monitor._monitor_process(config, mock_queue, mock_event)
-
-    def test__fun_monitor_process_fail_no_monitor_cmd(self, mock_subprocess):
-        config = {
-            'duration':0
-        }
-        mock_queue = mock.Mock()
-        mock_event = mock.Mock()
-
-        mock_subprocess.check_output.return_value = (-1, 'unittest')
-        monitor._monitor_process(config, mock_queue, mock_event)
-
-    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.multiprocessing')
-    def test_monitor_all_successful(self, mock_multip, mock_subprocess):
-        config = {
-            'monitor_cmd':'env',
-            'duration':0
-        }
-        p = monitor.Monitor()
-        p.setup(config)
-        mock_multip.Queue().get.return_value = 'started'
-        p.start()
-
-        result = "monitor unitest"
-        mock_multip.Queue().get.return_value = result
-        p.stop()
-
-        ret = p.get_result()
-
-        self.assertEqual(result, ret)
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
new file mode 100644
index 000000000..c8cda7dc7
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_command
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+    def test__fun_execute_shell_command_successful(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.return_value = (0, 'unittest')
+        exitcode, output = monitor_command._execute_shell_command(cmd)
+        self.assertEqual(exitcode, 0)
+
+    def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+        cmd = "env"
+        mock_subprocess.check_output.side_effect = RuntimeError
+        exitcode, output = monitor_command._execute_shell_command(cmd)
+        self.assertEqual(exitcode, -1)
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class MonitorOpenstackCmdTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.config = {
+            'monitor_type': 'openstack-api',
+            'command_name': 'nova image-list',
+            'monitor_time': 1,
+            'sla': {'max_outage_time': 5}
+        }
+
+
+    def test__monitor_command_monitor_func_successful(self, mock_subprocess):
+
+        instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+        instance.setup()
+        mock_subprocess.check_output.return_value = (0, 'unittest')
+        ret = instance.monitor_func()
+        self.assertEqual(ret, True)
+        instance._result = {"outage_time": 0}
+        instance.verify_SLA()
+
+    def test__monitor_command_monitor_func_failure(self, mock_subprocess):
+        mock_subprocess.check_output.return_value = (1, 'unittest')
+        instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+        instance.setup()
+        mock_subprocess.check_output.side_effect = RuntimeError
+        ret = instance.monitor_func()
+        self.assertEqual(ret, False)
+        instance._result = {"outage_time": 10}
+        instance.verify_SLA()
+
+    @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh')
+    def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess):
+
+        self.config["host"] = "node1"
+        instance = monitor_command.MonitorOpenstackCmd(self.config, self.context)
+        instance.setup()
+        mock_ssh.SSH().execute.return_value = (0, "0", '')
+        ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
new file mode 100644
index 000000000..dda104b4e
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_process
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_process
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh')
+class MonitorProcessTestCase(unittest.TestCase):
+
+    def setUp(self):
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
+        }
+        self.context = {"node1": host}
+        self.monitor_cfg = {
+            'monitor_type': 'process',
+            'process_name': 'nova-api',
+            'host': "node1",
+            'monitor_time': 1,
+            'sla': {'max_recover_time': 5}
+        }
+
+    def test__monitor_process_all_successful(self, mock_ssh):
+
+        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "1", '')
+        ins.setup()
+        ins.monitor_func()
+        ins._result = {"outage_time": 0}
+        ins.verify_SLA()
+
+    def test__monitor_process_down_failure(self, mock_ssh):
+
+        ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+        mock_ssh.SSH().execute.return_value = (0, "0", '')
+        ins.setup()
+        ins.monitor_func()
+        ins._result = {"outage_time": 10}
+        ins.verify_SLA()
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 861bacdc9..6e58b6e7a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -16,138 +16,58 @@ import unittest
 
 from yardstick.benchmark.scenarios.availability import serviceha
 
-@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
 class ServicehaTestCase(unittest.TestCase):
 
     def setUp(self):
-        self.args = {
-            'options':{
-                'component':'nova-api',
-                'fault_type':'stop-service',
-                'fault_time':0
-            },
-            'sla':{
-                'outage_time':'2'
-            }
+        host = {
+            "ip": "10.20.0.5",
+            "user": "root",
+            "key_filename": "/root/.ssh/id_rsa"
         }
-        self.ctx = {
-            'host': {
-                'ip': '10.20.0.3',
-                'user': 'cirros',
-                'key_filename': 'mykey.key'
-            }
+        self.ctx = {"nodes": {"node1": host}}
+        attacker_cfg = {
+            "fault_type": "kill-process",
+            "process_name": "nova-api",
+            "host": "node1"
         }
+        attacker_cfgs = []
+        attacker_cfgs.append(attacker_cfg)
+        monitor_cfg = {
+            "monitor_cmd": "nova image-list",
+            "monitor_time": 0.1
+        }
+        monitor_cfgs = []
+        monitor_cfgs.append(monitor_cfg)
 
-    def test__serviceha_setup_successful(self, mock_ssh):
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
-        p.setup()
-
-        self.assertEqual(p.setup_done, True)
-
-    def test__serviceha_setup_fail_service(self, mock_ssh):
-
-        self.args['options']['component'] = 'error'
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
-        p.setup()
-
-        self.assertEqual(p.setup_done, False)
-
-    def test__serviceha_setup_fail_fault_type(self, mock_ssh):
-
-        self.args['options']['fault_type'] = 'error'
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
-        p.setup()
-
-        self.assertEqual(p.setup_done, False)
-
-    def test__serviceha_setup_fail_check(self, mock_ssh):
-
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'error', '')
-        p.setup()
-
-        self.assertEqual(p.setup_done, False)
-
-    def test__serviceha_setup_fail_script(self, mock_ssh):
+        options = {
+            "attackers": attacker_cfgs,
+            "monitors": monitor_cfgs
+        }
+        sla = {"outage_time": 5}
+        self.args = {"options": options, "sla": sla}
 
+    def test__serviceha_setup_run_successful(self, mock_attacker, mock_monitor):
         p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (-1, 'false', '')
-
-        self.assertRaises(RuntimeError, p.setup)
-        self.assertEqual(p.setup_done, False)
-
-    @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
-    def test__serviceha_run_successful(self, mock_monitor, mock_ssh):
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
         p.setup()
-
-        monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0}
-        mock_monitor.Monitor().get_result.return_value = monitor_result
-
-        p.connection = mock_ssh.SSH()
-        mock_ssh.SSH().execute.return_value = (0, 'success', '')
-
-        result = {}
-        p.run(result)
-        self.assertEqual(result,{ 'outage_time': 0})
-
-    def test__serviceha_run_fail_nosetup(self, mock_ssh):
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        p.run(None)
-
-    @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
-    def test__serviceha_run_fail_script(self, mock_monitor, mock_ssh):
+        self.assertEqual(p.setup_done, True)
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+        ret = {}
+        p.run(ret)
+        p.teardown()
+"""
+    def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
         p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
-        p.setup()
-
-        monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0}
-        mock_monitor.Monitor().get_result.return_value = monitor_result
-        p.connection = mock_ssh.SSH()
-        mock_ssh.SSH().execute.return_value = (-1, 'error', '')
-
-        result = {}
-        self.assertRaises(RuntimeError, p.run, result)
-
-    @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
-    def test__serviceha_run_fail_sla(self, mock_monitor, mock_ssh):
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
         p.setup()
-
-        monitor_result = {'total_time': 10, 'outage_time': 5, 'total_count': 16, 'outage_count': 0}
-        mock_monitor.Monitor().get_result.return_value = monitor_result
-
-        p.connection = mock_ssh.SSH()
-        mock_ssh.SSH().execute.return_value = (0, 'success', '')
+        self.assertEqual(p.setup_done, True)
 
         result = {}
-        self.assertRaises(AssertionError, p.run, result)
-
-    def test__serviceha_teardown_successful(self, mock_ssh):
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
-        p.setup()
-        p.need_teardown = True
-
-        mock_ssh.SSH().execute.return_value = (0, 'success', '')
-        p.teardown()
-
-        self.assertEqual(p.need_teardown, False)
-
-    def test__serviceha_teardown_fail_script(self, mock_ssh):
-        p = serviceha.ServiceHA(self.args, self.ctx)
-        mock_ssh.SSH().execute.return_value = (0, 'running', '')
-        p.setup()
-        p.need_teardown = True
-
-        mock_ssh.SSH().execute.return_value = (-1, 'false', '')
-
-        self.assertRaises(RuntimeError, p.teardown)
+        result["outage_time"] = 10
+        mock_monitor.Monitor().get_result.return_value = result
+        ret = {}
+        self.assertRaises(AssertionError, p.run, ret)
+"""
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index a87b39142..807429025 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -22,41 +22,65 @@ from yardstick.benchmark.scenarios.compute import cyclictest
 class CyclictestTestCase(unittest.TestCase):
 
     def setUp(self):
-        self.ctx = {
+        self.scenario_cfg = {
+            "host": "kvm.LF",
+            "setup_options": {
+                "rpm_dir": "/opt/rpm",
+                "host_setup_seqs": [
+                    "host-setup0.sh",
+                    "host-setup1.sh",
+                    "host-run-qemu.sh"
+                ],
+                "script_dir": "/opt/scripts",
+                "image_dir": "/opt/image",
+                "guest_setup_seqs": [
+                    "guest-setup0.sh",
+                    "guest-setup1.sh"
+                ]
+            },
+            "sla": {
+                "action": "monitor",
+                "max_min_latency": 50,
+                "max_avg_latency": 100,
+                "max_max_latency": 1000
+            },
+            "options": {
+                "priority": 99,
+                "threads": 1,
+                "loops": 1000,
+                "affinity": 1,
+                "interval": 1000,
+                "histogram": 90
+            }
+        }
+        self.context_cfg = {
             "host": {
-                "ip": "192.168.50.28",
-                "user": "root",
-                "key_filename": "mykey.key"
+                "ip": "10.229.43.154",
+                "key_filename": "/yardstick/resources/files/yardstick_key",
+                "role": "BareMetal",
+                "name": "kvm.LF",
+                "user": "root"
             }
         }
 
     def test_cyclictest_successful_setup(self, mock_ssh):
-        c = cyclictest.Cyclictest({}, self.ctx)
-        c.setup()
-
+        c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
         mock_ssh.SSH().execute.return_value = (0, '', '')
-        self.assertIsNotNone(c.client)
+
+        c.setup()
+        self.assertIsNotNone(c.guest)
+        self.assertIsNotNone(c.host)
         self.assertEqual(c.setup_done, True)
 
     def test_cyclictest_successful_no_sla(self, mock_ssh):
-
-        options = {
-            "affinity": 2,
-            "interval": 100,
-            "priority": 88,
-            "loops": 10000,
-            "threads": 2,
-            "histogram": 80
-        }
-        args = {
-            "options": options,
-        }
-        c = cyclictest.Cyclictest(args, self.ctx)
         result = {}
+        self.scenario_cfg.pop("sla", None)
+        c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+
         c.setup()
-        c.server = mock_ssh.SSH()
-
+        c.guest = mock_ssh.SSH()
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -65,29 +89,19 @@ class CyclictestTestCase(unittest.TestCase):
         self.assertEqual(result, expected_result)
 
     def test_cyclictest_successful_sla(self, mock_ssh):
-
-        options = {
-            "affinity": 2,
-            "interval": 100,
-            "priority": 88,
-            "loops": 10000,
-            "threads": 2,
-            "histogram": 80
-        }
-        sla = {
-            "max_min_latency": 100,
-            "max_avg_latency": 500,
-            "max_max_latency": 1000,
-        }
-        args = {
-            "options": options,
-            "sla": sla
-        }
-        c = cyclictest.Cyclictest(args, self.ctx)
         result = {}
+        self.scenario_cfg.update({"sla": {
+            "action": "monitor",
+            "max_min_latency": 100,
+            "max_avg_latency": 500,
+            "max_max_latency": 1000
+            }
+        })
+        c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+
         c.setup()
-        c.server = mock_ssh.SSH()
-
+        c.guest = mock_ssh.SSH()
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -97,14 +111,13 @@ class CyclictestTestCase(unittest.TestCase):
 
     def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
 
-        args = {
-            "options": {},
{"max_min_latency": 10} - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_min_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -112,14 +125,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh): - args = { - "options": {}, - "sla": {"max_avg_latency": 10} - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_avg_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -127,14 +139,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh): - args = { - "options": {}, - "sla": {"max_max_latency": 10} - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_max_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() sample_output = '{"min": 100, "avg": 500, "max": 1000}' mock_ssh.SSH().execute.return_value = (0, sample_output, '') @@ -142,27 +153,13 @@ class CyclictestTestCase(unittest.TestCase): def test_cyclictest_unsuccessful_script_error(self, mock_ssh): - options = { - "affinity": 2, - "interval": 100, - "priority": 88, - "loops": 10000, - "threads": 2, - "histogram": 80 - } - sla = { - "max_min_latency": 100, - "max_avg_latency": 500, - "max_max_latency": 1000, - } - args = { - "options": options, - "sla": sla - } - c = cyclictest.Cyclictest(args, self.ctx) result = {} + self.scenario_cfg.update({"sla": {"max_max_latency": 10}}) + c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg) + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() - c.server = mock_ssh.SSH() + c.guest = mock_ssh.SSH() mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR') self.assertRaises(RuntimeError, c.run, result) diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/tests/unit/benchmark/scenarios/compute/test_unixbench.py new file mode 100644 index 000000000..0935bcad2 --- /dev/null +++ b/tests/unit/benchmark/scenarios/compute/test_unixbench.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other. +# +# All rights reserved. 
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.compute import unixbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
+class UnixbenchTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            "host": {
+                "ip": "192.168.50.28",
+                "user": "root",
+                "key_filename": "mykey.key"
+            }
+        }
+
+    def test_unixbench_successful_setup(self, mock_ssh):
+
+        u = unixbench.Unixbench({}, self.ctx)
+        u.setup()
+
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        self.assertIsNotNone(u.client)
+        self.assertEqual(u.setup_done, True)
+
+    def test_unixbench_successful_no_sla(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'verbose'
+        }
+        args = {
+            "options": options,
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.server = mock_ssh.SSH()
+
+        sample_output = '{"Score":"4425.4"}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+        u.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+    def test_unixbench_successful_in_quiet_mode(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'quiet',
+            "copies": 1
+        }
+        args = {
+            "options": options,
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.server = mock_ssh.SSH()
+
+        sample_output = '{"Score":"4425.4"}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+        u.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+
+    def test_unixbench_successful_sla(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'verbose'
+        }
+        sla = {
+            "single_score": '100',
+            "parallel_score": '500'
+        }
+        args = {
+            "options": options,
+            "sla": sla
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.server = mock_ssh.SSH()
+
+        sample_output = '{"single_score":"2251.7","parallel_score":"4395.9"}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+        u.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+    def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh):
+
+        args = {
+            "options": {},
+            "sla": {"single_score": "500"}
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.server = mock_ssh.SSH()
+        sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
+
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, u.run, result)
+
+    def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
+
+        args = {
+            "options": {},
+            "sla": {"parallel_score": "4000"}
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.server = mock_ssh.SSH()
+        sample_output = '{"single_score":"2251.7","parallel_score":"3395.9"}'
+
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, u.run, result)
+
+    def test_unixbench_unsuccessful_script_error(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'verbose'
+        }
+        sla = {
+            "single_score": '100',
+            "parallel_score": '500'
+        }
+        args = {
+            "options": options,
"sla": sla + } + u = unixbench.Unixbench(args, self.ctx) + result = {} + + u.server = mock_ssh.SSH() + + mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR') + self.assertRaises(RuntimeError, u.run, result) + + +def main(): + unittest.main() + +if __name__ == '__main__': + main() diff --git a/tests/unit/benchmark/scenarios/dummy/__init__.py b/tests/unit/benchmark/scenarios/dummy/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/tests/unit/benchmark/scenarios/dummy/__init__.py diff --git a/tests/unit/benchmark/scenarios/dummy/test_dummy.py b/tests/unit/benchmark/scenarios/dummy/test_dummy.py new file mode 100644 index 000000000..1f9b729a9 --- /dev/null +++ b/tests/unit/benchmark/scenarios/dummy/test_dummy.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.dummy.dummy + +import unittest + +from yardstick.benchmark.scenarios.dummy import dummy + + +class DummyTestCase(unittest.TestCase): + + def setUp(self): + self.test_context = dummy.Dummy(None, None) + + self.assertIsNone(self.test_context.scenario_cfg) + self.assertIsNone(self.test_context.context_cfg) + self.assertEqual(self.test_context.setup_done, False) + + def test_run(self): + result = {} + self.test_context.run(result) + + self.assertEqual(result["hello"], "yardstick") + self.assertEqual(self.test_context.setup_done, True) diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/tests/unit/benchmark/scenarios/networking/test_ping6.py new file mode 100644 index 000000000..662b85c30 --- /dev/null +++ b/tests/unit/benchmark/scenarios/networking/test_ping6.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# +# All rights reserved. 
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.ping6.Ping6
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import ping6
+
+
+class PingTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': {
+                'ip': '172.16.0.137',
+                'user': 'cirros',
+                'key_filename': "mykey.key",
+                'password': "root"
+            },
+        }
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_setup(self, mock_ssh):
+
+        p = ping6.Ping6({}, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '0', '')
+        p.setup()
+
+        self.assertEqual(p.setup_done, True)
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_no_sla(self, mock_ssh):
+
+        result = {}
+
+        p = ping6.Ping6({}, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (0, '100', '')
+        p.run(result)
+        self.assertEqual(result, {'rtt': 100.0})
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_sla(self, mock_ssh):
+
+        args = {
+            'sla': {'max_rtt': 150}
+        }
+        result = {}
+
+        p = ping6.Ping6(args, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (0, '100', '')
+        p.run(result)
+        self.assertEqual(result, {'rtt': 100.0})
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_unsuccessful_sla(self, mock_ssh):
+
+        args = {
+            'options': {'packetsize': 200},
+            'sla': {'max_rtt': 50}
+        }
+        result = {}
+
+        p = ping6.Ping6(args, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (0, '100', '')
+        self.assertRaises(AssertionError, p.run, result)
+
+    @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_unsuccessful_script_error(self, mock_ssh):
+
+        args = {
+            'options': {'packetsize': 200},
+            'sla': {'max_rtt': 50}
+        }
+        result = {}
+
+        p = ping6.Ping6(args, self.ctx)
+        p.client = mock_ssh.SSH()
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, p.run, result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
new file mode 100644
index 000000000..418dd39e6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation
+
+
+class VtcInstantiationValidationTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+
+        self.vt = vtc_instantiation_validation.VtcInstantiationValidation(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
new file mode 100644
index 000000000..e0a46241c
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation_noisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation_noisy
+
+
+class VtcInstantiationValidationNoisyTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+        scenario['options']['num_of_neighbours'] = '1'
+        scenario['options']['amount_of_ram'] = '1G'
+        scenario['options']['number_of_cores'] = '1'
+
+        self.vt = vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
new file mode 100644
index 000000000..ecdf555d2
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput
+
+
+class VtcThroughputTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+
+        self.vt = vtc_throughput.VtcThroughput(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
new file mode 100644
index 000000000..98957b1de
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput_noisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput_noisy
+
+
+class VtcThroughputNoisyTestCase(unittest.TestCase):
+
+    def setUp(self):
+        scenario = dict()
+        scenario['options'] = dict()
+        scenario['options']['default_net_name'] = ''
+        scenario['options']['default_subnet_name'] = ''
+        scenario['options']['vlan_net_1_name'] = ''
+        scenario['options']['vlan_subnet_1_name'] = ''
+        scenario['options']['vlan_net_2_name'] = ''
+        scenario['options']['vlan_subnet_2_name'] = ''
+        scenario['options']['vnic_type'] = ''
+        scenario['options']['vtc_flavor'] = ''
+        scenario['options']['packet_size'] = ''
+        scenario['options']['vlan_sender'] = ''
+        scenario['options']['vlan_receiver'] = ''
+        scenario['options']['num_of_neighbours'] = '1'
+        scenario['options']['amount_of_ram'] = '1G'
+        scenario['options']['number_of_cores'] = '1'
+
+        self.vt = vtc_throughput_noisy.VtcThroughputNoisy(scenario, '')
+
+    def test_run_for_success(self):
+        result = {}
+        self.vt.run(result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/parser/__init__.py b/tests/unit/benchmark/scenarios/parser/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/__init__.py
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/tests/unit/benchmark/scenarios/parser/test_parser.py
new file mode 100644
index 000000000..d11a6d5c8
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/test_parser.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.parser.Parser
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.parser import parser
+
+@mock.patch('yardstick.benchmark.scenarios.parser.parser.subprocess')
+class ParserTestCase(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_parser_successful_setup(self, mock_subprocess):
+
+        p = parser.Parser({}, {})
+        mock_subprocess.call.return_value = 0
+        p.setup()
+        self.assertEqual(p.setup_done, True)
+
+    def test_parser_successful(self, mock_subprocess):
+        args = {
+            'options': {'yangfile': '/root/yardstick/samples/yang.yaml',
+                        'toscafile': '/root/yardstick/samples/tosca.yaml'},
+        }
+        p = parser.Parser(args, {})
+        result = {}
+        mock_subprocess.call.return_value = 0
+        sample_output = '{"yangtotosca": "success"}'
+
+        p.run(result)
+        expected_result = json.loads(sample_output)
+
+    def test_parser_teardown_successful(self, mock_subprocess):
+
+        p = parser.Parser({}, {})
+        mock_subprocess.call.return_value = 0
+        p.teardown()
+        self.assertEqual(p.teardown_done, True)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/dispatcher/__init__.py b/tests/unit/dispatcher/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/dispatcher/__init__.py
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
new file mode 100644
index 000000000..5553c86a9
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.dispatcher.influxdb
+
+import mock
+import unittest
+
+from yardstick.dispatcher.influxdb import InfluxdbDispatcher
+
+class InfluxdbDispatcherTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.data1 = {
+            "runner_id": 8921,
+            "context_cfg": {
+                "host": {
+                    "ip": "10.229.43.154",
+                    "key_filename": "/root/yardstick/yardstick/resources/files/yardstick_key",
+                    "name": "kvm.LF",
+                    "user": "root"
+                },
+                "target": {
+                    "ipaddr": "10.229.44.134"
+                }
+            },
+            "scenario_cfg": {
+                "runner": {
+                    "interval": 1,
+                    "object": "yardstick.benchmark.scenarios.networking.ping.Ping",
+                    "output_filename": "/tmp/yardstick.out",
+                    "runner_id": 8921,
+                    "duration": 10,
+                    "type": "Duration"
+                },
+                "host": "kvm.LF",
+                "type": "Ping",
+                "target": "10.229.44.134",
+                "sla": {
+                    "action": "monitor",
+                    "max_rtt": 10
+                },
+                "tc": "ping",
+                "task_id": "ea958583-c91e-461a-af14-2a7f9d7f79e7"
+            }
+        }
+        self.data2 = {
+            "benchmark": {
+                "timestamp": "1451478117.883505",
+                "errors": "",
+                "data": {
+                    "rtt": 0.613
+                },
+                "sequence": 1
+            },
+            "runner_id": 8921
+        }
+        self.data3 = {
+            "benchmark": {
+                "data": {
+                    "mpstat": {
+                        "cpu0": {
+                            "%sys": "0.00",
+                            "%idle": "99.00"
+                        },
+                        "loadavg": [
+                            "1.09",
+                            "0.29"
+                        ]
+                    },
+                    "rtt": "1.03"
+                }
+            }
+        }
+
+    def test_record_result_data_no_target(self):
+        influxdb = InfluxdbDispatcher(None)
+        influxdb.target = ''
+        self.assertEqual(influxdb.record_result_data(self.data1), -1)
+
+    def test_record_result_data_no_case_name(self):
+        influxdb = InfluxdbDispatcher(None)
+        self.assertEqual(influxdb.record_result_data(self.data2), -1)
+
+    @mock.patch('yardstick.dispatcher.influxdb.requests')
+    def test_record_result_data(self, mock_requests):
+        type(mock_requests.post.return_value).status_code = 204
+        influxdb = InfluxdbDispatcher(None)
+        self.assertEqual(influxdb.record_result_data(self.data1), 0)
+        self.assertEqual(influxdb.record_result_data(self.data2), 0)
+        self.assertEqual(influxdb.flush_result_data(), 0)
+
+    def test__dict_key_flatten(self):
+        line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
+        influxdb = InfluxdbDispatcher(None)
+        flattened_data = influxdb._dict_key_flatten(self.data3['benchmark']['data'])
+        result = ",".join([k+"="+v for k, v in flattened_data.items()])
+        self.assertEqual(result, line)
+
+    def test__get_nano_timestamp(self):
+        influxdb = InfluxdbDispatcher(None)
+        results = {'benchmark': {'timestamp': '1451461248.925574'}}
+        self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+    @mock.patch('yardstick.dispatcher.influxdb.time')
+    def test__get_nano_timestamp_except(self, mock_time):
+        results = {}
+        influxdb = InfluxdbDispatcher(None)
+        mock_time.time.return_value = 1451461248.925574
+        self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/dispatcher/test_influxdb_line_protocol.py b/tests/unit/dispatcher/test_influxdb_line_protocol.py
new file mode 100644
index 000000000..cb05bf4d2
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb_line_protocol.py
@@ -0,0 +1,55 @@
+# Unittest for yardstick.dispatcher.influxdb_line_protocol
+
+# yardstick comment: this file is a modified copy of
+# influxdb-python/influxdb/tests/test_line_protocol.py
+
+import unittest
+from yardstick.dispatcher.influxdb_line_protocol import make_lines
+
+
+class TestLineProtocol(unittest.TestCase):
+
+    def test_make_lines(self):
+        data = {
+            "tags": {
+                "empty_tag": "",
+                "none_tag": None,
+                "integer_tag": 2,
+                "string_tag": "hello"
+            },
+            "points": [
+                {
+                    "measurement": "test",
+                    "fields": {
+                        "string_val": "hello!",
+                        "int_val": 1,
+                        "float_val": 1.1,
+                        "none_field": None,
+                        "bool_val": True,
+                    }
+                }
+            ]
+        }
+
+        self.assertEqual(
+            make_lines(data),
+            'test,integer_tag=2,string_tag=hello '
+            'bool_val=True,float_val=1.1,int_val=1i,string_val="hello!"\n'
+        )
+
+    def test_string_val_newline(self):
+        data = {
+            "points": [
+                {
+                    "measurement": "m1",
+                    "fields": {
+                        "multi_line": "line1\nline1\nline3"
+                    }
+                }
+            ]
+        }
+
+        self.assertEqual(
+            make_lines(data),
+            'm1 multi_line="line1\\nline1\\nline3"\n'
+        )