Diffstat (limited to 'yardstick/tests/unit/benchmark/scenarios')
4 files changed, 84 insertions, 35 deletions
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
new file mode 100644
index 000000000..74f86983b
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+
+class BaseAttackerTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.attacker_cfg = {
+            'fault_type': 'test-attacker',
+            'action_parameter': {'process_name': 'nova_api'},
+            'rollback_parameter': {'process_name': 'nova_api'},
+            'key': 'stop-service',
+            'attack_key': 'stop-service',
+            'host': 'node1',
+        }
+        self.base_attacker = baseattacker.BaseAttacker({}, {})
+
+    def test__init__(self):
+        self.assertEqual(self.base_attacker.data, {})
+        self.assertFalse(self.base_attacker.mandatory)
+        self.assertEqual(self.base_attacker.intermediate_variables, {})
+        self.assertFalse(self.base_attacker.mandatory)
+
+    def test_get_attacker_cls(self):
+        with self.assertRaises(RuntimeError):
+            baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index ec0e5973c..d61fa67c7 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -109,6 +109,23 @@ class ServicehaTestCase(unittest.TestCase):
         ret = {}
         p.run(ret)
         attacker = mock.Mock()
+        attacker.mandatory = False
         p.attackers = [attacker]
         p.teardown()
         attacker.recover.assert_not_called()
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_teardown_when_mandatory(self, mock_monitor,
+                                                *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+        p.setup()
+        self.assertTrue(p.setup_done)
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+        ret = {}
+        p.run(ret)
+        attacker = mock.Mock()
+        attacker.mandatory = True
+        p.attackers = [attacker]
+        p.teardown()
+        attacker.recover.assert_called_once()
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index c4ac347f4..ba63e5f9e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -6,11 +6,6 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
-
-from __future__ import absolute_import
-
 import unittest
 
 import mock
@@ -18,13 +13,9 @@
 from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import lmbench
 from yardstick.common import exceptions as y_exc
+from yardstick import ssh
 
-# pylint: disable=unused-argument
-# disable this for now because I keep forgetting mock patch arg ordering
-
-
-@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
 class LmbenchTestCase(unittest.TestCase):
 
     def setUp(self):
@@ -38,16 +29,23 @@ class LmbenchTestCase(unittest.TestCase):
 
         self.result = {}
 
-    def test_successful_setup(self, mock_ssh):
+        self._mock_ssh = mock.patch.object(ssh, 'SSH')
+        self.mock_ssh = self._mock_ssh.start()
+        self.addCleanup(self._stop_mocks)
+
+    def _stop_mocks(self):
+        self._mock_ssh.stop()
+
+    def test_successful_setup(self):
 
         l = lmbench.Lmbench({}, self.ctx)
-        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+        self.mock_ssh.from_node().execute.return_value = (0, '', '')
 
         l.setup()
         self.assertIsNotNone(l.client)
         self.assertTrue(l.setup_done)
 
-    def test_unsuccessful_unknown_type_run(self, mock_ssh):
+    def test_unsuccessful_unknown_type_run(self):
 
         options = {
             "test_type": "foo"
@@ -58,7 +56,7 @@ class LmbenchTestCase(unittest.TestCase):
 
         self.assertRaises(RuntimeError, l.run, self.result)
 
-    def test_successful_latency_run_no_sla(self, mock_ssh):
+    def test_successful_latency_run_no_sla(self):
 
         options = {
             "test_type": "latency",
@@ -69,12 +67,12 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
         expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
         self.assertEqual(self.result, expected_result)
 
-    def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+    def test_successful_bandwidth_run_no_sla(self):
 
         options = {
             "test_type": "bandwidth",
@@ -86,12 +84,12 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
         expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
-    def test_successful_latency_run_sla(self, mock_ssh):
+    def test_successful_latency_run_sla(self):
 
         options = {
             "test_type": "latency",
@@ -105,12 +103,12 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
         expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
         self.assertEqual(self.result, expected_result)
 
-    def test_successful_bandwidth_run_sla(self, mock_ssh):
+    def test_successful_bandwidth_run_sla(self):
 
         options = {
             "test_type": "bandwidth",
@@ -125,12 +123,12 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
         expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
-    def test_unsuccessful_latency_run_sla(self, mock_ssh):
+    def test_unsuccessful_latency_run_sla(self):
 
         options = {
             "test_type": "latency",
@@ -144,10 +142,10 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = '[{"latency": 37.5, "size": 0.00049}]'
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
 
-    def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+    def test_unsuccessful_bandwidth_run_sla(self):
 
         options = {
             "test_type": "bandwidth",
@@ -162,10 +160,10 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
 
-    def test_successful_latency_for_cache_run_sla(self, mock_ssh):
+    def test_successful_latency_for_cache_run_sla(self):
 
         options = {
             "test_type": "latency_for_cache",
@@ -179,16 +177,16 @@ class LmbenchTestCase(unittest.TestCase):
         l = lmbench.Lmbench(args, self.ctx)
 
         sample_output = "{\"L1cache\": 1.6}"
-        mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+        self.mock_ssh.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
         expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
-    def test_unsuccessful_script_error(self, mock_ssh):
+    def test_unsuccessful_script_error(self):
 
         options = {"test_type": "bandwidth"}
         args = {"options": options}
         l = lmbench.Lmbench(args, self.ctx)
-        mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+        self.mock_ssh.from_node().execute.return_value = (1, '', 'FOOBAR')
 
         self.assertRaises(RuntimeError, l.run, self.result)
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 49578b383..6bf2f2c2f 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -405,7 +405,6 @@ class TestNetworkServiceTestCase(unittest.TestCase):
     def test___get_traffic_flow(self):
         self.scenario_cfg["traffic_options"]["flow"] = \
             self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
-        self.scenario_cfg["options"] = {}
         self.scenario_cfg['options'] = {
             'flow': {
                 'src_ip': [
@@ -421,11 +420,10 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                 'public_ip': ['1.1.1.1'],
             },
         }
-        # NOTE(ralonsoh): check the expected output. This test could be
-        # incorrect
-        # result = {'flow': {'dst_ip0': '152.16.40.2-152.16.40.254',
-        #           'src_ip0': '152.16.100.2-152.16.100.254'}}
-        self.assertEqual({'flow': {}}, self.s._get_traffic_flow())
+        expected_flow = {'flow': {'dst_ip_0': '152.16.40.2-152.16.40.254',
+                                  'public_ip_0': '1.1.1.1',
+                                  'src_ip_0': '152.16.100.2-152.16.100.254'}}
+        self.assertEqual(expected_flow, self.s._get_traffic_flow())
 
     def test___get_traffic_flow_error(self):
         self.scenario_cfg["traffic_options"]["flow"] = \
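
Note on the serviceha change: the two teardown tests pin down a contract — an attacker marked mandatory is always rolled back, while a non-mandatory attacker is left alone when the SLA check passed. ServiceHA.teardown() itself is not part of this diff; the snippet below is only a hypothetical sketch of logic consistent with those two assertions, and the sla_pass attribute name is an assumption, not the scenario's actual code.

# Hypothetical sketch, not the code under test: teardown behaviour consistent
# with test__serviceha_teardown and test__serviceha_teardown_when_mandatory.
class ServiceHASketch(object):

    def __init__(self):
        self.attackers = []
        self.sla_pass = True  # assumed flag recording the SLA verification result

    def teardown(self):
        for attacker in self.attackers:
            # Mandatory attackers are always recovered; optional ones only
            # when the SLA was not met. With sla_pass True, recover() is
            # called exactly once in the mandatory test and never otherwise.
            if attacker.mandatory or not self.sla_pass:
                attacker.recover()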
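
Note on the lmbench change: the class-level @mock.patch decorator (and the mock_ssh argument every test had to accept, plus the pylint: disable=unused-argument workaround) is replaced by a patch started in setUp() and stopped through addCleanup(). Below is a minimal, self-contained sketch of that pattern using only the standard library; json merely stands in for yardstick.ssh, and the class and test names are illustrative. Passing the patcher's stop method straight to addCleanup is an equivalent shorthand for the _stop_mocks helper used in the diff.

import unittest
from unittest import mock

import json  # stands in for the collaborator module being patched


class SetUpPatchExample(unittest.TestCase):
    """Illustrative only: setUp-level patching instead of per-test decorators."""

    def setUp(self):
        # Start the patch once; every test method sees the same mock without
        # having to declare an extra argument in its signature.
        self._mock_loads = mock.patch.object(json, 'loads')
        self.mock_loads = self._mock_loads.start()
        # addCleanup runs even if setUp or a test fails, so the patch never
        # leaks into other test cases.
        self.addCleanup(self._mock_loads.stop)

    def test_uses_patched_module(self):
        self.mock_loads.return_value = {'patched': True}
        self.assertEqual(json.loads('{}'), {'patched': True})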