Diffstat (limited to 'yardstick/tests/unit/benchmark/scenarios')
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py | 18
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py | 11
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py | 19
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py | 7
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py | 7
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py | 62
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py | 51
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py | 5
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py | 3
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 17
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py | 199
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 45
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py | 5
22 files changed, 347 insertions, 135 deletions
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index ce972779d..8d042c406 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -7,6 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import time
+
import mock
import unittest
@@ -86,13 +88,19 @@ class BaseMonitorTestCase(unittest.TestCase):
'sla': {'max_outage_time': 5}
}
+ def _close_queue(self, instance):
+ time.sleep(0.1)
+ instance._queue.close()
+
def test__basemonitor_start_wait_successful(self):
ins = basemonitor.BaseMonitor(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.start_monitor()
ins.wait_monitor()
def test__basemonitor_all_successful(self):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.setup()
ins.run()
ins.verify_SLA()
@@ -100,16 +108,12 @@ class BaseMonitorTestCase(unittest.TestCase):
@mock.patch.object(basemonitor, 'multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.setup()
mock_multiprocess.Event().is_set.return_value = False
ins.run()
ins.verify_SLA()
- # TODO(elfoley): fix this test to not throw an error
def test__basemonitor_getmonitorcls_successful(self):
- cls = None
- try:
- cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
- except Exception: # pylint: disable=broad-except
- pass
- self.assertIsNone(cls)
+ with self.assertRaises(RuntimeError):
+ basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
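The rewritten test above pins the failure mode instead of swallowing any exception and asserting None. A minimal sketch of the registry lookup this behavior implies, with hypothetical names (the real BaseMonitor in yardstick may differ in detail):

import multiprocessing

class BaseMonitor(multiprocessing.Process):
    # Hypothetical registry mapping monitor_type to implementation class,
    # e.g. {'openstack-cmd': MonitorOpenstackCmd}; populated by subclasses.
    monitor_registry = {}

    @staticmethod
    def get_monitor_cls(monitor_cfg):
        monitor_type = monitor_cfg['monitor_type']
        try:
            return BaseMonitor.monitor_registry[monitor_type]
        except KeyError:
            # The unit test asserts exactly this failure mode.
            raise RuntimeError('No such monitor_type: %s' % monitor_type)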
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index d1172d5a6..cd065c961 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -11,6 +11,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability import scenario_general
+from yardstick.common import exceptions as y_exc
class ScenarioGeneralTestCase(unittest.TestCase):
@@ -59,6 +60,14 @@ class ScenarioGeneralTestCase(unittest.TestCase):
self.instance.director.verify.return_value = False
self.instance.director.data = {}
ret = {}
- self.assertRaises(AssertionError, self.instance.run, ret)
+ self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
+ self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 0)
+
+ def test_scenario_general_case_service_not_found_fail(self):
+ self.instance.director.verify.return_value = True
+ self.instance.director.data = {"general-attacker": 0}
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
self.instance.teardown()
self.assertEqual(ret['sla_pass'], 0)
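Both failure cases above rely on the same contract: run() records sla_pass in the result dict before raising. A hedged sketch of that contract (a simplified shape, not the exact scenario_general code):

from yardstick.common import exceptions as y_exc

def run_scenario(director, result):
    director.startAttackers()
    verify_result = director.verify()
    # A recorded count of 0 means the attacked service was never found.
    service_not_found = any(v == 0 for v in director.data.values())
    if not verify_result or service_not_found:
        result['sla_pass'] = 0
        raise y_exc.SLAValidationError(
            case_name='scenario_general', error_msg='SLA validation failed')
    result['sla_pass'] = 1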
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index dd656fbd5..cf1e76d7a 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -11,6 +11,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability import serviceha
+from yardstick.common import exceptions as y_exc
class ServicehaTestCase(unittest.TestCase):
@@ -71,5 +72,21 @@ class ServicehaTestCase(unittest.TestCase):
mock_monitor.MonitorMgr().verify_SLA.return_value = False
ret = {}
- self.assertRaises(AssertionError, p.run, ret)
+ self.assertRaises(y_exc.SLAValidationError, p.run, ret)
+ self.assertEqual(ret['sla_pass'], 0)
+
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_run_service_not_found_sla_error(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+
+ p.setup()
+ self.assertTrue(p.setup_done)
+ p.data["kill-process"] = 0
+
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, p.run, ret)
self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index f24ec24ec..4fadde4dc 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import cyclictest
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
@@ -122,7 +123,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
@@ -136,7 +137,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
@@ -150,7 +151,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
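The same substitution repeats through the rest of this patch: scenario SLA checks raise y_exc.SLAValidationError instead of relying on bare assert statements, which raise AssertionError and are stripped entirely when Python runs with -O. A sketch of the scenario-side check these tests now expect (assumed shape; the real check lives in the cyclictest scenario):

from yardstick.common import exceptions as y_exc

def check_max_latency(latency, sla_max_latency):
    # Raises a dedicated, catchable exception rather than AssertionError.
    if latency > sla_max_latency:
        raise y_exc.SLAValidationError(
            case_name='cyclictest',
            error_msg='%d > max_latency(%d)' % (latency, sla_max_latency))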
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 9640ce000..c4ac347f4 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import lmbench
+from yardstick.common import exceptions as y_exc
# pylint: disable=unused-argument
@@ -144,7 +145,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 37.5, "size": 0.00049}]'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, l.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
@@ -162,7 +163,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, l.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
def test_successful_latency_for_cache_run_sla(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
index 03003d01f..02040ca01 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import qemu_migrate
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.qemu_migrate.ssh')
@@ -116,7 +117,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_sla_downtime(self, mock_ssh):
@@ -129,7 +130,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_sla_setuptime(self, mock_ssh):
@@ -142,7 +143,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index dcc0e810d..9e055befe 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -18,6 +18,7 @@ from oslo_serialization import jsonutils
from yardstick.common import utils
from yardstick.benchmark.scenarios.compute import ramspeed
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.ramspeed.ssh')
@@ -146,7 +147,7 @@ class RamspeedTestCase(unittest.TestCase):
"Block_size(kb)": 16384, "Bandwidth(MBps)": 14128.94}, {"Test_type":\
"INTEGER & WRITING", "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, r.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
def test_ramspeed_unsuccessful_script_error(self, mock_ssh):
options = {
@@ -219,7 +220,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 1300.27}, {"Test_type": "INTEGER AVERAGE:",\
"Bandwidth(MBps)": 2401.58}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, r.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
def test_ramspeed_unsuccessful_unknown_type_run(self, mock_ssh):
options = {
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
index 6339a2dcd..e4a8d6e26 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import unixbench
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
@@ -122,7 +123,7 @@ class UnixbenchTestCase(unittest.TestCase):
sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, u.run, result)
+ self.assertRaises(y_exc.SLAValidationError, u.run, result)
def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
@@ -137,7 +138,7 @@ class UnixbenchTestCase(unittest.TestCase):
sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, u.run, result)
+ self.assertRaises(y_exc.SLAValidationError, u.run, result)
def test_unixbench_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
index 639cf2906..aebd1dfe8 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
@@ -6,30 +6,50 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
+
import mock
+from oslo_utils import uuidutils
+import unittest
-from yardstick.benchmark.scenarios.lib import create_image
from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_image
+
-# NOTE(elfoley): There should be more tests here.
class CreateImageTestCase(unittest.TestCase):
- @mock.patch.object(openstack_utils, 'create_image')
- @mock.patch.object(openstack_utils, 'get_glance_client')
- def test_create_image(self, mock_get_glance_client, mock_create_image):
- options = {
- 'image_name': 'yardstick_test_image_01',
- 'disk_format': 'qcow2',
- 'container_format': 'bare',
- 'min_disk': '1',
- 'min_ram': '512',
- 'protected': 'False',
- 'tags': '["yardstick automatic test image"]',
- 'file_path': '/home/opnfv/images/cirros-0.3.5-x86_64-disk.img'
- }
- args = {"options": options}
- obj = create_image.CreateImage(args, {})
- obj.run({})
- mock_create_image.assert_called_once()
- mock_get_glance_client.assert_called_once()
+ def setUp(self):
+ self._mock_create_image = mock.patch.object(
+ openstack_utils, 'create_image')
+ self.mock_create_image = (
+ self._mock_create_image.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_image, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'image_name': 'yardstick_image'}}
+ self.result = {}
+ self.cimage_obj = create_image.CreateImage(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_create_image.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.cimage_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_image.return_value = _uuid
+ output = self.cimage_obj.run(self.result)
+ self.assertEqual({'image_create': 1}, self.result)
+ self.assertEqual({'id': _uuid}, output)
+ self.mock_log.info.assert_called_once_with('Create image successful!')
+
+ def test_run_fail(self):
+ self.mock_create_image.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateImageError):
+ self.cimage_obj.run(self.result)
+ self.assertEqual({'image_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create image failed!')
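A usage note on the setUp pattern above: every patcher started with start() must be stopped, or the patch leaks into subsequent tests. The explicit _stop_mock helper does this per patcher; mock.patch.stopall is a standard one-call alternative, sketched here:

import unittest
import mock

from yardstick.common import openstack_utils

class ExampleTestCase(unittest.TestCase):
    def setUp(self):
        self.mock_create_image = mock.patch.object(
            openstack_utils, 'create_image').start()
        # Stops every patcher started via start() in this test.
        self.addCleanup(mock.patch.stopall)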
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
index e382d46fa..8a1d6d695 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
@@ -9,21 +9,44 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_image import DeleteImage
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_image
class DeleteImageTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.delete_image')
- @mock.patch('yardstick.common.openstack_utils.get_image_id')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
- def test_delete_image(self, mock_get_glance_client, mock_image_id, mock_delete_image):
- options = {
- 'image_name': 'yardstick_test_image_01'
- }
- args = {"options": options}
- obj = DeleteImage(args, {})
- obj.run({})
- mock_delete_image.assert_called_once()
- mock_image_id.assert_called_once()
- mock_get_glance_client.assert_called_once()
+ def setUp(self):
+ self._mock_delete_image = mock.patch.object(
+ openstack_utils, 'delete_image')
+ self.mock_delete_image = (
+ self._mock_delete_image.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_image, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_image'}}
+ self.result = {}
+
+ self.delimg_obj = delete_image.DeleteImage(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_image.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_image.return_value = True
+ self.assertIsNone(self.delimg_obj.run(self.result))
+ self.assertEqual({'delete_image': 1}, self.result)
+ self.mock_log.info.assert_called_once_with('Delete image successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_image.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteImageError):
+ self.delimg_obj.run(self.result)
+ self.assertEqual({'delete_image': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Delete image failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 74144afd5..2190e9337 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -19,6 +19,7 @@ from oslo_serialization import jsonutils
from yardstick.common import utils
from yardstick.benchmark.scenarios.networking import iperf3
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh')
@@ -118,7 +119,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_iperf_successful_sla_jitter(self, mock_ssh):
options = {"protocol": "udp", "bandwidth": "20m"}
@@ -152,7 +153,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_iperf_successful_tcp_protocol(self, mock_ssh):
options = {"protocol": "tcp", "nodelay": "yes"}
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
index 5907562c2..a7abcd98a 100755
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
@@ -18,6 +18,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.netperf.ssh')
@@ -98,7 +99,7 @@ class NetperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_netperf_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
index 956a9c078..a577dba59 100755
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
@@ -19,6 +19,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf_node
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
@@ -98,7 +99,7 @@ class NetperfNodeTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
index 4adfab120..559e0599e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -14,6 +14,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import ping
+from yardstick.common import exceptions as y_exc
class PingTestCase(unittest.TestCase):
@@ -74,7 +75,7 @@ class PingTestCase(unittest.TestCase):
p = ping.Ping(args, self.ctx)
mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
index 4662c8537..ad5217a14 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -14,6 +14,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import ping6
+from yardstick.common import exceptions as y_exc
class PingTestCase(unittest.TestCase):
@@ -98,7 +99,7 @@ class PingTestCase(unittest.TestCase):
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH.from_node()
mock_ssh.SSH.from_node().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 6aea03aee..ea0deab3e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -13,6 +13,7 @@ import unittest
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
@@ -176,7 +177,7 @@ class PktgenTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index 976087148..b141591f7 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -12,6 +12,7 @@ import unittest
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+from yardstick.common import exceptions as y_exc
class PktgenDPDKLatencyTestCase(unittest.TestCase):
@@ -162,7 +163,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_dpdk_unsuccessful_script_error(self):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
index e90fb07c7..39392e4bb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
@@ -16,6 +16,7 @@ from oslo_serialization import jsonutils
import mock
from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+from yardstick.common import exceptions as y_exc
# pylint: disable=unused-argument
@@ -131,7 +132,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "flows": 110}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_dpdk_throughput_unsuccessful_script_error(
self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 9bfbf0752..bb1a7aaca 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -20,11 +20,11 @@ import mock
import unittest
from yardstick import tests
+from yardstick.common import exceptions
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.traffic_profile import base
from yardstick.network_services.vnf_generic import vnfdgen
-from yardstick.error import IncorrectConfig
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
@@ -423,7 +423,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
with mock.patch.dict(sys.modules, tests.STL_MOCKS):
self.assertIsNotNone(self.s.get_vnf_impl(vnfd))
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s.get_vnf_impl('NonExistentClass')
exc_str = str(raised.exception)
@@ -465,7 +465,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
cfg_patch = mock.patch.object(self.s, 'context_cfg', cfg)
with cfg_patch:
- with self.assertRaises(IncorrectConfig):
+ with self.assertRaises(exceptions.IncorrectConfig):
self.s.map_topology_to_infrastructure()
def test_map_topology_to_infrastructure_config_invalid(self):
@@ -482,7 +482,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
config_patch = mock.patch.object(self.s, 'context_cfg', cfg)
with config_patch:
- with self.assertRaises(IncorrectConfig):
+ with self.assertRaises(exceptions.IncorrectConfig):
self.s.map_topology_to_infrastructure()
def test__resolve_topology_invalid_config(self):
@@ -496,7 +496,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
for interface in self.tg__1['interfaces'].values():
del interface['local_mac']
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s._resolve_topology()
self.assertIn('not found', str(raised.exception))
@@ -509,7 +509,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.topology["vld"][0]['vnfd-connection-point-ref'].append(
self.s.topology["vld"][0]['vnfd-connection-point-ref'][0])
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s._resolve_topology()
self.assertIn('wrong endpoint count', str(raised.exception))
@@ -518,7 +518,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.topology["vld"][0]['vnfd-connection-point-ref'] = \
self.s.topology["vld"][0]['vnfd-connection-point-ref'][:1]
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s._resolve_topology()
self.assertIn('wrong endpoint count', str(raised.exception))
@@ -628,7 +628,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
'extra_args': {'arg1': 'value1', 'arg2': 'value2'},
'flow': {'flow': {}},
'imix': {'imix': {'64B': 100}},
- 'uplink': {}}
+ 'uplink': {},
+ 'duration': 30}
)
mock_tprofile_get.assert_called_once_with(fake_vnfd)
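Beyond the added 'duration' key, this file's change is an import move: IncorrectConfig now comes from yardstick.common.exceptions rather than yardstick.error. A sketch of the oslo-style exception shape such a module typically uses (an assumption for illustration, not the verbatim yardstick code):

class YardstickException(Exception):
    message = 'An unknown exception occurred.'

    def __init__(self, **kwargs):
        try:
            super(YardstickException, self).__init__(self.message % kwargs)
        except KeyError:
            # Missing format keys: fall back to the raw message.
            super(YardstickException, self).__init__(self.message)

class IncorrectConfig(YardstickException):
    message = 'Incorrect configuration. %(error_msg)s'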
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index 419605b26..a606543e5 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -12,31 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
-
-from __future__ import absolute_import
-try:
- from unittest import mock
-except ImportError:
- import mock
+import mock
import unittest
+import subprocess
+import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.networking import vsperf
+from yardstick import exceptions as y_exc
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
class VsperfTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
"host": {
"ip": "10.229.47.137",
"user": "ubuntu",
"password": "ubuntu",
},
}
- self.args = {
+ self.scenario_cfg = {
'options': {
'testname': 'p2p_rfc2544_continuous',
'traffic_type': 'continuous',
@@ -57,70 +52,154 @@ class VsperfTestCase(unittest.TestCase):
}
}
- def test_vsperf_setup(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
+
+ self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
+ self.mock_subprocess_call = self._mock_subprocess_call.start()
+ self.mock_subprocess_call.return_value = None
+
+ self.addCleanup(self._stop_mock)
+
+ self.scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+ self._mock_subprocess_call.stop()
+
+ def test_setup(self):
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ def test_setup_tg_port_not_set(self):
+ del self.scenario_cfg['options']['trafficgen_port1']
+ del self.scenario_cfg['options']['trafficgen_port2']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+
+ self.mock_subprocess_call.assert_called_once_with(
+ 'setup_yardstick.sh setup', shell=True)
+ self.assertIsNone(scenario.tg_port1)
+ self.assertIsNone(scenario.tg_port2)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
+
+ def test_setup_no_setup_script(self):
+ del self.scenario_cfg['options']['setup_script']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+
+ self.mock_subprocess_call.assert_has_calls(
+ (mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth1"',
+ shell=True),
+ mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth3"',
+ shell=True)))
+ self.assertEqual(2, self.mock_subprocess_call.call_count)
+ self.assertIsNone(scenario.setup_script)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
+
+ def test_run_ok(self):
+ self.scenario.setup()
+
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ result = {}
+ self.scenario.run(result)
- def test_vsperf_teardown(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ def test_run_ok_setup_not_done(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ result = {}
+ self.scenario.run(result)
- p.teardown()
- self.assertFalse(p.setup_done)
+ self.assertTrue(self.scenario.setup_done)
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_vsperf_run_ok(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ def test_run_failed_vsperf_execution(self):
+ self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+ (1, '', ''))
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ with self.assertRaises(RuntimeError):
+ self.scenario.run({})
+ self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+ def test_run_failed_csv_report(self):
+ self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+ (0, '', ''),
+ (1, '', ''))
- result = {}
- p.run(result)
+ with self.assertRaises(RuntimeError):
+ self.scenario.run({})
+ self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
- self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+ def test_run_sla_fail(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n123456.000\r\n', '')
- def test_vsperf_run_falied_vsperf_execution(self, mock_ssh,
- mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ self.assertTrue('VSPERF_throughput_rx_fps(123456.000000) < '
+ 'SLA_throughput_rx_fps(500000.000000)'
+ in str(raised.exception))
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ def test_run_sla_fail_metric_not_collected(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n14797660.000\r\n', '')
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
- def test_vsperf_run_falied_csv_report(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+ in str(raised.exception))
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ def test_run_sla_fail_metric_not_defined_in_sla(self):
+ del self.scenario_cfg['sla']['throughput_rx_fps']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ scenario.run({})
+ self.assertTrue('throughput_rx_fps is not defined in SLA'
+ in str(raised.exception))
+
+ def test_teardown(self):
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ self.scenario.teardown()
+ self.assertFalse(self.scenario.setup_done)
+
+ def test_teardown_tg_port_not_set(self):
+ del self.scenario_cfg['options']['trafficgen_port1']
+ del self.scenario_cfg['options']['trafficgen_port2']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.teardown()
+
+ self.mock_subprocess_call.assert_called_once_with(
+ 'setup_yardstick.sh teardown', shell=True)
+ self.assertFalse(scenario.setup_done)
+
+ def test_teardown_no_setup_script(self):
+ del self.scenario_cfg['options']['setup_script']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.teardown()
+
+ self.mock_subprocess_call.assert_has_calls(
+ (mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth1"',
+ shell=True),
+ mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth3"',
+ shell=True)))
+ self.assertEqual(2, self.mock_subprocess_call.call_count)
+ self.assertFalse(scenario.setup_done)
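A note on the side_effect tuples used in the vsperf tests above: a mock with an iterable side_effect returns one element per call, in order, which is how a single execute() mock models "vsperf run succeeds, CSV report fetch fails". Minimal demonstration:

import mock

m = mock.Mock()
m.execute.side_effect = ((0, '', ''), (1, '', ''))
print(m.execute())  # (0, '', '') -> first SSH command succeeds
print(m.execute())  # (1, '', '') -> second SSH command fails (rc != 0)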
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index 1d2278e21..c05d2ced2 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -19,6 +19,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+from yardstick import exceptions as y_exc
class VsperfDPDKTestCase(unittest.TestCase):
@@ -211,3 +212,47 @@ class VsperfDPDKTestCase(unittest.TestCase):
result = {}
self.assertRaises(RuntimeError, self.scenario.run, result)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('VSPERF_throughput_rx_fps(123456.000000) < '
+ 'SLA_throughput_rx_fps(500000.000000)',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_metric_not_collected(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps was not collected by VSPERF',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
+ del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps is not defined in SLA',
+ str(raised.exception))
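The time.sleep and subprocess.check_output patches above are absorbed by *args; when they are named instead, remember that stacked mock.patch decorators inject mocks bottom-up, so the decorator closest to the function becomes the first argument. Illustration:

import subprocess
import time
import mock

@mock.patch.object(time, 'sleep')                # outermost -> last mock arg
@mock.patch.object(subprocess, 'check_output')   # innermost -> first mock arg
def test_example(mock_check_output, mock_sleep):
    pass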
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
index f149cee69..6e69ddc6d 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
@@ -18,6 +18,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import fio
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.storage.fio.ssh')
@@ -203,7 +204,7 @@ class FioTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_fio_successful_bw_iops_sla(self, mock_ssh):
@@ -252,7 +253,7 @@ class FioTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_fio_unsuccessful_script_error(self, mock_ssh):