Diffstat (limited to 'yardstick/tests/unit/benchmark/scenarios/compute')
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt | 9
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt | 2
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py | 97
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py | 66
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py | 264
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py | 175
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py | 202
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py | 119
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py | 62
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py | 167
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py | 244
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py | 85
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py | 84
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py | 171
17 files changed, 1755 insertions, 0 deletions
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/__init__.py b/yardstick/tests/unit/benchmark/scenarios/compute/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/__init__.py
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt b/yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt
new file mode 100644
index 000000000..e2c79a9b1
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt
@@ -0,0 +1,5 @@
+Counting cache functions... Output every 1 seconds.
+ HITS MISSES DIRTIES RATIO BUFFERS_MB CACHE_MB
+ 6462 0 29 100.0% 1157 66782
+
+Ending tracing...
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
new file mode 100644
index 000000000..723e64bcb
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
@@ -0,0 +1,9 @@
+Linux 3.13.0-68-generic (elxg482ls42) 11/30/2015 _x86_64_ (1 CPU)
+
+04:34:26 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
+04:34:26 PM all 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
+04:34:26 PM 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
+
+Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
+Average: all 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
+Average: 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt
new file mode 100644
index 000000000..c66520a27
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt
@@ -0,0 +1,2 @@
+cpu 245813227 366650 17338727 1195600354 2652765 178 177114 0 80439531 0
+cpu0 32334587 35782 1659040 87008833 401178 60 73571 0 8030817 0
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt b/yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt
new file mode 100644
index 000000000..1793e2f10
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt
@@ -0,0 +1,3 @@
+ total used free shared buff/cache available
+Mem: 263753976 76737332 187016644 2844 853528 67252400
+Swap: 268029948 0 268029948
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py
new file mode 100644
index 000000000..b0ddfc6b4
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.cachestat.CACHEstat
+
+from __future__ import absolute_import
+import mock
+import unittest
+import os
+
+from yardstick.benchmark.scenarios.compute import cachestat
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.cachestat.ssh')
+class CACHEstatTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'root',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_cachestat_successful_setup(self, mock_ssh):
+ c = cachestat.CACHEstat({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ c.setup()
+ self.assertIsNotNone(c.client)
+ self.assertTrue(c.setup_done)
+
+ def test_execute_command_success(self, mock_ssh):
+ c = cachestat.CACHEstat({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ expected_result = 'abcdefg'
+ mock_ssh.SSH.from_node().execute.return_value = (0, expected_result, '')
+ result = c._execute_command("foo")
+ self.assertEqual(result, expected_result)
+
+ def test_execute_command_failed(self, mock_ssh):
+ c = cachestat.CACHEstat({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+        mock_ssh.SSH.from_node().execute.return_value = (
+            127, '', 'Failed executing command')
+ self.assertRaises(RuntimeError, c._execute_command,
+ "cat /proc/meminfo")
+
+ def test_get_cache_usage_successful(self, mock_ssh):
+ options = {
+ "interval": 1,
+ }
+ args = {"options": options}
+ c = cachestat.CACHEstat(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ output = self._read_file("cachestat_sample_output.txt")
+ mock_ssh.SSH.from_node().execute.return_value = (0, output, '')
+ result = c._get_cache_usage()
+ expected_result = {"cachestat": {"cache0": {"HITS": "6462",
+ "DIRTIES": "29",
+ "RATIO": "100.0%",
+ "MISSES": "0",
+ "BUFFERS_MB": "1157",
+ "CACHE_MB": "66782"}},
+ "average": {"HITS": 6462, "DIRTIES": 29,
+ "RATIO": "100.0%",
+ "MISSES": 0, "BUFFERS_MB": 1157,
+ "CACHE_MB": 66782},
+ "max": {"HITS": 6462,
+ "DIRTIES": 29, "RATIO": 100.0, "MISSES": 0,
+ "BUFFERS_MB": 1157, "CACHE_MB": 66782}}
+
+ self.assertEqual(result, expected_result)
+
+ def _read_file(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ output = os.path.join(curr_path, filename)
+ with open(output) as f:
+ sample_output = f.read()
+ return sample_output
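
The expected_result asserted in test_get_cache_usage_successful above mirrors the column layout of cachestat_sample_output.txt. A minimal sketch of how such output could be mapped onto those columns is shown below; it is an illustration only, not the actual CACHEstat parser, and parse_cachestat_output is a hypothetical helper name.

def parse_cachestat_output(raw):
    """Map each cachestat data row onto the header columns.

    For the sample file above this returns
    {"cache0": {"HITS": "6462", "MISSES": "0", "DIRTIES": "29",
                "RATIO": "100.0%", "BUFFERS_MB": "1157", "CACHE_MB": "66782"}},
    i.e. the "cachestat" section asserted in the test.
    """
    lines = [line.split() for line in raw.splitlines() if line.strip()]
    header = next(fields for fields in lines if fields[0] == "HITS")
    records = {}
    index = 0
    for fields in lines:
        if fields[0].isdigit():      # data rows start with the HITS count
            records["cache%d" % index] = dict(zip(header, fields))
            index += 1
    return records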
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
new file mode 100644
index 000000000..7b9a5ad4a
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for
+# yardstick.benchmark.scenarios.compute.computecapacity.ComputeCapacity
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.benchmark.scenarios.compute import computecapacity
+
+SAMPLE_OUTPUT = '{"Cpu_number": "2", "Core_number": "24",\
+ "Memory_size": "263753976 kB", "Thread_number": "48",\
+ "Cache_size": "30720 KB", "HT_Open": "0"}'
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.computecapacity.ssh')
+class ComputeCapacityTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'nodes': {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key",
+ 'password': "root"
+ },
+ }
+ }
+
+ self.result = {}
+
+ def test_capacity_successful_setup(self, mock_ssh):
+ c = computecapacity.ComputeCapacity({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ c.setup()
+ self.assertIsNotNone(c.client)
+ self.assertTrue(c.setup_done)
+
+ def test_capacity_successful(self, mock_ssh):
+ c = computecapacity.ComputeCapacity({}, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, SAMPLE_OUTPUT, '')
+ c.run(self.result)
+ expected_result = jsonutils.loads(SAMPLE_OUTPUT)
+ self.assertEqual(self.result, expected_result)
+
+ def test_capacity_unsuccessful_script_error(self, mock_ssh):
+ c = computecapacity.ComputeCapacity({}, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, c.run, self.result)
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py
new file mode 100644
index 000000000..840ac7885
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.cpuload.CPULoad
+
+from __future__ import absolute_import
+import mock
+import unittest
+import os
+
+from yardstick.benchmark.scenarios.compute import cpuload
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.cpuload.ssh')
+class CPULoadTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_setup_mpstat_installed(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ l.setup()
+ self.assertIsNotNone(l.client)
+ self.assertTrue(l.setup_done)
+ self.assertTrue(l.has_mpstat)
+
+ def test_setup_mpstat_not_installed(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (127, '', '')
+
+ l.setup()
+ self.assertIsNotNone(l.client)
+ self.assertTrue(l.setup_done)
+ self.assertFalse(l.has_mpstat)
+
+ def test_execute_command_success(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ l.setup()
+
+ expected_result = 'abcdefg'
+ mock_ssh.SSH.from_node().execute.return_value = (0, expected_result, '')
+ result = l._execute_command("foo")
+ self.assertEqual(result, expected_result)
+
+ def test_execute_command_failed(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ l.setup()
+
+ mock_ssh.SSH.from_node().execute.return_value = (127, '', 'abcdefg')
+ self.assertRaises(RuntimeError, l._execute_command,
+ "cat /proc/loadavg")
+
+ def test_get_loadavg(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ l.setup()
+
+ mock_ssh.SSH.from_node().execute.return_value = \
+ (0, '1.50 1.45 1.51 3/813 14322', '')
+ result = l._get_loadavg()
+ expected_result = \
+ {'loadavg': ['1.50', '1.45', '1.51', '3/813', '14322']}
+ self.assertEqual(result, expected_result)
+
+ def test_get_cpu_usage_mpstat(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ l.setup()
+
+ l.interval = 1
+ l.count = 1
+ mpstat_output = self._read_file("cpuload_sample_output1.txt")
+ mock_ssh.SSH.from_node().execute.return_value = (0, mpstat_output, '')
+ result = l._get_cpu_usage_mpstat()
+
+ expected_result = \
+ {"mpstat_minimum":
+ {"cpu": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"},
+ "cpu0": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"}},
+ "mpstat_average":
+ {"cpu": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"},
+ "cpu0": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"}},
+ "mpstat_maximun":
+ {"cpu": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"},
+ "cpu0": {"%steal": "0.00", "%usr": "0.00", "%gnice": "0.00",
+ "%idle": "100.00", "%guest": "0.00",
+ "%iowait": "0.00", "%sys": "0.00", "%soft": "0.00",
+ "%irq": "0.00", "%nice": "0.00"}}}
+
+ self.assertDictEqual(result, expected_result)
+
+ def test_get_cpu_usage(self, mock_ssh):
+ options = {
+ "interval": 0,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ l.setup()
+
+ l.interval = 0
+ output = self._read_file("cpuload_sample_output2.txt")
+ mock_ssh.SSH.from_node().execute.return_value = (0, output, '')
+ result = l._get_cpu_usage()
+
+ expected_result = \
+ {'mpstat':
+ {'cpu':
+ {'%steal': '0.00',
+ '%usr': '11.31',
+ '%gnice': '0.00',
+ '%idle': '81.78',
+ '%iowait': '0.18',
+ '%guest': '5.50',
+ '%sys': '1.19',
+ '%soft': '0.01',
+ '%irq': '0.00',
+ '%nice': '0.03'},
+ 'cpu0':
+ {'%steal': '0.00',
+ '%usr': '20.00',
+ '%gnice': '0.00',
+ '%idle': '71.60',
+ '%iowait': '0.33',
+ '%guest': '6.61',
+ '%sys': '1.37',
+ '%soft': '0.06',
+ '%irq': '0.00',
+ '%nice': '0.03'}}}
+
+ self.assertDictEqual(result, expected_result)
+
+ def test_run_proc_stat(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+
+ args = {'options': options}
+
+ l = cpuload.CPULoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ l.setup()
+
+ l.interval = 0
+ stat_output = self._read_file("cpuload_sample_output2.txt")
+ mock_ssh.SSH.from_node().execute.side_effect = \
+ [(0, '1.50 1.45 1.51 3/813 14322', ''), (0, stat_output, '')]
+
+ l.run(self.result)
+ expected_result = {
+ 'loadavg': ['1.50', '1.45', '1.51', '3/813', '14322'],
+ 'mpstat':
+ {'cpu':
+ {'%steal': '0.00',
+ '%usr': '11.31',
+ '%gnice': '0.00',
+ '%idle': '81.78',
+ '%iowait': '0.18',
+ '%guest': '5.50',
+ '%sys': '1.19',
+ '%soft': '0.01',
+ '%irq': '0.00',
+ '%nice': '0.03'},
+ 'cpu0':
+ {'%steal': '0.00',
+ '%usr': '20.00',
+ '%gnice': '0.00',
+ '%idle': '71.60',
+ '%iowait': '0.33',
+ '%guest': '6.61',
+ '%sys': '1.37',
+ '%soft': '0.06',
+ '%irq': '0.00',
+ '%nice': '0.03'}}}
+
+ self.assertDictEqual(self.result, expected_result)
+
+ def _read_file(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ output = os.path.join(curr_path, filename)
+ with open(output) as f:
+ sample_output = f.read()
+ return sample_output
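
test_get_cpu_usage above feeds the raw /proc/stat counters from cpuload_sample_output2.txt into the scenario and expects mpstat-style percentages. The asserted figures line up if each field is taken as a share of the first eight columns (user through steal), with guest time subtracted from user and guest_nice from nice. The sketch below reconstructs that arithmetic for illustration; it is not the CPULoad implementation.

def proc_stat_to_percentages(line):
    """Convert one /proc/stat 'cpu ...' line into mpstat-like percentages."""
    fields = ['%usr', '%nice', '%sys', '%idle', '%iowait',
              '%irq', '%soft', '%steal', '%guest', '%gnice']
    values = [float(v) for v in line.split()[1:]]
    total = sum(values[:8])       # user..steal; guest time is included in user
    values[0] -= values[8]        # %usr excludes guest
    values[1] -= values[9]        # %nice excludes gnice
    return {name: '%.2f' % (100.0 * val / total)
            for name, val in zip(fields, values)}

# For the 'cpu' line in cpuload_sample_output2.txt this yields
# {'%usr': '11.31', '%sys': '1.19', '%idle': '81.78', '%guest': '5.50', ...},
# matching the values asserted in test_get_cpu_usage.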
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
new file mode 100644
index 000000000..51ffd2488
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.cyclictest.Cyclictest
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.benchmark.scenarios.compute import cyclictest
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
+class CyclictestTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.scenario_cfg = {
+ "host": "kvm.LF",
+ "setup_options": {
+ "rpm_dir": "/opt/rpm",
+ "host_setup_seqs": [
+ "host-setup0.sh",
+ "host-setup1.sh",
+ "host-run-qemu.sh"
+ ],
+ "script_dir": "/opt/scripts",
+ "image_dir": "/opt/image",
+ "guest_setup_seqs": [
+ "guest-setup0.sh",
+ "guest-setup1.sh"
+ ]
+ },
+ "sla": {
+ "action": "monitor",
+ "max_min_latency": 50,
+ "max_avg_latency": 100,
+ "max_max_latency": 1000
+ },
+ "options": {
+ "priority": 99,
+ "threads": 1,
+ "loops": 1000,
+ "affinity": 1,
+ "interval": 1000,
+ "histogram": 90
+ }
+ }
+ self.context_cfg = {
+ "host": {
+ "ip": "10.229.43.154",
+ "key_filename": "/yardstick/resources/files/yardstick_key",
+ "role": "BareMetal",
+ "name": "kvm.LF",
+ "user": "root"
+ }
+ }
+
+ def test_cyclictest_successful_setup(self, mock_ssh):
+
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ c.setup()
+ self.assertIsNotNone(c.guest)
+ self.assertIsNotNone(c.host)
+ self.assertTrue(c.setup_done)
+
+ def test_cyclictest_successful_no_sla(self, mock_ssh):
+ result = {}
+ self.scenario_cfg.pop("sla", None)
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ c.guest = mock_ssh.SSH.from_node()
+ sample_output = '{"min": 100, "avg": 500, "max": 1000}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ c.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_cyclictest_successful_sla(self, mock_ssh):
+ result = {}
+ self.scenario_cfg.update({"sla": {
+ "action": "monitor",
+ "max_min_latency": 100,
+ "max_avg_latency": 500,
+ "max_max_latency": 1000
+ }
+ })
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ c.guest = mock_ssh.SSH.from_node()
+ sample_output = '{"min": 100, "avg": 500, "max": 1000}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ c.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_min_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ c.guest = mock_ssh.SSH.from_node()
+ sample_output = '{"min": 100, "avg": 500, "max": 1000}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, c.run, result)
+
+ def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_avg_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ c.guest = mock_ssh.SSH.from_node()
+ sample_output = '{"min": 100, "avg": 500, "max": 1000}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, c.run, result)
+
+ def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ c.guest = mock_ssh.SSH.from_node()
+ sample_output = '{"min": 100, "avg": 500, "max": 1000}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, c.run, result)
+
+ def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ c.setup()
+
+ c.guest = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, c.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
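
The three unsuccessful_sla cases above all hinge on the same rule: each max_*_latency ceiling in the SLA must not be exceeded by the corresponding field of the JSON result. A generic sketch of that check follows; the field and SLA key names are taken from the test data, but the function itself is illustrative rather than the Cyclictest implementation.

def check_latency_sla(result, sla):
    """Assert that min/avg/max latencies stay within their SLA ceilings."""
    for metric in ('min', 'avg', 'max'):
        threshold = sla.get('max_%s_latency' % metric)
        if threshold is not None:
            assert result[metric] <= threshold, (
                '%s latency %s exceeds SLA %s'
                % (metric, result[metric], threshold))

# check_latency_sla({"min": 100, "avg": 500, "max": 1000},
#                   {"max_min_latency": 10})
# raises AssertionError, mirroring test_cyclictest_unsuccessful_sla_min_latency.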
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
new file mode 100644
index 000000000..b3152d12c
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.benchmark.scenarios.compute import lmbench
+
+
+# pylint: disable=unused-argument
+# The class-level mock.patch injects mock_ssh into every test, used or not.
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
+class LmbenchTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_successful_setup(self, mock_ssh):
+
+ l = lmbench.Lmbench({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ l.setup()
+ self.assertIsNotNone(l.client)
+ self.assertTrue(l.setup_done)
+
+ def test_unsuccessful_unknown_type_run(self, mock_ssh):
+
+ options = {
+ "test_type": "foo"
+ }
+ args = {'options': options}
+
+ l = lmbench.Lmbench(args, self.ctx)
+
+ self.assertRaises(RuntimeError, l.run, self.result)
+
+ def test_successful_latency_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {'options': options}
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {"options": options}
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_latency_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
+ self.assertEqual(self.result, expected_result)
+
+ def test_successful_bandwidth_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"min_bandwidth": 10000}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_unsuccessful_latency_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency",
+ "stride": 64,
+ "stop_size": 16
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '[{"latency": 37.5, "size": 0.00049}]'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, l.run, self.result)
+
+ def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "bandwidth",
+ "size": 500,
+ "benchmark": "rd",
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"min_bandwidth": 10000}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, l.run, self.result)
+
+ def test_successful_latency_for_cache_run_sla(self, mock_ssh):
+
+ options = {
+ "test_type": "latency_for_cache",
+ "repetition": 1,
+ "warmup": 0
+ }
+ args = {
+ "options": options,
+ "sla": {"max_latency": 35}
+ }
+ l = lmbench.Lmbench(args, self.ctx)
+
+ sample_output = "{\"L1cache\": 1.6}"
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ l.run(self.result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_unsuccessful_script_error(self, mock_ssh):
+
+ options = {"test_type": "bandwidth"}
+ args = {"options": options}
+ l = lmbench.Lmbench(args, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, l.run, self.result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
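
The latency cases above expect list output such as '[{"latency": 4.944, "size": 0.00049}]' to land in the result dict under flattened keys like "latencies0.latency". A small sketch of that flattening step is given below; it reproduces the expected_result of the tests but is only an illustration, not necessarily how the Lmbench scenario does it.

import json


def flatten_latencies(raw_json):
    """Turn a JSON list of latency records into 'latenciesN.<field>' keys."""
    flat = {}
    for index, record in enumerate(json.loads(raw_json)):
        for key, value in record.items():
            flat['latencies%d.%s' % (index, key)] = value
    return flat

# flatten_latencies('[{"latency": 4.944, "size": 0.00049}]')
# == {'latencies0.latency': 4.944, 'latencies0.size': 0.00049}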
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py
new file mode 100644
index 000000000..ebae9993d
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.memload.MEMLoad
+
+from __future__ import absolute_import
+import mock
+import unittest
+import os
+
+from yardstick.benchmark.scenarios.compute import memload
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.memload.ssh')
+class MEMLoadTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'root',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_memload_successful_setup(self, mock_ssh):
+ m = memload.MEMLoad({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ m.setup()
+ self.assertIsNotNone(m.client)
+ self.assertTrue(m.setup_done)
+
+ def test_execute_command_success(self, mock_ssh):
+ m = memload.MEMLoad({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ m.setup()
+
+ expected_result = 'abcdefg'
+ mock_ssh.SSH.from_node().execute.return_value = (0, expected_result, '')
+ result = m._execute_command("foo")
+ self.assertEqual(result, expected_result)
+
+ def test_execute_command_failed(self, mock_ssh):
+ m = memload.MEMLoad({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ m.setup()
+
+        mock_ssh.SSH.from_node().execute.return_value = (
+            127, '', 'Failed executing command')
+ self.assertRaises(RuntimeError, m._execute_command,
+ "cat /proc/meminfo")
+
+ def test_get_mem_usage_successful(self, mock_ssh):
+ options = {
+ "interval": 1,
+ "count": 1
+ }
+ args = {"options": options}
+ m = memload.MEMLoad(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ m.setup()
+
+ output = self._read_file("memload_sample_output.txt")
+ mock_ssh.SSH.from_node().execute.return_value = (0, output, '')
+ result = m._get_mem_usage()
+ expected_result = {
+ "max": {
+ 'shared': 2844,
+ 'buff/cache': 853528,
+ 'total': 263753976,
+ 'free': 187016644,
+ 'used': 76737332
+ },
+ "average": {
+ 'shared': 2844,
+ 'buff/cache': 853528,
+ 'total': 263753976,
+ 'free': 187016644,
+ 'used': 76737332
+ },
+ "free": {
+ "memory0": {
+ "used": "76737332",
+ "buff/cache": "853528",
+ "free": "187016644",
+ "shared": "2844",
+ "total": "263753976",
+ "available": "67252400"
+ }
+ }
+ }
+
+ self.assertEqual(result, expected_result)
+
+ def _read_file(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ output = os.path.join(curr_path, filename)
+ with open(output) as f:
+ sample_output = f.read()
+ return sample_output
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
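
test_get_mem_usage_successful above expects the columns of memload_sample_output.txt to be keyed by the free(1) header row. A compact illustration of that mapping follows; parse_free_output is a hypothetical helper, not the MEMLoad implementation.

def parse_free_output(raw):
    """Map the 'Mem:' row of `free` output onto its header columns."""
    lines = [line.split() for line in raw.splitlines() if line.strip()]
    header = lines[0]     # total used free shared buff/cache available
    mem_row = next(fields for fields in lines if fields[0] == 'Mem:')
    return {'memory0': dict(zip(header, mem_row[1:]))}

# For the sample file above this returns the "free" section asserted in the
# test: {'memory0': {'total': '263753976', 'used': '76737332',
#                    'free': '187016644', 'shared': '2844',
#                    'buff/cache': '853528', 'available': '67252400'}}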
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py
new file mode 100644
index 000000000..680f6ad65
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.plugintest.PluginTest
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.benchmark.scenarios.compute import plugintest
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.plugintest.ssh')
+class PluginTestTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'nodes': {
+ 'host1': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key",
+ 'password': "root"
+ },
+ }
+ }
+
+ self.result = {}
+
+ def test_sample_successful_setup(self, mock_ssh):
+ s = plugintest.PluginTest({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ s.setup()
+ self.assertIsNotNone(s.client)
+ self.assertTrue(s.setup_done)
+
+ def test_sample_successful(self, mock_ssh):
+ s = plugintest.PluginTest({}, self.ctx)
+
+ sample_output = '{"Test Output": "Hello world!"}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ s.run(self.result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(self.result, expected_result)
+
+ def test_sample_unsuccessful_script_error(self, mock_ssh):
+ s = plugintest.PluginTest({}, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, s.run, self.result)
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
new file mode 100644
index 000000000..26a26cdf7
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.qemu_migrate.QemuMigrate
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.benchmark.scenarios.compute import qemu_migrate
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.qemu_migrate.ssh')
+class QemuMigrateTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.scenario_cfg = {
+ "host": "kvm.LF",
+ "setup_options": {
+ "rpm_dir": "/opt/rpm",
+ "script_dir": "/opt/scripts",
+ "image_dir": "/opt/image",
+ "host_setup_seqs": [
+ "host-setup0.sh",
+ "host-setup1.sh",
+ "setup-ovsdpdk.sh",
+ "host-install-qemu.sh",
+ "host-run-qemu4lm.sh"
+ ]
+ },
+ "sla": {
+ "action": "monitor",
+ "max_totaltime": 10,
+ "max_downtime": 0.10,
+ "max_setuptime": 0.50
+ },
+ "options": {
+ "smp": 99,
+ "migrate_to_port": 4444,
+ "incoming_ip": 0,
+ "qmp_src_path": "/tmp/qmp-sock-src",
+ "qmp_dst_path": "/tmp/qmp-sock-dst",
+ "max_down_time": "0.10"
+ }
+ }
+ self.context_cfg = {
+ "host": {
+ "ip": "10.229.43.154",
+ "key_filename": "/yardstick/resources/files/yardstick_key",
+ "role": "BareMetal",
+ "name": "kvm.LF",
+ "user": "root"
+ }
+ }
+
+ def test_qemu_migrate_successful_setup(self, mock_ssh):
+
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ q.setup()
+ self.assertIsNotNone(q.host)
+ self.assertTrue(q.setup_done)
+
+ def test_qemu_migrate_successful_no_sla(self, mock_ssh):
+ result = {}
+ self.scenario_cfg.pop("sla", None)
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ q.setup()
+
+ sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ q.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_qemu_migrate_successful_sla(self, mock_ssh):
+ result = {}
+ self.scenario_cfg.update({"sla": {
+ "action": "monitor",
+ "max_totaltime": 15,
+ "max_downtime": 2,
+ "max_setuptime": 1
+ }
+ })
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ q.setup()
+
+ sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ q.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_qemu_migrate_unsuccessful_sla_totaltime(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_totaltime": 10}})
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ q.setup()
+
+ sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, q.run, result)
+
+ def test_qemu_migrate_unsuccessful_sla_downtime(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_downtime": 0.10}})
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ q.setup()
+
+ sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, q.run, result)
+
+ def test_qemu_migrate_unsuccessful_sla_setuptime(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_setuptime": 0.50}})
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ q.setup()
+
+ sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, q.run, result)
+
+ def test_qemu_migrate_unsuccessful_script_error(self, mock_ssh):
+
+ result = {}
+ self.scenario_cfg.update({"sla": {"max_totaltime": 10}})
+ q = qemu_migrate.QemuMigrate(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ q.setup()
+
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, q.run, result)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
new file mode 100644
index 000000000..4f71fbb36
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.ramspeed.Ramspeed
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.common import utils
+from yardstick.benchmark.scenarios.compute import ramspeed
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.ramspeed.ssh')
+class RamspeedTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'root',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_ramspeed_successful_setup(self, mock_ssh):
+
+ r = ramspeed.Ramspeed({}, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ r.setup()
+ self.assertIsNotNone(r.client)
+        self.assertTrue(r.setup_done)
+
+    def test_ramspeed_successful_run_no_sla(self, mock_ssh):
+
+ options = {
+ "test_id": 1,
+ "load": 16,
+ "block_size": 32
+ }
+ args = {"options": options}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ sample_output = '{"Result": [{"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 1, "Bandwidth(MBps)": 19909.18}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 2, "Bandwidth(MBps)": 19873.89},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 4, "Bandwidth(MBps)":\
+ 19907.56}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 8,\
+ "Bandwidth(MBps)": 19906.94}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 16, "Bandwidth(MBps)": 19881.74}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 32, "Bandwidth(MBps)": 19395.65},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 64, "Bandwidth(MBps)":\
+ 17623.14}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 128,\
+ "Bandwidth(MBps)": 17677.36}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 256, "Bandwidth(MBps)": 16113.49}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 512, "Bandwidth(MBps)": 14659.19},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 1024, "Bandwidth(MBps)":\
+ 14680.75}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 2048,\
+ "Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
+ "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ r.run(self.result)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
+ self.assertEqual(self.result, expected_result)
+
+ def test_ramspeed_successful_run_sla(self, mock_ssh):
+
+ options = {
+ "test_id": 1,
+ "load": 16,
+ "block_size": 32
+ }
+ args = {"options": options, "sla": {"min_bandwidth": 6000}}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ sample_output = '{"Result": [{"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 1, "Bandwidth(MBps)": 19909.18}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 2, "Bandwidth(MBps)": 19873.89},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 4, "Bandwidth(MBps)":\
+ 19907.56}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 8,\
+ "Bandwidth(MBps)": 19906.94}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 16, "Bandwidth(MBps)": 19881.74}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 32, "Bandwidth(MBps)": 19395.65},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 64, "Bandwidth(MBps)":\
+ 17623.14}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 128,\
+ "Bandwidth(MBps)": 17677.36}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 256, "Bandwidth(MBps)": 16113.49}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 512, "Bandwidth(MBps)": 14659.19},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 1024, "Bandwidth(MBps)":\
+ 14680.75}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 2048,\
+ "Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
+ "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ r.run(self.result)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
+ self.assertEqual(self.result, expected_result)
+
+ def test_ramspeed_unsuccessful_run_sla(self, mock_ssh):
+ options = {
+ "test_id": 1,
+ "load": 8,
+ "block_size": 64
+ }
+ args = {"options": options, "sla": {"min_bandwidth": 100000}}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ sample_output = '{"Result": [{"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 1, "Bandwidth(MBps)": 5000.18}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 2, "Bandwidth(MBps)": 5000.89},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 4,\
+ "Bandwidth(MBps)": 5000.56}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 8, "Bandwidth(MBps)": 19906.94}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 16, "Bandwidth(MBps)": 19881.74},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 32,\
+ "Bandwidth(MBps)": 19395.65}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 64, "Bandwidth(MBps)": 17623.14}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 128, "Bandwidth(MBps)": 17677.36},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 256, "Bandwidth(MBps)":\
+ 16113.49}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 512,\
+ "Bandwidth(MBps)": 14659.19}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 1024, "Bandwidth(MBps)": 14680.75}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 2048, "Bandwidth(MBps)": 14756.45},\
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 4096, "Bandwidth(MBps)":\
+ 14604.44}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 8192,\
+ "Bandwidth(MBps)": 14159.86}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 16384, "Bandwidth(MBps)": 14128.94}, {"Test_type":\
+ "INTEGER & WRITING", "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, r.run, self.result)
+
+ def test_ramspeed_unsuccessful_script_error(self, mock_ssh):
+ options = {
+ "test_id": 1,
+ "load": 16,
+ "block_size": 32
+ }
+ args = {"options": options}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, r.run, self.result)
+
+ def test_ramspeed_mem_successful_run_no_sla(self, mock_ssh):
+ options = {
+ "test_id": 3,
+ "load": 16,
+ "block_size": 32,
+ "iteration": 1
+ }
+ args = {"options": options}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ sample_output = '{"Result": [{"Test_type": "INTEGER Copy:",\
+ "Bandwidth(MBps)": 8353.97}, {"Test_type": "INTEGER Scale:",\
+ "Bandwidth(MBps)": 9078.59}, {"Test_type": "INTEGER Add:",\
+ "Bandwidth(MBps)": 10057.48}, {"Test_type": "INTEGER Triad:",\
+ "Bandwidth(MBps)": 10116.27}, {"Test_type": "INTEGER AVERAGE:",\
+ "Bandwidth(MBps)": 9401.58}]}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ r.run(self.result)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
+ self.assertEqual(self.result, expected_result)
+
+ def test_ramspeed_mem_successful_run_sla(self, mock_ssh):
+ options = {
+ "test_id": 3,
+ "load": 16,
+ "block_size": 32,
+ "iteration": 1
+ }
+ args = {"options": options, "sla": {"min_bandwidth": 6000}}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ sample_output = '{"Result": [{"Test_type": "INTEGER Copy:",\
+ "Bandwidth(MBps)": 8353.97}, {"Test_type": "INTEGER Scale:",\
+ "Bandwidth(MBps)": 9078.59}, {"Test_type": "INTEGER Add:",\
+ "Bandwidth(MBps)": 10057.48}, {"Test_type": "INTEGER Triad:",\
+ "Bandwidth(MBps)": 10116.27}, {"Test_type": "INTEGER AVERAGE:",\
+ "Bandwidth(MBps)": 9401.58}]}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ r.run(self.result)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
+ self.assertEqual(self.result, expected_result)
+
+ def test_ramspeed_mem_unsuccessful_run_sla(self, mock_ssh):
+ options = {
+ "test_id": 3,
+ "load": 16,
+ "block_size": 32,
+ "iteration": 1
+ }
+ args = {"options": options, "sla": {"min_bandwidth": 86000}}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+ sample_output = '{"Result": [{"Test_type": "INTEGER Copy:",\
+ "Bandwidth(MBps)": 4000.97}, {"Test_type": "INTEGER Scale:",\
+ "Bandwidth(MBps)": 4400.59}, {"Test_type": "INTEGER Add:",\
+ "Bandwidth(MBps)": 4300.48}, {"Test_type": "INTEGER Triad:",\
+ "Bandwidth(MBps)": 1300.27}, {"Test_type": "INTEGER AVERAGE:",\
+ "Bandwidth(MBps)": 2401.58}]}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, r.run, self.result)
+
+ def test_ramspeed_unsuccessful_unknown_type_run(self, mock_ssh):
+ options = {
+ "test_id": 30,
+ "load": 16,
+ "block_size": 32
+ }
+ args = {'options': options}
+ r = ramspeed.Ramspeed(args, self.ctx)
+
+        mock_ssh.SSH.from_node().execute.return_value = (
+            1, '', 'No such type_id: 30 for Ramspeed scenario')
+ self.assertRaises(RuntimeError, r.run, self.result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py
new file mode 100644
index 000000000..74612d7b6
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.spec_cpu.SpecCPU
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+
+from yardstick.benchmark.scenarios.compute import spec_cpu
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.spec_cpu.ssh')
+class SpecCPUTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'root',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_spec_cpu_successful_setup(self, mock_ssh):
+
+ options = {
+ "SPECint_benchmark": "perlbench",
+ "output_format": "all",
+ "runspec_iterations": "1",
+ "runspec_tune": "base",
+ "runspec_size": "test"
+ }
+ args = {"options": options}
+ s = spec_cpu.SpecCPU(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ s.setup()
+ self.assertIsNotNone(s.client)
+        self.assertTrue(s.setup_done)
+
+    def test_spec_cpu_successful_run_no_sla(self, mock_ssh):
+
+ options = {
+ "SPECint_benchmark": "perlbench",
+ "runspec_tune": "all",
+ "output_format": "all"
+ }
+ args = {"options": options}
+ s = spec_cpu.SpecCPU(args, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ s.run(self.result)
+ expected_result = {}
+ self.assertEqual(self.result, expected_result)
+
+    def test_spec_cpu_unsuccessful_script_error(self, mock_ssh):
+ options = {
+ "benchmark_subset": "int"
+ }
+ args = {"options": options}
+ s = spec_cpu.SpecCPU(args, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, s.run, self.result)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py
new file mode 100644
index 000000000..c428e1fb8
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.spec_cpu_for_vm.SpecCPUforVM
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+
+from yardstick.benchmark.scenarios.compute import spec_cpu_for_vm
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.spec_cpu_for_vm.ssh')
+class SpecCPUforVMTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'root',
+ 'key_filename': "mykey.key"
+ }
+ }
+
+ self.result = {}
+
+ def test_spec_cpu_successful_setup(self, mock_ssh):
+
+ options = {
+ "SPECint_benchmark": "perlbench",
+ "runspec_tune": "all",
+ "output_format": "all",
+ "runspec_iterations": "1",
+ "runspec_size": "test"
+ }
+ args = {"options": options}
+ s = spec_cpu_for_vm.SpecCPUforVM(args, self.ctx)
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ s.setup()
+ self.assertIsNotNone(s.client)
+        self.assertTrue(s.setup_done)
+
+    def test_spec_cpu_successful_run_no_sla(self, mock_ssh):
+
+ options = {
+ "SPECint_benchmark": "perlbench",
+ "runspec_tune": "all",
+ "output_format": "all"
+ }
+ args = {"options": options}
+ s = spec_cpu_for_vm.SpecCPUforVM(args, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ mock_ssh.SSH.from_node().get.return_value = (0, '', '')
+ s.run(self.result)
+ expected_result = {'SPEC_CPU_result': ''}
+ self.assertEqual(self.result, expected_result)
+
+ def test_spec_cpu_unsuccessful_script_error(self, mock_ssh):
+ options = {
+ "benchmark_subset": "int"
+ }
+ args = {"options": options}
+ s = spec_cpu_for_vm.SpecCPUforVM(args, self.ctx)
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, s.run, self.result)
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
new file mode 100644
index 000000000..fec355b45
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench
+
+from __future__ import absolute_import
+
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
+
+from yardstick.benchmark.scenarios.compute import unixbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
+class UnixbenchTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ "host": {
+ "ip": "192.168.50.28",
+ "user": "root",
+ "key_filename": "mykey.key"
+ }
+ }
+
+ def test_unixbench_successful_setup(self, mock_ssh):
+
+        u = unixbench.Unixbench({}, self.ctx)
+        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+        u.setup()
+ self.assertIsNotNone(u.client)
+ self.assertTrue(u.setup_done)
+
+ def test_unixbench_successful_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'verbose'
+ }
+ args = {
+ "options": options,
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH.from_node()
+
+ sample_output = '{"Score":"4425.4"}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ u.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_unixbench_successful_in_quiet_mode(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'quiet',
+ "copies": 1
+ }
+ args = {
+ "options": options,
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH.from_node()
+
+ sample_output = '{"Score":"4425.4"}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ u.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_unixbench_successful_sla(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'verbose'
+ }
+ sla = {
+ "single_score": '100',
+ "parallel_score": '500'
+ }
+ args = {
+ "options": options,
+ "sla": sla
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH.from_node()
+
+        sample_output = '{"single_score":"2251.7","parallel_score":"4395.9"}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ u.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh):
+
+ args = {
+ "options": {},
+ "sla": {"single_score": "500"}
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH.from_node()
+ sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, u.run, result)
+
+ def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
+
+ args = {
+ "options": {},
+ "sla": {"parallel_score": "4000"}
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH.from_node()
+        sample_output = '{"single_score":"2251.7","parallel_score":"3395.9"}'
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, u.run, result)
+
+ def test_unixbench_unsuccessful_script_error(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'verbose'
+ }
+ sla = {
+ "single_score": '100',
+ "parallel_score": '500'
+ }
+ args = {
+ "options": options,
+ "sla": sla
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, u.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()