author     Ross Brattain <ross.b.brattain@intel.com>    2016-12-05 16:11:54 -0500
committer  Ross Brattain <ross.b.brattain@intel.com>    2017-01-12 18:25:04 -0800
commit     f036e9898a69f5041f9cde02e3652c29e2de1643 (patch)
tree       36e5eea75811bb640bb30f442f5a3c617e945909 /tests/unit/benchmark/scenarios/compute
parent     5f0b3d417244397b2d5e61c7a6ddd145f1d25046 (diff)
Add support for Python 3
Porting to Python 3 using the OpenStack guidelines:
https://wiki.openstack.org/wiki/Python3
This passes the unit tests on Python 3.5 and passes the opnfv_smoke suite.
Updates:
use six for urlparse and urlopen (see the porting sketch after this list)
handle the removal of the exception.message attribute
run unit tests on Python 3
use unittest.mock on Python 3
fix open mock for vsperf
fix float division by using delta/epsilon comparison
use unicode in StringIO
use plugin/sample_config.yaml relative path from test case
fixed apexlake unit tests
upgraded to mock 2.0.0 to match python3 unittest.mock features
fixed flake8 issues
implement safe JSON encoding/decoding with oslo_serialization.jsonutils (dump_as_bytes(), loads())
implement safe unicode encode/decode with oslo_utils.encodeutils
heat: convert the pub key file from bytes to unicode;
pkg_resources returns raw bytes on Python 3,
which we have to decode to UTF-8 unicode
so JSON can encode it for the heat template (sketched below)
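
For reference, a minimal sketch of the porting idioms listed above, written as a
stand-alone test case. It is illustrative only: the class and test names are
made up for this note and are not part of the Yardstick tree.

    # Illustrative sketch only -- not Yardstick code. Shows the py2/py3
    # compatible idioms referenced in the list above.
    from __future__ import absolute_import

    import io
    import unittest

    from six.moves.urllib.parse import urlparse  # urlopen maps the same way
                                                 # via six.moves.urllib.request
    from oslo_serialization import jsonutils
    from oslo_utils import encodeutils

    try:
        from unittest import mock    # stdlib on Python 3
    except ImportError:
        import mock                  # mock 2.0.0 package on Python 2


    class PortingIdiomsTestCase(unittest.TestCase):

        def test_urlparse(self):
            # six.moves resolves to urlparse on py2 and urllib.parse on py3
            self.assertEqual(urlparse("http://localhost:5000/v2").scheme,
                             "http")

        def test_json_roundtrip(self):
            # jsonutils provides a bytes-producing encode and a tolerant decode
            raw = jsonutils.dump_as_bytes({"HITS": 6462})
            self.assertEqual(jsonutils.loads(raw), {"HITS": 6462})

        def test_unicode_helpers(self):
            # encodeutils makes the bytes<->unicode boundary explicit
            data = encodeutils.safe_encode(u"yardstick")
            self.assertEqual(encodeutils.safe_decode(data, 'utf-8'),
                             u"yardstick")
            # io.StringIO accepts only unicode text on Python 3
            buf = io.StringIO(u"sample output\n")
            self.assertEqual(buf.read(), u"sample output\n")

        def test_float_comparison(self):
            # compare floats with a delta/epsilon rather than exact equality
            self.assertAlmostEqual(1.0 / 3.0, 0.3333, delta=0.001)

        def test_exception_text(self):
            # Python 3 removed Exception.message; use str(exc) instead
            try:
                raise RuntimeError("boom")
            except RuntimeError as exc:
                self.assertEqual(str(exc), "boom")

        def test_mock_is_same_api(self):
            # the same mock API is importable from either location
            with mock.patch('io.StringIO') as patched:
                io.StringIO(u"x")
                patched.assert_called_once_with(u"x")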
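
And a sketch of the heat pub-key fix described in the last item. The package
and resource names here are placeholders, not the actual paths used by the
Yardstick heat context:

    # Illustrative sketch only -- package/resource names are placeholders.
    from __future__ import absolute_import

    import pkg_resources
    from oslo_serialization import jsonutils
    from oslo_utils import encodeutils


    def load_pub_key(package='yardstick.resources',
                     resource='files/yardstick_key.pub'):
        """Read a packaged public key as unicode text."""
        # pkg_resources.resource_string() returns raw bytes on Python 3...
        raw = pkg_resources.resource_string(package, resource)
        # ...so decode it to UTF-8 unicode before it goes into the template
        return encodeutils.safe_decode(raw, 'utf-8')


    def template_to_body(template):
        """Serialize a heat template dict safely to bytes."""
        return jsonutils.dump_as_bytes(template)

With the key decoded up front, the template stays pure unicode and jsonutils
can serialize it without hitting bytes/str mismatches.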
JIRA: YARDSTICK-452
Change-Id: Ib80dd1d0c0eb0592acd832b82f6a7f8f7c20bfda
Signed-off-by: Ross Brattain <ross.b.brattain@intel.com>
Diffstat (limited to 'tests/unit/benchmark/scenarios/compute')
9 files changed, 89 insertions, 59 deletions
diff --git a/tests/unit/benchmark/scenarios/compute/test_cachestat.py b/tests/unit/benchmark/scenarios/compute/test_cachestat.py
index f5a6b5ff9..8a06c754b 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cachestat.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cachestat.py
@@ -11,6 +11,7 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.cachestat.CACHEstat
 
+from __future__ import absolute_import
 import mock
 import unittest
 import os
@@ -72,11 +73,19 @@ class CACHEstatTestCase(unittest.TestCase):
         output = self._read_file("cachestat_sample_output.txt")
         mock_ssh.SSH().execute.return_value = (0, output, '')
         result = c._get_cache_usage()
-        expected_result = {"cachestat": {"cache0": {"HITS": "6462",\
-        "DIRTIES": "29", "RATIO": "100.0%", "MISSES": "0", "BUFFERS_MB": "1157",\
-        "CACHE_MB": "66782"}}, "average": {"HITS": 6462, "DIRTIES": 29, "RATIO": "100.0%",\
-        "MISSES": 0, "BUFFERS_MB":1157, "CACHE_MB": 66782}, "max": {"HITS": 6462,\
-        "DIRTIES": 29, "RATIO": 100.0, "MISSES": 0, "BUFFERS_MB": 1157, "CACHE_MB": 66782}}
+        expected_result = {"cachestat": {"cache0": {"HITS": "6462",
+                                                    "DIRTIES": "29",
+                                                    "RATIO": "100.0%",
+                                                    "MISSES": "0",
+                                                    "BUFFERS_MB": "1157",
+                                                    "CACHE_MB": "66782"}},
+                           "average": {"HITS": 6462, "DIRTIES": 29,
+                                       "RATIO": "100.0%",
+                                       "MISSES": 0, "BUFFERS_MB": 1157,
+                                       "CACHE_MB": 66782},
+                           "max": {"HITS": 6462,
+                                   "DIRTIES": 29, "RATIO": 100.0, "MISSES": 0,
+                                   "BUFFERS_MB": 1157, "CACHE_MB": 66782}}
 
         self.assertEqual(result, expected_result)
 
diff --git a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py b/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
index da06b5dbb..4efa66932 100644
--- a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
+++ b/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
@@ -9,12 +9,15 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-# Unittest for yardstick.benchmark.scenarios.compute.computecapacity.ComputeCapacity
+# Unittest for
+# yardstick.benchmark.scenarios.compute.computecapacity.ComputeCapacity
+
+from __future__ import absolute_import
 
-import mock
 import unittest
-import os
-import json
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import computecapacity
 
@@ -53,7 +56,7 @@ class ComputeCapacityTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, SAMPLE_OUTPUT, '')
         c.run(self.result)
 
-        expected_result = json.loads(SAMPLE_OUTPUT)
+        expected_result = jsonutils.loads(SAMPLE_OUTPUT)
         self.assertEqual(self.result, expected_result)
 
     def test_capacity_unsuccessful_script_error(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_cpuload.py b/tests/unit/benchmark/scenarios/compute/test_cpuload.py
index 77f2a02d8..ffa781215 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cpuload.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cpuload.py
@@ -11,6 +11,7 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
 
+from __future__ import absolute_import
 import mock
 import unittest
 import os
@@ -208,7 +209,7 @@ class CPULoadTestCase(unittest.TestCase):
                     '%nice': '0.03'}}}
 
         self.assertDictEqual(result, expected_result)
-
+
     def test_run_proc_stat(self, mock_ssh):
         options = {
             "interval": 1,
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index 807429025..04ca2abf9 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -11,9 +11,12 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.cyclictest.Cyclictest
 
-import mock
+from __future__ import absolute_import
+
 import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import cyclictest
 
@@ -85,17 +88,17 @@ class CyclictestTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
         c.run(result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(result, expected_result)
 
     def test_cyclictest_successful_sla(self, mock_ssh):
         result = {}
         self.scenario_cfg.update({"sla": {
-            "action": "monitor",
-            "max_min_latency": 100,
-            "max_avg_latency": 500,
-            "max_max_latency": 1000
-            }
+            "action": "monitor",
+            "max_min_latency": 100,
+            "max_avg_latency": 500,
+            "max_max_latency": 1000
+        }
         })
         c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
         mock_ssh.SSH().execute.return_value = (0, '', '')
@@ -106,7 +109,7 @@ class CyclictestTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
         c.run(result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(result, expected_result)
 
     def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 6be116371..5b72ef75d 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -11,9 +11,12 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
 
-import mock
+from __future__ import absolute_import
+
 import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import lmbench
 
@@ -65,7 +68,8 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        expected_result = jsonutils.loads(
+            '{"latencies": ' + sample_output + "}")
         self.assertEqual(self.result, expected_result)
 
     def test_successful_bandwidth_run_no_sla(self, mock_ssh):
@@ -82,7 +86,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_successful_latency_run_sla(self, mock_ssh):
@@ -101,7 +105,8 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        expected_result = jsonutils.loads(
+            '{"latencies": ' + sample_output + "}")
         self.assertEqual(self.result, expected_result)
 
     def test_successful_bandwidth_run_sla(self, mock_ssh):
@@ -121,7 +126,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_unsuccessful_latency_run_sla(self, mock_ssh):
@@ -163,7 +168,7 @@ class LmbenchTestCase(unittest.TestCase):
 
         options = {
             "test_type": "latency_for_cache",
-            "repetition":1,
+            "repetition": 1,
             "warmup": 0
         }
         args = {
@@ -175,7 +180,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = "{\"L1cache\": 1.6}"
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_unsuccessful_script_error(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_memload.py b/tests/unit/benchmark/scenarios/compute/test_memload.py
index cdf518d82..76625ef11 100644
--- a/tests/unit/benchmark/scenarios/compute/test_memload.py
+++ b/tests/unit/benchmark/scenarios/compute/test_memload.py
@@ -11,6 +11,7 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.memload.MEMLoad
 
+from __future__ import absolute_import
 import mock
 import unittest
 import os
@@ -74,15 +75,17 @@ class MEMLoadTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, output, '')
         result = m._get_mem_usage()
         expected_result = {"max": {"used": 76737332, "cached": 67252400,
-                           "free": 187016644, "shared": 2844,
-                           "total": 263753976, "buffers": 853528},
+                                   "free": 187016644, "shared": 2844,
+                                   "total": 263753976, "buffers": 853528},
                            "average": {"used": 76737332, "cached": 67252400,
-                           "free": 187016644, "shared": 2844,
-                           "total": 263753976, "buffers": 853528},
+                                       "free": 187016644, "shared": 2844,
+                                       "total": 263753976, "buffers": 853528},
                            "free": {"memory0": {"used": "76737332",
-                           "cached": "67252400", "free": "187016644",
-                           "shared": "2844", "total": "263753976",
-                           "buffers": "853528"}}}
+                                                "cached": "67252400",
+                                                "free": "187016644",
+                                                "shared": "2844",
+                                                "total": "263753976",
+                                                "buffers": "853528"}}}
         self.assertEqual(result, expected_result)
 
     def _read_file(self, filename):
@@ -91,4 +94,3 @@ class MEMLoadTestCase(unittest.TestCase):
         with open(output) as f:
             sample_output = f.read()
         return sample_output
-
diff --git a/tests/unit/benchmark/scenarios/compute/test_plugintest.py b/tests/unit/benchmark/scenarios/compute/test_plugintest.py
index 94f52738c..a5331caf7 100644
--- a/tests/unit/benchmark/scenarios/compute/test_plugintest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_plugintest.py
@@ -11,10 +11,12 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.plugintest.PluginTest
 
-import mock
-import json
+from __future__ import absolute_import
+
 import unittest
-import os
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import plugintest
 
@@ -50,7 +52,7 @@ class PluginTestTestCase(unittest.TestCase):
         sample_output = '{"Test Output": "Hello world!"}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         s.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_sample_unsuccessful_script_error(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index 100102d19..82cc93870 100644
--- a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -11,9 +11,12 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.ramspeed.Ramspeed
 
-import mock
+from __future__ import absolute_import
+
 import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import ramspeed
 
@@ -69,12 +72,12 @@ class RamspeedTestCase(unittest.TestCase):
         "Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
         "Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
         "INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
-        {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384, "Bandwidth(MBps)":\
-        14128.94}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 32768,\
-        "Bandwidth(MBps)": 8340.85}]}'
+        {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
+        "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
+        "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_successful_run_sla(self, mock_ssh):
@@ -105,12 +108,12 @@ class RamspeedTestCase(unittest.TestCase):
         "Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
         "Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
         "INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
-        {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384, "Bandwidth(MBps)":\
-        14128.94}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 32768,\
-        "Bandwidth(MBps)": 8340.85}]}'
+        {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
+        "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
+        "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_unsuccessful_run_sla(self, mock_ssh):
@@ -176,7 +179,7 @@ class RamspeedTestCase(unittest.TestCase):
         "Bandwidth(MBps)": 9401.58}]}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_mem_successful_run_sla(self, mock_ssh):
@@ -197,7 +200,7 @@ class RamspeedTestCase(unittest.TestCase):
         "Bandwidth(MBps)": 9401.58}]}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_mem_unsuccessful_run_sla(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
index 0935bcad2..747bda1ed 100644
--- a/tests/unit/benchmark/scenarios/compute/test_unixbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -11,9 +11,12 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench
 
-import mock
+from __future__ import absolute_import
+
 import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import unixbench
 
@@ -57,7 +60,7 @@ class UnixbenchTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
         u.run(result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(result, expected_result)
 
     def test_unixbench_successful_in_quiet_mode(self, mock_ssh):
@@ -65,7 +68,7 @@ class UnixbenchTestCase(unittest.TestCase):
         options = {
             "test_type": 'dhry2reg',
             "run_mode": 'quiet',
-            "copies":1
+            "copies": 1
         }
         args = {
             "options": options,
@@ -79,10 +82,9 @@ class UnixbenchTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
         u.run(result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(result, expected_result)
 
-
     def test_unixbench_successful_sla(self, mock_ssh):
 
         options = {
@@ -106,7 +108,7 @@ class UnixbenchTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
         u.run(result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(result, expected_result)
 
     def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh):