commit f036e9898a69f5041f9cde02e3652c29e2de1643 (patch)
tree   36e5eea75811bb640bb30f442f5a3c617e945909 /tests/unit/benchmark/scenarios/compute/test_lmbench.py
parent 5f0b3d417244397b2d5e61c7a6ddd145f1d25046 (diff)
author date:    2016-12-05 16:11:54 -0500
committer date: 2017-01-12 18:25:04 -0800
Add support for Python 3
Porting to Python 3 using the OpenStack guidelines:
https://wiki.openstack.org/wiki/Python3
This passes unit tests on Python 3.5 and passes the opnfv_smoke suite.
Updates (a sketch illustrating several of these patterns follows the list):
use six for urlparse and urlopen
handle the removal of the exception.message attribute in Python 3
run unit tests on Python 3
use unittest.mock on Python 3
fix open mock for vsperf
fix float division by using delta/epsilon comparison
use unicode in StringIO
use plugin/sample_config.yaml relative path from test case
fixed apexlake unittests
upgraded to mock 2.0.0 to match python3 unittest.mock features
fixed flake8 issues
implement safe JSON encoding/decoding with oslo_serialization.jsonutils (dump_as_bytes() to encode, loads() to decode)
implement safe unicode encode/decode with oslo_utils.encodeutils
heat: convert pub key file from bytes to unicode; pkg_resources returns raw bytes on Python 3, which has to be decoded to UTF-8 unicode so JSON can encode it for the Heat template
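
A minimal sketch of several of the patterns above, for reference. The URLs, values, and key material are illustrative only, not taken from the yardstick sources:

# Minimal sketch of the porting patterns listed above; values and
# file contents are illustrative, not from the yardstick sources.
from __future__ import absolute_import

import io

from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlopen  # imported, not called here

# six.moves gives one import path for urlparse/urlopen on Python 2 and 3
host = urlparse("http://192.168.1.1:5000/v2.0").netloc

# float division: compare against a delta/epsilon instead of ==
EPSILON = 1e-6
assert abs(10.0 / 3.0 - 3.3333333333) <= EPSILON

# safe JSON: dump_as_bytes() always yields bytes; loads() accepts
# both str and bytes on either Python
body = jsonutils.dump_as_bytes({"latencies": [{"latency": 4.944}]})
assert jsonutils.loads(body) == {"latencies": [{"latency": 4.944}]}

# safe unicode encode/decode via oslo_utils.encodeutils
text = encodeutils.safe_decode(b"ssh-rsa AAAA...")  # bytes -> unicode
raw = encodeutils.safe_encode(text)                 # unicode -> bytes

# io.StringIO accepts only unicode on Python 3, hence the u'' literal
buf = io.StringIO(u"cpu  795702 4891 274079\n")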
JIRA: YARDSTICK-452
Change-Id: Ib80dd1d0c0eb0592acd832b82f6a7f8f7c20bfda
Signed-off-by: Ross Brattain <ross.b.brattain@intel.com>
Diffstat (limited to 'tests/unit/benchmark/scenarios/compute/test_lmbench.py')
tests/unit/benchmark/scenarios/compute/test_lmbench.py | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 6be116371..5b72ef75d 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -11,9 +11,12 @@
 
 # Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
 
-import mock
+from __future__ import absolute_import
+
 import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
 
 from yardstick.benchmark.scenarios.compute import lmbench
 
@@ -65,7 +68,8 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        expected_result = jsonutils.loads(
+            '{"latencies": ' + sample_output + "}")
         self.assertEqual(self.result, expected_result)
 
     def test_successful_bandwidth_run_no_sla(self, mock_ssh):
@@ -82,7 +86,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_successful_latency_run_sla(self, mock_ssh):
@@ -101,7 +105,8 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        expected_result = jsonutils.loads(
+            '{"latencies": ' + sample_output + "}")
         self.assertEqual(self.result, expected_result)
 
     def test_successful_bandwidth_run_sla(self, mock_ssh):
@@ -121,7 +126,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_unsuccessful_latency_run_sla(self, mock_ssh):
@@ -163,7 +168,7 @@ class LmbenchTestCase(unittest.TestCase):
 
         options = {
             "test_type": "latency_for_cache",
-            "repetition":1,
+            "repetition": 1,
             "warmup": 0
         }
         args = {
@@ -175,7 +180,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = "{\"L1cache\": 1.6}"
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = json.loads(sample_output)
+        expected_result = jsonutils.loads(sample_output)
         self.assertEqual(self.result, expected_result)
 
     def test_unsuccessful_script_error(self, mock_ssh):
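
For context on the hunks above: every test stubs the scenario's SSH layer so that execute() returns a canned (status, stdout, stderr) tuple. A minimal sketch of that pattern follows; the mock.patch target is an assumption inferred from the mock_ssh argument each test method receives, not copied from the test module.

# Sketch of the SSH-mocking pattern used by these tests; the patch
# target below is an assumption, not copied verbatim from yardstick.
import unittest

import mock
from oslo_serialization import jsonutils


@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
class SshStubExample(unittest.TestCase):

    def test_json_stdout_is_decoded(self, mock_ssh):
        # execute() is stubbed to return (status, stdout, stderr);
        # status 0 with JSON on stdout is the success path
        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
        mock_ssh.SSH().execute.return_value = (0, sample_output, '')

        status, stdout, _ = mock_ssh.SSH().execute("lmbench ...")
        self.assertEqual(status, 0)
        self.assertEqual(jsonutils.loads(stdout)["bandwidth(MBps)"], 11025.5)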