author     rexlee8776 <limingjiang@huawei.com>    2017-07-05 02:50:06 +0000
committer  rexlee8776 <limingjiang@huawei.com>    2017-07-06 07:49:10 +0000
commit     25a37b2048281c64719bd6ad67860f65f6c31546 (patch)
tree       726e1ff64ea3470ab6b91db8fdbc386383f08903 /tests
parent     3369461d0e51b22aa96e5bfd0d80b3f0f7f82c67 (diff)
move flatten dict key to common utils
So it can easily be used by other test cases to unify results.

JIRA: YARDSTICK-702
Change-Id: Id4fde38a9a0c2a87a6c870bdb7b0c8f3a3b371ac
Signed-off-by: rexlee8776 <limingjiang@huawei.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_lmbench.py    13
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_ramspeed.py    9
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_iperf3.py   7
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_ping.py     4
-rw-r--r--  tests/unit/common/test_utils.py                            31
-rw-r--r--  tests/unit/dispatcher/test_influxdb.py                     29
6 files changed, 51 insertions, 42 deletions
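
For context: the updated tests expect yardstick.common.utils.flatten_dict_key to collapse
nested dicts and lists into single-level dotted keys, the behaviour previously provided by
InfluxdbDispatcher._dict_key_flatten. The following is only a minimal sketch of that
behaviour, inferred from the expected results in the tests below; the real helper in
yardstick.common.utils may be implemented differently.

def flatten_dict_key(data):
    """Flatten nested dicts/lists into single-level keys (illustrative sketch).

    Behaviour inferred from test_utils.py and test_lmbench.py below:
      {"rtt": {"ares": 100.0}}      -> {"rtt.ares": 100.0}
      {"loadavg": ["1.09", "0.29"]} -> {"loadavg0": "1.09", "loadavg1": "0.29"}
    """
    next_data = {}
    # Recursion stops once no value is a dict or a list.
    if not any(isinstance(v, (dict, list)) for v in data.values()):
        return data
    for k, v in data.items():
        if isinstance(v, dict):
            # Join nested dict keys with a dot: parent.child
            for n_k, n_v in v.items():
                next_data["%s.%s" % (k, n_k)] = n_v
        elif isinstance(v, list):
            # Append the list index to the parent key: parent0, parent1, ...
            for index, item in enumerate(v):
                next_data["%s%d" % (k, index)] = item
        else:
            next_data[k] = v
    return flatten_dict_key(next_data)

Under this sketch, flatten_dict_key({"latencies": [{"latency": 4.944, "size": 0.00049}]})
yields {"latencies0.latency": 4.944, "latencies0.size": 0.00049}, which matches the
expected_result used in test_lmbench.py.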
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 08f5da332..65939c6ba 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -68,8 +68,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 4.944, "size": 0.00049}]'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = jsonutils.loads(
- '{"latencies": ' + sample_output + "}")
+ expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
self.assertEqual(self.result, expected_result)
def test_successful_bandwidth_run_no_sla(self, mock_ssh):
@@ -105,8 +104,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 4.944, "size": 0.00049}]'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = jsonutils.loads(
- '{"latencies": ' + sample_output + "}")
+ expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
self.assertEqual(self.result, expected_result)
def test_successful_bandwidth_run_sla(self, mock_ssh):
@@ -191,3 +189,10 @@ class LmbenchTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, l.run, self.result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index 85d49641e..4f71fbb36 100644
--- a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -18,6 +18,7 @@ import unittest
import mock
from oslo_serialization import jsonutils
+from yardstick.common import utils
from yardstick.benchmark.scenarios.compute import ramspeed
@@ -77,7 +78,7 @@ class RamspeedTestCase(unittest.TestCase):
"Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
self.assertEqual(self.result, expected_result)
def test_ramspeed_successful_run_sla(self, mock_ssh):
@@ -113,7 +114,7 @@ class RamspeedTestCase(unittest.TestCase):
"Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
self.assertEqual(self.result, expected_result)
def test_ramspeed_unsuccessful_run_sla(self, mock_ssh):
@@ -179,7 +180,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 9401.58}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
self.assertEqual(self.result, expected_result)
def test_ramspeed_mem_successful_run_sla(self, mock_ssh):
@@ -200,7 +201,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 9401.58}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
self.assertEqual(self.result, expected_result)
def test_ramspeed_mem_unsuccessful_run_sla(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 45ff1b779..331245357 100644
--- a/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -19,6 +19,7 @@ import unittest
import mock
from oslo_serialization import jsonutils
+from yardstick.common import utils
from yardstick.benchmark.scenarios.networking import iperf3
@@ -81,7 +82,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
p.run(result)
self.assertEqual(result, expected_result)
@@ -100,7 +101,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
p.run(result)
self.assertEqual(result, expected_result)
@@ -135,7 +136,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- expected_result = jsonutils.loads(sample_output)
+ expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
p.run(result)
self.assertEqual(result, expected_result)
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping.py b/tests/unit/benchmark/scenarios/networking/test_ping.py
index 5269309c2..06353249a 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -45,7 +45,7 @@ class PingTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
p.run(result)
- self.assertEqual(result, {'rtt': {'ares': 100.0}})
+ self.assertEqual(result, {'rtt.ares': 100.0})
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_successful_sla(self, mock_ssh):
@@ -61,7 +61,7 @@ class PingTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
p.run(result)
- self.assertEqual(result, {'rtt': {'ares': 100.0}})
+ self.assertEqual(result, {'rtt.ares': 100.0})
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_unsuccessful_sla(self, mock_ssh):
diff --git a/tests/unit/common/test_utils.py b/tests/unit/common/test_utils.py
index 8f52b53b0..c4c61ceeb 100644
--- a/tests/unit/common/test_utils.py
+++ b/tests/unit/common/test_utils.py
@@ -109,6 +109,37 @@ class GetParaFromYaml(unittest.TestCase):
return file_path
+class CommonUtilTestCase(unittest.TestCase):
+ def setUp(self):
+ self.data = {
+ "benchmark": {
+ "data": {
+ "mpstat": {
+ "cpu0": {
+ "%sys": "0.00",
+ "%idle": "99.00"
+ },
+ "loadavg": [
+ "1.09",
+ "0.29"
+ ]
+ },
+ "rtt": "1.03"
+ }
+ }
+ }
+ def test__dict_key_flatten(self):
+ line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
+ 'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
+ # need to sort for assert to work
+ line = ",".join(sorted(line.split(',')))
+ flattened_data = utils.flatten_dict_key(
+ self.data['benchmark']['data'])
+ result = ",".join(
+ ("=".join(item) for item in sorted(flattened_data.items())))
+ self.assertEqual(result, line)
+
+
def main():
unittest.main()
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
index a5d9b0754..7ebe8c90b 100644
--- a/tests/unit/dispatcher/test_influxdb.py
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -76,23 +76,6 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
},
"runner_id": 8921
}
- self.data3 = {
- "benchmark": {
- "data": {
- "mpstat": {
- "cpu0": {
- "%sys": "0.00",
- "%idle": "99.00"
- },
- "loadavg": [
- "1.09",
- "0.29"
- ]
- },
- "rtt": "1.03"
- }
- }
- }
self.yardstick_conf = {'dispatcher_influxdb': {}}
@@ -113,18 +96,6 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
}
self.assertEqual(influxdb.flush_result_data(data), 0)
- def test__dict_key_flatten(self):
- line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
- 'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
- # need to sort for assert to work
- line = ",".join(sorted(line.split(',')))
- influxdb = InfluxdbDispatcher(self.yardstick_conf)
- flattened_data = influxdb._dict_key_flatten(
- self.data3['benchmark']['data'])
- result = ",".join(
- [k + "=" + v for k, v in sorted(flattened_data.items())])
- self.assertEqual(result, line)
-
def test__get_nano_timestamp(self):
influxdb = InfluxdbDispatcher(self.yardstick_conf)
results = {'timestamp': '1451461248.925574'}