-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_cyclictest.py  | 24
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_iperf3.py   | 18
-rwxr-xr-x  tests/unit/benchmark/scenarios/networking/test_netperf.py  | 12
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_ping.py     | 16
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_pktgen.py   | 15
-rw-r--r--  tests/unit/benchmark/scenarios/storage/test_fio.py         | 28
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py                  |  2
-rw-r--r--  yardstick/benchmark/runners/duration.py                    |  2
-rwxr-xr-x  yardstick/benchmark/runners/iteration.py                   |  2
-rw-r--r--  yardstick/benchmark/runners/sequence.py                    |  2
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py        | 15
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py           | 17
-rw-r--r--  yardstick/benchmark/scenarios/compute/perf.py              | 15
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py         | 14
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf.py        | 13
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py           | 10
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py         | 14
-rw-r--r--  yardstick/benchmark/scenarios/storage/fio.py               | 15
-rw-r--r--  yardstick/plot/plotter.py                                  |  2
19 files changed, 138 insertions(+), 98 deletions(-)
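
The pattern running through this whole commit: every scenario's run() stops
returning its measurements and instead fills in a dict supplied by the runner,
and per-metric SLA asserts are folded into a single aggregated assert. A
minimal sketch of the contract change (condensed, hypothetical class names;
the rationale about partial results is inferred from the runner hunks below):

    import json

    class OldStyleScenario(object):
        def run(self, args):
            # old contract: parse, SLA-check, then return the data;
            # an SLA AssertionError meant the caller never saw the data
            data = json.loads('{"min": 100, "avg": 500, "max": 1000}')
            return data

    class NewStyleScenario(object):
        def run(self, args, result):
            # new contract: update the caller's dict in place first,
            # so measurements can survive a later SLA AssertionError
            result.update(json.loads('{"min": 100, "avg": 500, "max": 1000}'))

This also explains the test churn below: every test now allocates result = {}
and asserts on it after run() returns.
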
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index 3791b4a76..28dc4d6b3 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -51,12 +51,14 @@ class CyclictestTestCase(unittest.TestCase):
args = {
"options": options,
}
+ result = {}
+
c.server = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = c.run(args)
+ c.run(args, result)
expected_result = json.loads(sample_output)
self.assertEqual(result, expected_result)
@@ -80,12 +82,14 @@ class CyclictestTestCase(unittest.TestCase):
"options": options,
"sla": sla
}
+ result = {}
+
c.server = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = c.run(args)
+ c.run(args, result)
expected_result = json.loads(sample_output)
self.assertEqual(result, expected_result)
@@ -96,11 +100,13 @@ class CyclictestTestCase(unittest.TestCase):
"options": {},
"sla": {"max_min_latency": 10}
}
+ result = {}
+
c.server = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, args)
+ self.assertRaises(AssertionError, c.run, args, result)
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
@@ -109,11 +115,13 @@ class CyclictestTestCase(unittest.TestCase):
"options": {},
"sla": {"max_avg_latency": 10}
}
+ result = {}
+
c.server = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, args)
+ self.assertRaises(AssertionError, c.run, args, result)
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
@@ -122,11 +130,13 @@ class CyclictestTestCase(unittest.TestCase):
"options": {},
"sla": {"max_max_latency": 10}
}
+ result = {}
+
c.server = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, args)
+ self.assertRaises(AssertionError, c.run, args, result)
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
@@ -148,10 +158,12 @@ class CyclictestTestCase(unittest.TestCase):
"options": options,
"sla": sla
}
+ result = {}
+
c.server = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, c.run, args)
+ self.assertRaises(RuntimeError, c.run, args, result)
def main():
diff --git a/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 8b0da655b..2ec73ebd2 100644
--- a/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -67,11 +67,12 @@ class IperfTestCase(unittest.TestCase):
options = {}
args = {'options': options}
+ result = {}
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
- result = p.run(args)
+ p.run(args, result)
self.assertEqual(result, expected_result)
def test_iperf_successful_sla(self, mock_ssh):
@@ -85,11 +86,12 @@ class IperfTestCase(unittest.TestCase):
'options': options,
'sla': {'bytes_per_second': 15000000}
}
+ result = {}
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
- result = p.run(args)
+ p.run(args, result)
self.assertEqual(result, expected_result)
def test_iperf_unsuccessful_sla(self, mock_ssh):
@@ -103,10 +105,11 @@ class IperfTestCase(unittest.TestCase):
'options': options,
'sla': {'bytes_per_second': 25000000}
}
+ result = {}
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
def test_iperf_successful_sla_jitter(self, mock_ssh):
@@ -119,11 +122,12 @@ class IperfTestCase(unittest.TestCase):
'options': options,
'sla': {'jitter': 10}
}
+ result = {}
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
- result = p.run(args)
+ p.run(args, result)
self.assertEqual(result, expected_result)
def test_iperf_unsuccessful_sla_jitter(self, mock_ssh):
@@ -137,10 +141,11 @@ class IperfTestCase(unittest.TestCase):
'options': options,
'sla': {'jitter': 0.0001}
}
+ result = {}
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
def test_iperf_unsuccessful_script_error(self, mock_ssh):
@@ -150,9 +155,10 @@ class IperfTestCase(unittest.TestCase):
options = {}
args = {'options': options}
+ result = {}
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, args)
+ self.assertRaises(RuntimeError, p.run, args, result)
def _read_sample_output(self,filename):
curr_path = os.path.dirname(os.path.abspath(__file__))
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf.py b/tests/unit/benchmark/scenarios/networking/test_netperf.py
index d5c19918b..4bb5983c3 100755
--- a/tests/unit/benchmark/scenarios/networking/test_netperf.py
+++ b/tests/unit/benchmark/scenarios/networking/test_netperf.py
@@ -48,11 +48,12 @@ class NetperfTestCase(unittest.TestCase):
options = {}
args = {'options': options}
+ result = {}
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
- result = p.run(args)
+ p.run(args, result)
self.assertEqual(result, expected_result)
def test_netperf_successful_sla(self, mock_ssh):
@@ -66,11 +67,12 @@ class NetperfTestCase(unittest.TestCase):
'options': options,
'sla': {'mean_latency': 100}
}
+ result = {}
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
- result = p.run(args)
+ p.run(args, result)
self.assertEqual(result, expected_result)
def test_netperf_unsuccessful_sla(self, mock_ssh):
@@ -84,10 +86,11 @@ class NetperfTestCase(unittest.TestCase):
'options': options,
'sla': {'mean_latency': 5}
}
+ result = {}
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
def test_netperf_unsuccessful_script_error(self, mock_ssh):
@@ -97,9 +100,10 @@ class NetperfTestCase(unittest.TestCase):
options = {}
args = {'options': options}
+ result = {}
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, args)
+ self.assertRaises(RuntimeError, p.run, args, result)
def _read_sample_output(self):
curr_path = os.path.dirname(os.path.abspath(__file__))
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping.py b/tests/unit/benchmark/scenarios/networking/test_ping.py
index d930adcee..b2c5b9859 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -35,10 +35,11 @@ class PingTestCase(unittest.TestCase):
'options': {'packetsize': 200},
'ipaddr': '172.16.0.138'
}
+ result = {}
mock_ssh.SSH().execute.return_value = (0, '100', '')
- result = p.run(args)
- self.assertEqual(result, float(mock_ssh.SSH().execute.return_value[1]))
+ p.run(args, result)
+ self.assertEqual(result, {'rtt': 100.0})
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_successful_sla(self, mock_ssh):
@@ -50,10 +51,11 @@ class PingTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138',
'sla': {'max_rtt': 150}
}
+ result = {}
mock_ssh.SSH().execute.return_value = (0, '100', '')
- result = p.run(args)
- self.assertEqual(result, float(mock_ssh.SSH().execute.return_value[1]))
+ p.run(args, result)
+ self.assertEqual(result, {'rtt': 100.0})
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_unsuccessful_sla(self, mock_ssh):
@@ -65,9 +67,10 @@ class PingTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138',
'sla': {'max_rtt': 50}
}
+ result = {}
mock_ssh.SSH().execute.return_value = (0, '100', '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
@@ -79,9 +82,10 @@ class PingTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138',
'sla': {'max_rtt': 50}
}
+ result = {}
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, args)
+ self.assertRaises(RuntimeError, p.run, args, result)
def main():
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index a20382cb7..ae4481f0e 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -113,6 +113,8 @@ class PktgenTestCase(unittest.TestCase):
'options': {'packetsize': 60, 'number_of_ports': 10},
'ipaddr': '172.16.0.139'
}
+ result = {}
+
p.server = mock_ssh.SSH()
p.client = mock_ssh.SSH()
@@ -124,7 +126,7 @@ class PktgenTestCase(unittest.TestCase):
"packets_sent": 149776, "flows": 110}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = json.loads(sample_output)
expected_result["packets_received"] = 149300
self.assertEqual(result, expected_result)
@@ -137,6 +139,7 @@ class PktgenTestCase(unittest.TestCase):
'ipaddr': '172.16.0.139',
'sla': {'max_ppm': 10000}
}
+ result = {}
p.server = mock_ssh.SSH()
p.client = mock_ssh.SSH()
@@ -148,7 +151,7 @@ class PktgenTestCase(unittest.TestCase):
"packets_sent": 149776, "flows": 110}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = json.loads(sample_output)
expected_result["packets_received"] = 149300
self.assertEqual(result, expected_result)
@@ -161,6 +164,8 @@ class PktgenTestCase(unittest.TestCase):
'ipaddr': '172.16.0.139',
'sla': {'max_ppm': 1000}
}
+ result = {}
+
p.server = mock_ssh.SSH()
p.client = mock_ssh.SSH()
@@ -171,7 +176,7 @@ class PktgenTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "flows": 110}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
def test_pktgen_unsuccessful_script_error(self, mock_ssh):
@@ -181,11 +186,13 @@ class PktgenTestCase(unittest.TestCase):
'ipaddr': '172.16.0.139',
'sla': {'max_ppm': 1000}
}
+ result = {}
+
p.server = mock_ssh.SSH()
p.client = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, args)
+ self.assertRaises(RuntimeError, p.run, args, result)
def main():
diff --git a/tests/unit/benchmark/scenarios/storage/test_fio.py b/tests/unit/benchmark/scenarios/storage/test_fio.py
index 6d38e9c53..b47aed968 100644
--- a/tests/unit/benchmark/scenarios/storage/test_fio.py
+++ b/tests/unit/benchmark/scenarios/storage/test_fio.py
@@ -60,12 +60,14 @@ class FioTestCase(unittest.TestCase):
'ramp_time': 10
}
args = {'options': options}
+ result = {}
+
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
'"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
@@ -83,12 +85,14 @@ class FioTestCase(unittest.TestCase):
'ramp_time': 10
}
args = {'options': options}
+ result = {}
+
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['read'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = '{"read_bw": 36113, "read_iops": 9028,' \
'"read_lat": 108.7}'
@@ -105,12 +109,14 @@ class FioTestCase(unittest.TestCase):
'ramp_time': 10
}
args = {'options': options}
+ result = {}
+
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['write'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = '{"write_bw": 35107, "write_iops": 8776,'\
'"write_lat": 111.74}'
@@ -130,13 +136,14 @@ class FioTestCase(unittest.TestCase):
'options': options,
'sla': {'write_lat': 300.1}
}
+ result = {}
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
'"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
@@ -158,12 +165,13 @@ class FioTestCase(unittest.TestCase):
'options': options,
'sla': {'write_lat': 200.1}
}
+ result = {}
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
def test_fio_successful_bw_iops_sla(self, mock_ssh):
@@ -178,13 +186,14 @@ class FioTestCase(unittest.TestCase):
'options': options,
'sla': {'read_iops': 20000}
}
+ result = {}
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- result = p.run(args)
+ p.run(args, result)
expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
'"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
@@ -205,12 +214,13 @@ class FioTestCase(unittest.TestCase):
'options': options,
'sla': {'read_iops': 30000}
}
+ result = {}
p.client = mock_ssh.SSH()
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, args)
+ self.assertRaises(AssertionError, p.run, args, result)
def test_fio_unsuccessful_script_error(self, mock_ssh):
@@ -222,10 +232,12 @@ class FioTestCase(unittest.TestCase):
'ramp_time': 10
}
args = {'options': options}
+ result = {}
+
p.client = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, args)
+ self.assertRaises(RuntimeError, p.run, args, result)
def _read_sample_output(self, file_name):
curr_path = os.path.dirname(os.path.abspath(__file__))
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 3f5b640d9..68c8bfdef 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -63,7 +63,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
errors = ""
try:
- data = method(scenario_cfg)
+ method(scenario_cfg, data)
except AssertionError as assertion:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index af5aae899..e4ad037af 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -54,7 +54,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
errors = ""
try:
- data = method(scenario_cfg)
+ method(scenario_cfg, data)
except AssertionError as assertion:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 3a6b2e1d6..b6d861d6c 100755
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -53,7 +53,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
errors = ""
try:
- data = method(scenario_cfg)
+ method(scenario_cfg, data)
except AssertionError as assertion:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index ac8fe1418..29f86e19c 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -63,7 +63,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
errors = ""
try:
- data = method(scenario_cfg)
+ method(scenario_cfg, data)
except AssertionError as assertion:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
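
All four runners change the same call site. A condensed, hypothetical view of
the surrounding worker loop, assuming data is initialized to an empty dict
just above these hunks (the initialization itself is outside the visible
context, as is the exact shape of the queued record):

    def one_iteration(method, scenario_cfg, sla_action, queue):
        data = {}      # assumed: fresh dict per iteration, filled by the scenario
        errors = ""
        try:
            method(scenario_cfg, data)    # was: data = method(scenario_cfg)
        except AssertionError as assertion:
            # SLA failed, but data still holds whatever was measured
            if sla_action == "assert":
                raise
            errors = assertion.args
        queue.put({'data': data, 'errors': errors})
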
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index aaa98b881..595986f8a 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -78,7 +78,7 @@ class Cyclictest(base.Scenario):
self.setup_done = True
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
default_args = "-m -n -q"
@@ -102,19 +102,20 @@ class Cyclictest(base.Scenario):
if status:
raise RuntimeError(stderr)
- data = json.loads(stdout)
+ result.update(json.loads(stdout))
if "sla" in args:
- for t, latency in data.items():
+ sla_error = ""
+ for t, latency in result.items():
if 'max_%s_latency' % t not in args['sla']:
continue
sla_latency = int(args['sla']['max_%s_latency' % t])
latency = int(latency)
- assert latency <= sla_latency, "%s latency %d > " \
- "sla:max_%s_latency(%d)" % (t, latency, t, sla_latency)
-
- return data
+ if latency > sla_latency:
+ sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
+ (t, latency, t, sla_latency)
+ assert sla_error == "", sla_error
def _test():
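
The cyclictest scenario also trades three separate asserts for one accumulated
error string, so a single run reports every violated bound at once instead of
stopping at the first. The pattern in isolation:

    def check_latency_sla(result, sla):
        sla_error = ""
        for t, latency in result.items():
            key = 'max_%s_latency' % t
            if key not in sla:
                continue
            if int(latency) > int(sla[key]):
                sla_error += "%s latency %d > sla:%s(%d); " % \
                    (t, int(latency), key, int(sla[key]))
        assert sla_error == "", sla_error

    # with the sample output used in the unit tests above:
    check_latency_sla({"min": 100, "avg": 500, "max": 1000},
                      {"max_avg_latency": 10})
    # AssertionError: avg latency 500 > sla:max_avg_latency(10);
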
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 367739128..d2558c936 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -58,7 +58,7 @@ class Lmbench(base.Scenario):
self.setup_done = True
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
if not self.setup_done:
@@ -75,16 +75,17 @@ class Lmbench(base.Scenario):
if status:
raise RuntimeError(stderr)
- data = json.loads(stdout)
+ result.update(json.loads(stdout))
if "sla" in args:
+ sla_error = ""
sla_max_latency = int(args['sla']['max_latency'])
- for result in data:
- latency = result['latency']
- assert latency <= sla_max_latency, "latency %f > " \
- "sla:max_latency(%f)" % (latency, sla_max_latency)
-
- return data
+ for t_latency in result:
+ latency = t_latency['latency']
+ if latency > sla_max_latency:
+ sla_error += "latency %f > sla:max_latency(%f); " \
+ % (latency, sla_max_latency)
+ assert sla_error == "", sla_error
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index a874ea94c..281bd8e0c 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -58,7 +58,7 @@ class Perf(base.Scenario):
self.setup_done = True
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
if not self.setup_done:
@@ -96,23 +96,22 @@ class Perf(base.Scenario):
if status:
raise RuntimeError(stdout)
- output = json.loads(stdout)
+ result.update(json.loads(stdout))
if "sla" in args:
metric = args['sla']['metric']
exp_val = args['sla']['expected_value']
smaller_than_exp = 'smaller_than_expected' in args['sla']
- if metric not in output:
+ if metric not in result:
assert False, "Metric (%s) not found." % metric
else:
if smaller_than_exp:
- assert output[metric] < exp_val, "%s %d >= %d (sla)" \
- % (metric, output[metric], exp_val)
+ assert result[metric] < exp_val, "%s %d >= %d (sla); " \
+ % (metric, result[metric], exp_val)
else:
- assert output[metric] >= exp_val, "%s %d < %d (sla)" \
- % (metric, output[metric], exp_val)
- return output
+ assert result[metric] >= exp_val, "%s %d < %d (sla); " \
+ % (metric, result[metric], exp_val)
def _test():
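
perf keeps a single assert, but the comparison direction is switched by the
mere presence of a 'smaller_than_expected' key in the sla block. In brief
(values illustrative, not from the source):

    sla = {'metric': 'cache-misses', 'expected_value': 1000,
           'smaller_than_expected': True}     # presence flips the comparison
    value = result[sla['metric']]
    if 'smaller_than_expected' in sla:
        assert value < sla['expected_value']   # metric is a cost: lower wins
    else:
        assert value >= sla['expected_value']  # metric is a score: higher wins
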
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index e31a892d2..a324c5b85 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -82,7 +82,7 @@ For more info see http://software.es.net/iperf
LOG.warn(stderr)
self.target.close()
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
# if run by a duration runner, get the duration time and setup as arg
@@ -122,7 +122,7 @@ For more info see http://software.es.net/iperf
# error cause in json dict on stdout
raise RuntimeError(stdout)
- output = json.loads(stdout)
+ result.update(json.loads(stdout))
if "sla" in args:
sla_iperf = args["sla"]
@@ -131,21 +131,19 @@ For more info see http://software.es.net/iperf
# convert bits per second to bytes per second
bit_per_second = \
- int(output["end"]["sum_received"]["bits_per_second"])
+ int(result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
assert bytes_per_second >= sla_bytes_per_second, \
- "bytes_per_second %d < sla:bytes_per_second (%d)" % \
+ "bytes_per_second %d < sla:bytes_per_second (%d); " % \
(bytes_per_second, sla_bytes_per_second)
else:
sla_jitter = float(sla_iperf["jitter"])
- jitter_ms = float(output["end"]["sum"]["jitter_ms"])
+ jitter_ms = float(result["end"]["sum"]["jitter_ms"])
assert jitter_ms <= sla_jitter, \
- "jitter_ms %f > sla:jitter %f" % \
+ "jitter_ms %f > sla:jitter %f; " % \
(jitter_ms, sla_jitter)
- return output
-
def _test():
'''internal test function'''
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
index 3121fdaf2..fb5497089 100755
--- a/yardstick/benchmark/scenarios/networking/netperf.py
+++ b/yardstick/benchmark/scenarios/networking/netperf.py
@@ -79,7 +79,7 @@ class Netperf(base.Scenario):
self.setup_done = True
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
if not self.setup_done:
@@ -118,21 +118,20 @@ class Netperf(base.Scenario):
if status:
raise RuntimeError(stderr)
- data = json.loads(stdout)
- if data['mean_latency'] == '':
+ result.update(json.loads(stdout))
+
+ if result['mean_latency'] == '':
raise RuntimeError(stdout)
# sla check
- mean_latency = float(data['mean_latency'])
+ mean_latency = float(result['mean_latency'])
if "sla" in args:
sla_max_mean_latency = int(args["sla"]["mean_latency"])
assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f)" % \
+ "mean_latency %f > sla_max_mean_latency(%f); " % \
(mean_latency, sla_max_mean_latency)
- return data
-
def _test():
'''internal test function'''
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index 41395d8d6..10964350b 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -45,7 +45,7 @@ class Ping(base.Scenario):
self.connection = ssh.SSH(user, host, key_filename=key_filename)
self.connection.wait()
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
if "options" in args:
@@ -64,11 +64,9 @@ class Ping(base.Scenario):
if exit_status != 0:
raise RuntimeError(stderr)
- rtt = float(stdout)
+ result["rtt"] = float(stdout)
if "sla" in args:
sla_max_rtt = int(args["sla"]["max_rtt"])
- assert rtt <= sla_max_rtt, "rtt %f > sla:max_rtt(%f)" % \
- (rtt, sla_max_rtt)
-
- return rtt
+ assert result["rtt"] <= sla_max_rtt, "rtt %f > sla:max_rtt(%f); " % \
+ (result["rtt"], sla_max_rtt)
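
ping is the one scenario whose result shape visibly changes: a bare float
becomes {'rtt': <float>}, which is exactly what the updated unit tests assert
and what the plotter change at the end of this diff consumes:

    result = {}
    # before: rtt = float(stdout); return rtt
    result["rtt"] = float('100')        # stdout of the ping script
    assert result == {'rtt': 100.0}     # cf. test_ping_successful_no_sla
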
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index cc28b514a..f373fd2ec 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -86,7 +86,7 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
return int(stdout)
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
if not self.setup_done:
@@ -119,20 +119,18 @@ class Pktgen(base.Scenario):
if status:
raise RuntimeError(stderr)
- data = json.loads(stdout)
+ result.update(json.loads(stdout))
- data['packets_received'] = self._iptables_get_result()
+ result['packets_received'] = self._iptables_get_result()
if "sla" in args:
- sent = data['packets_sent']
- received = data['packets_received']
+ sent = result['packets_sent']
+ received = result['packets_received']
ppm = 1000000 * (sent - received) / sent
sla_max_ppm = int(args["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d" \
+ assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
% (ppm, sla_max_ppm)
- return data
-
def _test():
'''internal test function'''
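
pktgen's SLA is packet loss expressed in parts per million. Worked through
with the numbers from the successful unit test above (149776 sent, 149300
received):

    sent, received = 149776, 149300
    ppm = 1000000 * (sent - received) / sent
    # 476000000 / 149776 -> 3178 with the integer division this code
    # (which appears to target Python 2) relies on, i.e. ~0.32% loss
    assert ppm <= 10000   # passes the {'max_ppm': 10000} SLA test
    # 3178 > 1000, so the {'max_ppm': 1000} test expects AssertionError
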
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index 1107a8b2c..af90b0703 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -71,11 +71,10 @@ class Fio(base.Scenario):
self.setup_done = True
- def run(self, args):
+ def run(self, args, result):
"""execute the benchmark"""
default_args = "-ioengine=libaio -direct=1 -group_reporting " \
"-numjobs=1 -time_based --output-format=json"
- result = {}
if not self.setup_done:
self.setup()
@@ -124,6 +123,7 @@ class Fio(base.Scenario):
result["write_lat"] = raw_data["jobs"][0]["write"]["lat"]["mean"]
if "sla" in args:
+ sla_error = ""
for k, v in result.items():
if k not in args['sla']:
continue
@@ -131,15 +131,16 @@ class Fio(base.Scenario):
if "lat" in k:
# For latency, small value is better
max_v = float(args['sla'][k])
- assert v <= max_v, "%s %f > " \
- "sla:%s(%f)" % (k, v, k, max_v)
+ if v > max_v:
+ sla_error += "%s %f > sla:%s(%f); " % (k, v, k, max_v)
else:
# For bandwidth and iops big value is better
min_v = int(args['sla'][k])
- assert v >= min_v, "%s %d < " \
- "sla:%s(%d)" % (k, v, k, min_v)
+ if v < min_v:
+ sla_error += "%s %d < " \
+ "sla:%s(%d); " % (k, v, k, min_v)
- return result
+ assert sla_error == "", sla_error
def _test():
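
fio's SLA check is the only bidirectional one: keys containing 'lat' are
ceilings, bandwidth and iops keys are floors, and both kinds of violation now
accumulate into the same error string. Condensed from the hunk above:

    def check_fio_sla(result, sla):
        sla_error = ""
        for k, v in result.items():
            if k not in sla:
                continue
            if 'lat' in k:
                # latency: smaller is better, the SLA value is a maximum
                if v > float(sla[k]):
                    sla_error += "%s %f > sla:%s(%f); " % (k, v, k, float(sla[k]))
            else:
                # bandwidth / iops: bigger is better, the SLA value is a minimum
                if v < int(sla[k]):
                    sla_error += "%s %d < sla:%s(%d); " % (k, v, k, int(sla[k]))
        assert sla_error == "", sla_error
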
diff --git a/yardstick/plot/plotter.py b/yardstick/plot/plotter.py
index 0455386b5..91dd521f7 100644
--- a/yardstick/plot/plotter.py
+++ b/yardstick/plot/plotter.py
@@ -125,7 +125,7 @@ class Plotter(object):
def _plot_ping(self, records):
'''ping test result interpretation and visualization on the graph'''
- rtts = [r['benchmark']['data'] for r in records]
+ rtts = [r['benchmark']['data']['rtt'] for r in records]
seqs = [r['benchmark']['sequence'] for r in records]
for i in range(0, len(rtts)):
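
With ping now reporting a dict, the plotter digs one level deeper. A sketch of
the record shape _plot_ping expects after this change (field layout inferred
from this hunk alone, not the full record schema):

    records = [
        {'benchmark': {'data': {'rtt': 0.8}, 'sequence': 1}},
        {'benchmark': {'data': {'rtt': 1.1}, 'sequence': 2}},
    ]
    rtts = [r['benchmark']['data']['rtt'] for r in records]   # [0.8, 1.1]
    seqs = [r['benchmark']['sequence'] for r in records]      # [1, 2]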