Diffstat (limited to 'yardstick/benchmark/scenarios/compute')
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py | 15
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py    | 17
-rw-r--r--  yardstick/benchmark/scenarios/compute/perf.py       | 15
3 files changed, 24 insertions(+), 23 deletions(-)
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index aaa98b881..595986f8a 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -78,7 +78,7 @@ class Cyclictest(base.Scenario):
         self.setup_done = True
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
         default_args = "-m -n -q"
@@ -102,19 +102,20 @@ class Cyclictest(base.Scenario):
         if status:
             raise RuntimeError(stderr)
-        data = json.loads(stdout)
+        result.update(json.loads(stdout))
         if "sla" in args:
-            for t, latency in data.items():
+            sla_error = ""
+            for t, latency in result.items():
                 if 'max_%s_latency' % t not in args['sla']:
                     continue
                 sla_latency = int(args['sla']['max_%s_latency' % t])
                 latency = int(latency)
-                assert latency <= sla_latency, "%s latency %d > " \
-                    "sla:max_%s_latency(%d)" % (t, latency, t, sla_latency)
-
-        return data
+                if latency > sla_latency:
+                    sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
+                        (t, latency, t, sla_latency)
+            assert sla_error == "", sla_error
def _test():
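
The change above swaps a fail-fast assert for an accumulate-then-assert pattern: every per-metric threshold is checked, each violation is appended to sla_error, and a single assert at the end reports all breaches instead of stopping at the first one. A minimal standalone sketch of that pattern, with hypothetical metric names, latencies, and thresholds rather than real cyclictest output:

    # Hypothetical cyclictest-style output and SLA thresholds (illustrative only).
    result = {"min": 2, "avg": 15, "max": 42}
    sla = {"max_min_latency": 10, "max_avg_latency": 20, "max_max_latency": 30}

    sla_error = ""
    for t, latency in result.items():
        if 'max_%s_latency' % t not in sla:
            continue
        sla_latency = int(sla['max_%s_latency' % t])
        if int(latency) > sla_latency:
            sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
                (t, int(latency), t, sla_latency)

    # Raises AssertionError listing every breached threshold, not just the first.
    assert sla_error == "", sla_error
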
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 367739128..d2558c936 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -58,7 +58,7 @@ class Lmbench(base.Scenario):
         self.setup_done = True
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
         if not self.setup_done:
@@ -75,16 +75,17 @@ class Lmbench(base.Scenario):
         if status:
             raise RuntimeError(stderr)
-        data = json.loads(stdout)
+        result.update(json.loads(stdout))
         if "sla" in args:
+            sla_error = ""
             sla_max_latency = int(args['sla']['max_latency'])
-            for result in data:
-                latency = result['latency']
-                assert latency <= sla_max_latency, "latency %f > " \
-                    "sla:max_latency(%f)" % (latency, sla_max_latency)
-
-        return data
+            for t_latency in result:
+                latency = t_latency['latency']
+                if latency > sla_max_latency:
+                    sla_error += "latency %f > sla:max_latency(%f); " \
+                        % (latency, sla_max_latency)
+            assert sla_error == "", sla_error
def _test():
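
A side effect of writing metrics into the caller-supplied result instead of returning them: the measurements are still in the caller's dict when the SLA assert fires, because result.update() runs before the SLA check. A minimal sketch of a caller under the new run(args, result) signature, using a made-up FakeScenario stand-in rather than the actual yardstick runner machinery:

    class FakeScenario(object):
        """Stand-in mimicking the new run(args, result) contract."""
        def run(self, args, result):
            result.update({"latency": 99.0})      # pretend benchmark output
            if "sla" in args:
                sla_max_latency = float(args["sla"]["max_latency"])
                assert result["latency"] <= sla_max_latency, \
                    "latency %f > sla:max_latency(%f); " % \
                    (result["latency"], sla_max_latency)

    result = {}
    try:
        FakeScenario().run({"sla": {"max_latency": 50.0}}, result)
    except AssertionError as sla_breach:
        print("SLA violated: %s" % sla_breach)
    print(result)      # metrics survive the failed SLA check: {'latency': 99.0}
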
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index a874ea94c..281bd8e0c 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -58,7 +58,7 @@ class Perf(base.Scenario):
         self.setup_done = True
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
         if not self.setup_done:
@@ -96,23 +96,22 @@ class Perf(base.Scenario):
         if status:
             raise RuntimeError(stdout)
-        output = json.loads(stdout)
+        result.update(json.loads(stdout))
         if "sla" in args:
             metric = args['sla']['metric']
             exp_val = args['sla']['expected_value']
             smaller_than_exp = 'smaller_than_expected' in args['sla']
-            if metric not in output:
+            if metric not in result:
                 assert False, "Metric (%s) not found." % metric
             else:
                 if smaller_than_exp:
-                    assert output[metric] < exp_val, "%s %d >= %d (sla)" \
-                        % (metric, output[metric], exp_val)
+                    assert result[metric] < exp_val, "%s %d >= %d (sla); " \
+                        % (metric, result[metric], exp_val)
                 else:
-                    assert output[metric] >= exp_val, "%s %d < %d (sla)" \
-                        % (metric, output[metric], exp_val)
-        return output
+                    assert result[metric] >= exp_val, "%s %d < %d (sla); " \
+                        % (metric, result[metric], exp_val)
def _test():
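
The perf SLA check works in both directions: by default the measured metric must be at least expected_value, and the presence of a smaller_than_expected key flips the comparison so the metric must stay below it. A standalone sketch of the two modes, with hypothetical event names and numbers that are not taken from this commit:

    def check_sla(result, sla):
        # Mirrors the comparison logic above on a plain dict of perf counters.
        metric = sla['metric']
        exp_val = sla['expected_value']
        if 'smaller_than_expected' in sla:
            # e.g. a miss counter: the measured value must stay below the target
            assert result[metric] < exp_val, "%s %d >= %d (sla); " \
                % (metric, result[metric], exp_val)
        else:
            # e.g. a throughput counter: the measured value must reach the target
            assert result[metric] >= exp_val, "%s %d < %d (sla); " \
                % (metric, result[metric], exp_val)

    counters = {'instructions': 2000000, 'cache-misses': 100}
    check_sla(counters, {'metric': 'instructions', 'expected_value': 1000000})
    check_sla(counters, {'metric': 'cache-misses', 'expected_value': 5000,
                         'smaller_than_expected': True})
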