Diffstat (limited to 'yardstick/benchmark/scenarios/compute')
 yardstick/benchmark/scenarios/compute/cyclictest.py             |  4
 yardstick/benchmark/scenarios/compute/lmbench.py                |  6
 yardstick/benchmark/scenarios/compute/perf.py                   | 20
 yardstick/benchmark/scenarios/compute/qemu_migrate.py           |  4
 yardstick/benchmark/scenarios/compute/ramspeed.py               |  6
 yardstick/benchmark/scenarios/compute/unixbench.py              |  2
 yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash  |  6
 7 files changed, 23 insertions(+), 25 deletions(-)
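
The change common to all of these files is replacing bare assert statements with base.Scenario.verify_SLA(). The helper itself is not part of this diff, so the sketch below only illustrates the shape implied by the call sites, verify_SLA(condition, error_msg): when the condition is false it raises a dedicated SLA exception rather than an AssertionError, so the check is not stripped under python -O and the runner can classify the failure. The exception name used here is an assumption.

    # Minimal sketch of the verify_SLA() helper assumed by the calls in this
    # diff; the real helper lives in yardstick/benchmark/scenarios/base.py and
    # may differ in exception type and extra bookkeeping.
    class SLAValidationError(Exception):
        """Raised when a measured value violates the scenario SLA."""


    class Scenario(object):
        def verify_SLA(self, condition, error_msg):
            # Unlike `assert`, this survives `python -O` and raises a typed
            # exception the caller can report as an SLA violation.
            if not condition:
                raise SLAValidationError(error_msg)
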
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index 998463ef6..413709f3b 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -100,7 +100,7 @@ class Cyclictest(base.Scenario):

     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
-        status, stdout, stderr = client.execute(cmd)
+        status, _, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
                 LOG.debug("Error on reboot")
@@ -195,7 +195,7 @@ class Cyclictest(base.Scenario):
                 if latency > sla_latency:
                     sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
                         (t, latency, t, sla_latency)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)


 def _test():  # pragma: no cover
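
The SLA blocks in cyclictest.py above (and in the scenarios below) accumulate every violation into sla_error and verify the string once at the end, so one failure message can report all exceeded thresholds. A condensed, stand-alone version of that pattern, with made-up latencies and limits:

    # Condensed version of the accumulate-then-verify pattern used above;
    # the measured values and SLA limits are made up for illustration.
    result = {'avg': 25, 'max': 270}                        # latencies in us
    sla = {'max_avg_latency': 20, 'max_max_latency': 200}   # hypothetical SLA

    sla_error = ""
    for t, latency in result.items():
        sla_latency = sla.get('max_%s_latency' % t)
        if sla_latency is not None and latency > sla_latency:
            sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
                (t, latency, t, sla_latency)

    print(sla_error)
    # avg latency 25 > sla:max_avg_latency(20); max latency 270 > sla:max_max_latency(200);
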
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 801f7fa80..2237e49e0 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -119,8 +119,8 @@ class Lmbench(base.Scenario):
             cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
                   (repetition, warmup)
         else:
-            raise RuntimeError("No such test_type: %s for Lmbench scenario",
-                               test_type)
+            raise RuntimeError("No such test_type: %s for Lmbench scenario"
+                               % test_type)

         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
@@ -157,7 +157,7 @@ class Lmbench(base.Scenario):
                 if sla_latency < cache_latency:
                     sla_error += "latency %f > sla:max_latency(%f); " \
                         % (cache_latency, sla_latency)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)


 def _test():
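
The RuntimeError changes in lmbench.py (and in ramspeed.py further down) fix a formatting slip: exception constructors, unlike the nearby LOG.debug() calls, do not interpolate %-style arguments, so the old code carried the test type as a second tuple element instead of substituting it into the message. A quick illustration with a made-up value:

    # Exception constructors do not apply %-formatting to extra positional
    # arguments the way the logging module does; the value is illustrative.
    test_type = "latency_for_cache"

    old = RuntimeError("No such test_type: %s for Lmbench scenario", test_type)
    print(old)   # ('No such test_type: %s for Lmbench scenario', 'latency_for_cache')

    new = RuntimeError("No such test_type: %s for Lmbench scenario" % test_type)
    print(new)   # No such test_type: latency_for_cache for Lmbench scenario
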
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index 0b8ed9b28..b973211f1 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -93,7 +93,7 @@ class Perf(base.Scenario):
               % (load, duration, events_string)

         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
+        status, stdout, _ = self.client.execute(cmd)
         if status:
             raise RuntimeError(stdout)
@@ -105,16 +105,14 @@ class Perf(base.Scenario):
             exp_val = self.scenario_cfg['sla']['expected_value']
             smaller_than_exp = 'smaller_than_expected' \
                                in self.scenario_cfg['sla']
-
-            if metric not in result:
-                assert False, "Metric (%s) not found." % metric
-            else:
-                if smaller_than_exp:
-                    assert result[metric] < exp_val, "%s %d >= %d (sla); " \
-                        % (metric, result[metric], exp_val)
-                else:
-                    assert result[metric] >= exp_val, "%s %d < %d (sla); " \
-                        % (metric, result[metric], exp_val)
+            self.verify_SLA(metric in result,
+                            "Metric (%s) not found." % metric)
+            self.verify_SLA(
+                not smaller_than_exp,
+                "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+            self.verify_SLA(
+                result[metric] >= exp_val,
+                "%s %d < %d (sla); " % (metric, result[metric], exp_val))


 def _test():
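
For context, metric, exp_val and smaller_than_exp above are read straight from the scenario's sla section, and smaller_than_exp tests only for the key's presence, not its value. A hypothetical fragment matching those keys (the metric name and threshold are illustrative, not taken from this diff):

    # Hypothetical scenario_cfg fragment matching the keys read in perf.py;
    # the metric name and threshold are illustrative only.
    scenario_cfg = {
        'sla': {
            'metric': 'instructions',      # key looked up in the perf result dict
            'expected_value': 1000000,     # threshold compared with result[metric]
            # 'smaller_than_expected': True,   # presence of this key (any value)
            #                                  # is what the check tests for
        },
    }

    metric = scenario_cfg['sla']['metric']
    exp_val = scenario_cfg['sla']['expected_value']
    smaller_than_exp = 'smaller_than_expected' in scenario_cfg['sla']   # False here
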
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index 2de1270ef..975c90b22 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -56,7 +56,7 @@ class QemuMigrate(base.Scenario):

     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
-        status, stdout, stderr = client.execute(cmd)
+        status, _, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
                 LOG.debug("Error on reboot")
@@ -127,7 +127,7 @@ class QemuMigrate(base.Scenario):
                 if timevalue > sla_time:
                     sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
                         (t, timevalue, t, sla_time)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)


 def _test():  # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index ca64935dd..4daf776ff 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -121,8 +121,8 @@ class Ramspeed(base.Scenario):
                   (test_id, load, block_size)
         # only the test_id 1-6 will be used in this scenario
         else:
-            raise RuntimeError("No such type_id: %s for Ramspeed scenario",
-                               test_id)
+            raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+                               % test_id)

         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
@@ -140,4 +140,4 @@ class Ramspeed(base.Scenario):
                 if bw < sla_min_bw:
                     sla_error += "Bandwidth %f < " \
                                  "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
index cdb345717..3cea31694 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench.py
+++ b/yardstick/benchmark/scenarios/compute/unixbench.py
@@ -125,7 +125,7 @@ class Unixbench(base.Scenario):
                 if score < sla_score:
                     sla_error += "%s score %f < sla:%s_score(%f); " % \
                         (t, score, t, sla_score)
-            assert sla_error == "", sla_error
+            self.verify_SLA(sla_error == "", sla_error)


 def _test():  # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
index 5a5dbc394..0f0122e51 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
+++ b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
@@ -18,15 +18,15 @@ OUTPUT_FILE=/tmp/unixbench-out.log
 # run unixbench test
 run_unixbench()
 {
-    cd /opt/tempT/UnixBench/
+    cd /opt/tempT/UnixBench/UnixBench/
     ./Run $OPTIONS > $OUTPUT_FILE
 }

 # write the result to stdout in json format
 output_json()
 {
-    single_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | head -1 )
-    parallel_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | tail -1 )
+    single_score=$(awk '/Score/{print $NF}' $OUTPUT_FILE | head -1 )
+    parallel_score=$(awk '/Score/{print $NF}' $OUTPUT_FILE | tail -1 )
     echo -e "{ \
         \"single_score\":\"$single_score\", \
         \"parallel_score\":\"$parallel_score\" \
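
The awk change above switches from a fixed column ($7) to the last field ($NF). The UnixBench index score is the final whitespace-separated token on the lines matching /Score/, so taking the last field works regardless of how many words precede it. A rough Python equivalent of what the two pipelines now extract, using a made-up output line:

    # Rough Python equivalent of: awk '/Score/{print $NF}' $OUTPUT_FILE
    # The sample line is made up for illustration, not from a real UnixBench run.
    sample_output = "System Benchmarks Index Score                      1024.3"

    scores = [line.split()[-1]            # $NF: last whitespace-separated field
              for line in sample_output.splitlines()
              if "Score" in line]         # the /Score/ pattern

    single_score = scores[0]              # head -1 keeps the first match
    parallel_score = scores[-1]           # tail -1 keeps the last match
    print(single_score, parallel_score)   # "1024.3 1024.3" for this single line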