author    Miikka Koistinen <miikka.koistinen@nokia.com>  2018-06-08 13:54:48 +0300
committer Miikka Koistinen <miikka.koistinen@nokia.com>  2018-06-15 14:32:37 +0300
commit    39f80e9b06395ae1515cfcf08508504ad4dd978a
tree      9b9ecfcf72e6438228787d67c12212a498e885b6 /yardstick/benchmark/scenarios
parent    ded989093f16c2d7d8900b2a3afbc0521d474e08
Convert SLA asserts to raises

This commit converts Python assertions to a custom exception in all
places where SLA validation is checked with an assertion. It also fixes
all pylint errors that emerged from the change.

JIRA: YARDSTICK-966

Change-Id: If771ed03b2cbc0a43a57fcfb9293f18740b3ff80
Signed-off-by: Miikka Koistinen <miikka.koistinen@nokia.com>
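The conversion follows one pattern throughout: every "assert condition, message" used for SLA checking becomes a call to a new base-class helper, verify_SLA(), which raises a dedicated exception instead. A minimal, self-contained sketch of that pattern follows; the stand-in SLAValidationError below only mirrors the shape of the real class in yardstick.common.exceptions and is not the upstream definition.

# Sketch of the assert-to-raise conversion introduced by this commit.

class SLAValidationError(Exception):
    """Stand-in for yardstick.common.exceptions.SLAValidationError."""
    message = ('SLA validation failed. Case name: %(case_name)s, '
               'error: %(error_msg)s')

    def __init__(self, **kwargs):
        super(SLAValidationError, self).__init__(self.message % kwargs)


class Scenario(object):
    __scenario_type__ = "Demo"

    def verify_SLA(self, condition, error_msg):
        # Replaces "assert condition, error_msg" at every SLA check point.
        if not condition:
            raise SLAValidationError(
                case_name=self.__scenario_type__, error_msg=error_msg)


if __name__ == '__main__':
    demo = Scenario()
    try:
        demo.verify_SLA(42 <= 10, "rtt 42 > sla:max_rtt(10); ")
    except SLAValidationError as exc:
        print(exc)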
Diffstat (limited to 'yardstick/benchmark/scenarios')
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py      8
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py             9
-rw-r--r--  yardstick/benchmark/scenarios/base.py                               6
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py                 4
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py                    6
-rw-r--r--  yardstick/benchmark/scenarios/compute/perf.py                      20
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate.py               4
-rw-r--r--  yardstick/benchmark/scenarios/compute/ramspeed.py                   6
-rw-r--r--  yardstick/benchmark/scenarios/compute/unixbench.py                  2
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py                 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/moongen_testpmd.py         7
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf.py                 6
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_node.py            7
-rw-r--r--  yardstick/benchmark/scenarios/networking/nstat.py                   2
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py                   20
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6.py                  13
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py                 46
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk.py             2
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py  8
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py                 14
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py            16
-rw-r--r--  yardstick/benchmark/scenarios/storage/fio.py                        2
22 files changed, 119 insertions, 106 deletions
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 1fadd2532..e2db03a70 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -58,16 +58,20 @@ class ScenarioGeneral(base.Scenario):
self.director.stopMonitors()
verify_result = self.director.verify()
+ service_not_found = False
for k, v in self.director.data.items():
if v == 0:
- result['sla_pass'] = 0
verify_result = False
+ service_not_found = True
LOG.info("\033[92m The service process (%s) not found in the host environment", k)
result['sla_pass'] = 1 if verify_result else 0
self.director.store_result(result)
- assert verify_result is True, "The HA test case NOT passed"
+ self.verify_SLA(
+ verify_result, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "Director.verify() failed"))
def teardown(self):
self.director.knockoff()
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 42941c6e7..76721e38c 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -70,17 +70,20 @@ class ServiceHA(base.Scenario):
LOG.info("Monitor '%s' stop!", self.__scenario_type__)
sla_pass = self.monitorMgr.verify_SLA()
+ service_not_found = False
for k, v in self.data.items():
if v == 0:
sla_pass = False
+ service_not_found = True
LOG.info("The service process (%s) not found in the host envrioment", k)
result['sla_pass'] = 1 if sla_pass else 0
self.monitorMgr.store_result(result)
- assert sla_pass is True, "The HA test case NOT pass the SLA"
-
- return
+ self.verify_SLA(
+ sla_pass, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "MonitorMgr.verify_SLA() failed"))
def teardown(self):
"""scenario teardown"""
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 58a02805c..30ac1bea9 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -20,6 +20,7 @@ import six
from stevedore import extension
import yardstick.common.utils as utils
+from yardstick.common import exceptions as y_exc
def _iter_scenario_classes(scenario_type=None):
@@ -61,6 +62,11 @@ class Scenario(object):
"""Time waited after executing the run method"""
time.sleep(time_seconds)
+ def verify_SLA(self, condition, error_msg):
+ if not condition:
+ raise y_exc.SLAValidationError(
+ case_name=self.__scenario_type__, error_msg=error_msg)
+
@staticmethod
def get_types():
"""return a list of known runner type (class) names"""
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index 998463ef6..413709f3b 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -100,7 +100,7 @@ class Cyclictest(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -195,7 +195,7 @@ class Cyclictest(base.Scenario):
if latency > sla_latency:
sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
(t, latency, t, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 801f7fa80..2237e49e0 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -119,8 +119,8 @@ class Lmbench(base.Scenario):
cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
(repetition, warmup)
else:
- raise RuntimeError("No such test_type: %s for Lmbench scenario",
- test_type)
+ raise RuntimeError("No such test_type: %s for Lmbench scenario"
+ % test_type)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -157,7 +157,7 @@ class Lmbench(base.Scenario):
if sla_latency < cache_latency:
sla_error += "latency %f > sla:max_latency(%f); " \
% (cache_latency, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index 0b8ed9b28..b973211f1 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -93,7 +93,7 @@ class Perf(base.Scenario):
% (load, duration, events_string)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, stdout, _ = self.client.execute(cmd)
if status:
raise RuntimeError(stdout)
@@ -105,16 +105,14 @@ class Perf(base.Scenario):
exp_val = self.scenario_cfg['sla']['expected_value']
smaller_than_exp = 'smaller_than_expected' \
in self.scenario_cfg['sla']
-
- if metric not in result:
- assert False, "Metric (%s) not found." % metric
- else:
- if smaller_than_exp:
- assert result[metric] < exp_val, "%s %d >= %d (sla); " \
- % (metric, result[metric], exp_val)
- else:
- assert result[metric] >= exp_val, "%s %d < %d (sla); " \
- % (metric, result[metric], exp_val)
+ self.verify_SLA(metric in result,
+ "Metric (%s) not found." % metric)
+ self.verify_SLA(
+ not smaller_than_exp,
+ "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+ self.verify_SLA(
+ result[metric] >= exp_val,
+ "%s %d < %d (sla); " % (metric, result[metric], exp_val))
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index 2de1270ef..975c90b22 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -56,7 +56,7 @@ class QemuMigrate(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -127,7 +127,7 @@ class QemuMigrate(base.Scenario):
if timevalue > sla_time:
sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
(t, timevalue, t, sla_time)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index ca64935dd..4daf776ff 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -121,8 +121,8 @@ class Ramspeed(base.Scenario):
(test_id, load, block_size)
# only the test_id 1-6 will be used in this scenario
else:
- raise RuntimeError("No such type_id: %s for Ramspeed scenario",
- test_id)
+ raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+ % test_id)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -140,4 +140,4 @@ class Ramspeed(base.Scenario):
if bw < sla_min_bw:
sla_error += "Bandwidth %f < " \
"sla:min_bandwidth(%f)" % (bw, sla_min_bw)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
index cdb345717..3cea31694 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench.py
+++ b/yardstick/benchmark/scenarios/compute/unixbench.py
@@ -125,7 +125,7 @@ class Unixbench(base.Scenario):
if score < sla_score:
sla_error += "%s score %f < sla:%s_score(%f); " % \
(t, score, t, sla_score)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 98c45990e..51e044e7b 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -92,7 +92,7 @@ For more info see http://software.es.net/iperf
def teardown(self):
LOG.debug("teardown")
self.host.close()
- status, stdout, stderr = self.target.execute("pkill iperf3")
+ status, _, stderr = self.target.execute("pkill iperf3")
if status:
LOG.warning(stderr)
self.target.close()
@@ -145,7 +145,7 @@ For more info see http://software.es.net/iperf
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.host.execute(cmd)
+ status, stdout, _ = self.host.execute(cmd)
if status:
# error cause in json dict on stdout
raise RuntimeError(stdout)
@@ -165,16 +165,17 @@ For more info see http://software.es.net/iperf
bit_per_second = \
int(iperf_result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
- assert bytes_per_second >= sla_bytes_per_second, \
- "bytes_per_second %d < sla:bytes_per_second (%d); " % \
- (bytes_per_second, sla_bytes_per_second)
+ self.verify_SLA(
+ bytes_per_second >= sla_bytes_per_second,
+ "bytes_per_second %d < sla:bytes_per_second (%d); "
+ % (bytes_per_second, sla_bytes_per_second))
else:
sla_jitter = float(sla_iperf["jitter"])
jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
- assert jitter_ms <= sla_jitter, \
- "jitter_ms %f > sla:jitter %f; " % \
- (jitter_ms, sla_jitter)
+ self.verify_SLA(jitter_ms <= sla_jitter,
+ "jitter_ms %f > sla:jitter %f; "
+ % (jitter_ms, sla_jitter))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
index 86173c9da..e3bd7af46 100644
--- a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
+++ b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
@@ -367,9 +367,10 @@ ports = {0,1},
throughput_rx_mpps = int(
self.scenario_cfg["sla"]["throughput_rx_mpps"])
- assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
- "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
- (throughput_rx_mpps, moongen_result["tx_mpps"])
+ self.verify_SLA(
+ throughput_rx_mpps <= moongen_result["tx_mpps"],
+ "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+ % (throughput_rx_mpps, moongen_result["tx_mpps"]))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
index 33c02d409..9f1a81413 100755
--- a/yardstick/benchmark/scenarios/networking/netperf.py
+++ b/yardstick/benchmark/scenarios/networking/netperf.py
@@ -138,9 +138,9 @@ class Netperf(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
index d52e6b9e1..0ad2ecff5 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_node.py
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -156,9 +156,10 @@ class NetperfNode(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(
+ mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def teardown(self):
"""remove netperf from nodes after test"""
diff --git a/yardstick/benchmark/scenarios/networking/nstat.py b/yardstick/benchmark/scenarios/networking/nstat.py
index 10c560769..ea067f8ab 100644
--- a/yardstick/benchmark/scenarios/networking/nstat.py
+++ b/yardstick/benchmark/scenarios/networking/nstat.py
@@ -121,4 +121,4 @@ class Nstat(base.Scenario):
if rate > sla_rate:
sla_error += "%s rate %f > sla:%s_rate(%f); " % \
(i, rate, i, sla_rate)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index e7d9beea8..6caeab5ef 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -91,9 +91,10 @@ class Ping(base.Scenario):
result.update(utils.flatten_dict_key(ping_result))
if sla_max_rtt is not None:
sla_max_rtt = float(sla_max_rtt)
- assert rtt_result[target_vm_name] <= sla_max_rtt,\
- "rtt %f > sla: max_rtt(%f); " % \
- (rtt_result[target_vm_name], sla_max_rtt)
+ self.verify_SLA(
+ rtt_result[target_vm_name] <= sla_max_rtt,
+ "rtt %f > sla: max_rtt(%f); "
+ % (rtt_result[target_vm_name], sla_max_rtt))
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
# we need to specify a result to satisfy influxdb schema
@@ -102,13 +103,12 @@ class Ping(base.Scenario):
rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
# store result before potential AssertionError
result.update(utils.flatten_dict_key(ping_result))
- if sla_max_rtt is not None:
- raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
- rtt_result[target_vm_name], sla_max_rtt))
-
- else:
- raise AssertionError(
- "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+ self.verify_SLA(sla_max_rtt is None,
+ "packet dropped rtt %f > sla: max_rtt(%f)"
+ % (rtt_result[target_vm_name], sla_max_rtt))
+ self.verify_SLA(False,
+ "packet dropped rtt %f"
+ % (rtt_result[target_vm_name]))
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
index 74855a10f..377278004 100644
--- a/yardstick/benchmark/scenarios/networking/ping6.py
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -59,8 +59,7 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.pre_setup_script, '~/pre_setup.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash pre_setup.sh")
+ self.client.execute("sudo bash pre_setup.sh")
def _get_controller_node(self, host_list):
for host_name in host_list:
@@ -122,7 +121,7 @@ class Ping6(base.Scenario): # pragma: no cover
cmd = "sudo bash %s %s %s" % \
(setup_bash_file, self.openrc, self.external_network)
LOG.debug("Executing setup command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ self.client.execute(cmd)
self.setup_done = True
@@ -171,8 +170,9 @@ class Ping6(base.Scenario): # pragma: no cover
result["rtt"] = float(stdout)
if "sla" in self.scenario_cfg:
sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
- assert result["rtt"] <= sla_max_rtt, \
- "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+ self.verify_SLA(result["rtt"] <= sla_max_rtt,
+ "rtt %f > sla:max_rtt(%f); "
+ % (result["rtt"], sla_max_rtt))
else:
LOG.error("ping6 timeout!!!")
self.run_done = True
@@ -216,5 +216,4 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.post_teardown_script, '~/post_teardown.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash post_teardown.sh")
+ self.client.execute("sudo bash post_teardown.sh")
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index b79b91539..d1d500ff6 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -87,7 +87,7 @@ class Pktgen(base.Scenario):
self.server.send_command(cmd)
self.client.send_command(cmd)
- """multiqueue setup"""
+ # multiqueue setup
if not self._is_irqbalance_disabled():
self._disable_irqbalance()
@@ -132,20 +132,20 @@ class Pktgen(base.Scenario):
def _disable_irqbalance(self):
cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
"/etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
cmd = "sudo service irqbalance stop"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
cmd = "sudo service irqbalance disable"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -158,8 +158,8 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -171,8 +171,8 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -192,8 +192,8 @@ class Pktgen(base.Scenario):
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -206,8 +206,8 @@ class Pktgen(base.Scenario):
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -220,8 +220,8 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -240,8 +240,8 @@ class Pktgen(base.Scenario):
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -282,8 +282,8 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -L %s combined %s" % \
(self.vnic_name, available_queue_number)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
return available_queue_number
@@ -374,8 +374,8 @@ class Pktgen(base.Scenario):
if "sla" in self.scenario_cfg:
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm,
+ "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
index 9a7b975a2..1b018f52a 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
@@ -135,4 +135,4 @@ cat ~/result.log -vT \
LOG.info("sla_max_latency: %d", sla_max_latency)
debug_info = "avg_latency %d > sla_max_latency %d" \
% (avg_latency, sla_max_latency)
- assert avg_latency <= sla_max_latency, debug_info
+ self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
index 497e59ee8..97b9cf73f 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
@@ -143,11 +143,11 @@ class PktgenDPDK(base.Scenario):
cmd = "ip a | grep eth1 2>/dev/null"
LOG.debug("Executing command: %s in %s", cmd, host)
if "server" in host:
- status, stdout, stderr = self.server.execute(cmd)
+ _, stdout, _ = self.server.execute(cmd)
if stdout:
is_run = False
else:
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
@@ -222,5 +222,5 @@ class PktgenDPDK(base.Scenario):
ppm += (sent - received) % sent > 0
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; "
+ % (ppm, sla_max_ppm))
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 705544c41..2b3474070 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -215,15 +215,15 @@ class Vsperf(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 454587829..27bf40dcb 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -231,7 +231,7 @@ class VsperfDPDK(base.Scenario):
is_run = True
cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
return is_run
@@ -325,15 +325,15 @@ class VsperfDPDK(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index d3ed840d8..c57c6edf2 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -223,7 +223,7 @@ class Fio(base.Scenario):
sla_error += "%s %d < " \
"sla:%s(%d); " % (k, v, k, min_v)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():