Diffstat (limited to 'yardstick/benchmark/scenarios/networking')
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py                  |  17
-rw-r--r--  yardstick/benchmark/scenarios/networking/moongen_testpmd.py         |   7
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf.py                 |   6
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_node.py            |   7
-rw-r--r--  yardstick/benchmark/scenarios/networking/nstat.py                   |   2
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py                    |  18
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6.py                   |  13
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py                  | 124
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk.py             |   7
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py  |   8
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py             | 369
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py                  |  31
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py             |  50
13 files changed, 394 insertions, 265 deletions
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 98c45990e..51e044e7b 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -92,7 +92,7 @@ For more info see http://software.es.net/iperf
def teardown(self):
LOG.debug("teardown")
self.host.close()
- status, stdout, stderr = self.target.execute("pkill iperf3")
+ status, _, stderr = self.target.execute("pkill iperf3")
if status:
LOG.warning(stderr)
self.target.close()
@@ -145,7 +145,7 @@ For more info see http://software.es.net/iperf
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.host.execute(cmd)
+ status, stdout, _ = self.host.execute(cmd)
if status:
# error cause in json dict on stdout
raise RuntimeError(stdout)
@@ -165,16 +165,17 @@ For more info see http://software.es.net/iperf
bit_per_second = \
int(iperf_result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
- assert bytes_per_second >= sla_bytes_per_second, \
- "bytes_per_second %d < sla:bytes_per_second (%d); " % \
- (bytes_per_second, sla_bytes_per_second)
+ self.verify_SLA(
+ bytes_per_second >= sla_bytes_per_second,
+ "bytes_per_second %d < sla:bytes_per_second (%d); "
+ % (bytes_per_second, sla_bytes_per_second))
else:
sla_jitter = float(sla_iperf["jitter"])
jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
- assert jitter_ms <= sla_jitter, \
- "jitter_ms %f > sla:jitter %f; " % \
- (jitter_ms, sla_jitter)
+ self.verify_SLA(jitter_ms <= sla_jitter,
+ "jitter_ms %f > sla:jitter %f; "
+ % (jitter_ms, sla_jitter))
def _test():
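Every file in this series makes the same substitution: bare assert statements in SLA checks become calls to a verify_SLA() helper on the scenario base class. Asserts are stripped when Python runs with -O, and an AssertionError is indistinguishable from a programming bug, so a helper that raises a typed SLA exception is more robust. A minimal sketch of the contract these hunks assume (the exception name and message format here are assumptions, not quotes from the yardstick tree):

class SLAValidationError(Exception):
    """Raised when a measured KPI violates the configured SLA."""

class Scenario(object):
    __scenario_type__ = ""

    def verify_SLA(self, condition, error_msg):
        # Unlike assert, this check survives python -O and raises a
        # typed exception the runner can classify as an SLA failure
        # rather than a generic AssertionError.
        if not condition:
            raise SLAValidationError(
                "%s: %s" % (self.__scenario_type__, error_msg))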
diff --git a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
index 86173c9da..e3bd7af46 100644
--- a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
+++ b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
@@ -367,9 +367,10 @@ ports = {0,1},
throughput_rx_mpps = int(
self.scenario_cfg["sla"]["throughput_rx_mpps"])
- assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
- "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
- (throughput_rx_mpps, moongen_result["tx_mpps"])
+ self.verify_SLA(
+ throughput_rx_mpps <= moongen_result["tx_mpps"],
+ "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+ % (throughput_rx_mpps, moongen_result["tx_mpps"]))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
index 33c02d409..9f1a81413 100755
--- a/yardstick/benchmark/scenarios/networking/netperf.py
+++ b/yardstick/benchmark/scenarios/networking/netperf.py
@@ -138,9 +138,9 @@ class Netperf(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
index d52e6b9e1..0ad2ecff5 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_node.py
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -156,9 +156,10 @@ class NetperfNode(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(
+ mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def teardown(self):
"""remove netperf from nodes after test"""
diff --git a/yardstick/benchmark/scenarios/networking/nstat.py b/yardstick/benchmark/scenarios/networking/nstat.py
index 10c560769..ea067f8ab 100644
--- a/yardstick/benchmark/scenarios/networking/nstat.py
+++ b/yardstick/benchmark/scenarios/networking/nstat.py
@@ -121,4 +121,4 @@ class Nstat(base.Scenario):
if rate > sla_rate:
sla_error += "%s rate %f > sla:%s_rate(%f); " % \
(i, rate, i, sla_rate)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
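nstat's check differs slightly from the others: it accumulates every violated rate into one string and verifies once at the end, so a single run reports all SLA violations rather than stopping at the first. An illustrative sketch of the pattern (the method and argument names are hypothetical):

def check_rates(self, rates, sla_rates):
    sla_error = ""
    for name, rate in rates.items():
        sla_rate = float(sla_rates.get(name, float("inf")))
        if rate > sla_rate:
            sla_error += "%s rate %f > sla:%s_rate(%f); " % (
                name, rate, name, sla_rate)
    # one verify_SLA() call carries every violation found above
    self.verify_SLA(sla_error == "", sla_error)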
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index e7d9beea8..1c9510220 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -91,9 +91,10 @@ class Ping(base.Scenario):
result.update(utils.flatten_dict_key(ping_result))
if sla_max_rtt is not None:
sla_max_rtt = float(sla_max_rtt)
- assert rtt_result[target_vm_name] <= sla_max_rtt,\
- "rtt %f > sla: max_rtt(%f); " % \
- (rtt_result[target_vm_name], sla_max_rtt)
+ self.verify_SLA(
+ rtt_result[target_vm_name] <= sla_max_rtt,
+ "rtt %f > sla: max_rtt(%f); "
+ % (rtt_result[target_vm_name], sla_max_rtt))
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
# we need to specify a result to satisfy influxdb schema
@@ -103,12 +104,13 @@ class Ping(base.Scenario):
# store result before potential AssertionError
result.update(utils.flatten_dict_key(ping_result))
if sla_max_rtt is not None:
- raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
- rtt_result[target_vm_name], sla_max_rtt))
-
+ self.verify_SLA(rtt_result[target_vm_name] <= sla_max_rtt,
+ "packet dropped rtt %f > sla: max_rtt(%f)"
+ % (rtt_result[target_vm_name], sla_max_rtt))
else:
- raise AssertionError(
- "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+ self.verify_SLA(False,
+ "packet dropped rtt %f"
+ % (rtt_result[target_vm_name]))
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
index 74855a10f..377278004 100644
--- a/yardstick/benchmark/scenarios/networking/ping6.py
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -59,8 +59,7 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.pre_setup_script, '~/pre_setup.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash pre_setup.sh")
+ self.client.execute("sudo bash pre_setup.sh")
def _get_controller_node(self, host_list):
for host_name in host_list:
@@ -122,7 +121,7 @@ class Ping6(base.Scenario): # pragma: no cover
cmd = "sudo bash %s %s %s" % \
(setup_bash_file, self.openrc, self.external_network)
LOG.debug("Executing setup command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ self.client.execute(cmd)
self.setup_done = True
@@ -171,8 +170,9 @@ class Ping6(base.Scenario): # pragma: no cover
result["rtt"] = float(stdout)
if "sla" in self.scenario_cfg:
sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
- assert result["rtt"] <= sla_max_rtt, \
- "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+ self.verify_SLA(result["rtt"] <= sla_max_rtt,
+ "rtt %f > sla:max_rtt(%f); "
+ % (result["rtt"], sla_max_rtt))
else:
LOG.error("ping6 timeout!!!")
self.run_done = True
@@ -216,5 +216,4 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.post_teardown_script, '~/post_teardown.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash post_teardown.sh")
+ self.client.execute("sudo bash post_teardown.sh")
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index b79b91539..c78108adb 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -87,7 +87,7 @@ class Pktgen(base.Scenario):
self.server.send_command(cmd)
self.client.send_command(cmd)
- """multiqueue setup"""
+ # multiqueue setup
if not self._is_irqbalance_disabled():
self._disable_irqbalance()
@@ -112,18 +112,14 @@ class Pktgen(base.Scenario):
def _get_vnic_driver_name(self):
cmd = "readlink /sys/class/net/%s/device/driver" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return os.path.basename(stdout.strip())
def _is_irqbalance_disabled(self):
"""Did we disable irqbalance already in the guest?"""
is_disabled = False
cmd = "grep ENABLED /etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
if "0" in stdout:
is_disabled = True
@@ -132,49 +128,35 @@ class Pktgen(base.Scenario):
def _disable_irqbalance(self):
cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
"/etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "sudo service irqbalance stop"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "sudo service irqbalance disable"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _setup_irqmapping_ovs(self, queue_number):
cmd = "grep 'virtio0-input.0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "grep 'virtio0-output.0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
if queue_number == 1:
return
@@ -186,44 +168,32 @@ class Pktgen(base.Scenario):
cmd = "grep 'virtio0-input.%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "grep 'virtio0-output.%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _setup_irqmapping_sriov(self, queue_number):
cmd = "grep '%s-TxRx-0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % self.vnic_name
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
if queue_number == 1:
return
@@ -234,24 +204,18 @@ class Pktgen(base.Scenario):
cmd = "grep '%s-TxRx-%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (self.vnic_name, i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _get_sriov_queue_number(self):
"""Get queue number from server as both VMs are the same"""
cmd = "grep %s-TxRx- /proc/interrupts | wc -l" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _get_available_queue_number(self):
@@ -259,9 +223,7 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -l %s | grep Combined | head -1 |" \
"awk '{printf $2}'" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _get_usable_queue_number(self):
@@ -269,9 +231,7 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -l %s | grep Combined | tail -1 |" \
"awk '{printf $2}'" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _enable_ovs_multiqueue(self):
@@ -282,10 +242,8 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -L %s combined %s" % \
(self.vnic_name, available_queue_number)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
return available_queue_number
def _iptables_setup(self):
@@ -294,9 +252,7 @@ class Pktgen(base.Scenario):
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
- status, _, stderr = self.server.execute(cmd, timeout=SSH_TIMEOUT)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd, timeout=SSH_TIMEOUT)
def _iptables_get_result(self):
"""Get packet statistics from server"""
@@ -304,9 +260,7 @@ class Pktgen(base.Scenario):
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def run(self, result):
@@ -356,10 +310,8 @@ class Pktgen(base.Scenario):
duration, queue_number, pps)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd, timeout=SSH_TIMEOUT)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True,
+ timeout=SSH_TIMEOUT)
result.update(jsonutils.loads(stdout))
@@ -374,8 +326,8 @@ class Pktgen(base.Scenario):
if "sla" in self.scenario_cfg:
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm,
+ "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
def _test(): # pragma: no cover
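The bulk of the pktgen changes collapse the repeated "status, stdout, stderr = execute(cmd); if status: raise RuntimeError(stderr)" triad into execute(cmd, raise_on_error=True) where stdout is needed, or run(cmd) where it is not. This also fixes a latent bug in the old code: where two execute() calls shared one status check, a failure on the server was masked because only the client's status was examined. A sketch of the wrapper contract the new calls assume (a local stand-in for illustration, not the real yardstick.ssh API):

import subprocess

class SSHStandIn(object):
    """Local illustration of the execute()/run() split; the real
    client runs commands over SSH rather than locally."""

    def execute(self, cmd, raise_on_error=False, timeout=3600):
        proc = subprocess.run(cmd, shell=True, capture_output=True,
                              text=True, timeout=timeout)
        if raise_on_error and proc.returncode:
            raise RuntimeError(proc.stderr)
        return proc.returncode, proc.stdout, proc.stderr

    def run(self, cmd, raise_on_error=True, timeout=3600):
        # run() is execute() with error checking on by default, for
        # callers that do not need the command's output.
        status, _, _ = self.execute(cmd, raise_on_error=raise_on_error,
                                    timeout=timeout)
        return status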
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
index 9a7b975a2..efb7d8b5d 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
@@ -113,10 +113,7 @@ cat ~/result.log -vT \
{print substr($0,RSTART,RLENGTH)}' \
|grep -v ^$ |awk '{if ($2 != 0) print $2}'\
"""
- client_status, client_stdout, client_stderr = self.client.execute(cmd)
-
- if client_status:
- raise RuntimeError(client_stderr)
+ _, client_stdout, _ = self.client.execute(cmd, raise_on_error=True)
avg_latency = 0
if client_stdout:
@@ -135,4 +132,4 @@ cat ~/result.log -vT \
LOG.info("sla_max_latency: %d", sla_max_latency)
debug_info = "avg_latency %d > sla_max_latency %d" \
% (avg_latency, sla_max_latency)
- assert avg_latency <= sla_max_latency, debug_info
+ self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
index 497e59ee8..97b9cf73f 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
@@ -143,11 +143,11 @@ class PktgenDPDK(base.Scenario):
cmd = "ip a | grep eth1 2>/dev/null"
LOG.debug("Executing command: %s in %s", cmd, host)
if "server" in host:
- status, stdout, stderr = self.server.execute(cmd)
+ _, stdout, _ = self.server.execute(cmd)
if stdout:
is_run = False
else:
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
@@ -222,5 +222,5 @@ class PktgenDPDK(base.Scenario):
ppm += (sent - received) % sent > 0
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; "
+ % (ppm, sla_max_ppm))
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index be2fa3f3b..c5e75d093 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,20 +13,20 @@
# limitations under the License.
import copy
-import logging
-import time
-
import ipaddress
from itertools import chain
+import logging
import os
import sys
+import time
import six
import yaml
+from yardstick.benchmark.contexts import base as context_base
from yardstick.benchmark.scenarios import base as scenario_base
-from yardstick.error import IncorrectConfig
from yardstick.common.constants import LOG_DIR
+from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
@@ -37,20 +37,20 @@ from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh
+
traffic_profile.register_modules()
LOG = logging.getLogger(__name__)
-class NetworkServiceTestCase(scenario_base.Scenario):
- """Class handles Generic framework to do pre-deployment VNF &
- Network service testing """
+class NetworkServiceBase(scenario_base.Scenario):
+ """Base class for Network service testing scenarios"""
- __scenario_type__ = "NSPerf"
+ __scenario_type__ = ""
- def __init__(self, scenario_cfg, context_cfg): # Yardstick API
- super(NetworkServiceTestCase, self).__init__()
+ def __init__(self, scenario_cfg, context_cfg): # pragma: no cover
+ super(NetworkServiceBase, self).__init__()
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
@@ -61,38 +61,66 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self.node_netdevs = {}
self.bin_path = get_nsb_option('bin_path', '')
+ def run(self, *args):
+ pass
+
+ def teardown(self):
+ """ Stop the collector and terminate VNF & TG instance
+
+ :return
+ """
+
+ try:
+ try:
+ self.collector.stop()
+ for vnf in self.vnfs:
+ LOG.info("Stopping %s", vnf.name)
+ vnf.terminate()
+ LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
+ finally:
+ terminate_children()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise RuntimeError("Error in teardown")
+
+ def is_ended(self):
+ return self.traffic_profile is not None and self.traffic_profile.is_ended()
+
def _get_ip_flow_range(self, ip_start_range):
+ """Retrieve a CIDR first and last viable IPs
- # IP range is specified as 'x.x.x.x-y.y.y.y'
+ :param ip_start_range: could be the IP range itself or a dictionary
+ with the host name and the port.
+ :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
+ """
if isinstance(ip_start_range, six.string_types):
return ip_start_range
- node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
+ node_name, range_or_interface = next(iter(ip_start_range.items()),
+ (None, '0.0.0.0'))
if node_name is None:
- # we are manually specifying the range
- ip_addr_range = range_or_interface
+ return range_or_interface
+
+ node = self.context_cfg['nodes'].get(node_name, {})
+ interface = node.get('interfaces', {}).get(range_or_interface)
+ if interface:
+ ip = interface['local_ip']
+ mask = interface['netmask']
else:
- node = self.context_cfg["nodes"].get(node_name, {})
- try:
- # the ip_range is the interface name
- interface = node.get("interfaces", {})[range_or_interface]
- except KeyError:
- ip = "0.0.0.0"
- mask = "255.255.255.0"
- else:
- ip = interface["local_ip"]
- # we can't default these values, they must both exist to be valid
- mask = interface["netmask"]
-
- ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
- hosts = list(ipaddr.hosts())
- if len(hosts) > 2:
- # skip the first host in case of gateway
- ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
- else:
- LOG.warning("Only single IP in range %s", ipaddr)
- # fall back to single IP range
- ip_addr_range = ip
+ ip = '0.0.0.0'
+ mask = '255.255.255.0'
+
+ ipaddr = ipaddress.ip_network(
+ six.text_type('{}/{}'.format(ip, mask)), strict=False)
+ if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
+ ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
+ else:
+ LOG.warning('Only single IP in range %s', ipaddr)
+ ip_addr_range = ip
return ip_addr_range
def _get_traffic_flow(self):
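The rewritten _get_ip_flow_range() above derives the usable range directly from the interface's CIDR: ipaddr[2] skips the network address and the first host (commonly the gateway), ipaddr[-2] stops short of the broadcast address, and the prefixlen + 2 < max_prefixlen guard falls back to a single IP for networks too small to carve a range from. A standalone sketch of the same computation:

import ipaddress

def ip_flow_range(ip, mask):
    # strict=False lets a host address stand in for its network
    net = ipaddress.ip_network(u'{}/{}'.format(ip, mask), strict=False)
    if net.prefixlen + 2 < net.max_prefixlen:
        # skip the network address and the (likely gateway) first
        # host; stop before the broadcast address
        return '{}-{}'.format(net[2], net[-2])
    return ip  # /31, /32 and similar: fall back to the single IP

print(ip_flow_range('10.0.0.5', '255.255.255.0'))  # 10.0.0.2-10.0.0.254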
@@ -116,7 +144,15 @@ class NetworkServiceTestCase(scenario_base.Scenario):
for index, dst_port in enumerate(fflow.get("dst_port", [])):
flow["dst_port_{}".format(index)] = dst_port
- flow["count"] = fflow["count"]
+ if "count" in fflow:
+ flow["count"] = fflow["count"]
+
+ if "srcseed" in fflow:
+ flow["srcseed"] = fflow["srcseed"]
+
+ if "dstseed" in fflow:
+ flow["dstseed"] = fflow["dstseed"]
+
except KeyError:
flow = {}
return {"flow": flow}
@@ -128,17 +164,43 @@ class NetworkServiceTestCase(scenario_base.Scenario):
imix = {}
return imix
+ def _get_ip_priority(self):
+ try:
+ priority = self.scenario_cfg['options']['priority']
+ except KeyError:
+ priority = {}
+ return priority
+
def _get_traffic_profile(self):
profile = self.scenario_cfg["traffic_profile"]
path = self.scenario_cfg["task_path"]
with utils.open_relative_file(profile, path) as infile:
return infile.read()
- def _get_topology(self):
- topology = self.scenario_cfg["topology"]
- path = self.scenario_cfg["task_path"]
- with utils.open_relative_file(topology, path) as infile:
- return infile.read()
+ def _get_duration(self):
+ options = self.scenario_cfg.get('options', {})
+ return options.get('duration',
+ tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
+
+ def _key_list_to_dict(self, key, value_list):
+ value_dict = {}
+ try:
+ for index, count in enumerate(value_list[key]):
+ value_dict["{}_{}".format(key, index)] = count
+ except KeyError:
+ value_dict = {}
+
+ return value_dict
+
+ def _get_simulated_users(self):
+ users = self.scenario_cfg.get("options", {}).get("simulated_users", {})
+ simulated_users = self._key_list_to_dict("uplink", users)
+ return {"simulated_users": simulated_users}
+
+ def _get_page_object(self):
+ objects = self.scenario_cfg.get("options", {}).get("page_object", {})
+ page_object = self._key_list_to_dict("uplink", objects)
+ return {"page_object": page_object}
def _fill_traffic_profile(self):
tprofile = self._get_traffic_profile()
@@ -146,13 +208,29 @@ class NetworkServiceTestCase(scenario_base.Scenario):
tprofile_data = {
'flow': self._get_traffic_flow(),
'imix': self._get_traffic_imix(),
+ 'priority': self._get_ip_priority(),
tprofile_base.TrafficProfile.UPLINK: {},
tprofile_base.TrafficProfile.DOWNLINK: {},
- 'extra_args': extra_args
- }
-
+ 'extra_args': extra_args,
+ 'duration': self._get_duration(),
+ 'page_object': self._get_page_object(),
+ 'simulated_users': self._get_simulated_users()}
traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
- self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
+
+ traffic_config = \
+ self.scenario_cfg.get("options", {}).get("traffic_config", {})
+
+ traffic_vnfd.setdefault("traffic_profile", {})
+ traffic_vnfd["traffic_profile"].update(traffic_config)
+
+ self.traffic_profile = \
+ tprofile_base.TrafficProfile.get(traffic_vnfd)
+
+ def _get_topology(self):
+ topology = self.scenario_cfg["topology"]
+ path = self.scenario_cfg["task_path"]
+ with utils.open_relative_file(topology, path) as infile:
+ return infile.read()
def _render_topology(self):
topology = self._get_topology()
@@ -163,18 +241,18 @@ class NetworkServiceTestCase(scenario_base.Scenario):
topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data)
self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
- def _find_vnf_name_from_id(self, vnf_id):
+ def _find_vnf_name_from_id(self, vnf_id): # pragma: no cover
return next((vnfd["vnfd-id-ref"]
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
- def _find_vnfd_from_vnf_idx(self, vnf_id):
+ def _find_vnfd_from_vnf_idx(self, vnf_id): # pragma: no cover
return next((vnfd
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
@staticmethod
- def find_node_if(nodes, name, if_name, vld_id):
+ def find_node_if(nodes, name, if_name, vld_id): # pragma: no cover
try:
# check for xe0, xe1
intf = nodes[name]["interfaces"][if_name]
@@ -190,8 +268,9 @@ class NetworkServiceTestCase(scenario_base.Scenario):
try:
node0_data, node1_data = vld["vnfd-connection-point-ref"]
except (ValueError, TypeError):
- raise IncorrectConfig("Topology file corrupted, "
- "wrong endpoint count for connection")
+ raise exceptions.IncorrectConfig(
+ error_msg='Topology file corrupted, wrong endpoint count '
+ 'for connection')
node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
@@ -237,15 +316,17 @@ class NetworkServiceTestCase(scenario_base.Scenario):
except KeyError:
LOG.exception("")
- raise IncorrectConfig("Required interface not found, "
- "topology file corrupted")
+ raise exceptions.IncorrectConfig(
+ error_msg='Required interface not found, topology file '
+ 'corrupted')
for vld in self.topology['vld']:
try:
node0_data, node1_data = vld["vnfd-connection-point-ref"]
except (ValueError, TypeError):
- raise IncorrectConfig("Topology file corrupted, "
- "wrong endpoint count for connection")
+ raise exceptions.IncorrectConfig(
+ error_msg='Topology file corrupted, wrong endpoint count '
+ 'for connection')
node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
@@ -264,14 +345,14 @@ class NetworkServiceTestCase(scenario_base.Scenario):
node0_if["peer_intf"] = node1_copy
node1_if["peer_intf"] = node0_copy
- def _update_context_with_topology(self):
+ def _update_context_with_topology(self): # pragma: no cover
for vnfd in self.topology["constituent-vnfd"]:
vnf_idx = vnfd["member-vnf-index"]
vnf_name = self._find_vnf_name_from_id(vnf_idx)
vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
self.context_cfg["nodes"][vnf_name].update(vnfd)
- def _generate_pod_yaml(self):
+ def _generate_pod_yaml(self): # pragma: no cover
context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
# convert OrderedDict to a list
# pod.yaml nodes is a list
@@ -285,7 +366,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
explicit_start=True)
@staticmethod
- def _serialize_node(node):
+ def _serialize_node(node): # pragma: no cover
new_node = copy.deepcopy(node)
# name field is required
# remove context suffix
@@ -307,7 +388,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self._update_context_with_topology()
@classmethod
- def get_vnf_impl(cls, vnf_model_id):
+ def get_vnf_impl(cls, vnf_model_id): # pragma: no cover
""" Find the implementing class from vnf_model["vnf"]["name"] field
:param vnf_model_id: parsed vnfd model ID field
@@ -330,11 +411,12 @@ class NetworkServiceTestCase(scenario_base.Scenario):
except StopIteration:
pass
- raise IncorrectConfig("No implementation for %s found in %s" %
- (expected_name, classes_found))
+ message = ('No implementation for %s found in %s'
+ % (expected_name, classes_found))
+ raise exceptions.IncorrectConfig(error_msg=message)
@staticmethod
- def create_interfaces_from_node(vnfd, node):
+ def create_interfaces_from_node(vnfd, node): # pragma: no cover
ext_intfs = vnfd["vdu"][0]["external-interface"] = []
# have to sort so xe0 goes first
for intf_name, intf in sorted(node['interfaces'].items()):
@@ -402,11 +484,26 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self.vnfs = vnfs
return vnfs
- def setup(self):
- """ Setup infrastructure, provission VNFs & start traffic
+ def pre_run_wait_time(self, time_seconds): # pragma: no cover
+ """Time waited before executing the run method"""
+ time.sleep(time_seconds)
- :return:
- """
+ def post_run_wait_time(self, time_seconds): # pragma: no cover
+ """Time waited after executing the run method"""
+ pass
+
+
+class NetworkServiceTestCase(NetworkServiceBase):
+ """Class handles Generic framework to do pre-deployment VNF &
+ Network service testing """
+
+ __scenario_type__ = "NSPerf"
+
+ def __init__(self, scenario_cfg, context_cfg): # pragma: no cover
+ super(NetworkServiceTestCase, self).__init__(scenario_cfg, context_cfg)
+
+ def setup(self):
+ """Setup infrastructure, provission VNFs & start traffic"""
# 1. Verify if infrastructure mapping can meet topology
self.map_topology_to_infrastructure()
# 1a. Load VNF models
@@ -441,7 +538,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
traffic_gen.listen_traffic(self.traffic_profile)
# register collector with yardstick for KPI collection.
- self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
+ self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
self.collector.start()
# Start the actual traffic
@@ -463,33 +560,125 @@ class NetworkServiceTestCase(scenario_base.Scenario):
result.update(self.collector.get_kpi())
- def teardown(self):
- """ Stop the collector and terminate VNF & TG instance
- :return
+class NetworkServiceRFC2544(NetworkServiceBase):
+ """Class handles RFC2544 Network service testing"""
+
+ __scenario_type__ = "NSPerf-RFC2544"
+
+ def __init__(self, scenario_cfg, context_cfg): # pragma: no cover
+ super(NetworkServiceRFC2544, self).__init__(scenario_cfg, context_cfg)
+
+ def setup(self):
+ """Setup infrastructure, provision VNFs"""
+ self.map_topology_to_infrastructure()
+ self.load_vnf_models()
+
+ traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+ non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
+ try:
+ for vnf in chain(traffic_runners, non_traffic_runners):
+ LOG.info("Instantiating %s", vnf.name)
+ vnf.instantiate(self.scenario_cfg, self.context_cfg)
+ LOG.info("Waiting for %s to instantiate", vnf.name)
+ vnf.wait_for_instantiate()
+ except:
+ LOG.exception("")
+ for vnf in self.vnfs:
+ vnf.terminate()
+ raise
+
+ self._generate_pod_yaml()
+
+ def run(self, output):
+ """ Run experiment
+
+ :param output: scenario output to push results
+ :return: None
"""
+ self._fill_traffic_profile()
+
+ traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+
+ for traffic_gen in traffic_runners:
+ traffic_gen.listen_traffic(self.traffic_profile)
+
+ self.collector = Collector(self.vnfs,
+ context_base.Context.get_physical_nodes())
+ self.collector.start()
+
+ test_completed = False
+ while not test_completed:
+ for traffic_gen in traffic_runners:
+ LOG.info("Run traffic on %s", traffic_gen.name)
+ traffic_gen.run_traffic_once(self.traffic_profile)
+
+ test_completed = True
+ for traffic_gen in traffic_runners:
+ # wait for all tg to complete running traffic
+ status = traffic_gen.wait_on_traffic()
+ LOG.info("Run traffic on %s complete status=%s",
+ traffic_gen.name, status)
+ if status == 'CONTINUE':
+ # continue running if at least one tg is running
+ test_completed = False
+
+ output.push(self.collector.get_kpi())
+
+ self.collector.stop()
+
+class NetworkServiceRFC3511(NetworkServiceBase):
+ """Class handles RFC3511 Network service testing"""
+
+ __scenario_type__ = "NSPerf-RFC3511"
+
+ def __init__(self, scenario_cfg, context_cfg): # pragma: no cover
+ super(NetworkServiceRFC3511, self).__init__(scenario_cfg, context_cfg)
+
+ def setup(self):
+ """Setup infrastructure, provision VNFs"""
+ self.map_topology_to_infrastructure()
+ self.load_vnf_models()
+
+ traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+ non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
try:
- try:
- self.collector.stop()
- for vnf in self.vnfs:
- LOG.info("Stopping %s", vnf.name)
- vnf.terminate()
- LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
- finally:
- terminate_children()
- except Exception:
- # catch any exception in teardown and convert to simple exception
- # never pass exceptions back to multiprocessing, because some exceptions can
- # be unpicklable
- # https://bugs.python.org/issue9400
+ for vnf in chain(traffic_runners, non_traffic_runners):
+ LOG.info("Instantiating %s", vnf.name)
+ vnf.instantiate(self.scenario_cfg, self.context_cfg)
+ LOG.info("Waiting for %s to instantiate", vnf.name)
+ vnf.wait_for_instantiate()
+ except:
LOG.exception("")
- raise RuntimeError("Error in teardown")
+ for vnf in self.vnfs:
+ vnf.terminate()
+ raise
- def pre_run_wait_time(self, time_seconds):
- """Time waited before executing the run method"""
- time.sleep(time_seconds)
+ self._generate_pod_yaml()
- def post_run_wait_time(self, time_seconds):
- """Time waited after executing the run method"""
- pass
+ def run(self, output):
+ """ Run experiment
+
+ :param output: scenario output to push results
+ :return: None
+ """
+
+ self._fill_traffic_profile()
+
+ traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+
+ for traffic_gen in traffic_runners:
+ traffic_gen.listen_traffic(self.traffic_profile)
+
+ self.collector = Collector(self.vnfs,
+ context_base.Context.get_physical_nodes())
+ self.collector.start()
+
+ for traffic_gen in traffic_runners:
+ LOG.info("Run traffic on %s", traffic_gen.name)
+ traffic_gen.run_traffic(self.traffic_profile)
+
+ output.push(self.collector.get_kpi())
+
+ self.collector.stop()
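Beyond the refactor into NetworkServiceBase, the new NSPerf-RFC2544 scenario runs traffic in discrete trials: each pass calls run_traffic_once() on every traffic generator, then polls wait_on_traffic(), and keeps looping while any generator answers 'CONTINUE' (for example while an RFC 2544 throughput search still has iterations left). A toy stub to make the loop's contract concrete (the class and its trial counting are invented for illustration):

class FakeTrafficGen(object):
    def __init__(self, name, trials):
        self.name = name
        self._trials_left = trials

    def run_traffic_once(self, traffic_profile):
        self._trials_left -= 1  # one RFC2544 trial per call

    def wait_on_traffic(self):
        # 'CONTINUE' while the search still needs more trials
        return 'CONTINUE' if self._trials_left > 0 else 'DONE'

tg = FakeTrafficGen('tg__0', trials=3)
test_completed = False
while not test_completed:
    tg.run_traffic_once(None)
    test_completed = tg.wait_on_traffic() != 'CONTINUE'
print('finished after all trials')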
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 705544c41..8344b1595 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -193,37 +193,34 @@ class Vsperf(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
- reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ reader = csv.DictReader(stdout.split('\r\n'), strict=True)
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
# sla check; go through all defined SLAs and check if values measured
# by VSPERF are higher than those defined by SLAs
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 454587829..d5c8a3bfe 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -205,22 +205,17 @@ class VsperfDPDK(base.Scenario):
self.client.send_command(cmd)
else:
cmd = "cat ~/.testpmd.macaddr.port1"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port1_mac = stdout
+
cmd = "cat ~/.testpmd.macaddr.port2"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port2_mac = stdout
cmd = "screen -d -m sudo -E bash ~/testpmd_vsperf.sh %s %s" % \
(self.moongen_port1_mac, self.moongen_port2_mac)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
time.sleep(1)
@@ -231,7 +226,7 @@ class VsperfDPDK(base.Scenario):
is_run = True
cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
return is_run
@@ -245,7 +240,7 @@ class VsperfDPDK(base.Scenario):
self.setup()
# remove results from previous tests
- self.client.execute("rm -rf /tmp/results*")
+ self.client.run("rm -rf /tmp/results*", raise_on_error=False)
# get vsperf options
options = self.scenario_cfg['options']
@@ -291,9 +286,7 @@ class VsperfDPDK(base.Scenario):
cmd = "sshpass -p yardstick ssh-copy-id -o StrictHostKeyChecking=no " \
"root@%s -p 22" % (self.moongen_host_ip)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# execute vsperf
cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
@@ -302,22 +295,19 @@ class VsperfDPDK(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
result['nrFlows'] = multistream
# sla check; go through all defined SLAs and check if values measured
@@ -325,15 +315,15 @@ class VsperfDPDK(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""