-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py                       | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py                  | 34
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py       | 39
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py  | 34
4 files changed, 48 insertions(+), 76 deletions(-)
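
What the change does, in short: both scenarios stop hand-rolling the execute-then-check-status-then-raise idiom and lean on the SSH helper instead. Judging from the call sites in the diff, run() raises on a non-zero exit status by default, execute(cmd, raise_on_error=True) raises too but still hands back stdout, and raise_on_error=False downgrades a command to best-effort. A minimal sketch of that assumed contract (the SSHClient class below is a hypothetical stand-in, not yardstick's real yardstick.ssh.SSH; the defaults are inferred from the diff):

    import subprocess

    class SSHClient(object):
        # Hypothetical stand-in illustrating the contract this patch relies
        # on; defaults are inferred from the call sites in the diff.
        def execute(self, cmd, raise_on_error=False):
            # Return (status, stdout, stderr); optionally raise on failure.
            proc = subprocess.run(cmd, shell=True,
                                  capture_output=True, text=True)
            if raise_on_error and proc.returncode:
                raise RuntimeError(proc.stderr)
            return proc.returncode, proc.stdout, proc.stderr

        def run(self, cmd, raise_on_error=True):
            # Only the exit status matters here; raises by default.
            status, _, stderr = self.execute(cmd)
            if raise_on_error and status:
                raise RuntimeError(stderr)
            return status

    client = SSHClient()
    client.run("true")                                        # returns 0
    client.run("rm -rf /tmp/results*", raise_on_error=False)  # best effort
    _, stdout, _ = client.execute("echo hi", raise_on_error=True)
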
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 2b3474070..8344b1595 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -193,22 +193,19 @@ class Vsperf(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
- reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ reader = csv.DictReader(stdout.split('\r\n'), strict=True)
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
# sla check; go through all defined SLAs and check if values measured
# by VSPERF are higher then those defined by SLAs
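
The parsing change deserves a note: strict=True makes csv.DictReader raise csv.Error on malformed quoting instead of silently guessing, and the StopIteration guard covers output with no data row, so a garbled result.csv now leaves result empty and surfaces later as an SLA error ('throughput_rx_fps was not collected by VSPERF') rather than an unhandled exception. A self-contained sketch of exactly that behavior:

    import csv

    def parse_result_csv(stdout):
        # Mirrors the new parsing logic in Vsperf.run().
        result = {}
        reader = csv.DictReader(stdout.split('\r\n'), strict=True)
        try:
            result.update(next(reader))
        except StopIteration:
            # Empty or single-line garbage output: DictReader consumes the
            # first line as the header, finds no data row, and next() raises.
            pass
        return result

    print(parse_result_csv('throughput_rx_fps\r\n14797660.000\r\n'))
    # {'throughput_rx_fps': '14797660.000'}
    print(parse_result_csv('faulty output not csv'))
    # {}
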
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 27bf40dcb..d5c8a3bfe 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -205,22 +205,17 @@ class VsperfDPDK(base.Scenario):
             self.client.send_command(cmd)
         else:
             cmd = "cat ~/.testpmd.macaddr.port1"
-            status, stdout, stderr = self.client.execute(cmd)
-            if status:
-                raise RuntimeError(stderr)
+            _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
             self.tgen_port1_mac = stdout
+
             cmd = "cat ~/.testpmd.macaddr.port2"
-            status, stdout, stderr = self.client.execute(cmd)
-            if status:
-                raise RuntimeError(stderr)
+            _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
             self.tgen_port2_mac = stdout
 
         cmd = "screen -d -m sudo -E bash ~/testpmd_vsperf.sh %s %s" % \
               (self.moongen_port1_mac, self.moongen_port2_mac)
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-        if status:
-            raise RuntimeError(stderr)
+        self.client.run(cmd)
 
         time.sleep(1)
 
@@ -245,7 +240,7 @@ class VsperfDPDK(base.Scenario):
         self.setup()
 
         # remove results from previous tests
-        self.client.execute("rm -rf /tmp/results*")
+        self.client.run("rm -rf /tmp/results*", raise_on_error=False)
 
         # get vsperf options
         options = self.scenario_cfg['options']
@@ -291,9 +286,7 @@ class VsperfDPDK(base.Scenario):
cmd = "sshpass -p yardstick ssh-copy-id -o StrictHostKeyChecking=no " \
"root@%s -p 22" % (self.moongen_host_ip)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# execute vsperf
cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
@@ -302,22 +295,19 @@ class VsperfDPDK(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
result['nrFlows'] = multistream
# sla check; go through all defined SLAs and check if values measured
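
The same division of labor runs through VsperfDPDK: execute(..., raise_on_error=True) wherever stdout is the payload (the testpmd MAC files, the results CSV), run() where only success matters (ssh-copy-id, vsperf itself), and run(..., raise_on_error=False) for the one command allowed to fail, the pre-test cleanup. A condensed sketch of the pattern, reusing the hypothetical SSHClient stand-in from above (helper names are illustrative, not part of the patch):

    def collect_testpmd_macs(client):
        # stdout is the payload, so use execute() and let it raise on error.
        _, port1_mac, _ = client.execute("cat ~/.testpmd.macaddr.port1",
                                         raise_on_error=True)
        _, port2_mac, _ = client.execute("cat ~/.testpmd.macaddr.port2",
                                         raise_on_error=True)
        return port1_mac, port2_mac

    def clear_previous_results(client):
        # Best-effort cleanup must not abort the scenario run.
        client.run("rm -rf /tmp/results*", raise_on_error=False)
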
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index a606543e5..a1c27f5fb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -54,7 +54,8 @@ class VsperfTestCase(unittest.TestCase):
         self._mock_SSH = mock.patch.object(ssh, 'SSH')
         self.mock_SSH = self._mock_SSH.start()
-        self.mock_SSH.from_node().execute.return_value = (0, '', '')
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
 
         self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
         self.mock_subprocess_call = self._mock_subprocess_call.start()
 
@@ -104,40 +105,23 @@ class VsperfTestCase(unittest.TestCase):
     def test_run_ok(self):
         self.scenario.setup()
 
-        self.mock_SSH.from_node().execute.return_value = (
-            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
         result = {}
         self.scenario.run(result)
 
         self.assertEqual(result['throughput_rx_fps'], '14797660.000')
 
     def test_run_ok_setup_not_done(self):
-        self.mock_SSH.from_node().execute.return_value = (
-            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
         result = {}
         self.scenario.run(result)
 
         self.assertTrue(self.scenario.setup_done)
         self.assertEqual(result['throughput_rx_fps'], '14797660.000')
 
-    def test_run_failed_vsperf_execution(self):
-        self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
-                                                         (1, '', ''))
+    def test_run_ssh_command_call_counts(self):
+        self.scenario.run({})
 
-        with self.assertRaises(RuntimeError):
-            self.scenario.run({})
         self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
-
-    def test_run_failed_csv_report(self):
-        self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
-                                                         (0, '', ''),
-                                                         (1, '', ''))
-
-        with self.assertRaises(RuntimeError):
-            self.scenario.run({})
-        self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
+        self.mock_SSH.from_node().run.assert_called_once()
 
     def test_run_sla_fail(self):
         self.mock_SSH.from_node().execute.return_value = (
@@ -160,14 +144,21 @@ class VsperfTestCase(unittest.TestCase):
         self.assertTrue('throughput_rx_fps was not collected by VSPERF'
                         in str(raised.exception))
 
+    def test_run_faulty_result_csv(self):
+        self.mock_SSH.from_node().execute.return_value = (
+            0, 'faulty output not csv', '')
+
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
+
+        self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+                        in str(raised.exception))
+
     def test_run_sla_fail_metric_not_defined_in_sla(self):
         del self.scenario_cfg['sla']['throughput_rx_fps']
         scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
         scenario.setup()
 
-        self.mock_SSH.from_node().execute.return_value = (
-            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
         with self.assertRaises(y_exc.SLAValidationError) as raised:
             scenario.run({})
         self.assertTrue('throughput_rx_fps is not defined in SLA'
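
The test-side cleanup falls out of one decision: setUp() now installs a happy-path result CSV as the shared mock's default, so only tests that exercise failure modes override it, and the per-test return_value boilerplate disappears. A generic sketch of the pattern (class and test names are illustrative):

    import unittest
    from unittest import mock

    class MockDefaultsExample(unittest.TestCase):
        def setUp(self):
            # Happy-path default: every test sees a valid result CSV unless
            # it deliberately overrides the return value.
            self.client = mock.Mock()
            self.client.execute.return_value = (
                0, 'throughput_rx_fps\r\n14797660.000\r\n', '')

        def test_happy_path_needs_no_stubbing(self):
            status, stdout, _ = self.client.execute("cat result.csv")
            self.assertEqual(status, 0)
            self.assertIn('14797660.000', stdout)

        def test_failure_mode_overrides_the_default(self):
            self.client.execute.return_value = (
                0, 'faulty output not csv', '')
            _, stdout, _ = self.client.execute("cat result.csv")
            self.assertNotIn('throughput_rx_fps', stdout)

    if __name__ == '__main__':
        unittest.main()
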
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index b305fc93b..8bbe6911e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -116,15 +116,6 @@ class VsperfDPDKTestCase(unittest.TestCase):
         self.assertTrue(self.scenario._is_dpdk_setup())
         self.assertTrue(self.scenario.dpdk_setup_done)
 
-    @mock.patch.object(time, 'sleep')
-    def test_dpdk_setup_runtime_error(self, *args):
-        self.assertIsNotNone(self.scenario.client)
-        self.mock_ssh.from_node().execute.return_value = (1, '', '')
-        self.assertTrue(self.scenario.setup_done)
-
-        self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
-
-    @mock.patch.object(time, 'sleep')
     @mock.patch.object(subprocess, 'check_output')
     def test_run_ok(self, *args):
         # run() specific mocks
@@ -135,17 +126,6 @@ class VsperfDPDKTestCase(unittest.TestCase):
         self.scenario.run(result)
         self.assertEqual(result['throughput_rx_fps'], '14797660.000')
 
-    def test_run_failed_vsperf_execution(self):
-        self.mock_ssh.from_node().execute.return_value = (1, '', '')
-
-        self.assertRaises(RuntimeError, self.scenario.run, {})
-
-    def test_run_falied_csv_report(self):
-        # run() specific mocks
-        self.mock_ssh.from_node().execute.return_value = (1, '', '')
-
-        self.assertRaises(RuntimeError, self.scenario.run, {})
-
     @mock.patch.object(time, 'sleep')
     @mock.patch.object(subprocess, 'check_output')
     def test_vsperf_run_sla_fail(self, *args):
@@ -173,6 +153,20 @@ class VsperfDPDKTestCase(unittest.TestCase):
 
     @mock.patch.object(time, 'sleep')
     @mock.patch.object(subprocess, 'check_output')
+    def test_vsperf_run_sla_fail_metric_not_collected_faulty_csv(self, *args):
+        self.scenario.setup()
+
+        self.mock_ssh.from_node().execute.return_value = (
+            0, 'faulty output not csv', '')
+
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
+
+        self.assertIn('throughput_rx_fps was not collected by VSPERF',
+                      str(raised.exception))
+
+    @mock.patch.object(time, 'sleep')
+    @mock.patch.object(subprocess, 'check_output')
     def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
         del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
         self.scenario.setup()
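
A final note on the decorators kept throughout these tests: the scenario sleeps between remote commands (for instance after launching testpmd under screen), so the DPDK tests patch time.sleep, along with subprocess.check_output, to stay fast and hermetic. A minimal sketch of the mechanism:

    import time
    import unittest
    from unittest import mock

    class SleepPatchingExample(unittest.TestCase):
        @mock.patch.object(time, 'sleep')
        def test_sleep_is_stubbed(self, mock_sleep):
            time.sleep(30)  # returns immediately under the patch
            mock_sleep.assert_called_once_with(30)

    if __name__ == '__main__':
        unittest.main()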