author     Rex Lee <limingjiang@huawei.com>        2018-07-31 07:33:29 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>   2018-07-31 07:33:29 +0000
commit     0258ce3bc5713339cf0cf3d7e711cbd8d276f74f (patch)
tree       61ed974809654d3bd96c0abda716e1b0e158895a
parent     fbf9928d76f2d52d5f4687d942d60120db792b7e (diff)
parent     c84cc7000a5f4159d3eb905bf6cf2a8e438bab6f (diff)
Merge "Refactor remote command execution in vsperf_dpdk"
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py                  | 34
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py  | 34
2 files changed, 26 insertions(+), 42 deletions(-)
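
Note on the refactor (this note and the sketch below are not part of the commit): the change replaces the repeated execute()-then-check-status pattern with calls that raise on failure, i.e. execute(cmd, raise_on_error=True) and run(cmd). The stub client below only mimics the calling convention visible in the diff and is not the Yardstick SSH module itself.

    # Illustrative stub only: execute() returns (status, stdout, stderr);
    # run() raises on a non-zero exit status unless raise_on_error=False.
    # Not the real yardstick ssh API, just the shape used in the diff below.
    class StubSSHClient(object):
        def __init__(self, status=0, stdout='', stderr=''):
            self._reply = (status, stdout, stderr)

        def execute(self, cmd, raise_on_error=False):
            status, stdout, stderr = self._reply
            if raise_on_error and status:
                raise RuntimeError(stderr)
            return status, stdout, stderr

        def run(self, cmd, raise_on_error=True):
            status, _, stderr = self._reply
            if raise_on_error and status:
                raise RuntimeError(stderr)
            return status

    client = StubSSHClient(stdout='00:11:22:33:44:55')

    # Old pattern (removed by this change): check the exit status by hand.
    status, stdout, stderr = client.execute("cat ~/.testpmd.macaddr.port1")
    if status:
        raise RuntimeError(stderr)

    # New pattern: let the client raise, keep only the output actually used.
    _, stdout, _ = client.execute("cat ~/.testpmd.macaddr.port1",
                                  raise_on_error=True)
    client.run("rm -rf /tmp/results*", raise_on_error=False)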
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 27bf40dcb..d5c8a3bfe 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -205,22 +205,17 @@ class VsperfDPDK(base.Scenario):
             self.client.send_command(cmd)
         else:
             cmd = "cat ~/.testpmd.macaddr.port1"
-            status, stdout, stderr = self.client.execute(cmd)
-            if status:
-                raise RuntimeError(stderr)
+            _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
             self.tgen_port1_mac = stdout
+
             cmd = "cat ~/.testpmd.macaddr.port2"
-            status, stdout, stderr = self.client.execute(cmd)
-            if status:
-                raise RuntimeError(stderr)
+            _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
             self.tgen_port2_mac = stdout
 
         cmd = "screen -d -m sudo -E bash ~/testpmd_vsperf.sh %s %s" % \
             (self.moongen_port1_mac, self.moongen_port2_mac)
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-        if status:
-            raise RuntimeError(stderr)
+        self.client.run(cmd)
 
         time.sleep(1)
 
@@ -245,7 +240,7 @@ class VsperfDPDK(base.Scenario):
             self.setup()
 
         # remove results from previous tests
-        self.client.execute("rm -rf /tmp/results*")
+        self.client.run("rm -rf /tmp/results*", raise_on_error=False)
 
         # get vsperf options
         options = self.scenario_cfg['options']
@@ -291,9 +286,7 @@ class VsperfDPDK(base.Scenario):
             cmd = "sshpass -p yardstick ssh-copy-id -o StrictHostKeyChecking=no " \
                   "root@%s -p 22" % (self.moongen_host_ip)
             LOG.debug("Executing command: %s", cmd)
-            status, stdout, stderr = self.client.execute(cmd)
-            if status:
-                raise RuntimeError(stderr)
+            self.client.run(cmd)
 
         # execute vsperf
         cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
@@ -302,22 +295,19 @@ class VsperfDPDK(base.Scenario):
         cmd += "--conf-file ~/vsperf.conf "
         cmd += "--test-params=\"%s\"" % (';'.join(test_params))
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-
-        if status:
-            raise RuntimeError(stderr)
+        self.client.run(cmd)
 
         # get test results
         cmd = "cat /tmp/results*/result.csv"
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-
-        if status:
-            raise RuntimeError(stderr)
+        _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
 
         # convert result.csv to JSON format
         reader = csv.DictReader(stdout.split('\r\n'))
-        result.update(next(reader))
+        try:
+            result.update(next(reader))
+        except StopIteration:
+            pass
         result['nrFlows'] = multistream
 
         # sla check; go through all defined SLAs and check if values measured
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index b305fc93b..8bbe6911e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -116,15 +116,6 @@ class VsperfDPDKTestCase(unittest.TestCase):
         self.assertTrue(self.scenario._is_dpdk_setup())
         self.assertTrue(self.scenario.dpdk_setup_done)
 
-    @mock.patch.object(time, 'sleep')
-    def test_dpdk_setup_runtime_error(self, *args):
-        self.assertIsNotNone(self.scenario.client)
-        self.mock_ssh.from_node().execute.return_value = (1, '', '')
-        self.assertTrue(self.scenario.setup_done)
-
-        self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
-
-    @mock.patch.object(time, 'sleep')
     @mock.patch.object(subprocess, 'check_output')
     def test_run_ok(self, *args):
         # run() specific mocks
@@ -135,17 +126,6 @@ class VsperfDPDKTestCase(unittest.TestCase):
         self.scenario.run(result)
         self.assertEqual(result['throughput_rx_fps'], '14797660.000')
 
-    def test_run_failed_vsperf_execution(self):
-        self.mock_ssh.from_node().execute.return_value = (1, '', '')
-
-        self.assertRaises(RuntimeError, self.scenario.run, {})
-
-    def test_run_falied_csv_report(self):
-        # run() specific mocks
-        self.mock_ssh.from_node().execute.return_value = (1, '', '')
-
-        self.assertRaises(RuntimeError, self.scenario.run, {})
-
     @mock.patch.object(time, 'sleep')
     @mock.patch.object(subprocess, 'check_output')
     def test_vsperf_run_sla_fail(self, *args):
@@ -173,6 +153,20 @@ class VsperfDPDKTestCase(unittest.TestCase):
 
     @mock.patch.object(time, 'sleep')
     @mock.patch.object(subprocess, 'check_output')
+    def test_vsperf_run_sla_fail_metric_not_collected_faulty_csv(self, *args):
+        self.scenario.setup()
+
+        self.mock_ssh.from_node().execute.return_value = (
+            0, 'faulty output not csv', '')
+
+        with self.assertRaises(y_exc.SLAValidationError) as raised:
+            self.scenario.run({})
+
+        self.assertIn('throughput_rx_fps was not collected by VSPERF',
+                      str(raised.exception))
+
+    @mock.patch.object(time, 'sleep')
+    @mock.patch.object(subprocess, 'check_output')
     def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
         del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
         self.scenario.setup()
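
A side note on the new StopIteration guard around result.update(next(reader)) (the snippet below is illustrative and not taken from the commit): when the VSPERF output is not valid CSV, csv.DictReader consumes the single line as the header row and yields no data rows, so next(reader) raises StopIteration. Swallowing it leaves the result dict without metrics, and the SLA check then reports that throughput_rx_fps was not collected, which is exactly what the new unit test asserts.

    # Minimal illustration of the CSV corner case the guard covers.
    import csv

    def parse_result_csv(text):
        # Same parsing pattern as the scenario: first row is the header,
        # second row holds the measured values.
        reader = csv.DictReader(text.split('\r\n'))
        result = {}
        try:
            result.update(next(reader))
        except StopIteration:
            pass  # no data row -> no metrics; SLA validation reports the gap
        return result

    print(parse_result_csv('throughput_rx_fps\r\n14797660.000\r\n'))
    # -> {'throughput_rx_fps': '14797660.000'}
    print(parse_result_csv('faulty output not csv'))
    # -> {}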