-rw-r--r--                README.rst                                                     |    4
-rwxr-xr-x                run_tests.sh                                                   |   39
-rw-r--r--                samples/lmbench.yaml                                           |   46
-rw-r--r--                samples/perf.yaml                                              |   43
-rwxr-xr-x                samples/ping-iteration.yaml                                    |   45
-rw-r--r--                setup.py                                                       |    9
-rw-r--r--                tests/unit/benchmark/scenarios/networking/test_pktgen.py       |  195
-rw-r--r--                tests/unit/common/__init__.py                                  |    0
-rw-r--r--                tests/unit/common/test_template_format.py                      |   50
-rw-r--r--                tests/unit/common/test_utils.py                                |   90
-rw-r--r--                tests/unit/test_ssh.py                                         |  285
-rwxr-xr-x                tools/ubuntu-server-cloudimg-modify.sh                         |    2
-rwxr-xr-x [-rw-r--r--]   yardstick/benchmark/runners/arithmetic.py                      |    5
-rwxr-xr-x                yardstick/benchmark/runners/iteration.py                       |  111
-rw-r--r--                yardstick/benchmark/runners/sequence.py                        |    2
-rw-r--r--                yardstick/benchmark/scenarios/compute/__init__.py              |    0
-rw-r--r--                yardstick/benchmark/scenarios/compute/lmbench.py               |  112
-rw-r--r--                yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash   |   41
-rw-r--r--                yardstick/benchmark/scenarios/compute/perf.py                  |  140
-rw-r--r--                yardstick/benchmark/scenarios/compute/perf_benchmark.bash      |   68
-rwxr-xr-x                yardstick/main.py                                              |    6
-rw-r--r--                yardstick/plot/__init__.py                                     |    0
-rw-r--r--                yardstick/plot/plotter.py                                      |  311
23 files changed, 1585 insertions, 19 deletions
diff --git a/README.rst b/README.rst
index 049a42677..f9276abe4 100644
--- a/README.rst
+++ b/README.rst
@@ -79,8 +79,10 @@ Example setup known to work for development and test:
- Development environment: Ubuntu14.04, eclipse, virtual environment
- Cloud: Mirantis OpenStack 6.0 deployed using Virtualbox
+Install dependencies:
+$ sudo apt-get install python-virtualenv python-dev libffi-dev libssl-dev
+
Create a virtual environment:
-$ sudo apt-get install python-virtualenv
$ virtualenv ~/yardstick_venv
$ source ~/yardstick_venv/bin/activate
$ python setup.py develop
diff --git a/run_tests.sh b/run_tests.sh
index b2bf807c1..c8d8e7d1a 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -11,29 +11,48 @@
# Run yardstick's test suite(s)
+getopts ":f" FILE_OPTION
+
run_flake8() {
- echo -n "Running flake8 ... "
- logfile=pep8.log
- flake8 yardstick > $logfile
+ echo "Running flake8 ... "
+ logfile=test_results.log
+ if [ $FILE_OPTION == "f" ]; then
+ flake8 yardstick > $logfile
+ else
+ flake8 yardstick
+ fi
+
if [ $? -ne 0 ]; then
- echo "FAILED, result in $logfile"
+ echo "FAILED"
+ if [ $FILE_OPTION == "f" ]; then
+ echo "Results in $logfile"
+ fi
exit 1
else
- echo "OK, result in $logfile"
+ echo "OK"
fi
}
run_tests() {
- echo -n "Running unittest ... "
- python -m unittest discover -s tests/unit
+ echo "Running unittest ... "
+ if [ $FILE_OPTION == "f" ]; then
+ python -m unittest discover -v -s tests/unit > $logfile 2>&1
+ else
+ python -m unittest discover -v -s tests/unit
+ fi
+
if [ $? -ne 0 ]; then
- echo "FAILED, result in $logfile"
+ if [ $FILE_OPTION == "f" ]; then
+ echo "FAILED, results in $logfile"
+ fi
exit 1
else
- echo "OK, result in $logfile"
+ if [ $FILE_OPTION == "f" ]; then
+ echo "OK, results in $logfile"
+ fi
fi
}
run_flake8
-#run_tests
+run_tests
diff --git a/samples/lmbench.yaml b/samples/lmbench.yaml
new file mode 100644
index 000000000..c7526c04c
--- /dev/null
+++ b/samples/lmbench.yaml
@@ -0,0 +1,46 @@
+---
+# Sample benchmark task config file
+# measure memory read latency using lmbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Lmbench
+ options:
+ stride: 64
+ stop_size: 32
+
+ host: demeter.demo
+
+ runner:
+ type: Arithmetic
+ name: stride
+ stop: 128
+ step: 64
+
+ sla:
+ max_latency: 35
+ action: monitor
+
+context:
+ name: demo
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+ external_network: "net04_ext"
+
+
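
A note on the sla block above: the Lmbench scenario added further down in this change parses the benchmark output as JSON and asserts each measured latency against max_latency (35 ns here; action: monitor means an SLA failure is only logged by the runner rather than aborting the task). A minimal sketch of that check, with made-up sample output:

    import json

    sample_stdout = '[{"size": 0.00049, "latency": 1.2}, {"size": 32.0, "latency": 33.1}]'
    data = json.loads(sample_stdout)          # same shape lmbench_benchmark.bash emits

    sla_max_latency = 35                      # from "max_latency: 35" above
    for result in data:
        assert result['latency'] <= sla_max_latency, \
            "latency %f > sla:max_latency(%f)" % (result['latency'], sla_max_latency)
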
diff --git a/samples/perf.yaml b/samples/perf.yaml
new file mode 100644
index 000000000..e7ba2d0e7
--- /dev/null
+++ b/samples/perf.yaml
@@ -0,0 +1,43 @@
+---
+# Sample benchmark task config file
+# use perf to perform Linux performance measurements
+# this sample demonstrates measurements of various software perf events
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Perf
+ options:
+ load: true
+ events:
+ - task-clock
+ - cpu-clock
+ - context-switches
+ - page-faults
+ - cpu-migrations
+ host: hades.demo
+
+ runner:
+ type: Duration
+ duration: 30
+
+ sla:
+ metric: context-switches
+ smaller_than_expected: true
+ expected_value: 300
+ action: monitor
+
+context:
+ name: demo
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ servers:
+ hades:
+ floating_ip: true
+ networks:
+ test:
+ cidr: "10.0.1.0/24"
+ external_network: "net04_ext"
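
The sla block above relies on the SLA handling of the Perf scenario added later in this change: when smaller_than_expected is present, the measured metric must stay below expected_value. A minimal sketch of that check against a made-up perf result dict:

    output = {"context-switches": 260, "task-clock": 30012.4}   # hypothetical perf output

    metric, exp_val = "context-switches", 300
    smaller_than_exp = True        # the 'smaller_than_expected' key is present above

    if metric not in output:
        assert False, "Metric (%s) not found." % metric
    elif smaller_than_exp:
        assert output[metric] < exp_val, "%s %d >= %d (sla)" % (metric, output[metric], exp_val)
    else:
        assert output[metric] >= exp_val, "%s %d < %d (sla)" % (metric, output[metric], exp_val)
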
diff --git a/samples/ping-iteration.yaml b/samples/ping-iteration.yaml
new file mode 100755
index 000000000..810530c82
--- /dev/null
+++ b/samples/ping-iteration.yaml
@@ -0,0 +1,45 @@
+---
+# Sample benchmark task config file
+# measure network latency using ping
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Ping
+ options:
+ packetsize: 200
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Iteration
+ iterations: 60
+ interval: 1
+
+ sla:
+ max_rtt: 10
+ action: monitor
+
+context:
+ name: demo
+ image: cirros-0.3.3
+ flavor: m1.tiny
+ user: cirros
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ ares:
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+ external_network: "net04_ext"
+
diff --git a/setup.py b/setup.py
index fee8f3c25..f73094ac1 100644
--- a/setup.py
+++ b/setup.py
@@ -10,16 +10,17 @@ setup(
include_package_data=True,
package_data={
'yardstick': [
+ 'benchmark/scenarios/compute/*.bash',
'benchmark/scenarios/networking/*.bash',
'benchmark/scenarios/storage/*.bash',
'resources/files/*'
- ]
+ ]
},
url="https://www.opnfv.org",
install_requires=["backport_ipaddress", # remove with python3
"flake8",
"PyYAML>=3.10",
- "pbr!=0.7,<1.0,>=0.6",
+ "pbr<2.0,>=1.3",
"python-glanceclient>=0.12.0",
"python-heatclient>=0.2.12",
"python-keystoneclient>=0.11.1",
@@ -29,9 +30,13 @@ setup(
"paramiko",
"six"
],
+ extras_require={
+ 'plot': ["matplotlib>=1.4.2"]
+ },
entry_points={
'console_scripts': [
'yardstick=yardstick.main:main',
+ 'yardstick-plot=yardstick.plot.plotter:main [plot]'
],
},
scripts=['tools/yardstick-img-modify']
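
Because matplotlib is now declared under the optional 'plot' extra, the yardstick-plot console script is only usable when that extra is installed (for example with pip's standard extras syntax, pip install .[plot]). A hedged sketch of how a caller could probe for the optional dependency before importing the plotter (hypothetical helper, not part of this change):

    try:
        import matplotlib          # only available when the [plot] extra was installed
        HAVE_PLOT = True
    except ImportError:
        HAVE_PLOT = False

    print("yardstick-plot dependencies available: %s" % HAVE_PLOT)
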
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
new file mode 100644
index 000000000..a20382cb7
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.pktgen.Pktgen
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.networking import pktgen
+
+
+@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
+class PktgenTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': '172.16.0.137',
+ 'target': '172.16.0.138',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key"
+ }
+
+ def test_pktgen_successful_setup(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60},
+ 'ipaddr': '172.16.0.139'
+ }
+ p.setup()
+
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ self.assertIsNotNone(p.server)
+ self.assertIsNotNone(p.client)
+ self.assertEqual(p.setup_done, True)
+
+ def test_pktgen_successful_iptables_setup(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139'
+ }
+ p.server = mock_ssh.SSH()
+ p.number_of_ports = args['options']['number_of_ports']
+
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+
+ p._iptables_setup()
+
+ mock_ssh.SSH().execute.assert_called_with(
+ "sudo iptables -F; "
+ "sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP"
+ % 1010)
+
+ def test_pktgen_unsuccessful_iptables_setup(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139'
+ }
+ p.server = mock_ssh.SSH()
+ p.number_of_ports = args['options']['number_of_ports']
+
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, p._iptables_setup)
+
+ def test_pktgen_successful_iptables_get_result(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139'
+ }
+ p.server = mock_ssh.SSH()
+ p.number_of_ports = args['options']['number_of_ports']
+
+ mock_ssh.SSH().execute.return_value = (0, '150000', '')
+ p._iptables_get_result()
+
+ mock_ssh.SSH().execute.assert_called_with(
+ "sudo iptables -L INPUT -vnx |"
+ "awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'"
+ % 1010)
+
+ def test_pktgen_unsuccessful_iptables_get_result(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139'
+ }
+ p.server = mock_ssh.SSH()
+ p.number_of_ports = args['options']['number_of_ports']
+
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, p._iptables_get_result)
+
+ def test_pktgen_successful_no_sla(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139'
+ }
+ p.server = mock_ssh.SSH()
+ p.client = mock_ssh.SSH()
+
+ mock_iptables_result = mock.Mock()
+ mock_iptables_result.return_value = 149300
+ p._iptables_get_result = mock_iptables_result
+
+ sample_output = '{"packets_per_second": 9753, "errors": 0, \
+ "packets_sent": 149776, "flows": 110}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+ result = p.run(args)
+ expected_result = json.loads(sample_output)
+ expected_result["packets_received"] = 149300
+ self.assertEqual(result, expected_result)
+
+ def test_pktgen_successful_sla(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139',
+ 'sla': {'max_ppm': 10000}
+ }
+ p.server = mock_ssh.SSH()
+ p.client = mock_ssh.SSH()
+
+ mock_iptables_result = mock.Mock()
+ mock_iptables_result.return_value = 149300
+ p._iptables_get_result = mock_iptables_result
+
+ sample_output = '{"packets_per_second": 9753, "errors": 0, \
+ "packets_sent": 149776, "flows": 110}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+ result = p.run(args)
+ expected_result = json.loads(sample_output)
+ expected_result["packets_received"] = 149300
+ self.assertEqual(result, expected_result)
+
+ def test_pktgen_unsuccessful_sla(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139',
+ 'sla': {'max_ppm': 1000}
+ }
+ p.server = mock_ssh.SSH()
+ p.client = mock_ssh.SSH()
+
+ mock_iptables_result = mock.Mock()
+ mock_iptables_result.return_value = 149300
+ p._iptables_get_result = mock_iptables_result
+
+ sample_output = '{"packets_per_second": 9753, "errors": 0, \
+ "packets_sent": 149776, "flows": 110}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, p.run, args)
+
+ def test_pktgen_unsuccessful_script_error(self, mock_ssh):
+
+ p = pktgen.Pktgen(self.ctx)
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10},
+ 'ipaddr': '172.16.0.139',
+ 'sla': {'max_ppm': 1000}
+ }
+ p.server = mock_ssh.SSH()
+ p.client = mock_ssh.SSH()
+
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, p.run, args)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
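
The pass/fail split between the two SLA tests above follows from the mocked numbers: 149776 packets sent and 149300 received. Assuming lost packets per million is computed the same way as in _plot_pktgen() further down in this change, the loss works out to roughly 3178 ppm, which is within max_ppm: 10000 but not max_ppm: 1000:

    packets_sent, packets_received = 149776, 149300
    ppm = 1000000.0 * (packets_sent - packets_received) / packets_sent
    print("lost packets per million: %.0f" % ppm)   # ~3178
    assert ppm <= 10000       # why test_pktgen_successful_sla passes
    # assert ppm <= 1000      # would fail, as test_pktgen_unsuccessful_sla expects
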
diff --git a/tests/unit/common/__init__.py b/tests/unit/common/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/common/__init__.py
diff --git a/tests/unit/common/test_template_format.py b/tests/unit/common/test_template_format.py
new file mode 100644
index 000000000..0e1a1a57d
--- /dev/null
+++ b/tests/unit/common/test_template_format.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick: this file is copied from python-heatclient and slightly modified
+
+import mock
+import unittest
+import yaml
+
+from yardstick.common import template_format
+
+
+class TemplateFormatTestCase(unittest.TestCase):
+
+ def test_parse_to_value_exception(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
+ yaml_loader.side_effect = yaml.scanner.ScannerError()
+ self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+ yaml_loader.side_effect = yaml.parser.ParserError()
+ self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+ yaml_loader.side_effect = \
+ yaml.reader.ReaderError('', '', '', '', '')
+ self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_no_version_format(self):
+
+ yaml = ''
+ self.assertRaises(ValueError, template_format.parse, yaml)
+ yaml2 = "Parameters: {}\n" \
+ "Mappings: {}\n" \
+ "Resources: {}\n" \
+ "Outputs: {}"
+ self.assertRaises(ValueError, template_format.parse, yaml2)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/common/test_utils.py b/tests/unit/common/test_utils.py
new file mode 100644
index 000000000..002d0494c
--- /dev/null
+++ b/tests/unit/common/test_utils.py
@@ -0,0 +1,90 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.common.utils
+
+import os
+import mock
+import unittest
+
+from yardstick.common import utils
+
+
+class IterSubclassesTestCase(unittest.TestCase):
+# Disclaimer: this class is a modified copy from
+# rally/tests/unit/common/plugin/test_discover.py
+# Copyright 2015: Mirantis Inc.
+ def test_itersubclasses(self):
+ class A(object):
+ pass
+
+ class B(A):
+ pass
+
+ class C(A):
+ pass
+
+ class D(C):
+ pass
+
+ self.assertEqual([B, C, D], list(utils.itersubclasses(A)))
+
+
+class TryAppendModuleTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.utils.importutils')
+ def test_try_append_module_not_in_modules(self, mock_importutils):
+
+ modules = {}
+ name = 'foo'
+ utils.try_append_module(name, modules)
+ mock_importutils.import_module.assert_called_with(name)
+
+ @mock.patch('yardstick.common.utils.importutils')
+ def test_try_append_module_already_in_modules(self, mock_importutils):
+
+ modules = {'foo'}
+ name = 'foo'
+ utils.try_append_module(name, modules)
+ self.assertFalse(mock_importutils.import_module.called)
+
+
+class ImportModulesFromPackageTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.utils.os.walk')
+ @mock.patch('yardstick.common.utils.try_append_module')
+ def test_import_modules_from_package_no_mod(self, mock_append, mock_walk):
+
+ sep = os.sep
+ mock_walk.return_value = ([
+ ('..' + sep + 'foo', ['bar'], ['__init__.py']),
+ ('..' + sep + 'foo' + sep + 'bar', [], ['baz.txt', 'qux.rst'])
+ ])
+
+ utils.import_modules_from_package('foo.bar')
+ self.assertFalse(mock_append.called)
+
+ @mock.patch('yardstick.common.utils.os.walk')
+ @mock.patch('yardstick.common.utils.importutils')
+ def test_import_modules_from_package(self, mock_importutils, mock_walk):
+
+ sep = os.sep
+ mock_walk.return_value = ([
+ ('foo' + sep + '..' + sep + 'bar', [], ['baz.py'])
+ ])
+
+ utils.import_modules_from_package('foo.bar')
+ mock_importutils.import_module.assert_called_with('bar.baz')
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/test_ssh.py b/tests/unit/test_ssh.py
new file mode 100644
index 000000000..4260b39bc
--- /dev/null
+++ b/tests/unit/test_ssh.py
@@ -0,0 +1,285 @@
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick comment: this file is a modified copy of
+# rally/tests/unit/common/test_sshutils.py
+
+import os
+import unittest
+import mock
+
+from yardstick import ssh
+
+
+class FakeParamikoException(Exception):
+ pass
+
+
+class SSHTestCase(unittest.TestCase):
+ """Test all small SSH methods."""
+
+ def setUp(self):
+ super(SSHTestCase, self).setUp()
+ self.test_client = ssh.SSH("root", "example.net")
+
+ @mock.patch("yardstick.ssh.SSH._get_pkey")
+ def test_construct(self, mock_ssh__get_pkey):
+ mock_ssh__get_pkey.return_value = "pkey"
+ test_ssh = ssh.SSH("root", "example.net", port=33, pkey="key",
+ key_filename="kf", password="secret")
+ mock_ssh__get_pkey.assert_called_once_with("key")
+ self.assertEqual("root", test_ssh.user)
+ self.assertEqual("example.net", test_ssh.host)
+ self.assertEqual(33, test_ssh.port)
+ self.assertEqual("pkey", test_ssh.pkey)
+ self.assertEqual("kf", test_ssh.key_filename)
+ self.assertEqual("secret", test_ssh.password)
+
+ def test_construct_default(self):
+ self.assertEqual("root", self.test_client.user)
+ self.assertEqual("example.net", self.test_client.host)
+ self.assertEqual(22, self.test_client.port)
+ self.assertIsNone(self.test_client.pkey)
+ self.assertIsNone(self.test_client.key_filename)
+ self.assertIsNone(self.test_client.password)
+
+ @mock.patch("yardstick.ssh.paramiko")
+ def test__get_pkey_invalid(self, mock_paramiko):
+ mock_paramiko.SSHException = FakeParamikoException
+ rsa = mock_paramiko.rsakey.RSAKey
+ dss = mock_paramiko.dsskey.DSSKey
+ rsa.from_private_key.side_effect = mock_paramiko.SSHException
+ dss.from_private_key.side_effect = mock_paramiko.SSHException
+ self.assertRaises(ssh.SSHError, self.test_client._get_pkey, "key")
+
+ @mock.patch("yardstick.ssh.six.moves.StringIO")
+ @mock.patch("yardstick.ssh.paramiko")
+ def test__get_pkey_dss(self, mock_paramiko, mock_string_io):
+ mock_paramiko.SSHException = FakeParamikoException
+ mock_string_io.return_value = "string_key"
+ mock_paramiko.dsskey.DSSKey.from_private_key.return_value = "dss_key"
+ rsa = mock_paramiko.rsakey.RSAKey
+ rsa.from_private_key.side_effect = mock_paramiko.SSHException
+ key = self.test_client._get_pkey("key")
+ dss_calls = mock_paramiko.dsskey.DSSKey.from_private_key.mock_calls
+ self.assertEqual([mock.call("string_key")], dss_calls)
+ self.assertEqual(key, "dss_key")
+ mock_string_io.assert_called_once_with("key")
+
+ @mock.patch("yardstick.ssh.six.moves.StringIO")
+ @mock.patch("yardstick.ssh.paramiko")
+ def test__get_pkey_rsa(self, mock_paramiko, mock_string_io):
+ mock_paramiko.SSHException = FakeParamikoException
+ mock_string_io.return_value = "string_key"
+ mock_paramiko.rsakey.RSAKey.from_private_key.return_value = "rsa_key"
+ dss = mock_paramiko.dsskey.DSSKey
+ dss.from_private_key.side_effect = mock_paramiko.SSHException
+ key = self.test_client._get_pkey("key")
+ rsa_calls = mock_paramiko.rsakey.RSAKey.from_private_key.mock_calls
+ self.assertEqual([mock.call("string_key")], rsa_calls)
+ self.assertEqual(key, "rsa_key")
+ mock_string_io.assert_called_once_with("key")
+
+ @mock.patch("yardstick.ssh.SSH._get_pkey")
+ @mock.patch("yardstick.ssh.paramiko")
+ def test__get_client(self, mock_paramiko, mock_ssh__get_pkey):
+ mock_ssh__get_pkey.return_value = "key"
+ fake_client = mock.Mock()
+ mock_paramiko.SSHClient.return_value = fake_client
+ mock_paramiko.AutoAddPolicy.return_value = "autoadd"
+
+ test_ssh = ssh.SSH("admin", "example.net", pkey="key")
+ client = test_ssh._get_client()
+
+ self.assertEqual(fake_client, client)
+ client_calls = [
+ mock.call.set_missing_host_key_policy("autoadd"),
+ mock.call.connect("example.net", username="admin",
+ port=22, pkey="key", key_filename=None,
+ password=None, timeout=1),
+ ]
+ self.assertEqual(client_calls, client.mock_calls)
+
+ def test_close(self):
+ with mock.patch.object(self.test_client, "_client") as m_client:
+ self.test_client.close()
+ m_client.close.assert_called_once_with()
+ self.assertFalse(self.test_client._client)
+
+ @mock.patch("yardstick.ssh.six.moves.StringIO")
+ def test_execute(self, mock_string_io):
+ mock_string_io.side_effect = stdio = [mock.Mock(), mock.Mock()]
+ stdio[0].read.return_value = "stdout fake data"
+ stdio[1].read.return_value = "stderr fake data"
+ with mock.patch.object(self.test_client, "run", return_value=0)\
+ as mock_run:
+ status, stdout, stderr = self.test_client.execute(
+ "cmd",
+ stdin="fake_stdin",
+ timeout=43)
+ mock_run.assert_called_once_with(
+ "cmd", stdin="fake_stdin", stdout=stdio[0],
+ stderr=stdio[1], timeout=43, raise_on_error=False)
+ self.assertEqual(0, status)
+ self.assertEqual("stdout fake data", stdout)
+ self.assertEqual("stderr fake data", stderr)
+
+ @mock.patch("yardstick.ssh.time")
+ def test_wait_timeout(self, mock_time):
+ mock_time.time.side_effect = [1, 50, 150]
+ self.test_client.execute = mock.Mock(side_effect=[ssh.SSHError,
+ ssh.SSHError,
+ 0])
+ self.assertRaises(ssh.SSHTimeout, self.test_client.wait)
+ self.assertEqual([mock.call("uname")] * 2,
+ self.test_client.execute.mock_calls)
+
+ @mock.patch("yardstick.ssh.time")
+ def test_wait(self, mock_time):
+ mock_time.time.side_effect = [1, 50, 100]
+ self.test_client.execute = mock.Mock(side_effect=[ssh.SSHError,
+ ssh.SSHError,
+ 0])
+ self.test_client.wait()
+ self.assertEqual([mock.call("uname")] * 3,
+ self.test_client.execute.mock_calls)
+
+
+class SSHRunTestCase(unittest.TestCase):
+ """Test SSH.run method in different aspects.
+
+ The "execute" method is also exercised here.
+ """
+
+ def setUp(self):
+ super(SSHRunTestCase, self).setUp()
+
+ self.fake_client = mock.Mock()
+ self.fake_session = mock.Mock()
+ self.fake_transport = mock.Mock()
+
+ self.fake_transport.open_session.return_value = self.fake_session
+ self.fake_client.get_transport.return_value = self.fake_transport
+
+ self.fake_session.recv_ready.return_value = False
+ self.fake_session.recv_stderr_ready.return_value = False
+ self.fake_session.send_ready.return_value = False
+ self.fake_session.exit_status_ready.return_value = True
+ self.fake_session.recv_exit_status.return_value = 0
+
+ self.test_client = ssh.SSH("admin", "example.net")
+ self.test_client._get_client = mock.Mock(return_value=self.fake_client)
+
+ @mock.patch("yardstick.ssh.select")
+ def test_execute(self, mock_select):
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.recv_ready.side_effect = [1, 0, 0]
+ self.fake_session.recv_stderr_ready.side_effect = [1, 0]
+ self.fake_session.recv.return_value = "ok"
+ self.fake_session.recv_stderr.return_value = "error"
+ self.fake_session.exit_status_ready.return_value = 1
+ self.fake_session.recv_exit_status.return_value = 127
+ self.assertEqual((127, "ok", "error"), self.test_client.execute("cmd"))
+ self.fake_session.exec_command.assert_called_once_with("cmd")
+
+ @mock.patch("yardstick.ssh.select")
+ def test_execute_args(self, mock_select):
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.recv_ready.side_effect = [1, 0, 0]
+ self.fake_session.recv_stderr_ready.side_effect = [1, 0]
+ self.fake_session.recv.return_value = "ok"
+ self.fake_session.recv_stderr.return_value = "error"
+ self.fake_session.exit_status_ready.return_value = 1
+ self.fake_session.recv_exit_status.return_value = 127
+
+ result = self.test_client.execute("cmd arg1 'arg2 with space'")
+ self.assertEqual((127, "ok", "error"), result)
+ self.fake_session.exec_command.assert_called_once_with(
+ "cmd arg1 'arg2 with space'")
+
+ @mock.patch("yardstick.ssh.select")
+ def test_run(self, mock_select):
+ mock_select.select.return_value = ([], [], [])
+ self.assertEqual(0, self.test_client.run("cmd"))
+
+ @mock.patch("yardstick.ssh.select")
+ def test_run_nonzero_status(self, mock_select):
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.recv_exit_status.return_value = 1
+ self.assertRaises(ssh.SSHError, self.test_client.run, "cmd")
+ self.assertEqual(1, self.test_client.run("cmd", raise_on_error=False))
+
+ @mock.patch("yardstick.ssh.select")
+ def test_run_stdout(self, mock_select):
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.recv_ready.side_effect = [True, True, False]
+ self.fake_session.recv.side_effect = ["ok1", "ok2"]
+ stdout = mock.Mock()
+ self.test_client.run("cmd", stdout=stdout)
+ self.assertEqual([mock.call("ok1"), mock.call("ok2")],
+ stdout.write.mock_calls)
+
+ @mock.patch("yardstick.ssh.select")
+ def test_run_stderr(self, mock_select):
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.recv_stderr_ready.side_effect = [True, False]
+ self.fake_session.recv_stderr.return_value = "error"
+ stderr = mock.Mock()
+ self.test_client.run("cmd", stderr=stderr)
+ stderr.write.assert_called_once_with("error")
+
+ @mock.patch("yardstick.ssh.select")
+ def test_run_stdin(self, mock_select):
+ """Test run method with stdin.
+
+ The third send call is made with "e2" because only 3 bytes were accepted
+ by the second call, so the remaining 2 bytes of "line2" are sent by the third call.
+ """
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.exit_status_ready.side_effect = [0, 0, 0, True]
+ self.fake_session.send_ready.return_value = True
+ self.fake_session.send.side_effect = [5, 3, 2]
+ fake_stdin = mock.Mock()
+ fake_stdin.read.side_effect = ["line1", "line2", ""]
+ fake_stdin.closed = False
+
+ def close():
+ fake_stdin.closed = True
+ fake_stdin.close = mock.Mock(side_effect=close)
+ self.test_client.run("cmd", stdin=fake_stdin)
+ call = mock.call
+ send_calls = [call("line1"), call("line2"), call("e2")]
+ self.assertEqual(send_calls, self.fake_session.send.mock_calls)
+
+ @mock.patch("yardstick.ssh.select")
+ def test_run_select_error(self, mock_select):
+ self.fake_session.exit_status_ready.return_value = False
+ mock_select.select.return_value = ([], [], [True])
+ self.assertRaises(ssh.SSHError, self.test_client.run, "cmd")
+
+ @mock.patch("yardstick.ssh.time")
+ @mock.patch("yardstick.ssh.select")
+ def test_run_timeout(self, mock_select, mock_time):
+ mock_time.time.side_effect = [1, 3700]
+ mock_select.select.return_value = ([], [], [])
+ self.fake_session.exit_status_ready.return_value = False
+ self.assertRaises(ssh.SSHTimeout, self.test_client.run, "cmd")
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
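
A small standalone model of the byte accounting described in the test_run_stdin docstring above: paramiko's send() reports how many bytes were accepted, and whatever was not accepted is offered again on the next call, which is why the mocked return values 5, 3, 2 lead to the calls 'line1', 'line2', 'e2' (a hedged sketch, not the actual SSH.run implementation):

    data, sent_counts = ["line1", "line2"], [5, 3, 2]
    calls, pending = [], ""
    for chunk in data:
        pending += chunk
        while pending:
            calls.append(pending)
            accepted = sent_counts[len(calls) - 1]   # mocked send() return value
            pending = pending[accepted:]             # keep only the unsent tail
    print(calls)                                     # ['line1', 'line2', 'e2']
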
diff --git a/tools/ubuntu-server-cloudimg-modify.sh b/tools/ubuntu-server-cloudimg-modify.sh
index 96447d8d9..41d654a08 100755
--- a/tools/ubuntu-server-cloudimg-modify.sh
+++ b/tools/ubuntu-server-cloudimg-modify.sh
@@ -29,6 +29,8 @@ apt-get update
apt-get install -y \
fio \
iperf3 \
+ linux-tools-common \
+ linux-tools-generic \
lmbench \
stress
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 9efafffec..bae40eb75 100644..100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -48,8 +48,9 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
sla_action = None
if "sla" in scenario_args:
sla_action = scenario_args["sla"].get("action", "assert")
+ margin = 1 if step > 0 else -1
- for value in range(start, stop+step, step):
+ for value in range(start, stop+margin, step):
options[arg_name] = value
@@ -81,7 +82,7 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
'errors': errors
}
- queue.put({'context': record_context, 'sargs:': scenario_args,
+ queue.put({'context': record_context, 'sargs': scenario_args,
'benchmark': benchmark_output})
LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
new file mode 100755
index 000000000..03dcfae03
--- /dev/null
+++ b/yardstick/benchmark/runners/iteration.py
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+'''A runner that runs a configurable number of times before it returns
+'''
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, context, scenario_args):
+
+ sequence = 1
+
+ interval = context.get("interval", 1)
+ iterations = context.get("iterations", 1)
+ LOG.info("worker START, iterations %d times, class %s", iterations, cls)
+
+ context['runner'] = os.getpid()
+
+ benchmark = cls(context)
+ benchmark.setup()
+ method = getattr(benchmark, method_name)
+
+ record_context = {"runner": context["runner"],
+ "host": context["host"]}
+
+ sla_action = None
+ if "sla" in scenario_args:
+ sla_action = scenario_args["sla"].get("action", "assert")
+
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ {"runner": context["runner"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ data = method(scenario_args)
+ except AssertionError as assertion:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s" % assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ time.sleep(interval)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put({'context': record_context, 'sargs': scenario_args,
+ 'benchmark': benchmark_output})
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ {"runner": context["runner"], "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or (sequence > iterations):
+ LOG.info("worker END")
+ break
+
+ benchmark.teardown()
+
+
+class IterationRunner(base.Runner):
+ '''Run a scenario for a configurable number of times
+
+The scenario is run the configured number of times, with a pause between runs.
+
+ Parameters
+ iterations - number of times the scenario will be run
+ type: int
+ unit: na
+ default: 1
+ interval - time to wait between each scenario invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ '''
+ __execution_type__ = 'Iteration'
+
+ def _run_benchmark(self, cls, method, scenario_args):
+ self.process = multiprocessing.Process(
+ target=_worker_process,
+ args=(self.result_queue, cls, method, self.config, scenario_args))
+ self.process.start()
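
With the values used in samples/ping-iteration.yaml (iterations: 60, interval: 1, action: monitor), the loop above runs the scenario 60 times, sleeping one second between invocations; only an error with no sla action configured would stop it early. A stripped-down sketch of that stop condition (no scenario, SSH or result queue involved):

    import time

    iterations, interval = 60, 1      # from samples/ping-iteration.yaml
    sla_action = "monitor"            # "action: monitor" keeps SLA failures non-fatal
    sequence, errors = 1, ""

    while True:
        # ... the real worker would invoke the scenario method here ...
        time.sleep(interval)
        sequence += 1
        if (errors and sla_action is None) or (sequence > iterations):
            break
    print("ran %d iterations" % (sequence - 1))   # 60
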
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index 52bb69abf..25b65b0b8 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -82,7 +82,7 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
'errors': errors
}
- queue.put({'context': record_context, 'sargs:': scenario_args,
+ queue.put({'context': record_context, 'sargs': scenario_args,
'benchmark': benchmark_output})
LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
diff --git a/yardstick/benchmark/scenarios/compute/__init__.py b/yardstick/benchmark/scenarios/compute/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/__init__.py
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
new file mode 100644
index 000000000..4ce2825c7
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class Lmbench(base.Scenario):
+ """Execute lmbench memory read latency benchmark in a host
+
+ Parameters
+ stride - number of locations in memory between starts of array elements
+ type: int
+ unit: bytes
+ default: 128
+ stop_size - maximum array size to test (minimum value is 0.000512)
+ type: int
+ unit: megabytes
+ default: 16
+
+ Results are accurate to the ~2-5 nanosecond range.
+ """
+ __scenario_type__ = "Lmbench"
+
+ TARGET_SCRIPT = "lmbench_benchmark.bash"
+
+ def __init__(self, context):
+ self.context = context
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+ self.target_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.compute",
+ Lmbench.TARGET_SCRIPT)
+ user = self.context.get("user", "ubuntu")
+ host = self.context.get("host", None)
+ key_filename = self.context.get('key_filename', "~/.ssh/id_rsa")
+
+ LOG.debug("user:%s, host:%s", user, host)
+ self.client = ssh.SSH(user, host, key_filename=key_filename)
+ self.client.wait(timeout=600)
+
+ # copy script to host
+ self.client.run("cat > ~/lmbench.sh",
+ stdin=open(self.target_script, 'rb'))
+
+ self.setup_done = True
+
+ def run(self, args):
+ """execute the benchmark"""
+
+ if not self.setup_done:
+ self.setup()
+
+ options = args['options']
+ stride = options.get('stride', 128)
+ stop_size = options.get('stop_size', 16)
+
+ cmd = "sudo bash lmbench.sh %d %d" % (stop_size, stride)
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ data = json.loads(stdout)
+
+ if "sla" in args:
+ sla_max_latency = int(args['sla']['max_latency'])
+ for result in data:
+ latency = result['latency']
+ assert latency <= sla_max_latency, "latency %f > " \
+ "sla:max_latency(%f)" % (latency, sla_max_latency)
+
+ return data
+
+
+def _test():
+ """internal test function"""
+ key_filename = pkg_resources.resource_filename('yardstick.resources',
+ 'files/yardstick_key')
+ ctx = {'host': '172.16.0.137',
+ 'user': 'ubuntu',
+ 'key_filename': key_filename
+ }
+
+ logger = logging.getLogger('yardstick')
+ logger.setLevel(logging.DEBUG)
+
+ p = Lmbench(ctx)
+
+ options = {'stride': 128, 'stop_size': 16}
+
+ args = {'options': options}
+ result = p.run(args)
+ print result
+
+if __name__ == '__main__':
+ _test()
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash
new file mode 100644
index 000000000..04e3c1a9d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run an lmbench memory read latency benchmark in a host and output,
+# in JSON format, the array sizes in megabytes and the measured load
+# latency over all points in each array in nanoseconds
+
+set -e
+
+SIZE=$1
+shift
+STRIDE=$1
+
+# write the result to stdout in json format
+output_json()
+{
+ iter=0
+ echo [
+ while read DATA
+ do
+ if [ $iter -gt 1 ] && [ -n "$DATA" ]; then
+ echo ,
+ fi
+
+ echo -n $DATA | awk '/ /{printf "{\"size\": %s, \"latency\": %s}", $1, $2}'
+
+ iter=$((iter+1))
+ done
+ echo ]
+}
+
+/usr/lib/lmbench/bin/x86_64-linux-gnu/lat_mem_rd $SIZE $STRIDE 2>&1 | output_json
+
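
The awk pipeline above turns each "size latency" line printed by lat_mem_rd into one JSON object, so the scenario only ever sees a list of {"size": ..., "latency": ...} records. A hedged illustration of that shape in Python, with made-up numbers:

    import json

    lat_mem_rd_lines = ["0.00049 1.146", "0.00098 1.147", "32.00000 33.103"]   # fabricated values
    records = [{"size": float(size), "latency": float(latency)}
               for size, latency in (line.split() for line in lat_mem_rd_lines)]
    print(json.dumps(records))   # what Lmbench.run() above will json.loads() from stdout
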
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
new file mode 100644
index 000000000..62b4297e3
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class Perf(base.Scenario):
+ """Execute perf benchmark in a host
+
+ Parameters
+ events - perf tool software, hardware or tracepoint events
+ type: [str]
+ unit: na
+ default: ['task-clock']
+ load - simulate load on the host by doing IO operations
+ type: bool
+ unit: na
+ default: false
+
+ For more info about perf and perf events see https://perf.wiki.kernel.org
+ """
+
+ __scenario_type__ = "Perf"
+
+ TARGET_SCRIPT = 'perf_benchmark.bash'
+
+ def __init__(self, context):
+ self.context = context
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+ self.target_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.compute', Perf.TARGET_SCRIPT)
+ user = self.context.get('user', 'ubuntu')
+ host = self.context.get('host', None)
+ key_filename = self.context.get('key_filename', '~/.ssh/id_rsa')
+
+ LOG.debug("user:%s, host:%s", user, host)
+ self.client = ssh.SSH(user, host, key_filename=key_filename)
+ self.client.wait(timeout=600)
+
+ # copy script to host
+ self.client.run("cat > ~/perf_benchmark.sh",
+ stdin=open(self.target_script, "rb"))
+
+ self.setup_done = True
+
+ def run(self, args):
+ """execute the benchmark"""
+
+ if not self.setup_done:
+ self.setup()
+
+ options = args['options']
+ events = options.get('events', ['task-clock'])
+
+ events_string = ""
+ for event in events:
+ events_string += event + " "
+
+ # if run by a duration runner
+ duration_time = self.context.get("duration", None)
+ # if run by an arithmetic runner
+ arithmetic_time = options.get("duration", None)
+ if duration_time:
+ duration = duration_time
+ elif arithmetic_time:
+ duration = arithmetic_time
+ else:
+ duration = 30
+
+ if 'load' in options:
+ load = "dd if=/dev/urandom of=/dev/null"
+ else:
+ load = "sleep %d" % duration
+
+ cmd = "sudo bash perf_benchmark.sh '%s' %d %s" \
+ % (load, duration, events_string)
+
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stdout)
+
+ output = json.loads(stdout)
+
+ if "sla" in args:
+ metric = args['sla']['metric']
+ exp_val = args['sla']['expected_value']
+ smaller_than_exp = 'smaller_than_expected' in args['sla']
+
+ if metric not in output:
+ assert False, "Metric (%s) not found." % metric
+ else:
+ if smaller_than_exp:
+ assert output[metric] < exp_val, "%s %d >= %d (sla)" \
+ % (metric, output[metric], exp_val)
+ else:
+ assert output[metric] >= exp_val, "%s %d < %d (sla)" \
+ % (metric, output[metric], exp_val)
+ return output
+
+
+def _test():
+ """internal test function"""
+ key_filename = pkg_resources.resource_filename('yardstick.resources',
+ 'files/yardstick_key')
+ ctx = {'host': '172.16.0.137',
+ 'user': 'ubuntu',
+ 'key_filename': key_filename
+ }
+
+ logger = logging.getLogger('yardstick')
+ logger.setLevel(logging.DEBUG)
+
+ p = Perf(ctx)
+
+ options = {'load': True}
+ args = {'options': options}
+
+ result = p.run(args)
+ print result
+
+if __name__ == '__main__':
+ _test()
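
The duration handling in Perf.run() above gives precedence to a Duration runner's context value, then to a duration swept in the scenario options (Arithmetic runner), and finally falls back to 30 seconds. A minimal sketch of that resolution with plain dicts standing in for the runner context and scenario options:

    def resolve_duration(context, options):
        duration_time = context.get("duration", None)     # set when run by a Duration runner
        arithmetic_time = options.get("duration", None)   # set when swept by an Arithmetic runner
        if duration_time:
            return duration_time
        elif arithmetic_time:
            return arithmetic_time
        return 30                                         # default used by the scenario

    print(resolve_duration({"duration": 30}, {}))   # 30, as in samples/perf.yaml
    print(resolve_duration({}, {"duration": 10}))   # 10, a hypothetical swept value
    print(resolve_duration({}, {}))                 # 30, the fallback
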
diff --git a/yardstick/benchmark/scenarios/compute/perf_benchmark.bash b/yardstick/benchmark/scenarios/compute/perf_benchmark.bash
new file mode 100644
index 000000000..5ae107a52
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/perf_benchmark.bash
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+PAYLOAD_OP=$1
+shift
+DURATION=$1
+shift
+EVENTS=("$@")
+OUTPUT_FILE=/tmp/perfout.txt
+
+# run perf test
+run_perf()
+{
+ COMMA_SEP_E=$( IFS=$','; echo "${EVENTS[*]}" )
+
+ if [[ $PAYLOAD_OP == dd* ]]
+ then
+ sudo perf stat -o $OUTPUT_FILE -e ${COMMA_SEP_E[@]} $PAYLOAD_OP &
+ sleep $DURATION
+ sudo killall -q -u root dd
+ else
+ sudo perf stat -o $OUTPUT_FILE -e ${COMMA_SEP_E[@]} $PAYLOAD_OP
+ fi
+}
+
+# write the result to stdout in json format
+output_json()
+{
+ EVENTS+=('time')
+
+ last_pos=$(( ${#EVENTS[*]} - 1 ))
+ last=${EVENTS[$last_pos]}
+
+ echo -n {
+ for EVENT in ${EVENTS[@]}
+ do
+ value=$(cat $OUTPUT_FILE | grep $EVENT | awk 'match($0,/[0-9]+|[0-9]+\.[0-9]*/, a) { print a[0]}')
+
+ if [[ $EVENT != $last ]]
+ then
+ echo -n \"$EVENT\": $value,
+ else
+ echo -n \"$EVENT\": $value
+ fi
+ done
+ echo }
+}
+
+# main entry
+main()
+{
+ run_perf > /dev/null 2>&1
+ sleep 1
+ output_json
+}
+
+main
diff --git a/yardstick/main.py b/yardstick/main.py
index c16a42e91..418e3daca 100755
--- a/yardstick/main.py
+++ b/yardstick/main.py
@@ -12,15 +12,15 @@
""" yardstick - command line tool for managing benchmarks
Example invocation:
- $ yardstick samples/ping-task.yaml
+ $ yardstick task start samples/ping.yaml
- Servers are the same as VMs (Nova call them servers in the API)
+ Servers are the same as VMs (Nova calls them servers in the API)
Many tests use a client/server architecture. A test client is configured
to use a specific test server e.g. using an IP address. This is true for
example iperf. In some cases the test server is included in the kernel
(ping, pktgen) and no additional software is needed on the server. In other
- cases (iperf) a server process needs to be installed and started
+ cases (iperf) a server process needs to be installed and started.
One server is required to host the test client program (such as ping or
iperf). In the task file this server is called host.
diff --git a/yardstick/plot/__init__.py b/yardstick/plot/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/plot/__init__.py
diff --git a/yardstick/plot/plotter.py b/yardstick/plot/plotter.py
new file mode 100644
index 000000000..f3fb75d3e
--- /dev/null
+++ b/yardstick/plot/plotter.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+''' yardstick-plot - a command line tool for visualizing results from the
+ output file of the yardstick framework.
+
+ Example invocation:
+ $ yardstick-plot -i /tmp/yardstick.out -o /tmp/plots/
+'''
+
+import argparse
+import json
+import os
+import sys
+import time
+import matplotlib.pyplot as plt
+import matplotlib.lines as mlines
+
+
+class Parser(object):
+ ''' Command-line argument and input file parser for yardstick-plot tool'''
+
+ def __init__(self):
+ self.data = {
+ 'ping': [],
+ 'pktgen': [],
+ 'iperf3': [],
+ 'fio': []
+ }
+ self.default_input_loc = "/tmp/yardstick.out"
+
+ def _get_parser(self):
+ '''get a command-line parser'''
+ parser = argparse.ArgumentParser(
+ prog='yardstick-plot',
+ description="A tool for visualizing results from yardstick. "
+ "Currently supports plotting graphs for output files "
+ "from tests: " + str(self.data.keys())
+ )
+ parser.add_argument(
+ '-i', '--input',
+ help="The input file name. If left unspecified then "
+ "it defaults to %s" % self.default_input_loc
+ )
+ parser.add_argument(
+ '-o', '--output-folder',
+ help="The output folder location. If left unspecified then "
+ "it defaults to <script_directory>/plots/"
+ )
+ return parser
+
+ def _add_record(self, record):
+ '''add record to the relevant scenario'''
+ runner_object = record['sargs']['runner']['object']
+ for test_type in self.data.keys():
+ if test_type in runner_object:
+ self.data[test_type].append(record)
+
+ def parse_args(self):
+ '''parse command-line arguments'''
+ parser = self._get_parser()
+ self.args = parser.parse_args()
+ return self.args
+
+ def parse_input_file(self):
+ '''parse the input test results file'''
+ if self.args.input:
+ input_file = self.args.input
+ else:
+ print("No input file specified, reading from %s"
+ % self.default_input_loc)
+ input_file = self.default_input_loc
+
+ try:
+ with open(input_file) as f:
+ for line in f:
+ record = json.loads(line)
+ self._add_record(record)
+ except IOError as e:
+ print(os.strerror(e.errno))
+ sys.exit(1)
+
+
+class Plotter(object):
+ '''Graph plotter for scenario-specific results from yardstick framework'''
+
+ def __init__(self, data, output_folder):
+ self.data = data
+ self.output_folder = output_folder
+ self.fig_counter = 1
+ self.colors = ['g', 'b', 'c', 'm', 'y']
+
+ def plot(self):
+ '''plot the graph(s)'''
+ for test_type in self.data.keys():
+ if self.data[test_type]:
+ plt.figure(self.fig_counter)
+ self.fig_counter += 1
+
+ plt.title(test_type, loc="left")
+ method_name = "_plot_" + test_type
+ getattr(self, method_name)(self.data[test_type])
+ self._save_plot(test_type)
+
+ def _save_plot(self, test_type):
+ '''save the graph to output folder'''
+ timestr = time.strftime("%Y%m%d-%H%M%S")
+ file_name = test_type + "_" + timestr + ".png"
+ if not self.output_folder:
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ self.output_folder = os.path.join(curr_path, "plots")
+ if not os.path.isdir(self.output_folder):
+ os.makedirs(self.output_folder)
+ new_file = os.path.join(self.output_folder, file_name)
+ plt.savefig(new_file)
+ print("Saved graph to " + new_file)
+
+ def _plot_ping(self, records):
+ '''ping test result interpretation and visualization on the graph'''
+ rtts = [r['benchmark']['data'] for r in records]
+ seqs = [r['benchmark']['sequence'] for r in records]
+
+ for i in range(0, len(rtts)):
+ # If SLA failed
+ if not rtts[i]:
+ rtts[i] = 0.0
+ plt.axvline(seqs[i], color='r')
+
+ # If there is a single data-point then display a bar-chart
+ if len(rtts) == 1:
+ plt.bar(1, rtts[0], 0.35, color=self.colors[0])
+ else:
+ plt.plot(seqs, rtts, self.colors[0]+'-')
+
+ self._construct_legend(['rtt'])
+ plt.xlabel("sequence number")
+ plt.xticks(seqs, seqs)
+ plt.ylabel("round trip time in milliseconds (rtt)")
+
+ def _plot_pktgen(self, records):
+ '''pktgen test result interpretation and visualization on the graph'''
+ flows = [r['benchmark']['data']['flows'] for r in records]
+ sent = [r['benchmark']['data']['packets_sent'] for r in records]
+ received = [int(r['benchmark']['data']['packets_received'])
+ for r in records]
+
+ for i in range(0, len(sent)):
+ # If SLA failed
+ if not sent[i] or not received[i]:
+ sent[i] = 0.0
+ received[i] = 0.0
+ plt.axvline(flows[i], color='r')
+
+ ppm = [1000000.0*(i - j)/i for i, j in zip(sent, received)]
+
+ # If there is a single data-point then display a bar-chart
+ if len(ppm) == 1:
+ plt.bar(1, ppm[0], 0.35, color=self.colors[0])
+ else:
+ plt.plot(flows, ppm, self.colors[0]+'-')
+
+ self._construct_legend(['ppm'])
+ plt.xlabel("number of flows")
+ plt.ylabel("lost packets per million packets (ppm)")
+
+ def _plot_iperf3(self, records):
+ '''iperf3 test result interpretation and visualization on the graph'''
+ intervals = []
+ for r in records:
+ # If the record did not fail the SLA
+ if r['benchmark']['data']:
+ intervals.append(r['benchmark']['data']['intervals'])
+ else:
+ intervals.append(None)
+
+ kbps = [0]
+ seconds = [0]
+ for i, val in enumerate(intervals):
+ if val:
+ for j, _ in enumerate(intervals):
+ kbps.append(val[j]['sum']['bits_per_second']/1000)
+ seconds.append(seconds[-1] + val[j]['sum']['seconds'])
+ else:
+ kbps.append(0.0)
+ # The duration of the failed test is unknown, so add 1 second
+ # TODO: find a more accurate solution, or change the x-axis from
+ # seconds to measurement number
+ seconds.append(seconds[-1] + 1)
+ plt.axvline(seconds[-1], color='r')
+
+ self._construct_legend(['bandwidth'])
+ plt.plot(seconds[1:], kbps[1:], self.colors[0]+'-')
+ plt.xlabel("time in seconds")
+ plt.ylabel("bandwidth in Kb/s")
+
+ def _plot_fio(self, records):
+ '''fio test result interpretation and visualization on the graph'''
+ rw_types = [r['sargs']['options']['rw'] for r in records]
+ seqs = [x for x in range(1, len(records) + 1)]
+ data = {}
+
+ for i in range(0, len(records)):
+ is_r_type = rw_types[i] == "read" or rw_types[i] == "randread"
+ is_w_type = rw_types[i] == "write" or rw_types[i] == "randwrite"
+ is_rw_type = rw_types[i] == "rw" or rw_types[i] == "randrw"
+
+ if is_r_type or is_rw_type:
+ # Remove trailing 'usec' and convert to float
+ data['read_lat'] = \
+ [r['benchmark']['data']['read_lat'][:-4] for r in records]
+ data['read_lat'] = \
+ [float(i) for i in data['read_lat']]
+ # Remove trailing 'KB/s' and convert to float
+ data['read_bw'] = \
+ [r['benchmark']['data']['read_bw'][:-4] for r in records]
+ data['read_bw'] = \
+ [float(i) for i in data['read_bw']]
+ # Convert to int
+ data['read_iops'] = \
+ [r['benchmark']['data']['read_iops'] for r in records]
+ data['read_iops'] = \
+ [int(i) for i in data['read_iops']]
+
+ if is_w_type or is_rw_type:
+ data['write_lat'] = \
+ [r['benchmark']['data']['write_lat'][:-4] for r in records]
+ data['write_lat'] = \
+ [float(i) for i in data['write_lat']]
+
+ data['write_bw'] = \
+ [r['benchmark']['data']['write_bw'][:-4] for r in records]
+ data['write_bw'] = \
+ [float(i) for i in data['write_bw']]
+
+ data['write_iops'] = \
+ [r['benchmark']['data']['write_iops'] for r in records]
+ data['write_iops'] = \
+ [int(i) for i in data['write_iops']]
+
+ # Divide the area into 3 subplots, sharing a common x-axis
+ fig, axl = plt.subplots(3, sharex=True)
+ axl[0].set_title("fio", loc="left")
+
+ self._plot_fio_helper(data, seqs, 'read_bw', self.colors[0], axl[0])
+ self._plot_fio_helper(data, seqs, 'write_bw', self.colors[1], axl[0])
+ axl[0].set_ylabel("Bandwidth in KB/s")
+
+ self._plot_fio_helper(data, seqs, 'read_iops', self.colors[0], axl[1])
+ self._plot_fio_helper(data, seqs, 'write_iops', self.colors[1], axl[1])
+ axl[1].set_ylabel("IOPS")
+
+ self._plot_fio_helper(data, seqs, 'read_lat', self.colors[0], axl[2])
+ self._plot_fio_helper(data, seqs, 'write_lat', self.colors[1], axl[2])
+ axl[2].set_ylabel("Latency in " + u"\u00B5s")
+
+ self._construct_legend(['read', 'write'], obj=axl[0])
+ plt.xlabel("Sequence number")
+ plt.xticks(seqs, seqs)
+
+ def _plot_fio_helper(self, data, seqs, key, bar_color, axl):
+ '''check if measurements exist for a key and then plot the
+ data to a given subplot'''
+ if key in data:
+ if len(data[key]) == 1:
+ axl.bar(0.1, data[key], 0.35, color=bar_color)
+ else:
+ line_style = bar_color + '-'
+ axl.plot(seqs, data[key], line_style)
+
+ def _construct_legend(self, legend_texts, obj=plt):
+ '''construct legend for the plot or subplot'''
+ ci = 0
+ lines = []
+
+ for text in legend_texts:
+ line = mlines.Line2D([], [], color=self.colors[ci], label=text)
+ lines.append(line)
+ ci += 1
+
+ lines.append(mlines.Line2D([], [], color='r', label="SLA failed"))
+
+ getattr(obj, "legend")(
+ bbox_to_anchor=(0.25, 1.02, 0.75, .102),
+ loc=3,
+ borderaxespad=0.0,
+ ncol=len(lines),
+ mode="expand",
+ handles=lines
+ )
+
+
+def main():
+ parser = Parser()
+ args = parser.parse_args()
+ print("Parsing input file")
+ parser.parse_input_file()
+ print("Initializing plotter")
+ plotter = Plotter(parser.data, args.output_folder)
+ print("Plotting graph(s)")
+ plotter.plot()
+
+if __name__ == '__main__':
+ main()
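
_plot_fio() above strips the fixed-width unit suffixes that the fio scenario reports before converting the values to numbers; both 'usec' and 'KB/s' happen to be four characters long, hence the [:-4] slices. A tiny illustration with made-up fio fields:

    read_lat, read_bw, read_iops = "430.78usec", "1024KB/s", "256"   # fabricated fio values
    print(float(read_lat[:-4]))   # 430.78  (latency in microseconds)
    print(float(read_bw[:-4]))    # 1024.0  (bandwidth in KB/s)
    print(int(read_iops))         # 256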