-rwxr-xr-x  ci/build-vsperf.sh                            2
-rw-r--r--  conf/00_common.conf                          27
-rw-r--r--  docs/testing/user/userguide/testusage.rst    76
-rw-r--r--  requirements.txt                              1
-rw-r--r--  testcases/testcase.py                         4
-rw-r--r--  tools/report/report_foot.rst                  4
-rwxr-xr-x  vsperf                                      186
7 files changed, 261 insertions, 39 deletions
diff --git a/ci/build-vsperf.sh b/ci/build-vsperf.sh
index cfe964ef..265a2031 100755
--- a/ci/build-vsperf.sh
+++ b/ci/build-vsperf.sh
@@ -137,7 +137,7 @@ function print_results() {
printf " %-70s %-6s\n" "result_${i}" "FAILED"
EXIT=$EXIT_TC_FAILED
else
- RES_FILE=`ls -1 $1 | egrep "result_${i}_[0-9a-zA-Z\-]+.csv"`
+ RES_FILE=`ls -1 $1 | egrep "result_[0-9]+_${i}_[0-9a-zA-Z\-]+.csv"`
if [ "x$RES_FILE" != "x" -a -e "${1}/${RES_FILE}" ]; then
if grep ^FAILED "${1}/${RES_FILE}" &> /dev/null ; then
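
The updated pattern follows the new result file naming introduced by this patch (see
testcases/testcase.py below), where each result file is prefixed with the index of the
test within the run. A minimal Python sketch of the match, using a hypothetical file
name and deployment:

    import re

    test_name = 'phy2phy_tput'                     # plays the role of ${i} in the script
    pattern = r'result_[0-9]+_{}_[0-9a-zA-Z\-]+\.csv'.format(test_name)

    print(bool(re.match(pattern, 'result_0_phy2phy_tput_p2p.csv')))   # True  (new naming)
    print(bool(re.match(pattern, 'result_phy2phy_tput_p2p.csv')))     # False (old naming)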
diff --git a/conf/00_common.conf b/conf/00_common.conf
index 279c67b4..d54c8a5a 100644
--- a/conf/00_common.conf
+++ b/conf/00_common.conf
@@ -90,9 +90,6 @@ PATHS = {}
# shell command to use when running commands through Pexpect
SHELL_CMD = ['/bin/bash', '-c']
-# internal list to keep track of PIDs of jobs executed by vsperf
-_EXECUTED_PIDS = []
-
# ############################
# Logging configuration
# ############################
@@ -122,6 +119,15 @@ TEST_PARAMS = {}
# delay enforced after every step to allow system to process changes
TEST_STEP_DELAY = 5
+# parameter used, when running multiple tests in a series, to accumulate the
+# _PARAMS_LIST parameters across the tests
+CUMULATIVE_PARAMS = False
+
+# metric used by the performance matrix for comparison and analysis
+# of tests run in a series. Must always refer to a numeric value.
+# For example: 'throughput_rx_mbps', 'throughput_rx_fps', 'avg_latency_ns'
+MATRIX_METRIC = 'throughput_rx_fps'
+
# ############################
# Modules
# ############################
@@ -131,3 +137,18 @@ TEST_STEP_DELAY = 5
# Example:
# EXCLUDE_MODULES = ['ovs_vanilla', 'qemu_virtio_net', 'pidstat']
EXCLUDE_MODULES = ["testcenter-rfc2544-throughput"]
+
+# ############################
+# Vsperf Internal Options
+# ############################
+# The following options should not be changed by the user.
+
+# internal list to keep track of PIDs of jobs executed by vsperf
+_EXECUTED_PIDS = []
+
+# dictionary containing the test-specific parameters of all tests being run,
+# used for cumulative parameter assignment and by the performance matrix
+_PARAMS_LIST = {}
+
+# index number of the current test, used for naming of result files
+_TEST_INDEX = 0
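
How these new options interact when several tests run in a series is easiest to see in a
short sketch. The logic below mirrors the changes to vsperf's main() further down in this
patch; the parameter values are made up for illustration:

    params_list = [{'TRAFFICGEN_DURATION': 10}, {'TRAFFICGEN_PKT_SIZES': (64,)}]
    cumulative = True                              # CUMULATIVE_PARAMS

    effective = {}
    for i in range(3):                             # three tests, only two parameter sets
        index = min(i, len(params_list) - 1)       # the last set is reused for extra tests
        if cumulative:
            effective.update(params_list[index])   # inherit the previous test's parameters
        else:
            effective = dict(params_list[index])
        print(i, effective)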
diff --git a/docs/testing/user/userguide/testusage.rst b/docs/testing/user/userguide/testusage.rst
index 20c30a40..f679566e 100644
--- a/docs/testing/user/userguide/testusage.rst
+++ b/docs/testing/user/userguide/testusage.rst
@@ -155,6 +155,17 @@ Example:
$ ./vsperf --test-params "TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,);" \
"GUEST_LOOPBACK=['testpmd','l2fwd']" pvvp_tput
+The ``--test-params`` command line argument can also be used to override default
+configuration values for multiple tests. Each element of the provided list is applied
+to the test with the same index. If more tests are run than parameter strings are
+provided, the last element of the list is reused for the remaining tests.
+
+.. code:: console
+
+   $ ./vsperf --test-params "['TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)'," \
+ "'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)']" \
+ pvvp_tput pvvp_tput
+
The second option is to override configuration items by ``Parameters`` section
of the test case definition. The configuration items can be added into ``Parameters``
dictionary with their new values. These values will override values defined in
@@ -234,6 +245,12 @@ To run a single test:
Where $TESTNAME is the name of the vsperf test you would like to run.
+To run a test multiple times, repeat its name on the command line:
+
+.. code-block:: console
+
+ $ ./vsperf $TESTNAME $TESTNAME $TESTNAME
+
To run a group of tests, for example all tests with a name containing
'RFC2544':
@@ -256,6 +273,30 @@ Some tests allow for configurable parameters, including test duration
--tests RFC2544Tput \
--test-params "TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)"
+To specify configurable parameters for multiple tests, use a list of
+parameter strings, with one element for each test:
+
+.. code:: console
+
+ $ ./vsperf --conf-file user_settings.py \
+ --test-params "['TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)',"\
+ "'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)']" \
+ phy2phy_cont phy2phy_cont
+
+If the ``CUMULATIVE_PARAMS`` setting is set to True and different parameters are
+provided for each test using ``--test-params``, each test inherits the parameters of
+the previous test before applying its own.
+With ``CUMULATIVE_PARAMS`` set to True the following command is equivalent to the
+previous example:
+
+.. code:: console
+
+ $ ./vsperf --conf-file user_settings.py \
+ --test-params "['TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)',"\
+ "'TRAFFICGEN_PKT_SIZES=(64,)']" \
+ phy2phy_cont phy2phy_cont
+
+
For all available options, check out the help dialog:
.. code-block:: console
@@ -584,7 +625,7 @@ The supported dpdk guest bind drivers are:
.. code-block:: console
- 'uio_pci_generic' - Use uio_pci_generic driver
+ 'uio_pci_generic' - Use uio_pci_generic driver
'igb_uio_from_src' - Build and use the igb_uio driver from the dpdk src
files
'vfio_no_iommu' - Use vfio with no iommu option. This requires custom
@@ -915,6 +956,39 @@ Example of execution of VSPERF in "trafficgen" mode:
$ ./vsperf -m trafficgen --trafficgen IxNet --conf-file vsperf.conf \
--test-params "TRAFFIC={'traffic_type':'rfc2544_continuous','bidir':'False','framerate':60}"
+Performance Matrix
+^^^^^^^^^^^^^^^^^^
+
+The ``--matrix`` command line argument analyses and displays the performance of
+all the tests run. Using the metric specified by ``MATRIX_METRIC`` in the conf-file,
+the first test is set as the baseline and all the other tests are compared to it.
+The ``MATRIX_METRIC`` must always refer to a numeric value to enable comparison.
+A table listing the test ID, test name, metric value, change of the metric in percent
+and the test parameters used for each test is printed to the console and saved into
+the results directory.
+
+Example of 2 tests being compared using Performance Matrix:
+
+.. code-block:: console
+
+ $ ./vsperf --conf-file user_settings.py \
+ --test-params "['TRAFFICGEN_PKT_SIZES=(64,)',"\
+ "'TRAFFICGEN_PKT_SIZES=(128,)']" \
+ phy2phy_cont phy2phy_cont --matrix
+
+Example output:
+
+.. code-block:: console
+
+ +------+--------------+---------------------+----------+---------------------------------------+
+ | ID | Name | throughput_rx_fps | Change | Parameters, CUMULATIVE_PARAMS = False |
+ +======+==============+=====================+==========+=======================================+
+ | 0 | phy2phy_cont | 23749000.000 | 0 | 'TRAFFICGEN_PKT_SIZES': [64] |
+ +------+--------------+---------------------+----------+---------------------------------------+
+ | 1 | phy2phy_cont | 16850500.000 | -29.048 | 'TRAFFICGEN_PKT_SIZES': [128] |
+ +------+--------------+---------------------+----------+---------------------------------------+
+
+
Code change verification by pylint
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
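
The cumulative example in this file is documented as equivalent to the explicit one; a
small sketch (not part of the documentation, dictionaries correspond to the parsed
``--test-params`` elements) shows why:

    explicit = [
        {'TRAFFICGEN_DURATION': 10, 'TRAFFICGEN_PKT_SIZES': (128,)},
        {'TRAFFICGEN_DURATION': 10, 'TRAFFICGEN_PKT_SIZES': (64,)},
    ]
    cumulative_input = [
        {'TRAFFICGEN_DURATION': 10, 'TRAFFICGEN_PKT_SIZES': (128,)},
        {'TRAFFICGEN_PKT_SIZES': (64,)},           # duration inherited from the first test
    ]

    effective = {}
    resolved = []
    for params in cumulative_input:
        effective.update(params)                   # CUMULATIVE_PARAMS = True behaviour
        resolved.append(dict(effective))

    print(resolved == explicit)                    # True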
diff --git a/requirements.txt b/requirements.txt
index 00108452..894c1cc5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@ stcrestclient
matplotlib
numpy
pycrypto
+tabulate
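
tabulate is the only new dependency; it renders the performance matrix tables. A minimal
sketch of how the new generate_performance_matrix() in vsperf (added below) calls it,
using the throughput figures from the documentation example:

    from tabulate import tabulate

    rows = [[0, 'phy2phy_cont', 23749000.0, 0],
            [1, 'phy2phy_cont', 16850500.0, -29.048]]
    print(tabulate(rows, headers=('ID', 'Name', 'throughput_rx_fps', 'Change [%]'),
                   tablefmt='grid', floatfmt='0.3f'))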
diff --git a/testcases/testcase.py b/testcases/testcase.py
index ebf1e797..18c3186c 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -257,8 +257,8 @@ class TestCase(object):
loader.get_loadgen_class(),
self._load_cfg)
- self._output_file = os.path.join(self._results_dir, "result_" + self.name +
- "_" + self.deployment + ".csv")
+ self._output_file = os.path.join(self._results_dir, "result_{}_{}_{}.csv".format(
+ str(S.getValue('_TEST_INDEX')), self.name, self.deployment))
self._step_status = {'status' : True, 'details' : ''}
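
With the test index added to the file name, repeated runs of the same test no longer
write to the same result file. A sketch with hypothetical values shows the new format:

    import os

    results_dir, test_index = '/tmp/results', 2    # hypothetical values
    name, deployment = 'phy2phy_tput', 'p2p'

    output_file = os.path.join(results_dir, "result_{}_{}_{}.csv".format(
        str(test_index), name, deployment))
    print(output_file)                             # /tmp/results/result_2_phy2phy_tput_p2p.csv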
diff --git a/tools/report/report_foot.rst b/tools/report/report_foot.rst
index 5045e186..a49e3452 100644
--- a/tools/report/report_foot.rst
+++ b/tools/report/report_foot.rst
@@ -5,6 +5,7 @@
Rationale for decisions
=======================
+
The tests conducted do not have pass/fail/conditional-pass criteria. The test
is simply conducted and the results are reported.
@@ -12,6 +13,7 @@ is simply conducted and the results are reported.
Conclusions and recommendations
===============================
+
The test results are stable. The vsperf CI jobs that were used to obtain the
results can be found at https://wiki.opnfv.org/wiki/vsperf_results.
@@ -20,11 +22,13 @@ General
Glossary
--------
+
- NFV - Network Function Virtualization
- Mbps - 1,000,000bps
Document change procedures and history
--------------------------------------
+
=============================================== ================= =============
Document ID Author Date Modified
=============================================== ================= =============
diff --git a/vsperf b/vsperf
index 5589ac39..d205ad1f 100755
--- a/vsperf
+++ b/vsperf
@@ -23,6 +23,7 @@ import sys
import argparse
import re
import time
+import csv
import datetime
import shutil
import unittest
@@ -32,6 +33,8 @@ import glob
import subprocess
import ast
import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
from conf import settings
import core.component_factory as component_factory
from core.loader import Loader
@@ -42,7 +45,6 @@ from tools import networkcard
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
-
sys.dont_write_bytecode = True
VERBOSITY_LEVELS = {
@@ -61,40 +63,68 @@ _TEMPLATE_RST = {'head' : os.path.join(_CURR_DIR, 'tools/report/report_head.rst
'tmp' : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
}
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+ "The following performance matrix was generated with the results of all the\n"\
+ "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
_LOGGER = logging.getLogger()
+def parse_param_string(values):
+ """
+ Parse and split a single '--test-params' argument.
+
+ This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+    values. For multiple overrides use a ;-separated list,
+ e.g. --test-params 'x=z; y=(a,b)'
+ """
+ results = {}
+
+ if values == '':
+ return {}
+
+ for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+ param = param.strip()
+ value = value.strip()
+ if param:
+ if value:
+ # values are passed inside string from CLI, so we must retype them accordingly
+ try:
+ results[param] = ast.literal_eval(value)
+ except ValueError:
+ # for backward compatibility, we have to accept strings without quotes
+ _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+ param, str(value))
+ results[param] = str(value)
+ else:
+ results[param] = True
+ return results
+
+
def parse_arguments():
"""
Parse command line arguments.
"""
class _SplitTestParamsAction(argparse.Action):
"""
- Parse and split the '--test-params' argument.
+ Parse and split '--test-params' arguments.
- This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
- values. For multiple overrides use a ; separated list for
+ This expects either a single list of ; separated overrides
+ as 'x=y', 'x=y,z' or 'x' (implicit true) values.
e.g. --test-params 'x=z; y=(a,b)'
+ Or a list of these ; separated lists with overrides for
+ multiple tests.
+ e.g. --test-params "['x=z; y=(a,b)','x=z']"
"""
def __call__(self, parser, namespace, values, option_string=None):
- results = {}
-
- for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
- param = param.strip()
- value = value.strip()
- if len(param):
- if len(value):
- # values are passed inside string from CLI, so we must retype them accordingly
- try:
- results[param] = ast.literal_eval(value)
- except ValueError:
- # for backward compatibility, we have to accept strings without quotes
- _LOGGER.warning("Adding missing quotes around string value: %s = %s",
- param, str(value))
- results[param] = str(value)
- else:
- results[param] = True
+ if values[0] == '[':
+ input_list = ast.literal_eval(values)
+ parameter_list = []
+ for test_params in input_list:
+ parameter_list.append(parse_param_string(test_params))
+ else:
+ parameter_list = parse_param_string(values)
+ results = {'_PARAMS_LIST':parameter_list}
setattr(namespace, self.dest, results)
class _ValidateFileAction(argparse.Action):
@@ -126,7 +156,7 @@ def parse_arguments():
def list_logging_levels():
"""Give a summary of all available logging levels.
- :return: List of verbosity level names in decreasing order of
+ :return: List of verbosity level names in decreasing order of
verbosity
"""
return sorted(VERBOSITY_LEVELS.keys(),
@@ -189,9 +219,14 @@ def parse_arguments():
help='settings file')
group.add_argument('--test-params', action=_SplitTestParamsAction,
help='csv list of test parameters: key=val; e.g. '
- 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; '
- 'GUEST_LOOPBACK=["l2fwd"] ...')
+ 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+ 'GUEST_LOOPBACK=["l2fwd"] ...'
+ ' or a list of csv lists of test parameters: key=val; e.g. '
+ '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+ '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
group.add_argument('--opnfvpod', help='name of POD in opnfv')
+ group.add_argument('--matrix', help='enable performance matrix analysis',
+ action='store_true', default=False)
args = vars(parser.parse_args())
@@ -359,6 +394,69 @@ def generate_final_report():
_LOGGER.error('Generatrion of overall test report has failed.')
+def generate_performance_matrix(selected_tests, results_path):
+ """
+ Loads the results of all the currently run tests, compares them
+ based on the MATRIX_METRIC, outputs and saves the generated table.
+    :selected_tests: list of currently run tests
+ :results_path: directory path to the results of current tests
+ """
+ _LOGGER.info('Performance Matrix:')
+ test_list = []
+
+ for test in selected_tests:
+ test_name = test.get('Name', '<Name not set>')
+ test_deployment = test.get('Deployment', '<Deployment not set>')
+ test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
+
+ test_params = {}
+ output = []
+ all_params = settings.getValue('_PARAMS_LIST')
+ for i in range(len(selected_tests)):
+ test = test_list[i]
+ if isinstance(all_params, list):
+ list_index = i
+ if i >= len(all_params):
+ list_index = len(all_params) - 1
+ if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+ test_params.update(all_params[list_index])
+ else:
+ test_params = all_params[list_index]
+ else:
+ test_params = all_params
+ settings.setValue('TEST_PARAMS', test_params)
+ test['test_params'] = copy.deepcopy(test_params)
+ try:
+ with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+ test['test_name'], test['test_deployment'])) as csvfile:
+ reader = list(csv.DictReader(csvfile))
+ test['csv_data'] = reader[0]
+ # pylint: disable=broad-except
+ except (Exception) as ex:
+ _LOGGER.error("Result file not found: %s", ex)
+
+ metric = settings.getValue('MATRIX_METRIC')
+ change = {}
+ output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+ "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+ if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+ _LOGGER.error("Incorrect format of test results")
+ return
+ for i, test in enumerate(test_list):
+ if test['csv_data']:
+ change[i] = float(test['csv_data'][metric])/\
+ (float(test_list[0]['csv_data'][metric]) / 100) - 100
+ output.append([i, test['test_name'], float(test['csv_data'][metric]),
+ change[i], str(test['test_params'])[1:-1]])
+ else:
+ change[i] = 0
+ output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+ print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+ with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+ output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+ tablefmt="rst", floatfmt="0.3f")))
+ _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
def enable_sriov(nic_list):
""" Enable SRIOV for given enhanced PCI IDs
@@ -483,9 +581,6 @@ def list_testcases(args):
print(' {:40} {}'.format('', description[i]))
-
-
-
def vsperf_finalize():
""" Clean up before exit
"""
@@ -665,14 +760,13 @@ def main():
if not os.path.exists(results_path):
_LOGGER.info("Creating result directory: " + results_path)
os.makedirs(results_path)
-
+ # pylint: disable=too-many-nested-blocks
if settings.getValue('mode') == 'trafficgen':
# execute only traffic generator
_LOGGER.debug("Executing traffic generator:")
loader = Loader()
# set traffic details, so they can be passed to traffic ctl
traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
-
traffic = functions.check_traffic(traffic)
traffic_ctl = component_factory.create_traffic(
@@ -696,7 +790,11 @@ def main():
if args['exact_test_name']:
exact_names = args['exact_test_name']
# positional args => exact matches only
- selected_tests = [test for test in testcases if test['Name'] in exact_names]
+ selected_tests = []
+ for test_name in exact_names:
+ for test in testcases:
+ if test['Name'] == test_name:
+ selected_tests.append(test)
elif args['tests']:
# --tests => apply filter to select requested tests
selected_tests = apply_filter(testcases, args['tests'])
@@ -715,26 +813,50 @@ def main():
# pylint: disable=redefined-variable-type
suite = unittest.TestSuite()
settings_snapshot = copy.deepcopy(settings.__dict__)
- for cfg in selected_tests:
+
+ for i, cfg in enumerate(selected_tests):
+ settings.setValue('_TEST_INDEX', i)
test_name = cfg.get('Name', '<Name not set>')
try:
+ test_params = settings.getValue('_PARAMS_LIST')
+ if isinstance(test_params, list):
+ list_index = i
+ if i >= len(test_params):
+ list_index = len(test_params) - 1
+ test_params = test_params[list_index]
+ if settings.getValue('CUMULATIVE_PARAMS'):
+ test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+ settings.setValue('TEST_PARAMS', test_params)
+
if args['integration']:
test = IntegrationTestCase(cfg)
else:
test = PerformanceTestCase(cfg)
+
test.run()
suite.addTest(MockTestCase('', True, test.name))
+
# pylint: disable=broad-except
except (Exception) as ex:
_LOGGER.exception("Failed to run test: %s", test_name)
suite.addTest(MockTestCase(str(ex), False, test_name))
_LOGGER.info("Continuing with next test...")
finally:
- settings.restore_from_dict(settings_snapshot)
+ if not settings.getValue('CUMULATIVE_PARAMS'):
+ settings.restore_from_dict(settings_snapshot)
+
+ settings.restore_from_dict(settings_snapshot)
+
+
+    # Generate and print out the performance matrix
+ if args['matrix']:
+ generate_performance_matrix(selected_tests, results_path)
# generate final rst report with results of all executed TCs
generate_final_report()
+
+
if settings.getValue('XUNIT'):
xmlrunner.XMLTestRunner(
output=settings.getValue('XUNIT_DIR'), outsuffix="",
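
The relative change reported by generate_performance_matrix() is computed against the
first (baseline) test. A short sketch using the two throughput values from the
documentation example reproduces the -29.048 % shown there:

    baseline = 23749000.0      # MATRIX_METRIC value of the first (baseline) test
    current = 16850500.0       # MATRIX_METRIC value of the second test

    change = current / (baseline / 100) - 100
    print(round(change, 3))    # -29.048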