Diffstat (limited to 'vsperf')
-rwxr-xr-x  vsperf  308
1 file changed, 249 insertions(+), 59 deletions(-)
diff --git a/vsperf b/vsperf
index 5589ac39..773ad759 100755
--- a/vsperf
+++ b/vsperf
@@ -23,6 +23,7 @@ import sys
import argparse
import re
import time
+import csv
import datetime
import shutil
import unittest
@@ -32,17 +33,20 @@ import glob
import subprocess
import ast
import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
from conf import settings
import core.component_factory as component_factory
from core.loader import Loader
from testcases import PerformanceTestCase
from testcases import IntegrationTestCase
+from testcases import K8sPerformanceTestCase
from tools import tasks
from tools import networkcard
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
-
+from tools.os_deploy_tgen import osdt
sys.dont_write_bytecode = True
VERBOSITY_LEVELS = {
@@ -61,8 +65,43 @@ _TEMPLATE_RST = {'head' : os.path.join(_CURR_DIR, 'tools/report/report_head.rst
'tmp' : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
}
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+ "The following performance matrix was generated with the results of all the\n"\
+ "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
_LOGGER = logging.getLogger()
+logging.getLogger('matplotlib').setLevel(logging.ERROR)
+
+def parse_param_string(values):
+ """
+ Parse and split a single '--test-params' argument.
+
+ This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+    values. For multiple overrides use a ';' separated list,
+ e.g. --test-params 'x=z; y=(a,b)'
+ """
+ results = {}
+
+ if values == '':
+ return {}
+
+ for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+ param = param.strip()
+ value = value.strip()
+ if param:
+ if value:
+                # values are passed as strings from the CLI, so we must retype them accordingly
+ try:
+ results[param] = ast.literal_eval(value)
+ except ValueError:
+ # for backward compatibility, we have to accept strings without quotes
+ _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+ param, str(value))
+ results[param] = str(value)
+ else:
+ results[param] = True
+ return results
+
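
For illustration, here is roughly what parse_param_string() yields for typical inputs; the key names are hypothetical and the values shown are assumed, not taken from the patch:

# Illustrative sketch of parse_param_string() behaviour (inputs assumed):
parse_param_string('TRAFFICGEN_DURATION=30; GUEST_LOOPBACK=["l2fwd"]')
# -> {'TRAFFICGEN_DURATION': 30, 'GUEST_LOOPBACK': ['l2fwd']}
parse_param_string('TRAFFICGEN_PKT_SIZES=(64,128)')
# -> {'TRAFFICGEN_PKT_SIZES': (64, 128)}
parse_param_string('dry_run')    # bare key, no '=' -> implicit True
# -> {'dry_run': True}
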
def parse_arguments():
"""
@@ -70,31 +109,24 @@ def parse_arguments():
"""
class _SplitTestParamsAction(argparse.Action):
"""
- Parse and split the '--test-params' argument.
+ Parse and split '--test-params' arguments.
- This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
- values. For multiple overrides use a ; separated list for
+        This expects either a single ';' separated list of overrides
+        as 'x=y', 'x=y,z' or 'x' (implicit true) values.
e.g. --test-params 'x=z; y=(a,b)'
+        Or a list of these ';' separated lists with overrides for
+ multiple tests.
+ e.g. --test-params "['x=z; y=(a,b)','x=z']"
"""
def __call__(self, parser, namespace, values, option_string=None):
- results = {}
-
- for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
- param = param.strip()
- value = value.strip()
- if len(param):
- if len(value):
- # values are passed inside string from CLI, so we must retype them accordingly
- try:
- results[param] = ast.literal_eval(value)
- except ValueError:
- # for backward compatibility, we have to accept strings without quotes
- _LOGGER.warning("Adding missing quotes around string value: %s = %s",
- param, str(value))
- results[param] = str(value)
- else:
- results[param] = True
-
+ if values[0] == '[':
+ input_list = ast.literal_eval(values)
+ parameter_list = []
+ for test_params in input_list:
+ parameter_list.append(parse_param_string(test_params))
+ else:
+ parameter_list = parse_param_string(values)
+ results = {'_PARAMS_LIST':parameter_list}
setattr(namespace, self.dest, results)
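
A sketch of the two accepted CLI forms and the value the action stores under '_PARAMS_LIST'; the invocations and override keys are hypothetical:

# Single ';' separated list -> a dict of overrides:
#   --test-params 'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)'
#   => {'_PARAMS_LIST': {'TRAFFICGEN_DURATION': 10,
#                        'TRAFFICGEN_PKT_SIZES': (64,)}}
# Python-style list of such strings -> one dict per test:
#   --test-params "['TRAFFICGEN_DURATION=10', 'TRAFFICGEN_DURATION=20']"
#   => {'_PARAMS_LIST': [{'TRAFFICGEN_DURATION': 10},
#                        {'TRAFFICGEN_DURATION': 20}]}
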
class _ValidateFileAction(argparse.Action):
@@ -126,7 +158,7 @@ def parse_arguments():
def list_logging_levels():
"""Give a summary of all available logging levels.
- :return: List of verbosity level names in decreasing order of
+ :return: List of verbosity level names in decreasing order of
verbosity
"""
return sorted(VERBOSITY_LEVELS.keys(),
@@ -149,6 +181,8 @@ def parse_arguments():
help='list all system vnfs and exit')
parser.add_argument('--list-loadgens', action='store_true',
help='list all background load generators')
+ parser.add_argument('--list-pods', action='store_true',
+ help='list all system pods')
parser.add_argument('--list-settings', action='store_true',
help='list effective settings configuration and exit')
parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
@@ -172,6 +206,8 @@ def parse_arguments():
group.add_argument('--verbosity', choices=list_logging_levels(),
help='debug level')
group.add_argument('--integration', action='store_true', help='execute integration tests')
+ group.add_argument('--k8s', action='store_true', help='execute Kubernetes tests')
+    group.add_argument('--openstack', action='store_true', help='run VSPERF with OpenStack')
group.add_argument('--trafficgen', help='traffic generator to use')
group.add_argument('--vswitch', help='vswitch implementation to use')
group.add_argument('--fwdapp', help='packet forwarding application to use')
@@ -189,9 +225,14 @@ def parse_arguments():
help='settings file')
group.add_argument('--test-params', action=_SplitTestParamsAction,
help='csv list of test parameters: key=val; e.g. '
- 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; '
- 'GUEST_LOOPBACK=["l2fwd"] ...')
+ 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+ 'GUEST_LOOPBACK=["l2fwd"] ...'
+ ' or a list of csv lists of test parameters: key=val; e.g. '
+ '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+ '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
group.add_argument('--opnfvpod', help='name of POD in opnfv')
+ group.add_argument('--matrix', help='enable performance matrix analysis',
+ action='store_true', default=False)
args = vars(parser.parse_args())
@@ -201,13 +242,31 @@ def parse_arguments():
def configure_logging(level):
"""Configure logging.
"""
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
+ rename_default = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_default = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_DEFAULT'))
+ settings.getValue('RESULTS_PATH'), rename_default)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
+ rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_host_cmds = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
+ settings.getValue('RESULTS_PATH'), rename_hostcmd)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ rename_traffic = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_traffic_gen = os.path.join(
- settings.getValue('LOG_DIR'),
- settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ settings.getValue('RESULTS_PATH'), rename_traffic)
+ metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
+ settings.getValue('LOG_TIMESTAMP') + '.log')
+ log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
+ metrics_file)
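
The renaming scheme above splits each configured log-file name and injects the shared LOG_TIMESTAMP; a minimal sketch, with the file name and timestamp assumed:

import os

# Minimal sketch of the timestamped naming (values assumed):
name, ext = os.path.splitext('vsperf-overall.log')
renamed = "{name}_{uid}{ex}".format(name=name, uid='2020-01-01_12-00-00', ex=ext)
# renamed == 'vsperf-overall_2020-01-01_12-00-00.log'
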
_LOGGER.setLevel(logging.DEBUG)
@@ -219,6 +278,8 @@ def configure_logging(level):
file_logger = logging.FileHandler(filename=log_file_default)
file_logger.setLevel(logging.DEBUG)
+ file_logger.setFormatter(logging.Formatter(
+ '%(asctime)s : %(message)s'))
_LOGGER.addHandler(file_logger)
class CommandFilter(logging.Filter):
@@ -231,6 +292,11 @@ def configure_logging(level):
def filter(self, record):
return record.getMessage().startswith(trafficgen.CMD_PREFIX)
+ class CollectdMetricsFilter(logging.Filter):
+        """Filter out strings beginning with 'COLLECTD'"""
+ def filter(self, record):
+ return record.getMessage().startswith('COLLECTD')
+
cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
cmd_logger.setLevel(logging.DEBUG)
cmd_logger.addFilter(CommandFilter())
@@ -241,6 +307,12 @@ def configure_logging(level):
gen_logger.addFilter(TrafficGenCommandFilter())
_LOGGER.addHandler(gen_logger)
+ if settings.getValue('COLLECTOR') == 'Collectd':
+ met_logger = logging.FileHandler(filename=log_file_infra_metrics)
+ met_logger.setLevel(logging.DEBUG)
+ met_logger.addFilter(CollectdMetricsFilter())
+ _LOGGER.addHandler(met_logger)
+
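
For context, the per-handler filter pattern used above routes only matching records into a dedicated file. A self-contained sketch, with made-up logger and file names:

import logging

logger = logging.getLogger('demo')

class CollectdOnly(logging.Filter):
    """Pass through only records whose message starts with 'COLLECTD'."""
    def filter(self, record):
        return record.getMessage().startswith('COLLECTD')

handler = logging.FileHandler(filename='collectd_demo.log')
handler.setLevel(logging.DEBUG)
handler.addFilter(CollectdOnly())
logger.addHandler(handler)

logger.warning('COLLECTD cpu/0 value=42')   # lands in collectd_demo.log
logger.warning('unrelated message')         # dropped by the filter
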
def apply_filter(tests, tc_filter):
"""Allow a subset of tests to be conveniently selected
@@ -292,7 +364,7 @@ def get_vswitch_names(rst_files):
""" Function will return a list of vSwitches detected in given ``rst_files``.
"""
vswitch_names = set()
- if len(rst_files):
+ if rst_files:
try:
output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
for line in output:
@@ -300,7 +372,7 @@ def get_vswitch_names(rst_files):
if match:
vswitch_names.add(match.group(1))
- if len(vswitch_names):
+ if vswitch_names:
return list(vswitch_names)
except subprocess.CalledProcessError:
@@ -331,7 +403,7 @@ def generate_final_report():
# check if there are any results in rst format
rst_results = glob.glob(os.path.join(path, 'result*rst'))
pkt_processors = get_vswitch_names(rst_results)
- if len(rst_results):
+ if rst_results:
try:
test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
# create report caption directly - it is not worth to execute jinja machinery
@@ -359,6 +431,69 @@ def generate_final_report():
_LOGGER.error('Generation of overall test report has failed.')
+def generate_performance_matrix(selected_tests, results_path):
+ """
+ Loads the results of all the currently run tests, compares them
+ based on the MATRIX_METRIC, outputs and saves the generated table.
+    :selected_tests: list of currently run tests
+ :results_path: directory path to the results of current tests
+ """
+ _LOGGER.info('Performance Matrix:')
+ test_list = []
+
+ for test in selected_tests:
+ test_name = test.get('Name', '<Name not set>')
+ test_deployment = test.get('Deployment', '<Deployment not set>')
+ test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
+
+ test_params = {}
+ output = []
+ all_params = settings.getValue('_PARAMS_LIST')
+ for i in range(len(selected_tests)):
+ test = test_list[i]
+ if isinstance(all_params, list):
+ list_index = i
+ if i >= len(all_params):
+ list_index = len(all_params) - 1
+ if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+ test_params.update(all_params[list_index])
+ else:
+ test_params = all_params[list_index]
+ else:
+ test_params = all_params
+ settings.setValue('TEST_PARAMS', test_params)
+ test['test_params'] = copy.deepcopy(test_params)
+ try:
+ with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+ test['test_name'], test['test_deployment'])) as csvfile:
+ reader = list(csv.DictReader(csvfile))
+ test['csv_data'] = reader[0]
+ # pylint: disable=broad-except
+ except (Exception) as ex:
+ _LOGGER.error("Result file not found: %s", ex)
+
+ metric = settings.getValue('MATRIX_METRIC')
+ change = {}
+ output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+ "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+ if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+ _LOGGER.error("Incorrect format of test results")
+ return
+ for i, test in enumerate(test_list):
+ if test['csv_data']:
+ change[i] = float(test['csv_data'][metric])/\
+ (float(test_list[0]['csv_data'][metric]) / 100) - 100
+ output.append([i, test['test_name'], float(test['csv_data'][metric]),
+ change[i], str(test['test_params'])[1:-1]])
+ else:
+ change[i] = 0
+ output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+ print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+ with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+ output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+ tablefmt="rst", floatfmt="0.3f")))
+ _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
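
A worked example of the "Change [%]" arithmetic and the tabulate call used above, with made-up metric values; each test is compared against the first (baseline) test:

from tabulate import tabulate

baseline = 2.5    # metric of test 0, e.g. a throughput figure
current = 3.0     # metric of test 1
change = current / (baseline / 100) - 100
# change == 20.0 -> test 1 is 20% above the baseline

header = ("ID", "Name", "metric", "Change [%]")
rows = [[0, 'test_a', baseline, 0.0], [1, 'test_b', current, change]]
print(tabulate(rows, headers=header, tablefmt="grid", floatfmt="0.3f"))
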
def enable_sriov(nic_list):
""" Enable SRIOV for given enhanced PCI IDs
@@ -376,7 +511,7 @@ def enable_sriov(nic_list):
sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
# sriov is required for some NICs
- if len(sriov_nic):
+ if sriov_nic:
for nic in sriov_nic:
# check if SRIOV is supported and enough virt interfaces are available
if not networkcard.is_sriov_supported(nic) \
@@ -449,6 +584,10 @@ def handle_list_options(args):
print(Loader().get_loadgens_printable())
sys.exit(0)
+ if args['list_pods']:
+ print(Loader().get_pods_printable())
+ sys.exit(0)
+
if args['list_settings']:
print(str(settings))
sys.exit(0)
@@ -466,6 +605,8 @@ def list_testcases(args):
# configure tests
if args['integration']:
testcases = settings.getValue('INTEGRATION_TESTS')
+ elif args['k8s']:
+ testcases = settings.getValue('K8SPERFORMANCE_TESTS')
else:
testcases = settings.getValue('PERFORMANCE_TESTS')
@@ -483,9 +624,6 @@ def list_testcases(args):
print(' {:40} {}'.format('', description[i]))
-
-
-
def vsperf_finalize():
""" Clean up before exit
"""
@@ -495,7 +633,7 @@ def vsperf_finalize():
if os.path.exists(results_path):
files_list = os.listdir(results_path)
if files_list == []:
- _LOGGER.info("Removing empty result directory: " + results_path)
+ _LOGGER.info("Removing empty result directory: %s", results_path)
shutil.rmtree(results_path)
except AttributeError:
# skip it if parameter doesn't exist
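
This hunk is one instance of a pattern applied throughout the patch: string concatenation in logging calls is replaced with lazy %-style arguments, so interpolation only happens when the record is actually emitted. A before/after sketch:

# before: the message string is built even if INFO is disabled
_LOGGER.info("Removing empty result directory: " + results_path)
# after: interpolation is deferred to the logging framework
_LOGGER.info("Removing empty result directory: %s", results_path)
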
@@ -547,9 +685,26 @@ def main():
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
- # Load non performance/integration tests
+ # define the timestamp to be used by logs and results
+ date = datetime.datetime.fromtimestamp(time.time())
+ timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+ settings.setValue('LOG_TIMESTAMP', timestamp)
+
+ # generate results directory name
+    # integration tests use the vswitchd log in test step assertions; ensure
+    # the correct value is set before loading the integration test configuration
+ results_dir = "results_" + timestamp
+ results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+ settings.setValue('RESULTS_PATH', results_path)
+ # create results directory
+ if not os.path.exists(results_path):
+ os.makedirs(results_path)
+
+ # load non performance/integration tests
if args['integration']:
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
+ if args['k8s']:
+ settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/kubernetes'))
# load command line parameters first in case there are settings files
# to be used
@@ -567,6 +722,19 @@ def main():
settings.setValue('mode', args['mode'])
+ if args['k8s']:
+ settings.setValue('K8S', True)
+ else:
+ settings.setValue('K8S', False)
+
+ if args['openstack']:
+ result = osdt.deploy_testvnf()
+ if result:
+            _LOGGER.info('TestVNF successfully deployed on OpenStack')
+ settings.setValue('mode', 'trafficgen')
+ else:
+            _LOGGER.error('Failed to deploy TestVNF on OpenStack')
+ sys.exit(1)
# update paths to trafficgens if required
if settings.getValue('mode') == 'trafficgen':
functions.settings_update_paths()
@@ -576,6 +744,9 @@ def main():
configure_logging(settings.getValue('VERBOSITY'))
+ # CI build support
+ _LOGGER.info("Creating result directory: %s", results_path)
+
# check and fix locale
check_and_set_locale()
@@ -655,24 +826,14 @@ def main():
# for backward compatibility
settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
- # generate results directory name
- date = datetime.datetime.fromtimestamp(time.time())
- results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
- results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
- settings.setValue('RESULTS_PATH', results_path)
-
- # create results directory
- if not os.path.exists(results_path):
- _LOGGER.info("Creating result directory: " + results_path)
- os.makedirs(results_path)
+ # pylint: disable=too-many-nested-blocks
if settings.getValue('mode') == 'trafficgen':
# execute only traffic generator
_LOGGER.debug("Executing traffic generator:")
loader = Loader()
# set traffic details, so they can be passed to traffic ctl
traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
-
traffic = functions.check_traffic(traffic)
traffic_ctl = component_factory.create_traffic(
@@ -690,13 +851,19 @@ def main():
# configure tests
if args['integration']:
testcases = settings.getValue('INTEGRATION_TESTS')
+ elif args['k8s']:
+ testcases = settings.getValue('K8SPERFORMANCE_TESTS')
else:
testcases = settings.getValue('PERFORMANCE_TESTS')
if args['exact_test_name']:
exact_names = args['exact_test_name']
# positional args => exact matches only
- selected_tests = [test for test in testcases if test['Name'] in exact_names]
+ selected_tests = []
+ for test_name in exact_names:
+ for test in testcases:
+ if test['Name'] == test_name:
+ selected_tests.append(test)
elif args['tests']:
# --tests => apply filter to select requested tests
selected_tests = apply_filter(testcases, args['tests'])
@@ -704,44 +871,67 @@ def main():
# Default - run all tests
selected_tests = testcases
- if not len(selected_tests):
+ if not selected_tests:
_LOGGER.error("No tests matched --tests option or positional args. Done.")
vsperf_finalize()
sys.exit(1)
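
The rewritten exact-name selection above iterates over the positional arguments rather than the testcase list, so tests run in the order given on the command line and a name may be repeated. A sketch with hypothetical test names:

testcases = [{'Name': 'tc1'}, {'Name': 'tc2'}]
exact_names = ['tc2', 'tc1', 'tc2']

selected_tests = []
for test_name in exact_names:
    for test in testcases:
        if test['Name'] == test_name:
            selected_tests.append(test)
# -> tc2, tc1, tc2 : CLI order preserved, duplicates allowed
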
- # run tests
- # Add pylint exception: Redefinition of test type from
- # testcases.integration.IntegrationTestCase to testcases.performance.PerformanceTestCase
- # pylint: disable=redefined-variable-type
suite = unittest.TestSuite()
settings_snapshot = copy.deepcopy(settings.__dict__)
- for cfg in selected_tests:
+
+ for i, cfg in enumerate(selected_tests):
+ settings.setValue('_TEST_INDEX', i)
test_name = cfg.get('Name', '<Name not set>')
try:
+ test_params = settings.getValue('_PARAMS_LIST')
+ if isinstance(test_params, list):
+ list_index = i
+ if i >= len(test_params):
+ list_index = len(test_params) - 1
+ test_params = test_params[list_index]
+ if settings.getValue('CUMULATIVE_PARAMS'):
+ test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+ settings.setValue('TEST_PARAMS', test_params)
+
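
When '_PARAMS_LIST' holds fewer parameter sets than there are selected tests, the index clamping above reuses the last set for the remaining tests. A small sketch with hypothetical overrides (min() expresses the same clamp as the if-statement in the patch):

params_list = [{'TRAFFICGEN_DURATION': 10}, {'TRAFFICGEN_DURATION': 20}]
for i in range(4):                    # four selected tests, two sets
    list_index = min(i, len(params_list) - 1)
    print(i, params_list[list_index])
# 0 {'TRAFFICGEN_DURATION': 10}
# 1 {'TRAFFICGEN_DURATION': 20}
# 2 {'TRAFFICGEN_DURATION': 20}
# 3 {'TRAFFICGEN_DURATION': 20}
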
if args['integration']:
test = IntegrationTestCase(cfg)
+ elif args['k8s']:
+ test = K8sPerformanceTestCase(cfg)
else:
test = PerformanceTestCase(cfg)
+
test.run()
suite.addTest(MockTestCase('', True, test.name))
+
# pylint: disable=broad-except
except (Exception) as ex:
_LOGGER.exception("Failed to run test: %s", test_name)
suite.addTest(MockTestCase(str(ex), False, test_name))
_LOGGER.info("Continuing with next test...")
finally:
- settings.restore_from_dict(settings_snapshot)
+ if not settings.getValue('CUMULATIVE_PARAMS'):
+ settings.restore_from_dict(settings_snapshot)
+
+ settings.restore_from_dict(settings_snapshot)
+
+
+ # Generate and printout Performance Matrix
+ if args['matrix']:
+ generate_performance_matrix(selected_tests, results_path)
# generate final rst report with results of all executed TCs
generate_final_report()
+
+
if settings.getValue('XUNIT'):
xmlrunner.XMLTestRunner(
output=settings.getValue('XUNIT_DIR'), outsuffix="",
verbosity=0).run(suite)
- if args['opnfvpod']:
- pod_name = args['opnfvpod']
+ if args['opnfvpod'] or settings.getValue('OPNFVPOD'):
+ pod_name = (args['opnfvpod'] if args['opnfvpod'] else
+ settings.getValue('OPNFVPOD'))
installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
opnfv_url = settings.getValue('OPNFV_URL')
pkg_list = settings.getValue('PACKAGE_LIST')