diff options
author | Martin Klozik <martinx.klozik@intel.com> | 2016-02-23 09:54:43 +0000 |
---|---|---|
committer | Martin Klozik <martinx.klozik@intel.com> | 2016-03-21 14:18:56 +0000 |
commit | b55c8beb6003f07f025fc0edbc08c3e0fcaed064 (patch) | |
tree | 435359b6ba1d382389dedc0d9bccc6964bcbb606 /tools/report/report.py | |
parent | 8ee2450bd267c7dc173f62909a8a4ebe13feab84 (diff) |
integration: Support of integration testcases
Generic support for integration testcases with a first
set of tests for vswitch testing.
A new test option "TestSteps" has been introduced
to define a test step by step directly in the
configuration file.
If this concept is accepted, there
are plenty of possibilities for future improvements.
For example:
* use it also for performance tests without explicit
call of validation methods
* introduce step macros for repetitive scenarios,
so new tests can be easily written
* further generalization, which would go beyond
usage of controllers and will operate directly
with vswitch, vnf and trafficgen objects
Change-Id: Ifad166c8ef9cfbda6694682fe6b3421e0e97bbf2
JIRA: VSPERF-212
Signed-off-by: Martin Klozik <martinx.klozik@intel.com>
Reviewed-by: Maryam Tahhan <maryam.tahhan@intel.com>
Reviewed-by: Al Morton <acmorton@att.com>
Reviewed-by: Christian Trautman <ctrautma@redhat.com>
Reviewed-by: Brian Castelli <brian.castelli@spirent.com>
Diffstat (limited to 'tools/report/report.py')
-rw-r--r-- | tools/report/report.py | 18 |
1 file changed, 14 insertions, 4 deletions
diff --git a/tools/report/report.py b/tools/report/report.py index 92463f26..7d991011 100644 --- a/tools/report/report.py +++ b/tools/report/report.py @@ -70,13 +70,21 @@ def _get_env(result): return env -def generate(input_file, tc_results, tc_stats, performance_test=True): +def generate(input_file, tc_results, tc_stats, test_type='performance'): """Generate actual report. - Generate a Markdown-formatted file using results of tests and some + Generate a Markdown and RST formatted files using results of tests and some parsed system info. :param input_file: Path to CSV results file + :param tc_results: A list of dictionaries with detailed test results. + Each dictionary represents test results for one of specified packet + sizes. + :param tc_stats: System statistics collected during testcase execution. + These statistics are overall statistics for all specified packet + sizes. + :test_type: Specifies type of the testcase. Supported values are + 'performance' and 'integration'. :returns: Path to generated report """ @@ -89,16 +97,18 @@ def generate(input_file, tc_results, tc_stats, performance_test=True): try: for result in tc_results: test_config = {} - if performance_test: + if test_type == 'performance': for tc_conf in S.getValue('PERFORMANCE_TESTS'): if tc_conf['Name'] == result[ResultsConstants.ID]: test_config = tc_conf break - else: + elif test_type == 'integration': for tc_conf in S.getValue('INTEGRATION_TESTS'): if tc_conf['Name'] == result[ResultsConstants.ID]: test_config = tc_conf break + else: + logging.error("Unsupported test type '%s'. Test details are not known.", test_type) # pass test results, env details and configuration to template tests.append({ |