path: root/testcases
author     Martin Klozik <martinx.klozik@intel.com>   2016-02-23 09:54:43 +0000
committer  Martin Klozik <martinx.klozik@intel.com>   2016-03-21 14:18:56 +0000
commit     b55c8beb6003f07f025fc0edbc08c3e0fcaed064 (patch)
tree       435359b6ba1d382389dedc0d9bccc6964bcbb606 /testcases
parent     8ee2450bd267c7dc173f62909a8a4ebe13feab84 (diff)
integration: Support of integration testcases
Generic support for integration testcases with the first set of tests for
vswitch testing. A new test option "TestSteps" has been introduced to define
a test step by step directly in the configuration file. If this concept is
accepted, there are plenty of possibilities for future improvements.
For example:

* use it also for performance tests without an explicit call of validation methods
* introduce step macros for repetitive scenarios, so new tests can be easily written
* further generalization, which would go beyond the usage of controllers and operate directly with vswitch, vnf and trafficgen objects

Change-Id: Ifad166c8ef9cfbda6694682fe6b3421e0e97bbf2
JIRA: VSPERF-212
Signed-off-by: Martin Klozik <martinx.klozik@intel.com>
Reviewed-by: Maryam Tahhan <maryam.tahhan@intel.com>
Reviewed-by: Al Morton <acmorton@att.com>
Reviewed-by: Christian Trautman <ctrautma@redhat.com>
Reviewed-by: Brian Castelli <brian.castelli@spirent.com>
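To illustrate the new option, a TestSteps-based testcase could be defined in a configuration file roughly as in the sketch below. This example is not taken from the commit: the variable name, test name, deployment and step parameters are assumptions made for illustration; only the [object, method, parameters...] step layout and the "#STEP[i]" result-reference syntax come from the code added in integration.py.

# Illustrative sketch of a testcase definition using the new TestSteps option.
# Names and parameters below are assumptions for the example only.
INTEGRATION_TESTS = [
    {
        "Name": "vswitch_add_del_bridge",
        "Deployment": "clean",
        "Description": "vSwitch - add and delete bridge",
        "TestSteps": [
            ['vswitch', 'add_switch', 'int_br0'],               # step 0
            ['vswitch', 'add_vport', 'int_br0'],                # step 1, e.g. returns (port_name, port_id)
            ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],  # reuses a value returned by step 1
            ['vswitch', 'del_switch', 'int_br0'],
        ],
    },
]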
Diffstat (limited to 'testcases')
-rw-r--r--  testcases/__init__.py      2
-rw-r--r--  testcases/integration.py   173
-rw-r--r--  testcases/performance.py   38
-rw-r--r--  testcases/testcase.py      106
4 files changed, 276 insertions, 43 deletions
diff --git a/testcases/__init__.py b/testcases/__init__.py
index addf63df..0b6b77e4 100644
--- a/testcases/__init__.py
+++ b/testcases/__init__.py
@@ -15,3 +15,5 @@
"""This module contains test definitions.
"""
from testcases.testcase import (TestCase)
+from testcases.performance import (PerformanceTestCase)
+from testcases.integration import (IntegrationTestCase)
diff --git a/testcases/integration.py b/testcases/integration.py
new file mode 100644
index 00000000..ecaed14f
--- /dev/null
+++ b/testcases/integration.py
@@ -0,0 +1,173 @@
+# Copyright 2015-2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""IntegrationTestCase class
+"""
+
+import os
+import time
+import logging
+
+from testcases import TestCase
+from conf import settings as S
+from collections import OrderedDict
+
+CHECK_PREFIX = 'validate_'
+
+class IntegrationTestCase(TestCase):
+ """IntegrationTestCase class
+ """
+
+ def __init__(self, cfg, results_dir):
+ """ Testcase initialization
+ """
+ self._type = 'integration'
+ super(IntegrationTestCase, self).__init__(cfg, results_dir)
+ self._logger = logging.getLogger(__name__)
+ self._inttest = None
+
+ def report_status(self, label, status):
+ """ Log status of test step
+ """
+ self._logger.debug("%s ... %s", label, 'OK' if status else 'FAILED')
+
+ def run_initialize(self):
+ """ Prepare test execution environment
+ """
+ super(IntegrationTestCase, self).run_initialize()
+ self._inttest = {'status' : True, 'details' : ''}
+
+ def run(self):
+ """Run the test
+
+ All setup and teardown through controllers is included.
+ """
+ def eval_step_params(params, step_result):
+ """ Evaluates referrences to results from previous steps
+ """
+ def eval_param(param, STEP):
+ """ Helper function
+ """
+ if isinstance(param, str):
+ tmp_param = ''
+ # evaluate every #STEP reference inside parameter itself
+ for chunk in param.split('#'):
+ if chunk.startswith('STEP['):
+ tmp_param = tmp_param + str(eval(chunk))
+ else:
+ tmp_param = tmp_param + chunk
+ return tmp_param
+ elif isinstance(param, list) or isinstance(param, tuple):
+ tmp_list = []
+ for item in param:
+ tmp_list.append(eval_param(item, STEP))
+ return tmp_list
+ elif isinstance(param, dict):
+ tmp_dict = {}
+ for (key, value) in param.items():
+ tmp_dict[key] = eval_param(value, STEP)
+ return tmp_dict
+ else:
+ return param
+
+ eval_params = []
+ # evaluate all parameters if needed
+ for param in params:
+ eval_params.append(eval_param(param, step_result))
+ return eval_params
+
+ # prepare test execution environment
+ self.run_initialize()
+
+ with self._vswitch_ctl, self._loadgen:
+ with self._vnf_ctl, self._collector:
+ if not self._vswitch_none:
+ self._add_flows()
+
+ # run traffic generator if requested, otherwise wait for manual termination
+ if S.getValue('mode') == 'trafficgen-off':
+ time.sleep(2)
+ self._logger.debug("All is set. Please run traffic generator manually.")
+ input(os.linesep + "Press Enter to terminate vswitchperf..." + os.linesep + os.linesep)
+ else:
+ with self._traffic_ctl:
+ if not self.test:
+ self._traffic_ctl.send_traffic(self._traffic)
+ else:
+ # execute test based on TestSteps definition
+ if self.test:
+ step_result = [None] * len(self.test)
+ for i, step in enumerate(self.test):
+ step_ok = False
+ if step[0] == 'vswitch':
+ test_object = self._vswitch_ctl.get_vswitch()
+ elif step[0] == 'trafficgen':
+ test_object = self._traffic_ctl
+ else:
+ self._logger.error("Unsupported test object %s", step[0])
+ self._inttest = {'status' : False, 'details' : ' '.join(step)}
+ self.report_status("Step '{}'".format(' '.join(step)), self._inttest['status'])
+ break
+
+ test_method = getattr(test_object, step[1])
+ test_method_check = getattr(test_object, CHECK_PREFIX + step[1])
+
+ step_params = []
+ if test_method and test_method_check and \
+ callable(test_method) and callable(test_method_check):
+
+ try:
+ step_params = eval_step_params(step[2:], step_result)
+ step_log = '{} {}'.format(' '.join(step[:2]), step_params)
+ step_result[i] = test_method(*step_params)
+ self._logger.debug("Step {} '{}' results '{}'".format(
+ i, step_log, step_result[i]))
+ time.sleep(2)
+ step_ok = test_method_check(step_result[i], *step_params)
+ except AssertionError:
+ self._inttest = {'status' : False, 'details' : step_log}
+ self._logger.error("Step {} raised assertion error".format(i))
+ break
+ except IndexError:
+ self._inttest = {'status' : False, 'details' : step_log}
+ self._logger.error("Step {} result index error {}".format(
+ i, ' '.join(step[2:])))
+ break
+
+ self.report_status("Step {} - '{}'".format(i, step_log), step_ok)
+ if not step_ok:
+ self._inttest = {'status' : False, 'details' : step_log}
+ break
+
+ # dump vswitch flows before they are affected by VNF termination
+ if not self._vswitch_none:
+ self._vswitch_ctl.dump_vswitch_flows()
+
+ # tear down test execution environment and log results
+ self.run_finalize()
+
+ # report test results
+ self.run_report()
+
+ def run_report(self):
+ """ Report test results
+ """
+ if self.test:
+ results = OrderedDict()
+ results['status'] = 'OK' if self._inttest['status'] else 'FAILED'
+ results['details'] = self._inttest['details']
+ TestCase._write_result_to_file([results], self._output_file)
+ self.report_status("Test '{}'".format(self.name), self._inttest['status'])
+ # inform vsperf about testcase failure
+ if not self._inttest['status']:
+ raise Exception
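One detail worth noting in integration.py above: for every step method, the code also looks up a companion check via getattr(test_object, CHECK_PREFIX + step[1]), so any object driven by TestSteps must expose a matching "validate_" method that receives the step result followed by the original step parameters. A minimal sketch of that contract follows; the class and method names are assumptions for illustration, not taken from this commit.

class ExampleVSwitch(object):
    """Illustrative object that could be driven from a TestSteps definition."""

    def __init__(self):
        self._switches = set()

    def add_switch(self, switch_name):
        # performs the step; the return value is stored in step_result[i]
        # and can be referenced by later steps via '#STEP[i]'
        self._switches.add(switch_name)
        return switch_name

    def validate_add_switch(self, result, switch_name):
        # called by IntegrationTestCase with the step result followed by the
        # original step parameters; should return True/False, or raise
        # AssertionError to mark the step as failed
        return result == switch_name and switch_name in self._switches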
diff --git a/testcases/performance.py b/testcases/performance.py
new file mode 100644
index 00000000..0ae3ea77
--- /dev/null
+++ b/testcases/performance.py
@@ -0,0 +1,38 @@
+# Copyright 2015-2016 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PerformanceTestCase class
+"""
+
+import logging
+
+from testcases import TestCase
+from tools.report import report
+from conf import settings as S
+
+class PerformanceTestCase(TestCase):
+ """PerformanceTestCase class
+
+ In this basic form runs RFC2544 throughput test
+ """
+ def __init__(self, cfg, results_dir):
+ """ Testcase initialization
+ """
+ self._type = 'performance'
+ super(PerformanceTestCase, self).__init__(cfg, results_dir)
+ self._logger = logging.getLogger(__name__)
+
+ def run_report(self):
+ super(PerformanceTestCase, self).run_report()
+ if S.getValue('mode') != 'trafficgen-off':
+ report.generate(self._output_file, self._tc_results, self._collector.get_results(), self._type)
diff --git a/testcases/testcase.py b/testcases/testcase.py
index dfc766d2..0effce75 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -16,18 +16,17 @@
import csv
import os
+import time
import logging
import subprocess
import copy
-import time
from collections import OrderedDict
-from core.results.results_constants import ResultsConstants
import core.component_factory as component_factory
from core.loader import Loader
+from core.results.results_constants import ResultsConstants
from tools import tasks
from tools import hugepages
-from tools.report import report
from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
from conf import settings as S
from conf import get_test_param
@@ -37,7 +36,7 @@ class TestCase(object):
In this basic form runs RFC2544 throughput test
"""
- def __init__(self, cfg, results_dir, performance_test=True):
+ def __init__(self, cfg, results_dir):
"""Pull out fields from test config
:param cfg: A dictionary of string-value pairs describing the test
@@ -46,11 +45,19 @@ class TestCase(object):
:param results_dir: Where the csv formatted results are written.
"""
self._hugepages_mounted = False
+ self._traffic_ctl = None
+ self._vnf_ctl = None
+ self._vswitch_ctl = None
+ self._collector = None
+ self._loadgen = None
+ self._output_file = None
+ self._tc_results = None
# set test parameters; CLI options take precedence to testcase settings
self._logger = logging.getLogger(__name__)
self.name = cfg['Name']
self.desc = cfg.get('Description', 'No description given.')
+ self.test = cfg.get('TestSteps', None)
bidirectional = cfg.get('biDirectional', TRAFFIC_DEFAULTS['bidir'])
bidirectional = get_test_param('bidirectional', bidirectional)
@@ -63,7 +70,6 @@ class TestCase(object):
self.deployment = cfg['Deployment']
self._frame_mod = cfg.get('Frame Modification', None)
- self._performance_test = performance_test
self._tunnel_type = None
self._tunnel_operation = None
@@ -131,10 +137,8 @@ class TestCase(object):
# Packet Forwarding mode
self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower()
- def run(self):
- """Run the test
-
- All setup and teardown through controllers is included.
+ def run_initialize(self):
+ """ Prepare test execution environment
"""
self._logger.debug(self.name)
@@ -163,35 +167,67 @@ class TestCase(object):
self._logger.debug("Controllers:")
loader = Loader()
- traffic_ctl = component_factory.create_traffic(
+ self._traffic_ctl = component_factory.create_traffic(
self._traffic['traffic_type'],
loader.get_trafficgen_class())
- vnf_ctl = component_factory.create_vnf(
+
+ self._vnf_ctl = component_factory.create_vnf(
self.deployment,
loader.get_vnf_class())
if self._vswitch_none:
- vswitch_ctl = component_factory.create_pktfwd(
+ self._vswitch_ctl = component_factory.create_pktfwd(
loader.get_pktfwd_class())
else:
- vswitch_ctl = component_factory.create_vswitch(
+ self._vswitch_ctl = component_factory.create_vswitch(
self.deployment,
loader.get_vswitch_class(),
self._traffic,
self._tunnel_operation)
- collector = component_factory.create_collector(
+ self._collector = component_factory.create_collector(
loader.get_collector_class(),
self._results_dir, self.name)
- loadgen = component_factory.create_loadgen(
+ self._loadgen = component_factory.create_loadgen(
self._loadgen,
self._load_cfg)
+ self._output_file = os.path.join(self._results_dir, "result_" + self.name +
+ "_" + self.deployment + ".csv")
+
self._logger.debug("Setup:")
- with vswitch_ctl, loadgen:
- with vnf_ctl, collector:
+
+ def run_finalize(self):
+ """ Tear down test execution environment and record test results
+ """
+ # umount hugepages if mounted
+ self._umount_hugepages()
+
+ def run_report(self):
+ """ Report test results
+ """
+ self._logger.debug("self._collector Results:")
+ self._collector.print_results()
+
+ if S.getValue('mode') != 'trafficgen-off':
+ self._logger.debug("Traffic Results:")
+ self._traffic_ctl.print_results()
+
+ self._tc_results = self._append_results(self._traffic_ctl.get_results())
+ TestCase._write_result_to_file(self._tc_results, self._output_file)
+
+ def run(self):
+ """Run the test
+
+ All setup and teardown through controllers is included.
+ """
+ # prepare test execution environment
+ self.run_initialize()
+
+ with self._vswitch_ctl, self._loadgen:
+ with self._vnf_ctl, self._collector:
if not self._vswitch_none:
- self._add_flows(vswitch_ctl)
+ self._add_flows()
# run traffic generator if requested, otherwise wait for manual termination
if S.getValue('mode') == 'trafficgen-off':
@@ -209,30 +245,18 @@ class TestCase(object):
print('Please respond with \'yes\' or \'y\' ', end='')
else:
break
- with traffic_ctl:
- traffic_ctl.send_traffic(self._traffic)
+ with self._traffic_ctl:
+ self._traffic_ctl.send_traffic(self._traffic)
# dump vswitch flows before they are affected by VNF termination
if not self._vswitch_none:
- vswitch_ctl.dump_vswitch_flows()
+ self._vswitch_ctl.dump_vswitch_flows()
- # umount hugepages if mounted
- self._umount_hugepages()
+ # tear down test execution environment and log results
+ self.run_finalize()
- self._logger.debug("Collector Results:")
- collector.print_results()
-
- if S.getValue('mode') != 'trafficgen-off':
- self._logger.debug("Traffic Results:")
- traffic_ctl.print_results()
-
- output_file = os.path.join(self._results_dir, "result_" + self.name +
- "_" + self.deployment + ".csv")
-
- tc_results = self._append_results(traffic_ctl.get_results())
- TestCase._write_result_to_file(tc_results, output_file)
-
- report.generate(output_file, tc_results, collector.get_results(), self._performance_test)
+ # report test results
+ self.run_report()
def _append_results(self, results):
"""
@@ -335,7 +359,6 @@ class TestCase(object):
for result in results:
writer.writerow(result)
-
@staticmethod
def _get_unique_keys(list_of_dicts):
"""Gets unique key values as ordered list of strings in given dicts
@@ -351,13 +374,10 @@ class TestCase(object):
return list(result.keys())
-
- def _add_flows(self, vswitch_ctl):
+ def _add_flows(self):
"""Add flows to the vswitch
-
- :param vswitch_ctl vswitch controller
"""
- vswitch = vswitch_ctl.get_vswitch()
+ vswitch = self._vswitch_ctl.get_vswitch()
# TODO BOM 15-08-07 the frame mod code assumes that the
# physical ports are ports 1 & 2. The actual numbers
# need to be retrieved from the vSwitch and the metadata value