-rw-r--r--  conf/00_common.conf                                   |  54
-rwxr-xr-x  conf/01_testcases.conf                                |   7
-rw-r--r--  conf/02_vswitch.conf                                  | 114
-rw-r--r--  conf/03_traffic.conf                                  |   6
-rw-r--r--  conf/04_vnf.conf                                      |  20
-rw-r--r--  core/component_factory.py                             |   6
-rw-r--r--  core/results/results_constants.py                     |   5
-rw-r--r--  core/traffic_controller_rfc2889.py                    | 146
-rw-r--r--  docs/configguide/trafficgen.rst                       |  36
-rwxr-xr-x  docs/design/vswitchperf_design.rst                    | 158
-rwxr-xr-x  docs/userguide/integration.rst                        |  44
-rwxr-xr-x  docs/userguide/testusage.rst                          |  13
-rw-r--r--  src/dpdk/dpdk.py                                      |  51
-rw-r--r--  src/dpdk/testpmd_proc.py                              |   6
-rw-r--r--  src/ovs/dpctl.py                                      |   5
-rw-r--r--  src/ovs/ofctl.py                                      |  22
-rw-r--r--  src/package-list.mk                                   |   6
-rw-r--r--  testcases/integration.py                              |  18
-rw-r--r--  testcases/testcase.py                                 |  45
-rw-r--r--  tools/functions.py                                    | 124
-rw-r--r--  tools/module_manager.py                               |  50
-rw-r--r--  tools/networkcard.py                                  |   2
-rw-r--r--  tools/pkt_gen/moongen/moongen.py                      | 118
-rw-r--r--  tools/pkt_gen/testcenter/testcenter-rfc2889-rest.py   | 304
-rw-r--r--  tools/pkt_gen/testcenter/testcenter.py                |  86
-rw-r--r--  tools/report/report.py                                |   5
-rw-r--r--  tools/systeminfo.py                                   | 104
-rw-r--r--  vnfs/qemu/qemu.py                                     |   2
-rw-r--r--  vnfs/qemu/qemu_dpdk_vhost_user.py                     |  11
-rw-r--r--  vnfs/qemu/qemu_pci_passthrough.py                     |  16
-rwxr-xr-x  vsperf                                                |   2
-rw-r--r--  vswitches/ovs.py                                      |  81
-rw-r--r--  vswitches/ovs_dpdk_vhost.py                           |   3
-rw-r--r--  vswitches/ovs_vanilla.py                              |   9
34 files changed, 1346 insertions, 333 deletions
diff --git a/conf/00_common.conf b/conf/00_common.conf
index fe4e1f5d..7f30deb2 100644
--- a/conf/00_common.conf
+++ b/conf/00_common.conf
@@ -17,6 +17,7 @@
# ###########################
import os
+import copy
# default language and encoding, which will be set in case
# that locale is not set properly
@@ -36,14 +37,51 @@ ROOT_DIR = os.path.normpath(os.path.join(
TRAFFICGEN_DIR = os.path.join(ROOT_DIR, 'tools/pkt_gen')
SYSMETRICS_DIR = os.path.join(ROOT_DIR, 'tools/collectors')
-# deployment specific paths to OVS and DPDK
-OVS_DIR_VANILLA = os.path.join(ROOT_DIR, 'src_vanilla/ovs/ovs/')
-
-RTE_SDK_USER = os.path.join(ROOT_DIR, 'src/dpdk/dpdk/')
-OVS_DIR_USER = os.path.join(ROOT_DIR, 'src/ovs/ovs/')
-
-# the same qemu version is used for vanilla and vHost User
-QEMU_DIR = os.path.join(ROOT_DIR, 'src/qemu/qemu/')
+# Dictionary PATHS is used for configuration of vswitches, dpdk and qemu.
+# It contains paths to various utilities, temporary directories and kernel
+# modules used by VSPERF. Particular sections of PATHS dictionary are spread
+# among several configuration files, i.e.:
+# conf/02_vswitch.conf - configuration of vswitches (i.e. PATHS['vswitch'])
+# and dpdk (i.e. PATHS['dpdk']) can be found there
+# conf/04_vnf.conf - configuration of qemu (i.e. PATHS['qemu']) can
+# be found there
+#
+# VSPERF will process PATHS dictionary before execution of every testcase
+# and it will create a testcase specific dictionary TOOLS with paths to the
+# utilities used by the test. During PATHS processing, following rules
+# will apply for every item of PATHS dictionary:
+# item 'type' - string, which defines the type of paths ('src' or 'bin') to be selected
+# for a given section:
+# 'src' means, that VSPERF will use OVS, DPDK or QEMU built from sources
+# e.g. by execution of systems/build_base_machine.sh script during VSPERF
+# installation
+# 'bin' means, that VSPERF will use OVS, DPDK or QEMU binaries installed
+# in the OS, e.g. via OS specific packaging system
+# item 'path' - string with valid path; Its content is checked for existence, prefixed
+# with section name and stored into TOOLS for later use
+# e.g. TOOLS['dpdk_src'] or TOOLS['vswitch_src']
+# item 'modules' - list of strings; Every value from given list is checked for '.ko'
+# suffix. In case it matches and it is not an absolute path to the module, then
+# module name is prefixed with 'path' defined for the same section
+# e.g. TOOLS['vswitch_modules'] = [
+# '/tmp/vsperf/src_vanilla/ovs/ovs/datapath/linux/openvswitch.ko']
+# all other items - string - if given string is a relative path and item 'path'
+# is defined for a given section, then item content will be prefixed with
+# content of the 'path'. Otherwise tool name will be searched within
+# standard system directories. Also any OS filename wildcards will be
+# expanded to the real path. At the end of processing, every absolute
+# path is checked for its existence. In case that temporary path (i.e. path
+# with '_tmp' suffix) doesn't exist, then log will be written and vsperf will
+# continue. If any other path will not exist, then vsperf execution will
+# be terminated with runtime error.
+#
+# Note: In case that 'bin' type is set for DPDK, then TOOLS['dpdk_src'] will be set to
+# the value of PATHS['dpdk']['src']['path']. The reason is, that VSPERF uses downloaded
+# DPDK sources to copy DPDK and testpmd into the GUEST, where testpmd is built. In case,
+# that DPDK sources are not available, then vsperf will continue with test execution,
+# but testpmd can't be used as a guest loopback. This is useful in case, that other guest
+# loopback applications (e.g. buildin) are used by CI jobs, etc.
+PATHS = {}
# ############################
# Process configuration
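A minimal sketch of how the new PATHS dictionary can be overridden from a custom
configuration file passed via --conf-file. The keys below come from this patch; the
choice of which sections to switch to 'bin' is only an example:

    # use OVS and DPDK binaries installed via the OS packaging system,
    # while QEMU keeps the default 'src' type built by build_base_machine.sh
    PATHS['vswitch']['OvsDpdkVhost']['type'] = 'bin'
    PATHS['dpdk']['type'] = 'bin'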
diff --git a/conf/01_testcases.conf b/conf/01_testcases.conf
index b9c59a11..55cce1cf 100755
--- a/conf/01_testcases.conf
+++ b/conf/01_testcases.conf
@@ -132,6 +132,13 @@ PERFORMANCE_TESTS = [
"Description": "LTD.Throughput.RFC2544.PacketLossRatio",
},
{
+ "Name": "phy2phy_forwarding",
+ "Traffic Type": "rfc2889",
+ "Deployment": "p2p",
+ "biDirectional": "True",
+ "Description": "LTD.Forwarding.RFC2889.MaxForwardingRate",
+ },
+ {
"Name": "back2back",
"Traffic Type": "back2back",
"Deployment": "p2p",
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index abca63bb..f9b8f957 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -12,17 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# ############################
-# Directories
-# ############################
-# use DPDK VHOST USER by default
-RTE_SDK = RTE_SDK_USER
-OVS_DIR = OVS_DIR_USER
-
-OVS_VAR_DIR = '/usr/local/var/run/openvswitch/'
-OVS_ETC_DIR = '/usr/local/etc/openvswitch/'
-
-VSWITCH_DIR = os.path.join(ROOT_DIR, 'vswitches')
+# OVS Command timeout for execution of commands.
+OVS_CMD_TIMEOUT = 10
# ############################
# DPDK configuration
@@ -38,29 +29,83 @@ RTE_TARGET = 'x86_64-native-linuxapp-gcc'
# will be used for testing
WHITELIST_NICS = ['0000:05:00.0', '0000:05:00.1']
-# for DPDK_MODULES the path is in reference to the build directory
-# To use vfio set
-# DPDK_MODULES = [
-# ('vfio-pci'),
-# ]
-DPDK_MODULES = [
- ('kmod', 'igb_uio'),
-]
+# vhost character device file used by dpdkvhostport QemuWrap cases
+VHOST_DEV_FILE = 'ovs-vhost-net'
-VHOST_MODULE = [
- ('eventfd_link', 'eventfd_link')
-]
+# location of vhost-user sockets relative to 'ovs_var_tmp'
+VHOST_USER_SOCKS = 'dpdkvhostuser*'
+
+# please see conf/00_common.conf for description of PATHS dictionary
+PATHS['dpdk'] = {
+ 'type' : 'src',
+ 'src': {
+ 'path': os.path.join(ROOT_DIR, 'src/dpdk/dpdk/'),
+ # To use vfio set:
+ # 'modules' : ['uio', 'vfio-pci'],
+ 'modules' : ['uio', os.path.join(RTE_TARGET, 'kmod/igb_uio.ko')],
+ 'bind-tool': 'tools/dpdk*bind.py',
+ 'testpmd': os.path.join(RTE_TARGET, 'app', 'testpmd'),
+ },
+ 'bin': {
+ 'bind-tool': '/usr/share/dpdk/tools/dpdk*bind.py',
+ 'modules' : ['uio', 'igb_uio'],
+ 'testpmd' : 'testpmd'
+ }
+ }
-# list of modules that will be inserted using 'modprobe' on system init
-# To use vfio set
-# SYS_MODULES = ['cuse']
-SYS_MODULES = ['uio', 'cuse']
+# ############################
+# Directories
+# ############################
+VSWITCH_DIR = os.path.join(ROOT_DIR, 'vswitches')
-# vhost character device file used by dpdkvhostport QemuWrap cases
-VHOST_DEV_FILE = 'ovs-vhost-net'
+# please see conf/00_common.conf for description of PATHS dictionary
+# Every vswitch type supported by VSPERF must have its configuration
+# stored inside PATHS['vswitch']. List of all supported vswitches
+# can be obtained by call of ./vsperf --list-vswitches
+#
+# Directories defined by "ovs_var_tmp" and "ovs_etc_tmp" will be used
+# by OVS to temporarily store its configuration, pid and socket files.
+# In case these directories already exist, their original
+# content will be restored after the testcase execution.
+
+PATHS['vswitch'] = {
+ 'none' : { # used by SRIOV tests
+ 'type' : 'src',
+ 'src' : {},
+ },
+ 'OvsDpdkVhost': {
+ 'type' : 'src',
+ 'src': {
+ 'path': os.path.join(ROOT_DIR, 'src/ovs/ovs/'),
+ 'ovs-vswitchd': 'vswitchd/ovs-vswitchd',
+ 'ovsdb-server': 'ovsdb/ovsdb-server',
+ 'ovsdb-tool': 'ovsdb/ovsdb-tool',
+ 'ovsschema': 'vswitchd/vswitch.ovsschema',
+ 'ovs-vsctl': 'utilities/ovs-vsctl',
+ 'ovs-ofctl': 'utilities/ovs-ofctl',
+ 'ovs-dpctl': 'utilities/ovs-dpctl',
+ 'ovs-appctl': 'utilities/ovs-appctl',
+ },
+ 'bin': {
+ 'ovs-vswitchd': 'ovs-vswitchd',
+ 'ovsdb-server': 'ovsdb-server',
+ 'ovsdb-tool': 'ovsdb-tool',
+ 'ovsschema': '/usr/share/openvswitch/vswitch.ovsschema',
+ 'ovs-vsctl': 'ovs-vsctl',
+ 'ovs-ofctl': 'ovs-ofctl',
+ 'ovs-dpctl': 'ovs-dpctl',
+ 'ovs-appctl': 'ovs-appctl',
+ }
+ },
+ 'ovs_var_tmp': '/usr/local/var/run/openvswitch/',
+ 'ovs_etc_tmp': '/usr/local/etc/openvswitch/',
+}
-# location of vhost-user sockets
-VHOST_USER_SOCKS = os.path.join(OVS_VAR_DIR, 'dpdkvhostuser*')
+# default OvsVanilla configuration is similar to OvsDpdkVhost except 'path' and 'modules'
+PATHS['vswitch'].update({'OvsVanilla' : copy.deepcopy(PATHS['vswitch']['OvsDpdkVhost'])})
+PATHS['vswitch']['OvsVanilla']['src']['path'] = os.path.join(ROOT_DIR, 'src_vanilla/ovs/ovs/')
+PATHS['vswitch']['OvsVanilla']['src']['modules'] = ['datapath/linux/openvswitch.ko']
+PATHS['vswitch']['OvsVanilla']['bin']['modules'] = ['openvswitch']
# ############################
# vswitch configuration
@@ -93,9 +138,6 @@ OVS_OLD_STYLE_MQ = False
# parameters passed to ovs-vswitchd in case that OvsVanilla is selected
VSWITCHD_VANILLA_ARGS = []
-# use full module path to load module matching OVS version built from the source
-VSWITCH_VANILLA_KERNEL_MODULES = ['libcrc32c', 'ip_tunnel', 'vxlan', 'gre', 'nf_conntrack', 'nf_defrag_ipv4', 'nf_defrag_ipv6', os.path.join(OVS_DIR_VANILLA, 'datapath/linux/openvswitch.ko')]
-
# Bridge name to be used by VSWITCH
VSWITCH_BRIDGE_NAME = 'br0'
@@ -114,12 +156,6 @@ VSWITCH_AFFINITIZATION_ON = 1
VSWITCH_FLOW_TIMEOUT = '30000'
-# list of tuples of format (path, module_name), which will be inserted
-# using 'insmod' on system init
-
-# for OVS modules the path is in reference to the OVS directory.
-OVS_MODULES = []
-
# log file for ovs-vswitchd
LOG_FILE_VSWITCHD = 'vswitchd.log'
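A hedged example (not part of the patch) of redirecting the temporary OVS directories
from a custom configuration file; the target paths below are purely illustrative:

    # OVS temporarily stores its configuration, pid and socket files here;
    # any pre-existing content is restored after the testcase finishes
    PATHS['vswitch']['ovs_var_tmp'] = '/tmp/vsperf-ovs/var/'
    PATHS['vswitch']['ovs_etc_tmp'] = '/tmp/vsperf-ovs/etc/'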
diff --git a/conf/03_traffic.conf b/conf/03_traffic.conf
index fd0f589d..04266923 100644
--- a/conf/03_traffic.conf
+++ b/conf/03_traffic.conf
@@ -75,6 +75,12 @@ TRAFFICGEN_STC_TESTCENTER_PATH = os.path.join(ROOT_DIR, 'tools/pkt_gen/testcente
# Name of the TestCenter RFC2544 Tput helper python script
TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME = "testcenter-rfc2544-throughput.py"
+# Name of the TestCenter RFC2889 helper python script
+TRAFFICGEN_STC_RFC2889_TEST_FILE_NAME = "testcenter-rfc2889-rest.py"
+
+# RFC2889 test port locations
+TRAFFICGEN_STC_RFC2889_LOCATION = ""
+
# The address of the Spirent Lab Server to use
TRAFFICGEN_STC_LAB_SERVER_ADDR = ""
diff --git a/conf/04_vnf.conf b/conf/04_vnf.conf
index 2e86b358..e0c72b10 100644
--- a/conf/04_vnf.conf
+++ b/conf/04_vnf.conf
@@ -20,10 +20,20 @@ VNF = 'QemuDpdkVhostUser'
VNF_AFFINITIZATION_ON = True
# ############################
-# Executables and log files
+# Directories, executables and log files
# ############################
-QEMU_BIN = os.path.join(QEMU_DIR, 'x86_64-softmmu/qemu-system-x86_64')
+# please see conf/00_common.conf for description of PATHS dictionary
+PATHS['qemu'] = {
+ 'type' : 'src',
+ 'src': {
+ 'path': os.path.join(ROOT_DIR, 'src/qemu/qemu/'),
+ 'qemu-system': 'x86_64-softmmu/qemu-system-x86_64'
+ },
+ 'bin': {
+ 'qemu-system': 'qemu-system-x86_64'
+ }
+}
# log file for qemu
LOG_FILE_QEMU = 'qemu.log'
@@ -140,11 +150,15 @@ GUEST_CORE_BINDING = [('#EVAL(6+2*#VMINDEX)', '#EVAL(7+2*#VMINDEX)')]
# using Vanilla OVS without enabling switch multi-queue.
GUEST_NIC_QUEUES = [0]
+# Disable VHost user guest NIC merge buffers by enabling the below setting. This
+# can improve performance when not using Jumbo Frames.
+GUEST_NIC_MERGE_BUFFERS_DISABLE = [True]
+
# Virtio-Net vhost thread CPU mapping. If using vanilla OVS with virtio-net,
# you can affinitize the vhost-net threads by enabling the below setting. There
# is one vhost-net thread per port per queue so one guest with 2 queues will
# have 4 vhost-net threads. If more threads are present than CPUs given, the
-# affinitize will overlap CPUs.
+# affinitize will overlap CPUs in a round robin fashion.
VSWITCH_VHOST_NET_AFFINITIZATION = False
VSWITCH_VHOST_CPU_MAP = [4,5,8,11]
diff --git a/core/component_factory.py b/core/component_factory.py
index 7f453bd2..ef7ba86f 100644
--- a/core/component_factory.py
+++ b/core/component_factory.py
@@ -16,6 +16,7 @@
"""
from core.traffic_controller_rfc2544 import TrafficControllerRFC2544
+from core.traffic_controller_rfc2889 import TrafficControllerRFC2889
from core.vswitch_controller_clean import VswitchControllerClean
from core.vswitch_controller_p2p import VswitchControllerP2P
from core.vswitch_controller_pxp import VswitchControllerPXP
@@ -47,7 +48,10 @@ def create_traffic(traffic_type, trafficgen_class):
:param trafficgen_class: Reference to traffic generator class to be used.
:return: A new ITrafficController
"""
- return TrafficControllerRFC2544(trafficgen_class)
+ if traffic_type.lower().startswith('rfc2889'):
+ return TrafficControllerRFC2889(trafficgen_class)
+ else:
+ return TrafficControllerRFC2544(trafficgen_class)
def create_vswitch(deployment_scenario, vswitch_class, traffic,
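A minimal, self-contained sketch (not VSPERF code) of the dispatch rule added to
create_traffic() above: any traffic type whose name starts with 'rfc2889' is handled by
the new RFC2889 controller, everything else keeps the RFC2544 controller:

    def pick_controller(traffic_type):
        # mirrors the branch added to component_factory.create_traffic()
        if traffic_type.lower().startswith('rfc2889'):
            return 'TrafficControllerRFC2889'
        return 'TrafficControllerRFC2544'

    assert pick_controller('rfc2889') == 'TrafficControllerRFC2889'
    assert pick_controller('back2back') == 'TrafficControllerRFC2544'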
diff --git a/core/results/results_constants.py b/core/results/results_constants.py
index b7ab7052..864712bc 100644
--- a/core/results/results_constants.py
+++ b/core/results/results_constants.py
@@ -58,6 +58,11 @@ class ResultsConstants(object):
SCAL_STREAM_COUNT = 'stream_count'
SCAL_STREAM_TYPE = 'match_type'
SCAL_PRE_INSTALLED_FLOWS = 'pre-installed_flows'
+ # RFC2889 Forwarding, Address-Caching and Congestion
+ FORWARDING_RATE_FPS = 'forwarding_rate_fps'
+ ADDRS_COUNT_FLOOD_COUNT_RATIO = 'addrs_count_flood_count_ratio'
+ CONGESTION_CONTROL_EXISTS = 'congestion_control_exists'
+ PORTS_MAP = 'ports_map'
TEST_RUN_TIME = "test_execution_time"
diff --git a/core/traffic_controller_rfc2889.py b/core/traffic_controller_rfc2889.py
new file mode 100644
index 00000000..a97a47d3
--- /dev/null
+++ b/core/traffic_controller_rfc2889.py
@@ -0,0 +1,146 @@
+"""RFC2889 Traffic Controller implementation.
+"""
+import logging
+
+from core.traffic_controller import ITrafficController
+from core.results.results_constants import ResultsConstants
+from core.results.results import IResults
+from conf import settings
+from conf import get_test_param
+
+
+class TrafficControllerRFC2889(ITrafficController, IResults):
+ """Traffic controller for RFC2889 traffic
+
+ Used to setup and control a traffic generator for an RFC2889 deployment
+ traffic scenario.
+ """
+
+ def __init__(self, traffic_gen_class):
+ """Initialise the trafficgen and store.
+
+ :param traffic_gen_class: The traffic generator class to be used.
+ """
+ self._logger = logging.getLogger(__name__)
+ self._logger.debug("__init__")
+ self._traffic_gen_class = traffic_gen_class()
+ self._traffic_started = False
+ self._traffic_started_call_count = 0
+ self._trials = int(get_test_param('rfc2889_trials', 1))
+ self._duration = int(get_test_param('duration', 30))
+ self._results = []
+
+ # If set, comma separated packet_sizes value from --test_params
+ # on cli takes precedence over value in settings file.
+ self._packet_sizes = None
+ packet_sizes_cli = get_test_param('pkt_sizes')
+ if packet_sizes_cli:
+ self._packet_sizes = [int(x.strip())
+ for x in packet_sizes_cli.split(',')]
+ else:
+ self._packet_sizes = settings.getValue('TRAFFICGEN_PKT_SIZES')
+
+ def __enter__(self):
+ """Call initialisation function.
+ """
+ self._traffic_gen_class.connect()
+
+ def __exit__(self, type_, value, traceback):
+ """Stop traffic, clean up.
+ """
+ if self._traffic_started:
+ self.stop_traffic()
+
+ @staticmethod
+ def _append_results(result_dict, packet_size):
+ """Adds common values to traffic generator results.
+
+ :param result_dict: Dictionary containing results from trafficgen
+ :param packet_size: Packet size value.
+
+ :returns: dictionary of results with additional entries.
+ """
+
+ ret_value = result_dict
+
+ ret_value[ResultsConstants.TYPE] = 'rfc2889'
+ ret_value[ResultsConstants.PACKET_SIZE] = str(packet_size)
+
+ return ret_value
+
+ def send_traffic(self, traffic):
+ """See ITrafficController for description
+ """
+ self._logger.debug('send_traffic with ' +
+ str(self._traffic_gen_class))
+
+ for packet_size in self._packet_sizes:
+ # Merge framesize with the default traffic definition
+ if 'l2' in traffic:
+ traffic['l2'] = dict(traffic['l2'],
+ **{'framesize': packet_size})
+ else:
+ traffic['l2'] = {'framesize': packet_size}
+
+ if traffic['traffic_type'] == 'caching':
+ result = self._traffic_gen_class.send_rfc2889_caching(
+ traffic, trials=self._trials, duration=self._duration)
+ elif traffic['traffic_type'] == 'congestion':
+ result = self._traffic_gen_class.send_rfc2889_congestion(
+ traffic, duration=self._duration)
+ else:
+ result = self._traffic_gen_class.send_rfc2889_forwarding(
+ traffic, tests=self._trials, duration=self._duration)
+
+ result = TrafficControllerRFC2889._append_results(result,
+ packet_size)
+ self._results.append(result)
+
+ def send_traffic_async(self, traffic, function):
+ """See ITrafficController for description
+ """
+ self._logger.debug('send_traffic_async with ' +
+ str(self._traffic_gen_class))
+
+ for packet_size in self._packet_sizes:
+ traffic['l2'] = {'framesize': packet_size}
+ self._traffic_gen_class.start_rfc2889_forwarding(
+ traffic,
+ trials=self._trials,
+ duration=self._duration)
+ self._traffic_started = True
+ if len(function['args']) > 0:
+ function['function'](function['args'])
+ else:
+ function['function']()
+ result = self._traffic_gen_class.wait_rfc2889_forwarding(
+ traffic, trials=self._trials, duration=self._duration)
+ result = TrafficControllerRFC2889._append_results(result,
+ packet_size)
+ self._results.append(result)
+
+ def stop_traffic(self):
+ """Kills traffic being sent from the traffic generator.
+ """
+ self._logger.debug("stop_traffic()")
+
+ def print_results(self):
+ """IResult interface implementation.
+ """
+ counter = 0
+ for item in self._results:
+ logging.info("Record: " + str(counter))
+ counter += 1
+ for(key, value) in list(item.items()):
+ logging.info(" Key: " + str(key) +
+ ", Value: " + str(value))
+
+ def get_results(self):
+ """IResult interface implementation.
+ """
+ return self._results
+
+ def validate_send_traffic(self, result, traffic):
+ """Verify that send traffic has succeeded
+ """
+ return True
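A hedged usage sketch of the controller above. MyRfc2889TrafficGen is a hypothetical
traffic generator class providing the hooks the controller calls (connect() plus
send_rfc2889_forwarding/caching/congestion); it is not part of this patch:

    ctrl = TrafficControllerRFC2889(MyRfc2889TrafficGen)   # hypothetical trafficgen class
    with ctrl:
        # any traffic_type other than 'caching' or 'congestion' runs the forwarding test
        ctrl.send_traffic({'traffic_type': 'rfc2889'})
    ctrl.print_results()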
diff --git a/docs/configguide/trafficgen.rst b/docs/configguide/trafficgen.rst
index 28b34a6b..6ede7f2f 100644
--- a/docs/configguide/trafficgen.rst
+++ b/docs/configguide/trafficgen.rst
@@ -323,6 +323,42 @@ install the package. Once installed, the scripts named with 'rest' keyword
can be used. For example: testcenter-rfc2544-rest.py can be used to run
RFC 2544 tests using the REST interface.
+Configuration:
+~~~~~~~~~~~~~~
+The mandatory configuration parameters are listed below.
+
+1. The Lab Server and License Server addresses. These parameters apply to
+ all tests and are mandatory.
+
+.. code-block:: console
+
+ TRAFFICGEN_STC_LAB_SERVER_ADDR = " "
+ TRAFFICGEN_STC_LICENSE_SERVER_ADDR = " "
+
+2. For RFC2544 tests, the following parameters are mandatory.
+
+
+.. code-block:: console
+
+ TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME = " "
+ TRAFFICGEN_STC_EAST_CHASSIS_ADDR = " "
+ TRAFFICGEN_STC_EAST_SLOT_NUM = " "
+ TRAFFICGEN_STC_EAST_PORT_NUM = " "
+ TRAFFICGEN_STC_EAST_INTF_ADDR = " "
+ TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR = " "
+ TRAFFICGEN_STC_WEST_CHASSIS_ADDR = ""
+ TRAFFICGEN_STC_WEST_SLOT_NUM = " "
+ TRAFFICGEN_STC_WEST_PORT_NUM = " "
+ TRAFFICGEN_STC_WEST_INTF_ADDR = " "
+ TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR = " "
+
+3. For RFC2889 tests, specifying the locations of the ports is mandatory.
+
+.. code-block:: console
+
+ TRAFFICGEN_STC_RFC2889_TEST_FILE_NAME = " "
+ TRAFFICGEN_STC_RFC2889_LOCATIONS = " "
+
Xena Networks
-------------
diff --git a/docs/design/vswitchperf_design.rst b/docs/design/vswitchperf_design.rst
index cdf9f318..375fa12e 100755
--- a/docs/design/vswitchperf_design.rst
+++ b/docs/design/vswitchperf_design.rst
@@ -100,6 +100,164 @@ The values in the file specified by ``--conf-file`` takes precedence over all
the other configuration files and does not have to follow the naming
convention.
+Configuration of PATHS dictionary
+---------------------------------
+
+VSPERF uses external tools like Open vSwitch and Qemu for execution of testcases. These
+tools may be downloaded and built automatically by the `VSPERF installation scripts`_
+or installed manually by the user from binary packages. It is also possible to use a
+combination of both approaches, but it is essential to correctly set paths to all required
+tools. These paths are stored within the PATHS dictionary, which is evaluated before execution
+of each testcase in order to set up a testcase specific environment. Values selected for
+testcase execution are internally stored inside the TOOLS dictionary, which is used by VSPERF
+to execute external tools, load kernel modules, etc.
+
+The default configuration of the PATHS dictionary is spread among three configuration files
+to follow the logical grouping of configuration options. A basic description of the PATHS
+dictionary is placed inside ``conf/00_common.conf``. The configuration specific to DPDK and
+vswitches is located in ``conf/02_vswitch.conf``. The last part, related to Qemu, is defined
+inside ``conf/04_vnf.conf``. The default configuration values can be used as long as all
+required tools were downloaded and built automatically by vsperf itself. If some of the tools
+were installed manually from binary packages, then it is necessary to modify the content of
+the PATHS dictionary accordingly.
+
+The dictionary has a specific section of configuration options for every tool type:
+
+ * ``PATHS['vswitch']`` - contains a separate dictionary for each of the vswitches supported by VSPERF
+
+ Example:
+
+ .. code-block:: python
+
+ PATHS['vswitch'] = {
+ 'OvsDpdkVhost': { ... },
+ 'OvsVanilla' : { ... },
+ ...
+ }
+
+ * ``PATHS['dpdk']`` - contains paths to the dpdk sources, kernel modules and tools (e.g. testpmd)
+
+ Example:
+
+ .. code-block:: python
+
+ PATHS['dpdk'] = {
+ 'type' : 'src',
+ 'src': {
+ 'path': os.path.join(ROOT_DIR, 'src/dpdk/dpdk/'),
+ 'modules' : ['uio', os.path.join(RTE_TARGET, 'kmod/igb_uio.ko')],
+ 'bind-tool': 'tools/dpdk*bind.py',
+ 'testpmd': os.path.join(RTE_TARGET, 'app', 'testpmd'),
+ },
+ ...
+ }
+
+ * ``PATHS['qemu']`` - contains paths to the qemu sources and executable file
+
+ Example:
+
+ .. code-block:: python
+
+ PATHS['qemu'] = {
+ 'type' : 'bin',
+ 'bin': {
+ 'qemu-system': 'qemu-system-x86_64'
+ },
+ ...
+ }
+
+Every section specific to the particular vswitch, dpdk or qemu may contain following types
+of configuration options:
+
+ * option ``type`` - is a string, which defines the type of configured paths ('src' or 'bin')
+ to be selected for a given section:
+
+ * value ``src`` means, that VSPERF will use vswitch, DPDK or QEMU built from sources
+ e.g. by execution of ``systems/build_base_machine.sh`` script during VSPERF
+ installation
+
+ * value ``bin`` means, that VSPERF will use vswitch, DPDK or QEMU binaries installed
+ directly in the operating system, e.g. via OS specific packaging system
+
+ * option ``path`` - is a string with a valid system path; Its content is checked for
+ existence, prefixed with section name and stored into TOOLS for later use
+ e.g. ``TOOLS['dpdk_src']`` or ``TOOLS['vswitch_src']``
+
+ * option ``modules`` - is list of strings with names of kernel modules; Every module name
+ from given list is checked for a '.ko' suffix. In case that it matches and if it is not
+ an absolute path to the module, then module name is prefixed with value of ``path``
+ option defined for the same section
+
+ Example:
+
+ .. code-block:: python
+
+ """
+ snippet of PATHS definition from the configuration file:
+ """
+ PATHS['vswitch'] = {
+ 'OvsVanilla' = {
+ 'type' : 'src',
+ 'src': {
+ 'path': '/tmp/vsperf/src_vanilla/ovs/ovs/',
+ 'modules' : ['datapath/linux/openvswitch.ko'],
+ ...
+ },
+ ...
+ }
+ ...
+ }
+
+ """
+ Final content of TOOLS dictionary used during runtime:
+ """
+ TOOLS['vswitch_modules'] = ['/tmp/vsperf/src_vanilla/ovs/ovs/datapath/linux/openvswitch.ko']
+
+ * all other options are strings with names and paths to specific tools; If a given string
+ contains a relative path and the option ``path`` is defined for a given section, then the string
+ content will be prefixed with the content of ``path``. Otherwise the name of the tool will be
+ searched for within standard system directories. If the filename contains OS specific
+ wildcards, then they will be expanded to the real path. At the end of the processing, every
+ absolute path will be checked for its existence. If a temporary path (i.e. a path with
+ a ``_tmp`` suffix) does not exist, a log message will be written and vsperf will continue. If any
+ other path does not exist, then vsperf execution will be terminated with a runtime error.
+
+ Example:
+
+ .. code-block:: python
+
+ """
+ snippet of PATHS definition from the configuration file:
+ """
+ PATHS['vswitch'] = {
+ 'OvsDpdkVhost': {
+ 'type' : 'src',
+ 'src': {
+ 'path': '/tmp/vsperf/src_vanilla/ovs/ovs/',
+ 'ovs-vswitchd': 'vswitchd/ovs-vswitchd',
+ 'ovsdb-server': 'ovsdb/ovsdb-server',
+ ...
+ }
+ ...
+ }
+ ...
+ }
+
+ """
+ Final content of TOOLS dictionary used during runtime:
+ """
+ TOOLS['ovs-vswitchd'] = '/tmp/vsperf/src_vanilla/ovs/ovs/vswitchd/ovs-vswitchd'
+ TOOLS['ovsdb-server'] = '/tmp/vsperf/src_vanilla/ovs/ovs/ovsdb/ovsdb-server'
+
+Note: If the ``bin`` type is set for DPDK, then ``TOOLS['dpdk_src']`` will still be set to
+the value of ``PATHS['dpdk']['src']['path']``. The reason is that VSPERF uses the downloaded
+DPDK sources to copy DPDK and testpmd into the GUEST, where testpmd is built. If the
+DPDK sources are not available, then vsperf will continue with test execution,
+but testpmd can't be used as a guest loopback. This is useful when other guest
+loopback applications (e.g. buildin or l2fwd) are used.
+
+.. _VSPERF installation scripts: http://artifacts.opnfv.org/vswitchperf/docs/configguide/installation.html#other-requirements
+
Configuration of GUEST options
------------------------------
diff --git a/docs/userguide/integration.rst b/docs/userguide/integration.rst
index 51c2f241..b0926d89 100755
--- a/docs/userguide/integration.rst
+++ b/docs/userguide/integration.rst
@@ -509,11 +509,15 @@ To run OVS NATIVE tunnel tests (VXLAN/GRE/GENEVE):
VSWITCH = 'OvsVanilla'
# Specify vport_* kernel module to test.
- VSWITCH_VANILLA_KERNEL_MODULES = ['vport_vxlan',
- 'vport_gre',
- 'vport_geneve',
- os.path.join(OVS_DIR_VANILLA,
- 'datapath/linux/openvswitch.ko')]
+ PATHS['vswitch']['OvsVanilla']['src']['modules'] = [
+ 'vport_vxlan',
+ 'vport_gre',
+ 'vport_geneve',
+ 'datapath/linux/openvswitch.ko',
+ ]
+
+ **NOTE:** In case Vanilla OVS is installed from a binary package, please
+ set ``PATHS['vswitch']['OvsVanilla']['bin']['modules']`` instead.
3. Run tests:
@@ -674,9 +678,10 @@ To run VXLAN decapsulation tests:
.. code-block:: python
- VSWITCH_VANILLA_KERNEL_MODULES = ['vport_vxlan',
- os.path.join(OVS_DIR_VANILLA,
- 'datapath/linux/openvswitch.ko')]
+ PATHS['vswitch']['OvsVanilla']['src']['modules'] = [
+ 'vport_vxlan',
+ 'datapath/linux/openvswitch.ko',
+ ]
DUT_NIC1_MAC = '<DUT NIC1 MAC ADDRESS>'
@@ -714,6 +719,9 @@ To run VXLAN decapsulation tests:
'inner_dstport': 3001,
}
+ **NOTE:** In case Vanilla OVS is installed from a binary package, please
+ set ``PATHS['vswitch']['OvsVanilla']['bin']['modules']`` instead.
+
2. Run test:
.. code-block:: console
@@ -730,9 +738,10 @@ To run GRE decapsulation tests:
.. code-block:: python
- VSWITCH_VANILLA_KERNEL_MODULES = ['vport_gre',
- os.path.join(OVS_DIR_VANILLA,
- 'datapath/linux/openvswitch.ko')]
+ PATHS['vswitch']['OvsVanilla']['src']['modules'] = [
+ 'vport_gre',
+ 'datapath/linux/openvswitch.ko',
+ ]
DUT_NIC1_MAC = '<DUT NIC1 MAC ADDRESS>'
@@ -769,6 +778,9 @@ To run GRE decapsulation tests:
'inner_dstport': 3001,
}
+ **NOTE:** In case Vanilla OVS is installed from a binary package, please
+ set ``PATHS['vswitch']['OvsVanilla']['bin']['modules']`` instead.
+
2. Run test:
.. code-block:: console
@@ -785,9 +797,10 @@ To run GENEVE decapsulation tests:
.. code-block:: python
- VSWITCH_VANILLA_KERNEL_MODULES = ['vport_geneve',
- os.path.join(OVS_DIR_VANILLA,
- 'datapath/linux/openvswitch.ko')]
+ PATHS['vswitch']['OvsVanilla']['src']['modules'] = [
+ 'vport_geneve',
+ 'datapath/linux/openvswitch.ko',
+ ]
DUT_NIC1_MAC = '<DUT NIC1 MAC ADDRESS>'
@@ -824,6 +837,9 @@ To run GENEVE decapsulation tests:
'inner_dstport': 3001,
}
+ **NOTE:** In case Vanilla OVS is installed from a binary package, please
+ set ``PATHS['vswitch']['OvsVanilla']['bin']['modules']`` instead.
+
2. Run test:
.. code-block:: console
diff --git a/docs/userguide/testusage.rst b/docs/userguide/testusage.rst
index ce647c6b..3c5cc4d4 100755
--- a/docs/userguide/testusage.rst
+++ b/docs/userguide/testusage.rst
@@ -306,15 +306,16 @@ To run tests using Vanilla OVS:
Using vfio_pci with DPDK
^^^^^^^^^^^^^^^^^^^^^^^^^
-To use vfio with DPDK instead of igb_uio edit 'conf/02_vswitch.conf'
-with the following parameters:
+To use vfio with DPDK instead of igb_uio, add the following parameter into
+your custom configuration file:
.. code-block:: console
- DPDK_MODULES = [
- ('vfio-pci'),
- ]
- SYS_MODULES = ['cuse']
+ PATHS['dpdk']['src']['modules'] = ['uio', 'vfio-pci']
+
+
+**NOTE:** In case DPDK is installed from a binary package, please
+set ``PATHS['dpdk']['bin']['modules']`` instead.
**NOTE:** Please ensure that Intel VT-d is enabled in BIOS.
diff --git a/src/dpdk/dpdk.py b/src/dpdk/dpdk.py
index bd9bb9cf..16223915 100644
--- a/src/dpdk/dpdk.py
+++ b/src/dpdk/dpdk.py
@@ -25,13 +25,11 @@ import subprocess
import logging
import glob
-from conf import settings
+from conf import settings as S
from tools import tasks
from tools.module_manager import ModuleManager
_LOGGER = logging.getLogger(__name__)
-RTE_PCI_TOOL = glob.glob(os.path.join(
- settings.getValue('RTE_SDK_USER'), 'tools', 'dpdk*bind.py'))[0]
_DPDK_MODULE_MANAGER = ModuleManager()
@@ -48,7 +46,7 @@ def init():
"""
global _NICS
global _NICS_PCI
- _NICS = settings.getValue('NICS')
+ _NICS = S.getValue('NICS')
_NICS_PCI = list(nic['pci'] for nic in _NICS)
if not _is_linux():
_LOGGER.error('Not running on a compatible Linux version. Exiting...')
@@ -91,18 +89,7 @@ def _insert_modules():
"""Ensure required modules are inserted on system.
"""
- _DPDK_MODULE_MANAGER.insert_modules(settings.getValue('SYS_MODULES'))
-
- mod_path_prefix = settings.getValue('OVS_DIR')
- _DPDK_MODULE_MANAGER.insert_module_group(settings.getValue('OVS_MODULES'),
- mod_path_prefix)
- if 'vfio-pci' not in settings.getValue('DPDK_MODULES'):
- mod_path_prefix = os.path.join(settings.getValue('RTE_SDK'),
- settings.getValue('RTE_TARGET'))
- _DPDK_MODULE_MANAGER.insert_module_group(settings.getValue('DPDK_MODULES'),
- mod_path_prefix)
- else:
- _DPDK_MODULE_MANAGER.insert_modules(settings.getValue('DPDK_MODULES'))
+ _DPDK_MODULE_MANAGER.insert_modules(S.getValue('TOOLS')['dpdk_modules'])
def _remove_modules():
"""Ensure required modules are removed from system.
@@ -110,26 +97,6 @@ def _remove_modules():
_DPDK_MODULE_MANAGER.remove_modules()
#
-# vhost specific modules management
-#
-
-def insert_vhost_modules():
- """Inserts VHOST related kernel modules
- """
- mod_path_prefix = os.path.join(settings.getValue('RTE_SDK'),
- 'lib',
- 'librte_vhost')
- _DPDK_MODULE_MANAGER.insert_module_group(settings.getValue('VHOST_MODULE'), mod_path_prefix)
-
-
-def remove_vhost_modules():
- """Removes all VHOST related kernel modules
- """
- # all modules are removed automatically by _remove_modules() method
- pass
-
-
-#
# 'vhost-net' module cleanup
#
@@ -150,7 +117,8 @@ def _remove_vhost_net():
def _vhost_user_cleanup():
"""Remove files created by vhost-user tests.
"""
- for sock in glob.glob(settings.getValue('VHOST_USER_SOCKS')):
+ for sock in glob.glob(os.path.join(S.getValue('TOOLS')['ovs_var_tmp'],
+ S.getValue('VHOST_USER_SOCKS'))):
if os.path.exists(sock):
try:
tasks.run_task(['sudo', 'rm', sock],
@@ -173,7 +141,7 @@ def _bind_nics():
"""
try:
_driver = 'igb_uio'
- if 'vfio-pci' in settings.getValue('DPDK_MODULES'):
+ if 'vfio-pci' in S.getValue('TOOLS')['dpdk_modules']:
_driver = 'vfio-pci'
tasks.run_task(['sudo', 'chmod', 'a+x', '/dev/vfio'],
_LOGGER, 'Setting VFIO permissions .. a+x',
@@ -182,7 +150,8 @@ def _bind_nics():
_LOGGER, 'Setting VFIO permissions .. 0666',
True)
- tasks.run_task(['sudo', RTE_PCI_TOOL, '--bind=' + _driver] +
+ tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
+ '--bind=' + _driver] +
_NICS_PCI, _LOGGER,
'Binding NICs %s...' % _NICS_PCI,
True)
@@ -193,7 +162,7 @@ def _unbind_nics():
"""Unbind NICs using the Intel DPDK ``dpdk*bind.py`` tool.
"""
try:
- tasks.run_task(['sudo', RTE_PCI_TOOL, '--unbind'] +
+ tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '--unbind'] +
_NICS_PCI, _LOGGER,
'Unbinding NICs %s...' % str(_NICS_PCI),
True)
@@ -204,7 +173,7 @@ def _unbind_nics():
for nic in _NICS:
try:
if nic['driver']:
- tasks.run_task(['sudo', RTE_PCI_TOOL, '--bind',
+ tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '--bind',
nic['driver'], nic['pci']],
_LOGGER, 'Binding NIC %s to %s...' %
(nic['pci'], nic['driver']),
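A minimal sketch (not VSPERF code) of the driver selection rule now used by _bind_nics():
vfio-pci is chosen when it appears among the configured DPDK modules, igb_uio otherwise:

    def pick_driver(dpdk_modules):
        return 'vfio-pci' if 'vfio-pci' in dpdk_modules else 'igb_uio'

    assert pick_driver(['uio', 'vfio-pci']) == 'vfio-pci'
    assert pick_driver(['uio', 'x86_64-native-linuxapp-gcc/kmod/igb_uio.ko']) == 'igb_uio'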
diff --git a/src/dpdk/testpmd_proc.py b/src/dpdk/testpmd_proc.py
index 990ef8da..a8fa8eee 100644
--- a/src/dpdk/testpmd_proc.py
+++ b/src/dpdk/testpmd_proc.py
@@ -27,10 +27,6 @@ from tools import tasks
_TESTPMD_PROMPT = 'Done'
-_TESTPMD_BIN = os.path.join(
- settings.getValue('RTE_SDK'), settings.getValue('RTE_TARGET'),
- 'app', 'testpmd')
-
_LOG_FILE_VSWITCHD = os.path.join(
settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_VSWITCHD'))
@@ -57,7 +53,7 @@ class TestPMDProcess(tasks.Process):
if not self._expect:
self._expect = _TESTPMD_PROMPT
testpmd_args = testpmd_args or []
- self._cmd = ['sudo', '-E', _TESTPMD_BIN] + testpmd_args
+ self._cmd = ['sudo', '-E', settings.getValue('TOOLS')['testpmd']] + testpmd_args
# startup/shutdown
diff --git a/src/ovs/dpctl.py b/src/ovs/dpctl.py
index 8ecac6dc..44a4ec9b 100644
--- a/src/ovs/dpctl.py
+++ b/src/ovs/dpctl.py
@@ -23,9 +23,6 @@ import string
from tools import tasks
from conf import settings
-_OVS_DPCTL_BIN = os.path.join(settings.getValue('OVS_DIR'), 'utilities',
- 'ovs-dpctl')
-
_OVS_LOCAL_DATAPATH = 'ovs-system'
class DPCtl(object):
@@ -51,7 +48,7 @@ class DPCtl(object):
:return: None
"""
- cmd = ['sudo', _OVS_DPCTL_BIN,
+ cmd = ['sudo', settings.getValue('TOOLS')['ovs-dpctl'],
'--timeout',
str(self.timeout)] + args
return tasks.run_task(
diff --git a/src/ovs/ofctl.py b/src/ovs/ofctl.py
index a75d0be2..e9b86127 100644
--- a/src/ovs/ofctl.py
+++ b/src/ovs/ofctl.py
@@ -28,21 +28,15 @@ import re
from tools import tasks
from conf import settings
-_OVS_VSCTL_BIN = os.path.join(settings.getValue('OVS_DIR'), 'utilities',
- 'ovs-vsctl')
-_OVS_OFCTL_BIN = os.path.join(settings.getValue('OVS_DIR'), 'utilities',
- 'ovs-ofctl')
-_OVS_APPCTL_BIN = os.path.join(settings.getValue('OVS_DIR'), 'utilities',
- 'ovs-appctl')
-
_OVS_BRIDGE_NAME = settings.getValue('VSWITCH_BRIDGE_NAME')
+_OVS_CMD_TIMEOUT = settings.getValue('OVS_CMD_TIMEOUT')
_CACHE_FILE_NAME = '/tmp/vsperf_flows_cache'
class OFBase(object):
"""Add/remove/show datapaths using ``ovs-ofctl``.
"""
- def __init__(self, timeout=10):
+ def __init__(self, timeout=_OVS_CMD_TIMEOUT):
"""Initialise logger.
:param timeout: Timeout to be used for each command
@@ -66,9 +60,9 @@ class OFBase(object):
:return: None
"""
if self.timeout == -1:
- cmd = ['sudo', _OVS_VSCTL_BIN, '--no-wait'] + args
+ cmd = ['sudo', settings.getValue('TOOLS')['ovs-vsctl'], '--no-wait'] + args
else:
- cmd = ['sudo', _OVS_VSCTL_BIN, '--timeout', str(self.timeout)] + args
+ cmd = ['sudo', settings.getValue('TOOLS')['ovs-vsctl'], '--timeout', str(self.timeout)] + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-vsctl...', check_error)
@@ -81,7 +75,7 @@ class OFBase(object):
:return: None
"""
- cmd = ['sudo', _OVS_APPCTL_BIN,
+ cmd = ['sudo', settings.getValue('TOOLS')['ovs-appctl'],
'--timeout',
str(self.timeout)] + args
return tasks.run_task(
@@ -145,7 +139,7 @@ class OFBase(object):
class OFBridge(OFBase):
"""Control a bridge instance using ``ovs-vsctl`` and ``ovs-ofctl``.
"""
- def __init__(self, br_name=_OVS_BRIDGE_NAME, timeout=10):
+ def __init__(self, br_name=_OVS_BRIDGE_NAME, timeout=_OVS_CMD_TIMEOUT):
"""Initialise bridge.
:param br_name: Bridge name
@@ -184,8 +178,8 @@ class OFBridge(OFBase):
:return: None
"""
tmp_timeout = self.timeout if timeout == None else timeout
- cmd = ['sudo', _OVS_OFCTL_BIN, '-O', 'OpenFlow13', '--timeout',
- str(tmp_timeout)] + args
+ cmd = ['sudo', settings.getValue('TOOLS')['ovs-ofctl'], '-O',
+ 'OpenFlow13', '--timeout', str(tmp_timeout)] + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-ofctl...', check_error)
diff --git a/src/package-list.mk b/src/package-list.mk
index 5aa40bd2..6eb43b8b 100644
--- a/src/package-list.mk
+++ b/src/package-list.mk
@@ -6,13 +6,11 @@
# dpdk section
# DPDK_URL ?= git://dpdk.org/dpdk
DPDK_URL ?= http://dpdk.org/git/dpdk
-DPDK_TAG ?= v16.04
+DPDK_TAG ?= v16.07
# OVS section
OVS_URL ?= https://github.com/openvswitch/ovs
-#The Tag below is for OVS v2.5.0 with backwards compatibility support for Qemu
-#versions < 2.5.
-OVS_TAG ?= 31871ee3839c35e6878debfc7926afa471dbdec6
+OVS_TAG ?= ed26e3ea9995ba632e681d5990af5ee9814f650e
# QEMU section
QEMU_URL ?= https://github.com/qemu/qemu.git
diff --git a/testcases/integration.py b/testcases/integration.py
index f3f684ba..88a6f12c 100644
--- a/testcases/integration.py
+++ b/testcases/integration.py
@@ -114,7 +114,20 @@ class IntegrationTestCase(TestCase):
loader = Loader()
# execute test based on TestSteps definition
if self.test:
+ # initialize list with results
step_result = [None] * len(self.test)
+
+ # count how many VNFs are involved in the test
+ for step in self.test:
+ if step[0].startswith('vnf'):
+ vnf_list[step[0]] = None
+
+ # check/expand GUEST configuration and copy data to shares
+ if len(vnf_list):
+ S.check_vm_settings(len(vnf_list))
+ self._copy_fwd_tools_for_all_guests(len(vnf_list))
+
+ # run test step by step...
for i, step in enumerate(self.test):
step_ok = False
if step[0] == 'vswitch':
@@ -132,10 +145,9 @@ class IntegrationTestCase(TestCase):
tmp_traffic.update(step[2])
step[2] = tmp_traffic
elif step[0].startswith('vnf'):
- if not step[0] in vnf_list:
- # initialize new VM and copy data to its shared dir
+ if not vnf_list[step[0]]:
+ # initialize new VM
vnf_list[step[0]] = loader.get_vnf_class()()
- self._copy_fwd_tools_for_guest(len(vnf_list))
test_object = vnf_list[step[0]]
else:
self._logger.error("Unsupported test object %s", step[0])
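A minimal sketch (not VSPERF code) of the VNF-counting pass added above; the step names
and values are hypothetical. Steps whose object name starts with 'vnf' are collected
first, so guest settings can be expanded and shared directories prepared before any
step is executed:

    test_steps = [['vswitch', 'add_switch', 'br0'],
                  ['vnf1', 'start'],
                  ['vnf2', 'start']]
    vnf_list = {step[0]: None for step in test_steps if step[0].startswith('vnf')}
    assert len(vnf_list) == 2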
diff --git a/testcases/testcase.py b/testcases/testcase.py
index 6e215b46..7f22c18f 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -61,24 +61,23 @@ class TestCase(object):
self._settings_paths_modified = False
self._testcast_run_time = None
+ # store all GUEST_ specific settings to keep original values before their expansion
+ for key in S.__dict__:
+ if key.startswith('GUEST_'):
+ self._settings_original[key] = S.getValue(key)
+
self._update_settings('VSWITCH', cfg.get('vSwitch', S.getValue('VSWITCH')))
self._update_settings('VNF', cfg.get('VNF', S.getValue('VNF')))
self._update_settings('TRAFFICGEN', cfg.get('Trafficgen', S.getValue('TRAFFICGEN')))
self._update_settings('TEST_PARAMS', cfg.get('Parameters', S.getValue('TEST_PARAMS')))
# update global settings
+ functions.settings_update_paths()
guest_loopback = get_test_param('guest_loopback', None)
if guest_loopback:
# we can put just one item, it'll be expanded automatically for all VMs
self._update_settings('GUEST_LOOPBACK', [guest_loopback])
- if 'VSWITCH' in self._settings_original or 'VNF' in self._settings_original:
- self._settings_original.update({
- 'RTE_SDK' : S.getValue('RTE_SDK'),
- 'OVS_DIR' : S.getValue('OVS_DIR'),
- })
- functions.settings_update_paths()
-
# set test parameters; CLI options take precedence to testcase settings
self._logger = logging.getLogger(__name__)
self.name = cfg['Name']
@@ -196,7 +195,7 @@ class TestCase(object):
# perform guest related handling
if self._vnf_ctl.get_vnfs_number():
# copy sources of l2 forwarding tools into VM shared dir if needed
- self._copy_fwd_tools_for_all_guests()
+ self._copy_fwd_tools_for_all_guests(self._vnf_ctl.get_vnfs_number())
# in case of multi VM in parallel, set the number of streams to the number of VMs
if self.deployment.startswith('pvpv'):
@@ -362,11 +361,11 @@ class TestCase(object):
item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type
return results
- def _copy_fwd_tools_for_all_guests(self):
+ def _copy_fwd_tools_for_all_guests(self, vm_count):
"""Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] based on selected deployment.
"""
# consider only VNFs involved in the test
- for guest_dir in set(S.getValue('GUEST_SHARE_DIR')[:self._vnf_ctl.get_vnfs_number()]):
+ for guest_dir in set(S.getValue('GUEST_SHARE_DIR')[:vm_count]):
self._copy_fwd_tools_for_guest(guest_dir)
def _copy_fwd_tools_for_guest(self, guest_dir):
@@ -384,14 +383,31 @@ class TestCase(object):
# copy sources into shared dir only if neccessary
guest_loopback = set(S.getValue('GUEST_LOOPBACK'))
- if 'testpmd' in guest_loopback or 'l2fwd' in guest_loopback:
+ if 'testpmd' in guest_loopback:
try:
- tasks.run_task(['rsync', '-a', '-r', '-l', r'--exclude="\.git"',
- os.path.join(S.getValue('RTE_SDK_USER'), ''),
+ # exclude whole .git/ subdirectory and all o-files;
+ # It is assumed, that the same RTE_TARGET is used in both host
+ # and VMs; This simplification significantly speeds up testpmd
+ # build. If we will need a different RTE_TARGET in VM,
+ # then we have to build whole DPDK from the scratch in VM.
+ # In that case we can copy just DPDK sources (e.g. by excluding
+ # all items obtained by git status -unormal --porcelain).
+ # NOTE: Excluding RTE_TARGET directory won't help on systems,
+ # where DPDK is built for multiple targets (e.g. for gcc & icc)
+ exclude = []
+ exclude.append(r'--exclude=.git/')
+ exclude.append(r'--exclude=*.o')
+ tasks.run_task(['rsync', '-a', '-r', '-l'] + exclude +
+ [os.path.join(S.getValue('TOOLS')['dpdk_src'], ''),
os.path.join(guest_dir, 'DPDK')],
self._logger,
'Copying DPDK to shared directory...',
True)
+ except subprocess.CalledProcessError:
+ self._logger.error('Unable to copy DPDK to shared directory')
+ raise
+ if 'l2fwd' in guest_loopback:
+ try:
tasks.run_task(['rsync', '-a', '-r', '-l',
os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/'),
os.path.join(guest_dir, 'l2fwd')],
@@ -399,7 +415,8 @@ class TestCase(object):
'Copying l2fwd to shared directory...',
True)
except subprocess.CalledProcessError:
- self._logger.error('Unable to copy DPDK and l2fwd to shared directory')
+ self._logger.error('Unable to copy l2fwd to shared directory')
+ raise
def _mount_hugepages(self):
"""Mount hugepages if usage of DPDK or Qemu is detected
diff --git a/tools/functions.py b/tools/functions.py
index 60ed0802..3bd8cc4d 100644
--- a/tools/functions.py
+++ b/tools/functions.py
@@ -15,20 +15,126 @@
"""Various helper functions
"""
-from conf import settings
+import os
+import logging
+import glob
+import shutil
+from conf import settings as S
#
# Support functions
#
def settings_update_paths():
- """ Configure paths to OVS and DPDK based on VSWITCH and VNF values
+ """ Configure paths to OVS, DPDK and QEMU sources and binaries based on
+ selected vswitch type and src/binary switch. Data are taken from
+ PATHS dictionary and after their processing they are stored inside TOOLS.
+ PATHS dictionary has specific section for 'vswitch', 'qemu' and 'dpdk'
+ Following processing is done for every item:
+ item 'type' - string, which defines the type of paths ('src' or 'bin') to be selected
+ for a given section:
+ 'src' means, that VSPERF will use OVS, DPDK or QEMU built from sources
+ e.g. by execution of systems/build_base_machine.sh script during VSPERF
+ installation
+ 'bin' means, that VSPERF will use OVS, DPDK or QEMU binaries installed
+ in the OS, e.g. via OS specific packaging system
+ item 'path' - string with valid path; Its content is checked for existence, prefixed
+ with section name and stored into TOOLS for later use
+ e.g. TOOLS['dpdk_src'] or TOOLS['vswitch_src']
+ item 'modules' - list of strings; Every value from given list is checked for '.ko'
+ suffix. In case it matches and it is not an absolute path to the module, then
+ module name is prefixed with 'path' defined for the same section
+ e.g. TOOLS['vswitch_modules'] = [
+ '/tmp/vsperf/src_vanilla/ovs/ovs/datapath/linux/openvswitch.ko']
+ all other items - string - if given string is a relative path and item 'path'
+ is defined for a given section, then item content will be prefixed with
+ content of the 'path'. Otherwise tool name will be searched within
+ standard system directories. Also any OS filename wildcards will be
+ expanded to the real path. At the end of processing, every absolute
+ path is checked for its existence. In case that temporary path (i.e. path
+ with '_tmp' suffix) doesn't exist, then log will be written and vsperf will
+ continue. If any other path will not exist, then vsperf execution will
+ be terminated with runtime error.
+
+ Note: In case that 'bin' type is set for DPDK, then TOOLS['dpdk_src'] will be set to
+ the value of PATHS['dpdk']['src']['path']. The reason is, that VSPERF uses downloaded
+ DPDK sources to copy DPDK and testpmd into the GUEST, where testpmd is built. In case,
+ that DPDK sources are not available, then vsperf will continue with test execution,
+ but testpmd can't be used as a guest loopback. This is useful in case, that other guest
+ loopback applications (e.g. buildin) are used by CI jobs, etc.
"""
# set dpdk and ovs paths according to VNF and VSWITCH
- if settings.getValue('VSWITCH').endswith('Vanilla'):
- # settings paths for Vanilla
- settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_VANILLA')))
- else:
- # default - set to VHOST USER but can be changed during enhancement
- settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER')))
- settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER')))
+ paths = {}
+ vswitch_type = S.getValue('PATHS')['vswitch'][S.getValue('VSWITCH')]['type']
+ paths['vswitch'] = S.getValue('PATHS')['vswitch'][S.getValue('VSWITCH')][vswitch_type]
+ paths['dpdk'] = S.getValue('PATHS')['dpdk'][S.getValue('PATHS')['dpdk']['type']]
+ paths['qemu'] = S.getValue('PATHS')['qemu'][S.getValue('PATHS')['qemu']['type']]
+ paths['paths'] = {}
+ paths['paths']['ovs_var_tmp'] = S.getValue('PATHS')['vswitch']['ovs_var_tmp']
+ paths['paths']['ovs_etc_tmp'] = S.getValue('PATHS')['vswitch']['ovs_etc_tmp']
+
+ tools = {}
+ for path_class in paths:
+ for tool in paths[path_class]:
+ tmp_tool = paths[path_class][tool]
+
+ # store valid path of given class into tools dict
+ if tool == 'path':
+ if os.path.isdir(tmp_tool):
+ tools['{}_src'.format(path_class)] = tmp_tool
+ continue
+ else:
+ raise RuntimeError('Path {} does not exist.'.format(tmp_tool))
+
+ # store list of modules of given class into tools dict
+ if tool == 'modules':
+ tmp_modules = []
+ for module in tmp_tool:
+ # add path to the .ko modules and check it for existence
+ if module.endswith('.ko') and not os.path.isabs(module):
+ module = os.path.join(paths[path_class]['path'], module)
+ if not os.path.exists(module):
+ raise RuntimeError('Cannot locate module {}'.format(module))
+
+ tmp_modules.append(module)
+
+ tools['{}_modules'.format(path_class)] = tmp_modules
+ continue
+
+ # if path to the tool is relative, then 'path' will be prefixed
+ # in case that 'path' is not defined, then tool will be searched
+ # within standard system paths
+ if not os.path.isabs(tmp_tool):
+ if 'path' in paths[path_class]:
+ tmp_tool = os.path.join(paths[path_class]['path'], tmp_tool)
+ elif shutil.which(tmp_tool):
+ tmp_tool = shutil.which(tmp_tool)
+ else:
+ raise RuntimeError('Cannot locate tool {}'.format(tmp_tool))
+
+ # expand OS wildcards in paths if needed
+ if glob.has_magic(tmp_tool):
+ tmp_glob = glob.glob(tmp_tool)
+ if len(tmp_glob) == 0:
+ raise RuntimeError('Path to the {} is not valid: {}.'.format(tool, tmp_tool))
+ elif len(tmp_glob) > 1:
+ raise RuntimeError('Path to the {} is ambiguous {}'.format(tool, tmp_glob))
+ elif len(tmp_glob) == 1:
+ tmp_tool = tmp_glob[0]
+ elif not os.path.exists(tmp_tool):
+ if tool.endswith('_tmp'):
+ logging.getLogger().debug('Temporary path to the {} does not '
+ 'exist: {}.'.format(tool, tmp_tool))
+ else:
+ raise RuntimeError('Path to the {} is not valid: {}'.format(tool, tmp_tool))
+
+ tools[tool] = tmp_tool
+
+ # ensure, that dpkg_src for bin will be set to downloaded DPDK sources, so it can
+ # be copied to the guest share dir and used by GUEST to build and run testpmd
+ # Validity of the path is not checked by purpose, so user can use VSPERF without
+ # downloading DPDK sources. In that case guest loopback can't be set to 'testpmd'
+ if S.getValue('PATHS')['dpdk']['type'] == 'bin':
+ tools['dpdk_src'] = S.getValue('PATHS')['dpdk']['src']['path']
+
+ S.setValue('TOOLS', tools)
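Illustrative only: the approximate shape of the TOOLS dictionary produced by
settings_update_paths() for the default 'src' configuration. Paths are shortened,
several entries are omitted and the /tmp/vsperf prefix is just an example ROOT_DIR:

    TOOLS = {
        'vswitch_src': '/tmp/vsperf/src/ovs/ovs/',
        'ovs-vswitchd': '/tmp/vsperf/src/ovs/ovs/vswitchd/ovs-vswitchd',
        'ovsdb-server': '/tmp/vsperf/src/ovs/ovs/ovsdb/ovsdb-server',
        'dpdk_src': '/tmp/vsperf/src/dpdk/dpdk/',
        'dpdk_modules': ['uio',
                         '/tmp/vsperf/src/dpdk/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko'],
        'ovs_var_tmp': '/usr/local/var/run/openvswitch/',
        # ... plus bind-tool, testpmd, qemu-system, ovs-vsctl, ovs-ofctl, ovs_etc_tmp, etc.
    }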
diff --git a/tools/module_manager.py b/tools/module_manager.py
index 2eb4c63d..911f7252 100644
--- a/tools/module_manager.py
+++ b/tools/module_manager.py
@@ -31,30 +31,40 @@ class ModuleManager(object):
"""
self._modules = []
- def insert_module(self, module):
+ def insert_module(self, module, auto_remove=True):
"""Method inserts given module.
In case that module name ends with .ko suffix then insmod will
be used for its insertion. Otherwise modprobe will be called.
:param module: a name of kernel module
+ :param auto_remove: if True (by default), then module will be
+ automatically removed by remove_modules() method
"""
module_base_name = os.path.basename(os.path.splitext(module)[0])
if self.is_module_inserted(module):
self._logger.info('Module already loaded \'%s\'.', module_base_name)
# add it to internal list, so we can try to remove it at the end
- self._modules.append(module)
+ if auto_remove:
+ self._modules.append(module)
return
try:
if module.endswith('.ko'):
+ # load module dependencies first, but suppress automatic
+ # module removal at the end, in case the module
+ # depends on a generic module
+ for depmod in self.get_module_dependecies(module):
+ self.insert_module(depmod, auto_remove=False)
+
tasks.run_task(['sudo', 'insmod', module], self._logger,
'Insmod module \'%s\'...' % module_base_name, True)
else:
tasks.run_task(['sudo', 'modprobe', module], self._logger,
'Modprobe module \'%s\'...' % module_base_name, True)
- self._modules.append(module)
+ if auto_remove:
+ self._modules.append(module)
except subprocess.CalledProcessError:
# in case of error, show full module name
self._logger.error('Unable to insert module \'%s\'.', module)
@@ -68,17 +78,6 @@ class ModuleManager(object):
for module in modules:
self.insert_module(module)
- def insert_module_group(self, module_group, path_prefix):
- """Ensure all modules in a group are inserted into the system.
-
- :param module_group: A name of configuration item containing a list
- of module names
- :param path_prefix: A name of directory which contains given
- group of modules
- """
- for (path_suffix, module) in module_group:
- self.insert_module(os.path.join(path_prefix, path_suffix, '%s.ko' % module))
-
def remove_module(self, module):
"""Removes a single module.
@@ -143,3 +142,26 @@ class ModuleManager(object):
return line
return None
+
+ def get_module_dependecies(self, module):
+ """Return list of modules, which must be loaded before module itself
+
+ :param module: a name of kernel module
+ :returns: In case that module has any dependencies, then list of module
+ names will be returned. Otherwise it returns empty list, i.e. [].
+ """
+ deps = ''
+ try:
+ # get list of module dependencies from kernel
+ deps = subprocess.check_output('modinfo -F depends {}'.format(module),
+ shell=True).decode().rstrip('\n')
+ except subprocess.CalledProcessError:
+ # in case of error, show full module name...
+ self._logger.info('Unable to get list of dependencies for module \'%s\'.', module)
+ # ...and try to continue, in case the dependencies are already loaded
+
+ if len(deps):
+ return deps.split(',')
+ else:
+ return []
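A hedged usage sketch of the ModuleManager changes above; the .ko path is illustrative
and matches the OvsVanilla example used elsewhere in this patch:

    from tools.module_manager import ModuleManager

    mgr = ModuleManager()
    # dependencies reported by 'modinfo -F depends' are inserted first with
    # auto_remove=False, then the module itself is insmod'ed and queued for removal
    mgr.insert_module('/tmp/vsperf/src_vanilla/ovs/ovs/datapath/linux/openvswitch.ko')
    mgr.remove_modules()   # removes only the modules inserted with auto_remove=True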
diff --git a/tools/networkcard.py b/tools/networkcard.py
index 8d704fd5..945534be 100644
--- a/tools/networkcard.py
+++ b/tools/networkcard.py
@@ -249,7 +249,7 @@ def reinit_vfs(pf_pci_handle):
:param pf_pci_handle: PCI slot identifier of PF with domain part.
"""
- rte_pci_tool = glob.glob(os.path.join(settings.getValue('RTE_SDK'), 'tools', 'dpdk*bind.py'))[0]
+ rte_pci_tool = settings.getValue('TOOLS')['bind-tool']
for vf_nic in get_sriov_vfs_list(pf_pci_handle):
nic_driver = get_driver(vf_nic)
diff --git a/tools/pkt_gen/moongen/moongen.py b/tools/pkt_gen/moongen/moongen.py
index 6ae52f61..fe3aca52 100644
--- a/tools/pkt_gen/moongen/moongen.py
+++ b/tools/pkt_gen/moongen/moongen.py
@@ -308,55 +308,38 @@ class Moongen(ITrafficGenerator):
collected_results = Moongen.run_moongen_and_collect_results(self,
test_run=1)
- total_throughput_rx_fps = (
- float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS]))
-
- total_throughput_rx_mbps = (
- float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS]))
-
- total_throughput_rx_pct = (
- float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT]))
-
- total_throughput_tx_fps = (
- float(collected_results[ResultsConstants.TX_RATE_FPS]))
-
- total_throughput_tx_mbps = (
- float(collected_results[ResultsConstants.TX_RATE_MBPS]))
-
- total_throughput_tx_pct = (
- float(collected_results[ResultsConstants.TX_RATE_PERCENT]))
-
- total_min_latency_ns = 0
- total_max_latency_ns = 0
- total_avg_latency_ns = 0
-
results = OrderedDict()
+
results[ResultsConstants.THROUGHPUT_RX_FPS] = (
- '{:.6f}'.format(total_throughput_rx_fps))
+ '{:.6f}'.format(
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS])))
results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
- '{:.3f}'.format(total_throughput_rx_mbps))
+ '{:.3f}'.format(
+ float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS])))
results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
- '{:.3f}'.format(total_throughput_rx_pct))
+ '{:.3f}'.format(
+ float(
+ collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT])))
results[ResultsConstants.TX_RATE_FPS] = (
- '{:.6f}'.format(total_throughput_tx_fps))
+ '{:.3f}'.format(
+ float(collected_results[ResultsConstants.TX_RATE_FPS])))
results[ResultsConstants.TX_RATE_MBPS] = (
- '{:.3f}'.format(total_throughput_tx_mbps))
+ '{:.3f}'.format(
+ float(collected_results[ResultsConstants.TX_RATE_MBPS])))
results[ResultsConstants.TX_RATE_PERCENT] = (
- '{:.3f}'.format(total_throughput_tx_pct))
+ '{:.3f}'.format(
+ float(collected_results[ResultsConstants.TX_RATE_PERCENT])))
- results[ResultsConstants.MIN_LATENCY_NS] = (
- '{:.3f}'.format(total_min_latency_ns))
+ results[ResultsConstants.MIN_LATENCY_NS] = 0
- results[ResultsConstants.MAX_LATENCY_NS] = (
- '{:.3f}'.format(total_max_latency_ns))
+ results[ResultsConstants.MAX_LATENCY_NS] = 0
- results[ResultsConstants.AVG_LATENCY_NS] = (
- '{:.3f}'.format(total_avg_latency_ns))
+ results[ResultsConstants.AVG_LATENCY_NS] = 0
return results
@@ -572,70 +555,75 @@ class Moongen(ITrafficGenerator):
duration=duration,
acceptable_loss_pct=lossrate)
- total_throughput_rx_fps = 0
- total_throughput_rx_mbps = 0
- total_throughput_rx_pct = 0
- total_throughput_tx_fps = 0
- total_throughput_tx_mbps = 0
- total_throughput_tx_pct = 0
- total_min_latency_ns = 0
- total_max_latency_ns = 0
- total_avg_latency_ns = 0
+ # Initialize RFC 2544 throughput specific results
+ results = OrderedDict()
+ results[ResultsConstants.THROUGHPUT_RX_FPS] = 0
+ results[ResultsConstants.THROUGHPUT_RX_MBPS] = 0
+ results[ResultsConstants.THROUGHPUT_RX_PERCENT] = 0
+ results[ResultsConstants.TX_RATE_FPS] = 0
+ results[ResultsConstants.TX_RATE_MBPS] = 0
+ results[ResultsConstants.TX_RATE_PERCENT] = 0
+ results[ResultsConstants.MIN_LATENCY_NS] = 0
+ results[ResultsConstants.MAX_LATENCY_NS] = 0
+ results[ResultsConstants.AVG_LATENCY_NS] = 0
for test_run in range(1, tests+1):
collected_results = (
Moongen.run_moongen_and_collect_results(self, test_run=test_run))
- total_throughput_rx_fps += (
+ results[ResultsConstants.THROUGHPUT_RX_FPS] += (
float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS]))
- total_throughput_rx_mbps += (
+ results[ResultsConstants.THROUGHPUT_RX_MBPS] += (
float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS]))
- total_throughput_rx_pct += (
+ results[ResultsConstants.THROUGHPUT_RX_PERCENT] += (
float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT]))
- total_throughput_tx_fps += (
+ results[ResultsConstants.TX_RATE_FPS] += (
float(collected_results[ResultsConstants.TX_RATE_FPS]))
- total_throughput_tx_mbps += (
+ results[ResultsConstants.TX_RATE_MBPS] += (
float(collected_results[ResultsConstants.TX_RATE_MBPS]))
- total_throughput_tx_pct += (
+ results[ResultsConstants.TX_RATE_PERCENT] += (
float(collected_results[ResultsConstants.TX_RATE_PERCENT]))
- # Latency not supported now, leaving as placeholder
- total_min_latency_ns = 0
- total_max_latency_ns = 0
- total_avg_latency_ns = 0
-
- results = OrderedDict()
results[ResultsConstants.THROUGHPUT_RX_FPS] = (
- '{:.6f}'.format(total_throughput_rx_fps / tests))
+ '{:.6f}'.format(results[ResultsConstants.THROUGHPUT_RX_FPS] /
+ tests))
results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
- '{:.3f}'.format(total_throughput_rx_mbps / tests))
+ '{:.3f}'.format(results[ResultsConstants.THROUGHPUT_RX_MBPS] /
+ tests))
results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
- '{:.3f}'.format(total_throughput_rx_pct / tests))
+ '{:.3f}'.format(results[ResultsConstants.THROUGHPUT_RX_PERCENT] /
+ tests))
results[ResultsConstants.TX_RATE_FPS] = (
- '{:.6f}'.format(total_throughput_tx_fps / tests))
+ '{:.6f}'.format(results[ResultsConstants.TX_RATE_FPS] /
+ tests))
results[ResultsConstants.TX_RATE_MBPS] = (
- '{:.3f}'.format(total_throughput_tx_mbps / tests))
+ '{:.3f}'.format(results[ResultsConstants.TX_RATE_MBPS] /
+ tests))
results[ResultsConstants.TX_RATE_PERCENT] = (
- '{:.3f}'.format(total_throughput_tx_pct / tests))
+ '{:.3f}'.format(results[ResultsConstants.TX_RATE_PERCENT] /
+ tests))
results[ResultsConstants.MIN_LATENCY_NS] = (
- '{:.3f}'.format(total_min_latency_ns / tests))
+ '{:.3f}'.format(results[ResultsConstants.MIN_LATENCY_NS] /
+ tests))
results[ResultsConstants.MAX_LATENCY_NS] = (
- '{:.3f}'.format(total_max_latency_ns / tests))
+ '{:.3f}'.format(results[ResultsConstants.MAX_LATENCY_NS] /
+ tests))
results[ResultsConstants.AVG_LATENCY_NS] = (
- '{:.3f}'.format(total_avg_latency_ns / tests))
+ '{:.3f}'.format(results[ResultsConstants.AVG_LATENCY_NS] /
+ tests))
return results
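
The rewritten loop above accumulates per-run floats directly into the results dictionary and formats the averages afterwards. A standalone sketch of that accumulate-then-average pattern, with made-up key names and sample values:

    from collections import OrderedDict

    def average_runs(per_run_results, keys, tests):
        """Sum float values per key over all runs, then format the mean."""
        totals = OrderedDict((key, 0.0) for key in keys)
        for run in per_run_results:
            for key in keys:
                totals[key] += float(run[key])
        return OrderedDict((key, '{:.3f}'.format(totals[key] / tests))
                           for key in keys)

    runs = [{'throughput_rx_fps': 100.0}, {'throughput_rx_fps': 110.0}]
    print(average_runs(runs, ['throughput_rx_fps'], tests=2))
    # -> OrderedDict([('throughput_rx_fps', '105.000')])
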
@@ -671,6 +659,7 @@ class Moongen(ITrafficGenerator):
Back to Back Count (frames), Frame Loss (frames), Frame Loss (%)
:rtype: :class:`Back2BackResult`
"""
+ self._logger.info("In moongen send_rfc2544_back2back method")
self._params.clear()
self._params['traffic'] = self.traffic_defaults.copy()
@@ -683,6 +672,7 @@ class Moongen(ITrafficGenerator):
duration=duration,
acceptable_loss_pct=lossrate)
+ # Initialize RFC 2544 B2B specific results
results = OrderedDict()
results[ResultsConstants.B2B_RX_FPS] = 0
results[ResultsConstants.B2B_TX_FPS] = 0
diff --git a/tools/pkt_gen/testcenter/testcenter-rfc2889-rest.py b/tools/pkt_gen/testcenter/testcenter-rfc2889-rest.py
new file mode 100644
index 00000000..cfa425e8
--- /dev/null
+++ b/tools/pkt_gen/testcenter/testcenter-rfc2889-rest.py
@@ -0,0 +1,304 @@
+# Copyright 2016 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+@author Spirent Communications
+
+This test automates the RFC2889 tests using the Spirent
+TestCenter REST APIs. This test supports Python 3.4.
+
+'''
+import argparse
+import logging
+import os
+
+# Logger Configuration
+logger = logging.getLogger(__name__)
+
+
+def create_dir(path):
+ """Create the directory as specified in path """
+ if not os.path.exists(path):
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ logger.error("Failed to create directory %s: %s", path, str(e))
+ raise
+
+
+def write_query_results_to_csv(results_path, csv_results_file_prefix,
+ query_results):
+ """ Write the results of the query to the CSV """
+ create_dir(results_path)
+ filec = os.path.join(results_path, csv_results_file_prefix + ".csv")
+ with open(filec, "wb") as f:
+ f.write(query_results["Columns"].replace(" ", ",") + "\n")
+ for row in (query_results["Output"].replace("} {", ",").
+ replace("{", "").replace("}", "").split(",")):
+ f.write(row.replace(" ", ",") + "\n")
+
+
+def positive_int(value):
+ """ Positive Integer type for Arguments """
+ ivalue = int(value)
+ if ivalue <= 0:
+ raise argparse.ArgumentTypeError(
+ "%s is an invalid positive int value" % value)
+ return ivalue
+
+
+def percent_float(value):
+ """ Floating type for Arguments """
+ pvalue = float(value)
+ if pvalue < 0.0 or pvalue > 100.0:
+ raise argparse.ArgumentTypeError(
+ "%s not in range [0.0, 100.0]" % pvalue)
+ return pvalue
+
+
+def main():
+ """ Read the arguments, Invoke Test and Return the results"""
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ required_named = parser.add_argument_group("required named arguments")
+ required_named.add_argument("--lab_server_addr",
+ required=True,
+ help=("The IP address of the "
+ "Spirent Lab Server"),
+ dest="lab_server_addr")
+ required_named.add_argument("--license_server_addr",
+ required=True,
+ help=("The IP address of the Spirent "
+ "License Server"),
+ dest="license_server_addr")
+ required_named.add_argument("--location_list",
+ required=True,
+ help=("A comma-delimited list of test port "
+ "locations"),
+ dest="location_list")
+ # Optional parameters
+ optional_named = parser.add_argument_group("optional named arguments")
+ optional_named.add_argument("--metric",
+ required=False,
+ help=("One among - Forwarding,\
+ Address Caching and Congestion"),
+ choices=["forwarding", "caching",
+ "congestion"],
+ default="forwarding",
+ dest="metric")
+ optional_named.add_argument("--test_session_name",
+ required=False,
+ default="Rfc2889Ses",
+ help=("The friendly name to identify "
+ "the Spirent Lab Server test session"),
+ dest="test_session_name")
+
+ optional_named.add_argument("--test_user_name",
+ required=False,
+ default="Rfc2889Usr",
+ help=("The friendly name to identify the "
+ "Spirent Lab Server test user"),
+ dest="test_user_name")
+ optional_named.add_argument("--results_dir",
+ required=False,
+ default="./Results",
+ help="The directory to copy results to",
+ dest="results_dir")
+ optional_named.add_argument("--csv_results_file_prefix",
+ required=False,
+ default="Rfc2889MaxFor",
+ help="The prefix for the CSV results files",
+ dest="csv_results_file_prefix")
+ optional_named.add_argument("--num_trials",
+ type=positive_int,
+ required=False,
+ default=1,
+ help=("The number of trials to execute during "
+ "the test"),
+ dest="num_trials")
+ optional_named.add_argument("--trial_duration_sec",
+ type=positive_int,
+ required=False,
+ default=60,
+ help=("The duration of each trial executed "
+ "during the test"),
+ dest="trial_duration_sec")
+ optional_named.add_argument("--traffic_pattern",
+ required=False,
+ choices=["BACKBONE", "MESH", "PAIR"],
+ default="MESH",
+ help="The traffic pattern between endpoints",
+ dest="traffic_pattern")
+ optional_named.add_argument("--frame_size_list",
+ type=lambda s: [int(item)
+ for item in s.split(',')],
+ required=False,
+ default=[256],
+ help="A comma-delimited list of frame sizes",
+ dest="frame_size_list")
+ parser.add_argument("-v",
+ "--verbose",
+ required=False,
+                        default=False,
+ help="More output during operation when present",
+ action="store_true",
+ dest="verbose")
+ args = parser.parse_args()
+
+ if args.verbose:
+ logger.debug("Creating results directory")
+ create_dir(args.results_dir)
+ locationList = [str(item) for item in args.location_list.split(',')]
+
+ session_name = args.test_session_name
+ user_name = args.test_user_name
+
+ try:
+ # Load Spirent REST Library
+ from stcrestclient import stchttp
+
+ stc = stchttp.StcHttp(args.lab_server_addr)
+ session_id = stc.new_session(user_name, session_name)
+ stc.join_session(session_id)
+ except RuntimeError as e:
+ logger.error(e)
+ raise
+
+ # Retrieve and display the server information
+ if args.verbose:
+ logger.debug("SpirentTestCenter system version: %s",
+ stc.get("system1", "version"))
+
+ try:
+ if args.verbose:
+ logger.debug("Bring up license server")
+ license_mgr = stc.get("system1", "children-licenseservermanager")
+ if args.verbose:
+ logger.debug("license_mgr = %s", license_mgr)
+ stc.create("LicenseServer", under=license_mgr, attributes={
+ "server": args.license_server_addr})
+
+ # Create the root project object
+ if args.verbose:
+ logger.debug("Creating project ...")
+ project = stc.get("System1", "children-Project")
+
+ # Create ports
+ if args.verbose:
+ logger.debug("Creating ports ...")
+
+ for location in locationList:
+ stc.perform("CreateAndReservePorts", params={"locationList":
+ location,
+ "RevokeOwner":
+ "FALSE"})
+
+ port_list_get = stc.get("System1.project", "children-port")
+
+ if args.verbose:
+ logger.debug("Adding Host Gen PArams")
+ gen_params = stc.create("EmulatedDeviceGenParams",
+ under=project,
+ attributes={"Port": port_list_get})
+
+ # Create the DeviceGenEthIIIfParams object
+ stc.create("DeviceGenEthIIIfParams",
+ under=gen_params)
+ # Configuring Ipv4 interfaces
+ stc.create("DeviceGenIpv4IfParams",
+ under=gen_params)
+
+ stc.perform("DeviceGenConfigExpand",
+ params={"DeleteExisting": "No",
+ "GenParams": gen_params})
+
+ if args.verbose:
+ logger.debug("Set up the RFC2889 Forwarding test...")
+ stc.perform("Rfc2889SetupMaxForwardingRateTestCommand",
+ params={"Duration": args.trial_duration_sec,
+ "FrameSizeList": args.frame_size_list,
+ "NumOfTrials": args.num_trials,
+ "TrafficPattern": args.traffic_pattern})
+
+ # Save the configuration
+ stc.perform("SaveToTcc", params={"Filename": "2889.tcc"})
+ # Connect to the hardware...
+ stc.perform("AttachPorts", params={"portList": stc.get(
+ "system1.project", "children-port"), "autoConnect": "TRUE"})
+ # Apply configuration.
+ if args.verbose:
+ logger.debug("Apply configuration...")
+ stc.apply()
+
+ if args.verbose:
+ logger.debug("Starting the sequencer...")
+ stc.perform("SequencerStart")
+
+ # Wait for sequencer to finish
+ logger.info(
+ "Starting test... Please wait for the test to complete...")
+ stc.wait_until_complete()
+ logger.info("The test has completed... Saving results...")
+
+ # Determine what the results database filename is...
+ lab_server_resultsdb = stc.get(
+ "system1.project.TestResultSetting", "CurrentResultFileName")
+
+ if args.verbose:
+ logger.debug("The lab server results database is %s",
+ lab_server_resultsdb)
+
+ stc.perform("CSSynchronizeFiles",
+ params={"DefaultDownloadDir": args.results_dir})
+
+ resultsdb = args.results_dir + \
+ lab_server_resultsdb.split("/Results")[1]
+
+ logger.info(
+ "The local summary DB file has been saved to %s", resultsdb)
+
+ resultsdict = (
+ stc.perform("QueryResult",
+ params={
+ "DatabaseConnectionString":
+ resultsdb,
+ "ResultPath":
+ ("RFC2889MaxForwardingRateTestResultDetailed"
+ "SummaryView")}))
+ if args.verbose:
+ logger.debug("resultsdict[\"Columns\"]: %s",
+ resultsdict["Columns"])
+ logger.debug("resultsdict[\"Output\"]: %s", resultsdict["Output"])
+ logger.debug("Result paths: %s",
+ stc.perform("GetTestResultSettingPaths"))
+
+ # Write results to csv
+ if args.verbose:
+ logger.debug("Writing CSV file to results directory %s",
+ args.results_dir)
+ write_query_results_to_csv(
+ args.results_dir, args.csv_results_file_prefix, resultsdict)
+
+ except RuntimeError as err:
+ logger.error(err)
+
+ if args.verbose:
+ logger.debug("Destroy session on lab server")
+
+ stc.end_session()
+
+ logger.info("Test complete!")
+
+if __name__ == "__main__":
+ main()
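
The script above follows a fixed session lifecycle against the Spirent Lab Server: open a session, configure the test, run the sequencer, download results, end the session. A minimal sketch of just the session handling, using only the stcrestclient calls that already appear in the script; the server address and names are placeholders:

    from stcrestclient import stchttp

    def minimal_session(lab_server_addr='10.0.0.1'):
        stc = stchttp.StcHttp(lab_server_addr)
        session_id = stc.new_session('Rfc2889Usr', 'Rfc2889Ses')
        stc.join_session(session_id)
        try:
            # Query the TestCenter version, then tear the session down.
            return stc.get('system1', 'version')
        finally:
            stc.end_session()
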
diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py
index a6cea2e1..701d451c 100644
--- a/tools/pkt_gen/testcenter/testcenter.py
+++ b/tools/pkt_gen/testcenter/testcenter.py
@@ -115,6 +115,25 @@ def get_rfc2544_custom_settings(framesize, custom_tr, tests):
return args
+def get_rfc2889_settings(framesize, tests, duration):
+    """ Return the command line arguments for the RFC2889 forwarding test """
+    args = [settings.getValue("TRAFFICGEN_STC_PYTHON2_PATH"),
+ os.path.join(
+ settings.getValue("TRAFFICGEN_STC_TESTCENTER_PATH"),
+ settings.getValue(
+ "TRAFFICGEN_STC_RFC2889_TEST_FILE_NAME")),
+ "--lab_server_addr",
+ settings.getValue("TRAFFICGEN_STC_LAB_SERVER_ADDR"),
+ "--license_server_addr",
+ settings.getValue("TRAFFICGEN_STC_LICENSE_SERVER_ADDR"),
+ "--location_list",
+ settings.getValue("TRAFFICGEN_STC_RFC2889_LOCATIONS"),
+ "--frame_size_list",
+ str(framesize),
+ "--num_trials",
+ str(tests)]
+ return args
+
+
class TestCenter(trafficgen.ITrafficGenerator):
"""
Spirent TestCenter
@@ -139,6 +158,48 @@ class TestCenter(trafficgen.ITrafficGenerator):
"""
return None
+ def send_rfc2889_congestion(self, traffic=None, tests=1, duration=20):
+ """
+ Do nothing.
+ """
+ return None
+
+ def send_rfc2889_caching(self, traffic=None, tests=1, duration=20):
+ """
+ Do nothing.
+ """
+ return None
+
+ def get_rfc2889_results(self, filename):
+ """
+ Reads the CSV file and return the results
+ """
+ result = {}
+ with open(filename, "r") as csvfile:
+ csvreader = csv.DictReader(csvfile)
+ for row in csvreader:
+ self._logger.info("Row: %s", row)
+ duration = int((float(row["TxSignatureFrameCount"])) /
+ (float(row["OfferedLoad(fps)"])))
+ tx_fps = (float(row["OfferedLoad(fps)"]))
+ rx_fps = float((float(row["RxFrameCount"])) /
+ float(duration))
+ tx_mbps = ((tx_fps * float(row["FrameSize"])) /
+ (1000000.0))
+ rx_mbps = ((rx_fps * float(row["FrameSize"])) /
+ (1000000.0))
+ result[ResultsConstants.TX_RATE_FPS] = tx_fps
+ result[ResultsConstants.THROUGHPUT_RX_FPS] = rx_fps
+ result[ResultsConstants.TX_RATE_MBPS] = tx_mbps
+ result[ResultsConstants.THROUGHPUT_RX_MBPS] = rx_mbps
+ result[ResultsConstants.TX_RATE_PERCENT] = float(
+ row["OfferedLoad(%)"])
+ result[ResultsConstants.FRAME_LOSS_PERCENT] = float(
+ row["PercentFrameLoss(%)"])
+ result[ResultsConstants.FORWARDING_RATE_FPS] = float(
+ row["ForwardingRate(fps)"])
+ return result
+
def get_rfc2544_results(self, filename):
"""
Reads the CSV file and return the results
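
The new get_rfc2889_results() above derives the trial duration from the transmitted frame count and offered load, then converts frame rates to Mbps-style values using the frame size, mirroring the formulas in the diff. A worked example of the same arithmetic on a made-up CSV row:

    # Made-up row using the column names referenced above.
    row = {'TxSignatureFrameCount': '600000', 'OfferedLoad(fps)': '10000',
           'RxFrameCount': '600000', 'FrameSize': '256'}

    duration = int(float(row['TxSignatureFrameCount']) /
                   float(row['OfferedLoad(fps)']))            # 60 seconds
    tx_fps = float(row['OfferedLoad(fps)'])                    # 10000.0
    rx_fps = float(row['RxFrameCount']) / float(duration)      # 10000.0
    tx_mbps = tx_fps * float(row['FrameSize']) / 1000000.0     # 2.56
    rx_mbps = rx_fps * float(row['FrameSize']) / 1000000.0     # 2.56
    print(duration, tx_fps, rx_fps, tx_mbps, rx_mbps)
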
@@ -211,6 +272,31 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2544_results(filec)
+ def send_rfc2889_forwarding(self, traffic=None, tests=1, duration=20):
+ """
+        Send traffic per RFC2889 forwarding test specifications.
+ """
+ framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE")
+ if traffic and 'l2' in traffic:
+ if 'framesize' in traffic['l2']:
+ framesize = traffic['l2']['framesize']
+ args = get_rfc2889_settings(framesize, tests, duration)
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ args.append("--verbose")
+ verbose = True
+ self._logger.debug("Arguments used to call test: %s", args)
+ subprocess.check_call(args)
+
+ filec = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
+ settings.getValue(
+ "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") +
+ ".csv")
+
+ if verbose:
+ self._logger.debug("file: %s", filec)
+
+ return self.get_rfc2889_results(filec)
+
def send_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
lossrate=0.0):
"""
diff --git a/tools/report/report.py b/tools/report/report.py
index 7d991011..1115f052 100644
--- a/tools/report/report.py
+++ b/tools/report/report.py
@@ -20,8 +20,8 @@ Generate reports in format defined by X.
import sys
import os
-import jinja2
import logging
+import jinja2
from core.results.results_constants import ResultsConstants
from conf import settings as S
@@ -64,7 +64,8 @@ def _get_env(result):
if result[ResultsConstants.DEPLOYMENT].count('v'):
env.update({'vnf': systeminfo.get_version(S.getValue('VNF')),
'guest_image': S.getValue('GUEST_IMAGE'),
- 'loopback_app': list(map(systeminfo.get_version, S.getValue('GUEST_LOOPBACK'))),
+ 'loopback_app': list(map(systeminfo.get_loopback_version,
+ S.getValue('GUEST_LOOPBACK'))),
})
return env
diff --git a/tools/systeminfo.py b/tools/systeminfo.py
index 50dc17e0..fb1616d4 100644
--- a/tools/systeminfo.py
+++ b/tools/systeminfo.py
@@ -19,6 +19,7 @@ import os
import platform
import subprocess
import locale
+import re
from conf import settings as S
@@ -176,6 +177,40 @@ def pid_isalive(pid):
"""
return os.path.isdir('/proc/' + str(pid))
+def get_bin_version(binary, regex):
+ """ get version of given binary selected by given regex
+
+ :returns: version string or None
+ """
+ try:
+ output = subprocess.check_output(binary, shell=True).decode().rstrip('\n')
+ except subprocess.CalledProcessError:
+ return None
+
+ versions = re.findall(regex, output)
+ if len(versions):
+ return versions[0]
+ else:
+ return None
+
+def get_git_tag(path):
+ """ get tag of recent commit from repository located at 'path'
+
+ :returns: git tag in form of string with commit hash or None if there
+ isn't any git repository at given path
+ """
+ try:
+ if os.path.isdir(path):
+ return subprocess.check_output('cd {}; git rev-parse HEAD'.format(path), shell=True,
+ stderr=subprocess.DEVNULL).decode().rstrip('\n')
+ elif os.path.isfile(path):
+ return subprocess.check_output('cd $(dirname {}); git log -1 --pretty="%H" {}'.format(path, path),
+ shell=True, stderr=subprocess.DEVNULL).decode().rstrip('\n')
+ else:
+ return None
+ except subprocess.CalledProcessError:
+ return None
+
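
get_bin_version() runs a binary, captures its version banner and pulls the version number out with a regular expression; get_git_tag() complements it with a commit hash when a source tree is available. A hedged usage sketch, assuming the helpers are importable from tools.systeminfo and ovs-vswitchd is reachable on PATH; the regex is the 'ovs' entry used by get_version() below:

    from tools.systeminfo import get_bin_version

    version = get_bin_version('ovs-vswitchd --version',
                              r'Open vSwitch\) ([0-9.]+)')
    print(version if version else 'ovs-vswitchd not available')
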
# This function uses long switch per purpose, so let us suppress pylint warning too-many-branches
# pylint: disable=R0912
def get_version(app_name):
@@ -186,45 +221,38 @@ def get_version(app_name):
"""
app_version_file = {
- 'ovs' : os.path.join(S.getValue('OVS_DIR'), 'include/openvswitch/version.h'),
- 'dpdk' : os.path.join(S.getValue('RTE_SDK'), 'lib/librte_eal/common/include/rte_version.h'),
- 'qemu' : os.path.join(S.getValue('QEMU_DIR'), 'VERSION'),
- 'l2fwd' : os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/l2fwd.c'),
+ 'ovs' : r'Open vSwitch\) ([0-9.]+)',
+ 'testpmd' : r'RTE Version: \'\S+ ([0-9.]+)',
+ 'qemu' : r'QEMU emulator version ([0-9.]+)',
+ 'loopback_l2fwd' : os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/l2fwd.c'),
+ 'loopback_testpmd' : os.path.join(S.getValue('TOOLS')['dpdk_src'],
+ 'lib/librte_eal/common/include/rte_version.h'),
'ixnet' : os.path.join(S.getValue('TRAFFICGEN_IXNET_LIB_PATH'), 'pkgIndex.tcl'),
}
- def get_git_tag(path):
- """ get tag of recent commit from repository located at 'path'
-
- :returns: git tag in form of string with commit hash or None if there
- isn't any git repository at given path
- """
- try:
- if os.path.isdir(path):
- return subprocess.check_output('cd {}; git rev-parse HEAD'.format(path), shell=True,
- stderr=subprocess.DEVNULL).decode().rstrip('\n')
- elif os.path.isfile(path):
- return subprocess.check_output('cd $(dirname {}); git log -1 --pretty="%H" {}'.format(path, path),
- shell=True, stderr=subprocess.DEVNULL).decode().rstrip('\n')
- else:
- return None
- except subprocess.CalledProcessError:
- return None
-
app_version = None
app_git_tag = None
if app_name.lower().startswith('ovs'):
- app_version = match_line(app_version_file['ovs'], '#define OVS_PACKAGE_VERSION')
- if app_version:
- app_version = app_version.split('"')[1]
- app_git_tag = get_git_tag(S.getValue('OVS_DIR'))
+ app_version = get_bin_version('{} --version'.format(S.getValue('TOOLS')['ovs-vswitchd']),
+ app_version_file['ovs'])
+ if 'vswitch_src' in S.getValue('TOOLS'):
+ app_git_tag = get_git_tag(S.getValue('TOOLS')['vswitch_src'])
elif app_name.lower() in ['dpdk', 'testpmd']:
+ app_version = get_bin_version('{} -v -h'.format(S.getValue('TOOLS')['testpmd']),
+ app_version_file['testpmd'])
+        # we have to consult PATHS settings to be sure that dpdk/testpmd
+        # were built from the sources
+ if S.getValue('PATHS')[app_name.lower()]['type'] == 'src':
+ app_git_tag = get_git_tag(S.getValue('TOOLS')['dpdk_src'])
+ elif app_name.lower() == 'loopback_testpmd':
+ # testpmd inside the guest is compiled from downloaded sources
+        # stored in the TOOLS['dpdk_src'] directory
tmp_ver = ['', '', '']
dpdk_16 = False
- with open(app_version_file['dpdk']) as file_:
+ with open(app_version_file['loopback_testpmd']) as file_:
for line in file_:
if not line.strip():
continue
@@ -263,10 +291,12 @@ def get_version(app_name):
if len(tmp_ver[0]):
app_version = '.'.join(tmp_ver)
- app_git_tag = get_git_tag(S.getValue('RTE_SDK'))
+ app_git_tag = get_git_tag(S.getValue('TOOLS')['dpdk_src'])
elif app_name.lower().startswith('qemu'):
- app_version = match_line(app_version_file['qemu'], '')
- app_git_tag = get_git_tag(S.getValue('QEMU_DIR'))
+ app_version = get_bin_version('{} --version'.format(S.getValue('TOOLS')['qemu-system']),
+ app_version_file['qemu'])
+ if 'qemu_src' in S.getValue('TOOLS'):
+ app_git_tag = get_git_tag(S.getValue('TOOLS')['qemu_src'])
elif app_name.lower() == 'ixnet':
app_version = match_line(app_version_file['ixnet'], 'package provide IxTclNetwork')
if app_version:
@@ -283,13 +313,23 @@ def get_version(app_name):
elif app_name.lower() == 'vswitchperf':
app_git_tag = get_git_tag(S.getValue('ROOT_DIR'))
elif app_name.lower() == 'l2fwd':
- app_version = match_line(app_version_file[app_name], 'MODULE_VERSION')
+ app_version = match_line(app_version_file['loopback_l2fwd'], 'MODULE_VERSION')
if app_version:
app_version = app_version.split('"')[1]
- app_git_tag = get_git_tag(app_version_file[app_name])
+ app_git_tag = get_git_tag(app_version_file['loopback_l2fwd'])
elif app_name.lower() in ['linux_bridge', 'buildin']:
# without login into running VM, it is not possible to check bridge_utils version
app_version = 'NA'
app_git_tag = 'NA'
return {'name' : app_name, 'version' : app_version, 'git_tag' : app_git_tag}
+
+def get_loopback_version(loopback_app_name):
+ """ Get version of given guest loopback application and its git tag
+
+    :returns: dictionary {'name' : app_name, 'version' : app_version, 'git_tag' : app_git_tag}; in case that
+        version or git tag is not known or not applicable, None is returned for that value
+ """
+ version = get_version("loopback_{}".format(loopback_app_name))
+ version['name'] = loopback_app_name
+ return version
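
The report code now calls get_loopback_version() for every entry in GUEST_LOOPBACK; the helper prefixes the name with 'loopback_' for the lookup and restores the plain name in the returned dictionary. A short usage sketch, assuming 'testpmd' is one of the configured guest loopback applications:

    from tools.systeminfo import get_loopback_version

    info = get_loopback_version('testpmd')   # internally queries 'loopback_testpmd'
    print(info['name'], info['version'], info['git_tag'])
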
diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py
index c9569ae6..01c16a0f 100644
--- a/vnfs/qemu/qemu.py
+++ b/vnfs/qemu/qemu.py
@@ -85,7 +85,7 @@ class IVnfQemu(IVnf):
vnc = ':%d' % self._number
# don't use taskset to affinize main qemu process; It causes hangup
# of 2nd VM in case of DPDK. It also slows down VM responsivnes.
- self._cmd = ['sudo', '-E', S.getValue('QEMU_BIN'),
+ self._cmd = ['sudo', '-E', S.getValue('TOOLS')['qemu-system'],
'-m', S.getValue('GUEST_MEMORY')[self._number],
'-smp', str(S.getValue('GUEST_SMP')[self._number]),
'-cpu', 'host,migratable=off',
diff --git a/vnfs/qemu/qemu_dpdk_vhost_user.py b/vnfs/qemu/qemu_dpdk_vhost_user.py
index fc46aba1..51c10242 100644
--- a/vnfs/qemu/qemu_dpdk_vhost_user.py
+++ b/vnfs/qemu/qemu_dpdk_vhost_user.py
@@ -39,6 +39,12 @@ class QemuDpdkVhostUser(IVnfQemu):
else:
queue_str, mq_vector_str = '', ''
+ # Guest merge buffer setting
+ if S.getValue('GUEST_NIC_MERGE_BUFFERS_DISABLE')[self._number]:
+ merge_buff = 'mrg_rxbuf=off,'
+ else:
+ merge_buff = ''
+
# calculate index of first interface, i.e. check how many
# interfaces has been created for previous VMs, where 1st NIC
# of 1st VM has index 0
@@ -52,7 +58,7 @@ class QemuDpdkVhostUser(IVnfQemu):
self._cmd += ['-chardev',
'socket,id=char' + ifi +
- ',path=' + S.getValue('OVS_VAR_DIR') +
+ ',path=' + S.getValue('TOOLS')['ovs_var_tmp'] +
'dpdkvhostuser' + ifi,
'-netdev',
'type=vhost-user,id=' + net +
@@ -60,7 +66,8 @@ class QemuDpdkVhostUser(IVnfQemu):
'-device',
'virtio-net-pci,mac=' +
self._nics[nic]['mac'] +
- ',netdev=' + net + ',csum=off,gso=off,' +
+ ',netdev=' + net + ',csum=off,' + merge_buff +
+ 'gso=off,' +
'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
mq_vector_str,
]
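
With GUEST_NIC_MERGE_BUFFERS_DISABLE set for a VM, the hunk above injects 'mrg_rxbuf=off,' into the virtio-net-pci option string. A small sketch of the resulting -device argument; the MAC address and netdev id are made up:

    def build_virtio_device(mac, net, merge_buffers_disabled):
        """Assemble the virtio-net-pci option string used above."""
        merge_buff = 'mrg_rxbuf=off,' if merge_buffers_disabled else ''
        return ('virtio-net-pci,mac=' + mac + ',netdev=' + net +
                ',csum=off,' + merge_buff + 'gso=off,' +
                'guest_tso4=off,guest_tso6=off,guest_ecn=off')

    print(build_virtio_device('00:00:00:00:00:01', 'net1', True))
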
diff --git a/vnfs/qemu/qemu_pci_passthrough.py b/vnfs/qemu/qemu_pci_passthrough.py
index 951d6086..f32f33d3 100644
--- a/vnfs/qemu/qemu_pci_passthrough.py
+++ b/vnfs/qemu/qemu_pci_passthrough.py
@@ -19,7 +19,6 @@
import logging
import subprocess
import os
-import glob
from conf import settings as S
from vnfs.qemu.qemu import IVnfQemu
@@ -27,7 +26,6 @@ from tools import tasks
from tools.module_manager import ModuleManager
_MODULE_MANAGER = ModuleManager()
-_RTE_PCI_TOOL = glob.glob(os.path.join(S.getValue('RTE_SDK'), 'tools', 'dpdk*bind.py'))[0]
class QemuPciPassthrough(IVnfQemu):
"""
@@ -39,7 +37,7 @@ class QemuPciPassthrough(IVnfQemu):
"""
super(QemuPciPassthrough, self).__init__()
self._logger = logging.getLogger(__name__)
- self._nics = S.getValue('NICS')
+ self._host_nics = S.getValue('NICS')
# in case of SRIOV and PCI passthrough we must ensure, that MAC addresses are swapped
if S.getValue('SRIOV_ENABLED') and not self._testpmd_fwd_mode.startswith('mac'):
@@ -47,7 +45,7 @@ class QemuPciPassthrough(IVnfQemu):
self._testpmd_fwd_mode, 'mac')
self._testpmd_fwd_mode = 'mac'
- for nic in self._nics:
+ for nic in self._host_nics:
self._cmd += ['-device', 'vfio-pci,host=' + nic['pci']]
def start(self):
@@ -59,12 +57,12 @@ class QemuPciPassthrough(IVnfQemu):
# bind every interface to vfio-pci driver
try:
- nics_list = list(tmp_nic['pci'] for tmp_nic in self._nics)
- tasks.run_task(['sudo', _RTE_PCI_TOOL, '--bind=vfio-pci'] + nics_list,
+ nics_list = list(tmp_nic['pci'] for tmp_nic in self._host_nics)
+ tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '--bind=vfio-pci'] + nics_list,
self._logger, 'Binding NICs %s...' % nics_list, True)
except subprocess.CalledProcessError:
- self._logger.error('Unable to bind NICs %s', self._nics)
+ self._logger.error('Unable to bind NICs %s', self._host_nics)
super(QemuPciPassthrough, self).start()
@@ -75,10 +73,10 @@ class QemuPciPassthrough(IVnfQemu):
super(QemuPciPassthrough, self).stop()
# bind original driver to every interface
- for nic in self._nics:
+ for nic in self._host_nics:
if nic['driver']:
try:
- tasks.run_task(['sudo', _RTE_PCI_TOOL, '--bind=' + nic['driver'], nic['pci']],
+ tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '--bind=' + nic['driver'], nic['pci']],
self._logger, 'Binding NIC %s...' % nic['pci'], True)
except subprocess.CalledProcessError:
diff --git a/vsperf b/vsperf
index e942b84d..a12560da 100755
--- a/vsperf
+++ b/vsperf
@@ -550,7 +550,7 @@ def main():
if args['vnf'] not in vnfs:
_LOGGER.error('there are no vnfs matching \'%s\' found in'
' \'%s\'. exiting...', args['vnf'],
- settings.getValue('vnf_dir'))
+ settings.getValue('VNF_DIR'))
sys.exit(1)
if args['exact_test_name'] and args['tests']:
diff --git a/vswitches/ovs.py b/vswitches/ovs.py
index d2814b6a..886a98e0 100644
--- a/vswitches/ovs.py
+++ b/vswitches/ovs.py
@@ -17,19 +17,17 @@
import logging
import os
-import pexpect
import re
import time
+import datetime
+import random
+import pexpect
from conf import settings
from src.ovs import OFBridge, flow_key, flow_match
from tools import tasks
from vswitches.vswitch import IVSwitch
-_OVS_VAR_DIR = settings.getValue('OVS_VAR_DIR')
-_OVS_ETC_DIR = settings.getValue('OVS_ETC_DIR')
-
-
class IVSwitchOvs(IVSwitch, tasks.Process):
"""Open vSwitch base class implementation
@@ -37,23 +35,26 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
implementation. For generic information of the nature of the methods,
see the interface.
"""
- _logfile = os.path.join(settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_VSWITCHD'))
- _ovsdb_pidfile_path = os.path.join(_OVS_VAR_DIR, "ovsdb-server.pid")
- _vswitchd_pidfile_path = os.path.join(_OVS_VAR_DIR, "ovs-vswitchd.pid")
_proc_name = 'ovs-vswitchd'
def __init__(self):
"""See IVswitch for general description
"""
+ self._logfile = os.path.join(settings.getValue('LOG_DIR'),
+ settings.getValue('LOG_FILE_VSWITCHD'))
+ self._ovsdb_pidfile_path = os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'],
+ "ovsdb-server.pid")
+ self._vswitchd_pidfile_path = os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'],
+ "{}.pid".format(self._proc_name))
self._logger = logging.getLogger(__name__)
- self._expect = r'bridge|INFO|ovs-vswitchd'
+ self._expect = r'bridge|INFO|{}'.format(self._proc_name)
self._timeout = 30
self._bridges = {}
self._vswitchd_args = ['--pidfile=' + self._vswitchd_pidfile_path,
'--overwrite-pidfile', '--log-file=' + self._logfile]
self._cmd = []
- self._cmd_template = ['sudo', '-E', os.path.join(settings.getValue('OVS_DIR'),
- 'vswitchd', 'ovs-vswitchd')]
+ self._cmd_template = ['sudo', '-E', settings.getValue('TOOLS')['ovs-vswitchd']]
+ self._stamp = None
def start(self):
""" Start ``ovsdb-server`` and ``ovs-vswitchd`` instance.
@@ -62,6 +63,10 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""
self._logger.info("Starting vswitchd...")
+ # insert kernel modules if required
+ if 'vswitch_modules' in settings.getValue('TOOLS'):
+ self._module_manager.insert_modules(settings.getValue('TOOLS')['vswitch_modules'])
+
self._cmd = self._cmd_template + self._vswitchd_args
# DB must be started before vswitchd
@@ -123,7 +128,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
raise NotImplementedError
def add_veth_pair_port(self, switch_name=None, remote_switch_name=None,
- local_opts=None, remote_opts=None):
+ local_opts=None, remote_opts=None):
"""Creates veth-pair port between 'switch_name' and 'remote_switch_name'
"""
@@ -140,8 +145,8 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
'type=patch',
'options:peer=' + remote_port_name]
remote_params = ['--', 'set', 'Interface', remote_port_name,
- 'type=patch',
- 'options:peer=' + local_port_name]
+ 'type=patch',
+ 'options:peer=' + local_port_name]
if local_opts is not None:
local_params = local_params + local_opts
@@ -296,14 +301,20 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""
self._logger.info('Resetting system after last run...')
- tasks.run_task(['sudo', 'rm', '-rf', _OVS_VAR_DIR], self._logger)
- tasks.run_task(['sudo', 'mkdir', '-p', _OVS_VAR_DIR], self._logger)
- tasks.run_task(['sudo', 'rm', '-rf', _OVS_ETC_DIR], self._logger)
- tasks.run_task(['sudo', 'mkdir', '-p', _OVS_ETC_DIR], self._logger)
-
- tasks.run_task(['sudo', 'rm', '-f',
- os.path.join(_OVS_ETC_DIR, 'conf.db')],
- self._logger)
+        # create a backup of ovs_var_tmp and ovs_etc_tmp; this is
+        # essential for OVS installed from binary packages.
+ self._stamp = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(),
+ random.randrange(1000, 9999))
+ for tmp_dir in ['ovs_var_tmp', 'ovs_etc_tmp']:
+ if os.path.exists(settings.getValue('TOOLS')[tmp_dir]):
+ orig_dir = os.path.normpath(settings.getValue('TOOLS')[tmp_dir])
+ self._logger.info('Creating backup of %s directory...', tmp_dir)
+ tasks.run_task(['sudo', 'mv', orig_dir, '{}.{}'.format(orig_dir, self._stamp)],
+ self._logger)
+
+ # create fresh tmp dirs
+ tasks.run_task(['sudo', 'mkdir', '-p', settings.getValue('TOOLS')['ovs_var_tmp']], self._logger)
+ tasks.run_task(['sudo', 'mkdir', '-p', settings.getValue('TOOLS')['ovs_etc_tmp']], self._logger)
self._logger.info('System reset after last run.')
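
Instead of wiping the OVS runtime directories, the reset above moves any existing ovs_var_tmp/ovs_etc_tmp aside under a timestamped, randomized suffix so stop() can restore them later, which matters when OVS comes from distribution packages. A sketch of how the backup name is formed; the directory is a placeholder for the real TOOLS entries:

    import datetime
    import random

    stamp = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(),
                                         random.randrange(1000, 9999))
    orig_dir = '/usr/local/var/run/openvswitch'
    print('{}.{}'.format(orig_dir, stamp))
    # e.g. /usr/local/var/run/openvswitch.20160301_101500_4242
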
@@ -312,21 +323,18 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
:returns: None
"""
- ovsdb_tool_bin = os.path.join(
- settings.getValue('OVS_DIR'), 'ovsdb', 'ovsdb-tool')
+ ovsdb_tool_bin = settings.getValue('TOOLS')['ovsdb-tool']
tasks.run_task(['sudo', ovsdb_tool_bin, 'create',
- os.path.join(_OVS_ETC_DIR, 'conf.db'),
- os.path.join(settings.getValue('OVS_DIR'), 'vswitchd',
- 'vswitch.ovsschema')],
+ os.path.join(settings.getValue('TOOLS')['ovs_etc_tmp'], 'conf.db'),
+ settings.getValue('TOOLS')['ovsschema']],
self._logger,
'Creating ovsdb configuration database...')
- ovsdb_server_bin = os.path.join(
- settings.getValue('OVS_DIR'), 'ovsdb', 'ovsdb-server')
+ ovsdb_server_bin = settings.getValue('TOOLS')['ovsdb-server']
tasks.run_background_task(
['sudo', ovsdb_server_bin,
- '--remote=punix:%s' % os.path.join(_OVS_VAR_DIR, 'db.sock'),
+ '--remote=punix:%s' % os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'], 'db.sock'),
'--remote=db:Open_vSwitch,Open_vSwitch,manager_options',
'--pidfile=' + self._ovsdb_pidfile_path, '--overwrite-pidfile'],
self._logger,
@@ -346,13 +354,24 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
if ovsdb_pid:
tasks.terminate_task(ovsdb_pid, logger=self._logger)
+        # restore original content of ovs_var_tmp and ovs_etc_tmp; this is
+        # essential for OVS installed from binary packages.
+ if self._stamp:
+ for tmp_dir in ['ovs_var_tmp', 'ovs_etc_tmp']:
+ orig_dir = os.path.normpath(settings.getValue('TOOLS')[tmp_dir])
+ if os.path.exists('{}.{}'.format(orig_dir, self._stamp)):
+ self._logger.info('Restoring backup of %s directory...', tmp_dir)
+ tasks.run_task(['sudo', 'rm', '-rf', orig_dir], self._logger)
+ tasks.run_task(['sudo', 'mv', '{}.{}'.format(orig_dir, self._stamp), orig_dir],
+ self._logger)
+
@staticmethod
def get_db_sock_path():
"""Method returns location of db.sock file
:returns: path to db.sock file.
"""
- return os.path.join(_OVS_VAR_DIR, 'db.sock')
+ return os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'], 'db.sock')
#
# validate methods required for integration testcases
diff --git a/vswitches/ovs_dpdk_vhost.py b/vswitches/ovs_dpdk_vhost.py
index c0764c87..327a697d 100644
--- a/vswitches/ovs_dpdk_vhost.py
+++ b/vswitches/ovs_dpdk_vhost.py
@@ -84,7 +84,6 @@ class OvsDpdkVhost(IVSwitchOvs):
super(OvsDpdkVhost, self).stop()
dpdk.cleanup()
- dpdk.remove_vhost_modules()
def add_switch(self, switch_name, params=None):
"""See IVswitch for general description
@@ -147,7 +146,7 @@ class OvsDpdkVhost(IVSwitchOvs):
:returns: True if legacy --dpdk option is supported, otherwise it returns False
"""
- ovs_vswitchd_bin = os.path.join(settings.getValue('OVS_DIR'), 'vswitchd', 'ovs-vswitchd')
+ ovs_vswitchd_bin = settings.getValue('TOOLS')['ovs-vswitchd']
try:
subprocess.check_output(ovs_vswitchd_bin + r' --help | grep "\-\-dpdk"', shell=True)
return True
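
The probe above now points at the resolved TOOLS['ovs-vswitchd'] binary rather than a path built from OVS_DIR; the detection itself is unchanged. A self-contained sketch of the same check, assuming ovs-vswitchd can be found on PATH instead of being read from TOOLS:

    import subprocess

    def supports_legacy_dpdk_option(ovs_vswitchd_bin='ovs-vswitchd'):
        """Return True if the binary still advertises the legacy --dpdk option."""
        try:
            subprocess.check_output(ovs_vswitchd_bin + r' --help | grep "\-\-dpdk"',
                                    shell=True)
            return True
        except subprocess.CalledProcessError:
            return False
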
diff --git a/vswitches/ovs_vanilla.py b/vswitches/ovs_vanilla.py
index 40ca970e..12a460af 100644
--- a/vswitches/ovs_vanilla.py
+++ b/vswitches/ovs_vanilla.py
@@ -43,15 +43,6 @@ class OvsVanilla(IVSwitchOvs):
self._vswitchd_args += settings.getValue('VSWITCHD_VANILLA_ARGS')
self._module_manager = ModuleManager()
- def start(self):
- """See IVswitch for general description
-
- Activates kernel modules, ovsdb and vswitchd.
- """
- self._module_manager.insert_modules(
- settings.getValue('VSWITCH_VANILLA_KERNEL_MODULES'))
- super(OvsVanilla, self).start()
-
def stop(self):
"""See IVswitch for general description