-rwxr-xr-x ci/build-vsperf.sh | 6
-rw-r--r-- conf/00_common.conf | 3
-rw-r--r-- conf/03_traffic.conf | 43
-rw-r--r-- conf/05_collector.conf | 26
-rw-r--r-- conf/08_llcmanagement.conf | 62
-rw-r--r-- conf/10_custom.conf | 13
-rw-r--r-- conf/__init__.py | 13
-rw-r--r-- conf/integration/01_testcases.conf | 159
-rw-r--r-- core/results/results_constants.py | 4
-rw-r--r-- core/traffic_controller.py | 7
-rw-r--r-- docs/testing/developer/devguide/design/vswitchperf_design.rst | 24
-rw-r--r-- docs/testing/user/configguide/installation.rst | 27
-rw-r--r-- docs/testing/user/configguide/trafficgen.rst | 43
-rw-r--r-- docs/testing/user/userguide/index.rst | 1
-rw-r--r-- docs/testing/user/userguide/teststeps.rst | 19
-rw-r--r-- docs/testing/user/userguide/trafficcapture.rst | 297
-rw-r--r-- requirements.txt | 6
-rw-r--r-- src/package-list.mk | 8
-rwxr-xr-x systems/centos/build_base_machine.sh | 12
-rwxr-xr-x systems/centos/prepare_python_env.sh | 6
-rw-r--r-- systems/fedora/24/prepare_python_env.sh | 2
-rw-r--r-- systems/fedora/25/prepare_python_env.sh | 2
-rw-r--r-- systems/fedora/26/prepare_python_env.sh | 2
-rwxr-xr-x systems/opensuse/42.2/prepare_python_env.sh | 2
-rwxr-xr-x systems/opensuse/42.3/prepare_python_env.sh | 2
-rwxr-xr-x systems/opensuse/build_base_machine.sh | 95
-rwxr-xr-x systems/opensuse/prepare_python_env.sh | 28
-rwxr-xr-x systems/rhel/7.2/build_base_machine.sh | 31
-rwxr-xr-x systems/rhel/7.2/prepare_python_env.sh | 8
-rwxr-xr-x systems/rhel/7.3/build_base_machine.sh | 29
-rwxr-xr-x systems/rhel/7.3/prepare_python_env.sh | 6
-rwxr-xr-x systems/sles/15/build_base_machine.sh | 89
-rwxr-xr-x systems/sles/15/prepare_python_env.sh | 28
-rwxr-xr-x systems/ubuntu/14.04/prepare_python_env.sh | 4
-rw-r--r-- testcases/testcase.py | 32
-rwxr-xr-x tools/collectors/collectd/__init__.py | 17
-rw-r--r-- tools/collectors/collectd/collectd.py | 265
-rw-r--r-- tools/collectors/collectd/collectd_bucky.py | 769
-rw-r--r-- tools/functions.py | 10
-rw-r--r-- tools/llc_management/__init__.py | 17
-rw-r--r-- tools/llc_management/rmd.py | 198
-rwxr-xr-x tools/pkt_gen/ixia/ixia.py | 8
-rwxr-xr-x tools/pkt_gen/ixnet/ixnet.py | 8
-rw-r--r-- tools/pkt_gen/trex/trex.py | 205
-rw-r--r-- tools/tasks.py | 30
-rw-r--r-- tools/teststepstools.py | 17
-rw-r--r-- vswitches/ovs_dpdk_vhost.py | 5
-rw-r--r-- vswitches/ovs_vanilla.py | 13
-rw-r--r-- vswitches/vpp_dpdk_vhost.py | 3
49 files changed, 2562 insertions, 142 deletions
diff --git a/ci/build-vsperf.sh b/ci/build-vsperf.sh
index 755fb51a..00a548ba 100755
--- a/ci/build-vsperf.sh
+++ b/ci/build-vsperf.sh
@@ -120,6 +120,12 @@ function terminate_vsperf() {
sleep 1
sudo pkill ovsdb-server &> /dev/null
sleep 1
+ sudo pkill vppctl &> /dev/null
+ sleep 1
+ sudo pkill vpp &> /dev/null
+ sleep 1
+ sudo pkill -9 vpp &> /dev/null
+ sleep 1
}
# check and print testcase execution status
diff --git a/conf/00_common.conf b/conf/00_common.conf
index 4c25b0b8..279c67b4 100644
--- a/conf/00_common.conf
+++ b/conf/00_common.conf
@@ -90,6 +90,9 @@ PATHS = {}
# shell command to use when running commands through Pexpect
SHELL_CMD = ['/bin/bash', '-c']
+# internal list to keep track of PIDs of jobs executed by vsperf
+_EXECUTED_PIDS = []
+
# ############################
# Logging configuration
# ############################
diff --git a/conf/03_traffic.conf b/conf/03_traffic.conf
index a88e4bcc..67318893 100644
--- a/conf/03_traffic.conf
+++ b/conf/03_traffic.conf
@@ -147,6 +147,30 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# congestion (DEI header field).
# Data type: int (NOTE: must fit to 1 bit)
# Default value: 0
+# 'capture' - A dictionary with traffic capture configuration.
+# NOTE: It is supported only by T-Rex traffic generator.
+# 'enabled' - Specifies if traffic should be captured
+# Data type: bool
+# Default value: False
# 'tx_ports' - A list of ports where frames transmitted towards the DUT will
# be captured. Ports have numbers 0 and 1. TX packet capture
# is disabled if the list of ports is empty.
# Data type: list
# Default value: [0]
# 'rx_ports' - A list of ports where frames received from the DUT will
# be captured. Ports have numbers 0 and 1. RX packet capture
# is disabled if the list of ports is empty.
# Data type: list
# Default value: [1]
# 'count' - The number of frames to be captured. The same count value
# is applied to both TX and RX captures.
# Data type: int
# Default value: 1
# 'filter' - An expression used to filter TX and RX packets. It uses the same
# syntax as the pcap library. See the pcap-filter man page for additional
# details.
# Data type: str
# Default value: ''
TRAFFIC = {
'traffic_type' : 'rfc2544_throughput',
'frame_rate' : 100,
@@ -179,6 +203,13 @@ TRAFFIC = {
'priority': 0,
'cfi': 0,
},
+ 'capture': {
+ 'enabled': False,
+ 'tx_ports' : [0],
+ 'rx_ports' : [1],
+ 'count': 1,
+ 'filter': '',
+ },
}
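+# Illustrative example: like any other TRAFFIC item, the capture settings can
+# be overridden at runtime, e.g.:
+#   ./vsperf --test-params "TRAFFIC={'capture':{'enabled':True,'count':5}}" <testcase>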
#path to traffic generators directory.
@@ -451,11 +482,11 @@ TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD = 0.05
# Parameter below defines frequency of packets used for latency measurement in PPS.
# Value 0 will disable latency specific streams.
TRAFFICGEN_TREX_LATENCY_PPS = 1000
-# Example 10 Gbps: TRAFFICGEN_TREXINE_SPEED_GBPS = '10'
-# Today only 10 Gbps is supported
-TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+# Enable sending of learning packets before the test traffic is started
+TRAFFICGEN_TREX_LEARNING_MODE = True
+TRAFFICGEN_TREX_LEARNING_DURATION = 5
# FOR SR-IOV or multistream layer 2 tests to work with T-Rex enable Promiscuous mode
-TRAFFICGEN_TREX_PROMISCUOUS=False
+TRAFFICGEN_TREX_PROMISCUOUS = False
PATHS['trafficgen'] = {
'Trex': {
'type' : 'src',
@@ -464,5 +495,9 @@ PATHS['trafficgen'] = {
}
}
}
+# TRex validation option for RFC2544
+TRAFFICGEN_TREX_VERIFICATION_MODE = False
+TRAFFICGEN_TREX_VERIFICATION_DURATION = 60
+TRAFFICGEN_TREX_MAXIMUM_VERIFICATION_TRIALS = 10
# TREX Configuration and Connection Info-- END
##############################################
diff --git a/conf/05_collector.conf b/conf/05_collector.conf
index 9fd2558c..a1bb41f8 100644
--- a/conf/05_collector.conf
+++ b/conf/05_collector.conf
@@ -1,4 +1,4 @@
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Spirent Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,3 +31,27 @@ PIDSTAT_SAMPLE_INTERVAL = 1
# prefix of pidstat's log file; separate log file is created
# for each testcase in the directory with results
LOG_FILE_PIDSTAT = 'pidstat'
+
+##########################################
+# Collectd Specific configuration
+##########################################
+COLLECTD_IP = "127.0.0.1"
+COLLECTD_PORT = 25826
+COLLECTD_SECURITY_LEVEL = 0
+COLLECTD_AUTH_FILE = ''
+LOG_FILE_COLLECTD = 'collectd'
+
+# Configure filters - Interested (KEYS), Not-Interested (XKEYS)
+COLLECTD_CPU_KEYS = ['system', 'idle']
+COLLECTD_PROCESSES_KEYS = ['user', 'system']
+COLLECTD_INTERFACE_KEYS = ['dropped']
+COLLECTD_OVSSTAT_KEYS = ['dropped', 'broadcast']
+COLLECTD_DPDKSTAT_KEYS = ['dropped']
+COLLECTD_INTELRDT_KEYS = ['llc']
+
+# Interface types to exclude
+COLLECTD_INTERFACE_XKEYS = ['docker', 'lo']
+# Core-IDs to exclude from Intel RDT statistics collection.
+# Provide individual core-ids or a range of core-ids.
+# A range is specified using '-', e.g. '3-7'.
+COLLECTD_INTELRDT_XKEYS = [ ]
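+# Illustrative example: exclude cores 0-3 and core 7 from Intel RDT statistics
+# COLLECTD_INTELRDT_XKEYS = ['0-3', '7']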
diff --git a/conf/08_llcmanagement.conf b/conf/08_llcmanagement.conf
new file mode 100644
index 00000000..92e6367c
--- /dev/null
+++ b/conf/08_llcmanagement.conf
@@ -0,0 +1,62 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##################################
+# LLC Management Configuration #
+##################################
+
+####################################################################
+# Specify how the policy is defined.
+# Select any one of the following: COS, CUSTOM.
+####################################################################
+POLICY_TYPE = 'COS'
+
+####################################################################
+# Policy Definition by COS
+# Choose any one class of service among Gold, Silver and Bronze.
+# The min-cache and max-cache for these 3 services vary.
+# gold - has the maximum with 'guaranteed' allocation.
+# silver-bf - lower than gold, and best effort.
+# bronze-shared - least and shared.
+# This value will be used for "policy" variable in the REST call.
+####################################################################
+VSWITCH_COS = "silver-bf"
+VNF_COS = "silver-bf"
+PMD_COS = "gold"
+NOISEVM_COS = "bronze-shared"
+
+####################################################################
+# CUSTOM Policy Definition
+# Specify Minimum and Maximum Cache Values for each workload
+# [mincache, maxcache]
+####################################################################
+VSWITCH_CA = [10, 18]
+VNF_CA = [8, 10]
+PMD_CA = [10, 16]
+NOISEVM_CA = [1, 1]
+
+####################################################################
+# Intel RMD Server Specific Configuration
+# Port: 8081 (debug) or 8888 (normal)
+# Version: v1
+# IP: only localhost.
+####################################################################
+RMD_PORT = 8081
+RMD_SERVER_IP = '127.0.0.1'
+RMD_API_VERSION = 'v1'
+
+####################################################################
+# LLC Allocation Control.
+####################################################################
+LLC_ALLOCATION = False
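+
+# Illustrative example: LLC allocation can also be enabled for a single run
+# without editing this file:
+#   ./vsperf --test-params "LLC_ALLOCATION=True" <testcase>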
diff --git a/conf/10_custom.conf b/conf/10_custom.conf
index 8020bb93..917d16b4 100644
--- a/conf/10_custom.conf
+++ b/conf/10_custom.conf
@@ -133,11 +133,15 @@ TRAFFICGEN_TREX_PORT2 = ''
# Parameter below defines frequency of packets used for latency measurement in PPS.
# Value 0 will disable latency specific streams.
TRAFFICGEN_TREX_LATENCY_PPS = 1000
-# Example 10 Gbps: TRAFFICGEN_TREXINE_SPEED_GBPS = '10'
-# Today only 10 Gbps is supported
-TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+# Enable sending of learning packets before the test traffic is started
+TRAFFICGEN_TREX_LEARNING_MODE = True
+TRAFFICGEN_TREX_LEARNING_DURATION = 5
# FOR SR-IOV or multistream layer 2 tests to work with T-Rex enable Promiscuous mode
-TRAFFICGEN_TREX_PROMISCUOUS=False
+TRAFFICGEN_TREX_PROMISCUOUS = False
+# TRex validation option for RFC2544
+TRAFFICGEN_TREX_VERIFICATION_MODE = False
+TRAFFICGEN_TREX_VERIFICATION_DURATION = 60
+TRAFFICGEN_TREX_MAXIMUM_VERIFICATION_TRIALS = 10
# TREX Configuration and Connection Info-- END
####################################################
@@ -161,3 +165,4 @@ PACKAGE_LIST = "src/package-list.mk"
# 'openvswitch']
#PATHS['vswitch']['OvsVanilla']['type'] = 'bin'
+
diff --git a/conf/__init__.py b/conf/__init__.py
index a7c0ee5d..d5d26757 100644
--- a/conf/__init__.py
+++ b/conf/__init__.py
@@ -124,6 +124,13 @@ class Settings(object):
if name is not None and value is not None:
super(Settings, self).__setattr__(name, value)
+ def resetValue(self, attr):
+ """If parameter was overridden by TEST_PARAMS, then it will
+ be set to its original value.
+ """
+ if attr in self.__dict__['TEST_PARAMS']:
+ self.__dict__['TEST_PARAMS'].pop(attr)
+
def load_from_file(self, path):
"""Update ``settings`` with values found in module at ``path``.
"""
@@ -324,6 +331,12 @@ class Settings(object):
assert value == self.__dict__[name]
return True
+ def validate_resetValue(self, dummy_result, attr):
+ """Verifies, that value was correctly reset
+ """
+ return 'TEST_PARAMS' not in self.__dict__ or \
+ attr not in self.__dict__['TEST_PARAMS']
+
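+# NOTE: a test step such as ['settings', 'resetValue', 'WHITELIST_NICS'] (see
+# conf/integration/01_testcases.conf in this patch) uses resetValue() above to
+# restore a parameter previously overridden via TEST_PARAMS.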
settings = Settings()
def get_test_param(key, default=None):
diff --git a/conf/integration/01_testcases.conf b/conf/integration/01_testcases.conf
index dfc8a4c2..bb2809b8 100644
--- a/conf/integration/01_testcases.conf
+++ b/conf/integration/01_testcases.conf
@@ -1000,6 +1000,165 @@ INTEGRATION_TESTS = [
# END of VPP tests used by VERIFY and MERGE jobs by OPNFV Jenkins
#
+ #
+ # Examples of functional testcases with traffic capture validation
+ #
+ # Capture Example 1 - Traffic capture inside VM (PVP scenario)
+ # This TestCase will modify VLAN ID set by the traffic generator to the new value.
+ # Correct VLAN ID setting is verified by inspection of captured frames.
+ {
+ "Name": "capture_pvp_modify_vid",
+ "Deployment": "pvp",
+ "Description": "Test and verify VLAN ID modification by Open vSwitch",
+ "Parameters" : {
+ "VSWITCH" : "OvsDpdkVhost", # works also for Vanilla OVS
+ "TRAFFICGEN_DURATION" : 5,
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ 'vlan': {
+ 'enabled': True,
+ 'id': 8,
+ 'priority': 1,
+ 'cfi': 0,
+ },
+ },
+ "GUEST_LOOPBACK" : ['linux_bridge'],
+ },
+ "TestSteps": [
+ # replace original flows with vlan ID modification
+ ['!vswitch', 'add_flow', 'br0', {'in_port': '1', 'actions': ['mod_vlan_vid:4','output:3']}],
+ ['!vswitch', 'add_flow', 'br0', {'in_port': '2', 'actions': ['mod_vlan_vid:4','output:4']}],
+ ['vswitch', 'dump_flows', 'br0'],
+ # verify that received frames have modified vlan ID
+ ['VNF0', 'execute_and_wait', 'tcpdump -i eth0 -c 5 -w dump.pcap vlan 4 &'],
+ ['trafficgen', 'send_traffic',{}],
+ ['!VNF0', 'execute_and_wait', 'tcpdump -qer dump.pcap vlan 4 2>/dev/null | wc -l','|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ ],
+ },
+]
+# Capture Example 2 - Setup with 2 NICs, where traffic is captured after it is
+# processed by the NIC under test (2nd NIC). See the documentation for further details.
+# This TestCase will strip VLAN headers from traffic sent by the traffic generator.
+# The removal of VLAN headers is verified by inspection of captured frames.
+#
+# NOTE: This setup expects a DUT with two NICs with two ports each. First NIC is
+# connected to the traffic generator (standard VSPERF setup). Ports of a second NIC
+# are interconnected by a patch cable. PCI addresses of all four ports have to be
+# properly configured in the WHITELIST_NICS parameter.
+_CAPTURE_P2P2P_OVS_ACTION = ''
+_CAPTURE_P2P2P_SETUP = [
+ # restore original NICS configuration, so we can refer to NICS elements
+ ['settings', 'resetValue', 'WHITELIST_NICS'],
+ ['settings', 'resetValue', 'NICS'],
+ # create and configure two bridges to forward traffic through NIC under
+ # the test and back to the traffic generator
+ # 1st bridge:
+ ['vswitch', 'add_switch', 'br0'],
+ ['tools', 'exec_shell', 'sudo ip addr flush dev $NICS[0]["device"]'],
+ ['tools', 'exec_shell', 'sudo ip link set dev $NICS[0]["device"] up'],
+ ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br0 $NICS[0]["device"]'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["bind-tool"] --bind igb_uio $NICS[3]["pci"]'],
+ ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br0 dpdk0 -- '
+ 'set Interface dpdk0 type=dpdk options:dpdk-devargs=$NICS[3]["pci"]'],
+ ['tools', 'exec_shell', '$TOOLS["ovs-ofctl"] add-flow br0 in_port=1,action='
+ '$_CAPTURE_P2P2P_OVS_ACTION,output:2'],
+ # 2nd bridge:
+ ['vswitch', 'add_switch', 'br1'],
+ ['tools', 'exec_shell', 'sudo ip addr flush dev $NICS[2]["device"]'],
+ ['tools', 'exec_shell', 'sudo ip link set dev $NICS[2]["device"] up'],
+ ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br1 $NICS[2]["device"]'],
+ ['tools', 'exec_shell', 'sudo ip addr flush dev $NICS[1]["device"]'],
+ ['tools', 'exec_shell', 'sudo ip link set dev $NICS[1]["device"] up'],
+ ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br1 $NICS[1]["device"]'],
+ ['vswitch', 'add_flow', 'br1', {'in_port': '1', 'actions': ['output:2']}],
+ # log flow details
+ ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', 'br1'],
+]
+INTEGRATION_TESTS += [
+ {
+ "Name": "capture_p2p2p_strip_vlan_ovs",
+ "Deployment": "clean",
+ "Description": "P2P Continuous Stream",
+ "Parameters" : {
+ "_CAPTURE_P2P2P_OVS_ACTION" : 'strip_vlan',
+ "TRAFFIC" : {
+ "bidir" : "False",
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ 'l2': {
+ 'srcmac': "ca:fe:00:00:00:00",
+ 'dstmac': "00:00:00:00:00:01"
+ },
+ 'vlan': {
+ 'enabled': True,
+ 'id': 8,
+ 'priority': 1,
+ 'cfi': 0,
+ },
+ },
+ # suppress DPDK configuration, so physical interfaces are not bound to DPDK driver
+ 'WHITELIST_NICS' : [],
+ 'NICS' : [],
+ },
+ "TestSteps": _CAPTURE_P2P2P_SETUP + [
+ # capture traffic after processing by the NIC under test (after possible egress HW offloading)
+ ['tools', 'exec_shell_background', 'tcpdump -i $NICS[2]["device"] -c 5 -w capture.pcap '
+ 'ether src $TRAFFIC["l2"]["srcmac"]'],
+ ['trafficgen', 'send_traffic', {}],
+ ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', 'br1'],
+ # there must be 5 captured frames...
+ ['tools', 'exec_shell', 'tcpdump -r capture.pcap | wc -l', '|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ # ...but no vlan headers
+ ['tools', 'exec_shell', 'tcpdump -r capture.pcap vlan | wc -l', '|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 0'],
+ ],
+ },
+ # Capture Example 3 - Traffic capture by traffic generator.
+ # This TestCase uses an OVS flow to add a VLAN tag with a given ID into every
+ # frame sent by the traffic generator. Correct frame modification is verified by
+ # inspection of the packet capture received by T-Rex.
+ {
+ "Name": "capture_p2p_add_vlan_ovs_trex",
+ "Deployment": "clean",
+ "Description": "OVS: Test VLAN tag modification and verify it by traffic capture",
+ "vSwitch" : "OvsDpdkVhost", # works also for Vanilla OVS
+ "Parameters" : {
+ "TRAFFICGEN" : "Trex",
+ "TRAFFICGEN_DURATION" : 5,
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ # enable capture of five RX frames
+ 'capture': {
+ 'enabled': True,
+ 'tx_ports' : [],
+ 'rx_ports' : [1],
+ 'count' : 5,
+ },
+ },
+ },
+ "TestSteps" : STEP_VSWITCH_P2P_INIT + [
+ # replace standard L2 flows by flows, which will add VLAN tag with ID 3
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '1', 'actions': ['mod_vlan_vid:3','output:2']}],
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '2', 'actions': ['mod_vlan_vid:3','output:1']}],
+ ['vswitch', 'dump_flows', 'int_br0'],
+ ['trafficgen', 'send_traffic', {}],
+ ['trafficgen', 'get_results'],
+ # verify that captured frames have vlan tag with ID 3
+ ['tools', 'exec_shell', 'tcpdump -qer $RESULTS_PATH/#STEP[-1][0]["capture_rx"] vlan 3 '
+ '2>/dev/null | wc -l', '|^(\d+)$'],
+ # number of received frames with expected VLAN id must match the number of captured frames
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ ] + STEP_VSWITCH_P2P_FINIT,
+ },
+ #
+ # End of examples of functional testcases with traffic capture validation
+ #
]
# Example of TC definition with exact vSwitch, VNF and TRAFFICGEN values.
diff --git a/core/results/results_constants.py b/core/results/results_constants.py
index ef2df847..967adbf9 100644
--- a/core/results/results_constants.py
+++ b/core/results/results_constants.py
@@ -69,6 +69,10 @@ class ResultsConstants(object):
TEST_START_TIME = "start_time"
TEST_STOP_TIME = "stop_time"
+ # files with traffic capture
+ CAPTURE_TX = "capture_tx"
+ CAPTURE_RX = "capture_rx"
+
@staticmethod
def get_traffic_constants():
"""Method returns all Constants used to store results.
diff --git a/core/traffic_controller.py b/core/traffic_controller.py
index d6e7629c..de82dddf 100644
--- a/core/traffic_controller.py
+++ b/core/traffic_controller.py
@@ -43,6 +43,7 @@ class TrafficController(object):
self._duration = None
self._lossrate = None
self._packet_sizes = None
+ self._connected = False
self._mode = str(settings.getValue('mode')).lower()
self._results = []
@@ -51,6 +52,10 @@ class TrafficController(object):
"""Set configuration values just before test execution so they
can be changed during runtime by test steps.
"""
+ if not self._connected:
+ self._traffic_gen_class.connect()
+ self._connected = True
+
self._duration = int(settings.getValue('TRAFFICGEN_DURATION'))
self._lossrate = float(settings.getValue('TRAFFICGEN_LOSSRATE'))
self._packet_sizes = settings.getValue('TRAFFICGEN_PKT_SIZES')
@@ -62,7 +67,7 @@ class TrafficController(object):
def __enter__(self):
"""Call initialisation function.
"""
- self._traffic_gen_class.connect()
+ pass
def __exit__(self, type_, value, traceback):
"""Stop traffic, clean up.
diff --git a/docs/testing/developer/devguide/design/vswitchperf_design.rst b/docs/testing/developer/devguide/design/vswitchperf_design.rst
index 33051493..96ffcf62 100644
--- a/docs/testing/developer/devguide/design/vswitchperf_design.rst
+++ b/docs/testing/developer/devguide/design/vswitchperf_design.rst
@@ -415,6 +415,30 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
congestion (DEI header field).
Data type: int (NOTE: must fit to 1 bit)
Default value: 0
+ 'capture' - A dictionary with traffic capture configuration.
+ NOTE: It is supported only by T-Rex traffic generator.
+ 'enabled' - Specifies if traffic should be captured
+ Data type: bool
+ Default value: False
+ 'tx_ports' - A list of ports where frames transmitted towards the DUT will
+ be captured. Ports have numbers 0 and 1. TX packet capture
+ is disabled if the list of ports is empty.
+ Data type: list
+ Default value: [0]
+ 'rx_ports' - A list of ports where frames received from the DUT will
+ be captured. Ports have numbers 0 and 1. RX packet capture
+ is disabled if the list of ports is empty.
+ Data type: list
+ Default value: [1]
+ 'count' - The number of frames to be captured. The same count value
+ is applied to both TX and RX captures.
+ Data type: int
+ Default value: 1
+ 'filter' - An expression used to filter TX and RX packets. It uses the same
+ syntax as the pcap library. See the pcap-filter man page for additional
+ details.
+ Data type: str
+ Default value: ''
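+
+As an illustration (values taken from the capture examples added by this
+patch), a capture of five frames received on port 1 can be requested by
+overriding the ``TRAFFIC`` dictionary as follows:
+
+.. code-block:: python
+
+    'capture': {
+        'enabled': True,
+        'tx_ports' : [],
+        'rx_ports' : [1],
+        'count' : 5,
+    },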
.. _configuration-of-guest-options:
diff --git a/docs/testing/user/configguide/installation.rst b/docs/testing/user/configguide/installation.rst
index 8bad4efd..51588007 100644
--- a/docs/testing/user/configguide/installation.rst
+++ b/docs/testing/user/configguide/installation.rst
@@ -49,6 +49,8 @@ Supported Operating Systems
* Fedora 25 (kernel 4.9 requires DPDK 16.11 and newer)
* openSUSE 42.2
* openSUSE 42.3
+* openSUSE Tumbleweed
+* SLES 15
* RedHat 7.2 Enterprise Linux
* RedHat 7.3 Enterprise Linux
* Ubuntu 14.04
@@ -134,13 +136,19 @@ The test suite requires Python 3.3 or newer and relies on a number of other
system and python packages. These need to be installed for the test suite
to function.
+An updated kernel and certain development packages are required by DPDK,
+OVS (especially Vanilla OVS) and QEMU. Check that the versions of these
+packages are not **held back** and that the DNF/APT/YUM configuration
+does not prevent their modification by enforcing settings such as
+**"exclude-kernel"**.
+
Installation of required packages, preparation of Python 3 virtual
environment and compilation of OVS, DPDK and QEMU is performed by
-script **systems/build_base_machine.sh**. It should be executed under
+script **systems/build_base_machine.sh**. It should be executed under the
user account, which will be used for vsperf execution.
**NOTE:** Password-less sudo access must be configured for given
-user account before script is executed.
+user account before the script is executed.
.. code:: bash
@@ -154,13 +162,14 @@ automatically.
Script **build_base_machine.sh** will install all the vsperf dependencies
in terms of system packages, Python 3.x and required Python modules.
In case of CentOS 7 or RHEL it will install Python 3.3 from an additional
-repository provided by Software Collections (`a link`_). Installation script
+repository provided by Software Collections (`a link`_). The installation script
will also use `virtualenv`_ to create a vsperf virtual environment, which is
-isolated from the default Python environment. This environment will reside in a
-directory called **vsperfenv** in $HOME. It will ensure, that system wide Python
-installation is not modified or broken by VSPERF installation. The complete list
-of Python packages installed inside virtualenv can be found at file
-``requirements.txt``, which is located at vswitchperf repository.
+isolated from the default Python environment, using the Python 3 package located
+at **/usr/bin/python3**. This environment will reside in a directory called
+**vsperfenv** in $HOME. It ensures that the system-wide Python installation
+is not modified or broken by the VSPERF installation. The complete list of
+Python packages installed inside the virtualenv can be found in the file
+``requirements.txt``, which is located in the vswitchperf repository.
**NOTE:** For RHEL 7.3 Enterprise and CentOS 7.3 OVS Vanilla is not
built from upstream source due to kernel incompatibilities. Please see the
@@ -193,7 +202,7 @@ new shell session. Its activation is specific to your OS:
.. code:: bash
- $ scl enable python33 bash
+ $ scl enable rh-python34 bash
$ source $HOME/vsperfenv/bin/activate
* Fedora and Ubuntu
diff --git a/docs/testing/user/configguide/trafficgen.rst b/docs/testing/user/configguide/trafficgen.rst
index 4b9eec6e..33824486 100644
--- a/docs/testing/user/configguide/trafficgen.rst
+++ b/docs/testing/user/configguide/trafficgen.rst
@@ -44,7 +44,8 @@ and is configured as follows:
'stream_type' : 'L4',
'pre_installed_flows' : 'No', # used by vswitch implementation
'flow_type' : 'port', # used by vswitch implementation
-
+ 'flow_control' : False, # supported only by IxNet
+ 'learning_frames' : True, # supported only by IxNet
'l2': {
'framesize': 64,
'srcmac': '00:00:00:00:00:00',
@@ -67,6 +68,13 @@ and is configured as follows:
'priority': 0,
'cfi': 0,
},
+ 'capture': {
+ 'enabled': False,
+ 'tx_ports' : [0],
+ 'rx_ports' : [1],
+ 'count': 1,
+ 'filter': '',
+ },
}
The framesize parameter can be overridden from the configuration
@@ -783,6 +791,10 @@ It is neccesary for proper connection between Trex server and VSPERF.
cd trex-core/scripts/
./t-rex-64 -i
+**NOTE:** Please check the firewall settings on both the DUT and the T-Rex
+server. The firewall must allow a connection from the DUT (VSPERF) to the
+T-Rex server running at TCP port 4501.
+
For additional information about Trex stateless mode see Trex stateless documentation:
https://trex-tgn.cisco.com/trex/doc/trex_stateless.html
@@ -826,6 +838,15 @@ Default value of this parameter is defined in conf/03_traffic.conf as follows:
TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD = ''
+T-Rex can send learning packets. For certain tests it may be beneficial to
+send some packets before starting the test traffic, to allow switch learning to
+take place. This can be adjusted with the following configuration parameters:
+
+.. code-block:: console
+
+ TRAFFICGEN_TREX_LEARNING_MODE=True
+ TRAFFICGEN_TREX_LEARNING_DURATION=5
+
SR-IOV and Multistream layer 2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
T-Rex by default only accepts packets on the receive side if the destination mac matches the
@@ -840,3 +861,23 @@ modified. Enable Promiscuous mode when doing multistream at layer 2 testing with
.. code-block:: console
TRAFFICGEN_TREX_PROMISCUOUS=True
+
+RFC2544 Validation
+~~~~~~~~~~~~~~~~~~
+
+T-Rex can perform a verification run of longer duration once the binary search of the
+RFC2544 trials has completed. This duration should be at least 60 seconds. This is similar
+to the functionality of other traffic generators, where a more sustained run can be
+attempted to verify the result of the search. This can be configured with the following
+parameters:
+
+.. code-block:: console
+
+ TRAFFICGEN_TREX_VERIFICATION_MODE = False
+ TRAFFICGEN_TREX_VERIFICATION_DURATION = 60
+ TRAFFICGEN_TREX_MAXIMUM_VERIFICATION_TRIALS = 10
+
+The duration and the maximum number of attempted verification trials can be set to change
+the behavior of this step. If the verification step fails, the binary search resumes
+with new values, where the maximum output will be the last attempted frame rate minus the
+currently set threshold.
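+
+As an illustration (not part of this patch), verification can be enabled for
+a single run by overriding the parameter from the command line:
+
+.. code-block:: console
+
+    ./vsperf --trafficgen Trex --test-params "TRAFFICGEN_TREX_VERIFICATION_MODE=True" phy2phy_tput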
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index 64d91657..350fbe54 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -16,5 +16,6 @@ VSPERF Test Guide
./testusage.rst
./teststeps.rst
./integration.rst
+ ./trafficcapture.rst
./yardstick.rst
./testlist.rst
diff --git a/docs/testing/user/userguide/teststeps.rst b/docs/testing/user/userguide/teststeps.rst
index 8be67310..08c95311 100644
--- a/docs/testing/user/userguide/teststeps.rst
+++ b/docs/testing/user/userguide/teststeps.rst
@@ -131,6 +131,21 @@ of supported objects and their most common functions follows:
['vnf2', 'stop'],
['vnf1', 'stop'],
+ * ``VNF[ID]`` - provides access to VNFs deployed automatically by the testcase
+ deployment scenario. For example, the ``pvvp`` deployment automatically starts
+ two VNFs before any TestStep is executed. These VNFs can be accessed via the
+ VNF0 and VNF1 labels.
+
+ The list of supported functions is identical to that of the ``vnf[ID]`` object
+ above, except for the functions ``start`` and ``stop``.
+
+ Examples:
+
+ .. code-block:: python
+
+ ['VNF0', 'execute_and_wait', 'ifconfig eth2 5.5.5.1/24 up'],
+ ['VNF1', 'execute_and_wait', 'ifconfig eth2 5.5.5.2/24 up', 120, 'root.*#'],
+ ['VNF2', 'execute_and_wait', 'ping -c1 5.5.5.1'],
+
* ``trafficgen`` - triggers traffic generation
List of supported functions:
@@ -256,7 +271,9 @@ of supported objects and their most common functions follows:
in case that condition is not ``True``
* ``Eval expression`` - evaluates given expression as a python code and returns
its result
- * ``Exec_Shell command`` - executes a shell command
+ * ``Exec_Shell command`` - executes a shell command and waits until it finishes
+ * ``Exec_Shell_Background command`` - executes a shell command in the background;
+ the command will be automatically terminated at the end of testcase execution
+ (see the example below the list)
* ``Exec_Python code`` - executes a python code
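+
+ An illustrative use of ``Exec_Shell_Background``, mirroring the capture
+ testcases added by this patch, which runs tcpdump in the background for the
+ duration of the testcase:
+
+ .. code-block:: python
+
+ ['tools', 'exec_shell_background', 'tcpdump -i eth0 -c 5 -w capture.pcap'],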
diff --git a/docs/testing/user/userguide/trafficcapture.rst b/docs/testing/user/userguide/trafficcapture.rst
new file mode 100644
index 00000000..fa09bfed
--- /dev/null
+++ b/docs/testing/user/userguide/trafficcapture.rst
@@ -0,0 +1,297 @@
+Traffic Capture
+---------------
+
+The ability to capture traffic at multiple points of the system is crucial to
+many of the functional tests. It allows the verification of functionality for
+both the vSwitch and the NICs using hardware acceleration for packet
+manipulation and modification.
+
+There are three different methods of traffic capture supported by VSPERF.
+Detailed descriptions of these methods as well as their pros and cons can be
+found in the following chapters.
+
+Traffic Capture inside of a VM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method uses the standard PVP scenario, in which the vSwitch first processes
+and modifies the packet before forwarding it to the VM. Inside the VM we
+capture the traffic using **tcpdump** or a similar technique. The captured
+information is then used to verify the expected modifications to the packet done
+by the vSwitch.
+
+.. code-block:: console
+
+ _
+ +--------------------------------------------------+ |
+ | | |
+ | +------------------------------------------+ | |
+ | | Traffic capture and Packet Forwarding | | |
+ | +------------------------------------------+ | |
+ | ^ : | |
+ | | | | | Guest
+ | : v | |
+ | +---------------+ +---------------+ | |
+ | | logical port 0| | logical port 1| | |
+ +---+---------------+----------+---------------+---+ _|
+ ^ :
+ | |
+ : v _
+ +---+---------------+----------+---------------+---+ |
+ | | logical port 0| | logical port 1| | |
+ | +---------------+ +---------------+ | |
+ | ^ : | |
+ | | | | | Host
+ | : v | |
+ | +--------------+ +--------------+ | |
+ | | phy port | vSwitch | phy port | | |
+ +---+--------------+------------+--------------+---+ _|
+ ^ :
+ | |
+ : v
+ +--------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +--------------------------------------------------+
+
+PROS:
+
+- supports testing with all traffic generators
+- easy to use and implement into test
+- allows testing hardware offloading on the ingress side
+
+CONS:
+
+- does not allow testing hardware offloading on the egress side
+
+An example of a Traffic Capture inside of a VM test:
+
+.. code-block:: python
+
+ # Capture Example 1 - Traffic capture inside VM (PVP scenario)
+ # This TestCase will modify VLAN ID set by the traffic generator to the new value.
+ # Correct VLAN ID setting is verified by inspection of captured frames.
+ {
+ "Name": "capture_pvp_modify_vid",
+ "Deployment": "pvp",
+ "Description": "Test and verify VLAN ID modification by Open vSwitch",
+ "Parameters" : {
+ "VSWITCH" : "OvsDpdkVhost", # works also for Vanilla OVS
+ "TRAFFICGEN_DURATION" : 5,
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ 'vlan': {
+ 'enabled': True,
+ 'id': 8,
+ 'priority': 1,
+ 'cfi': 0,
+ },
+ },
+ "GUEST_LOOPBACK" : ['linux_bridge'],
+ },
+ "TestSteps": [
+ # replace original flows with vlan ID modification
+ ['!vswitch', 'add_flow', 'br0', {'in_port': '1', 'actions': ['mod_vlan_vid:4','output:3']}],
+ ['!vswitch', 'add_flow', 'br0', {'in_port': '2', 'actions': ['mod_vlan_vid:4','output:4']}],
+ ['vswitch', 'dump_flows', 'br0'],
+ # verify that received frames have modified vlan ID
+ ['VNF0', 'execute_and_wait', 'tcpdump -i eth0 -c 5 -w dump.pcap vlan 4 &'],
+ ['trafficgen', 'send_traffic',{}],
+ ['!VNF0', 'execute_and_wait', 'tcpdump -qer dump.pcap vlan 4 2>/dev/null | wc -l','|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ ],
+ },
+
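+To execute this example testcase, assuming a standard VSPERF setup, run it in
+integration mode:
+
+.. code-block:: console
+
+    $ ./vsperf --integration capture_pvp_modify_vid
+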
+Traffic Capture for testing NICs with HW offloading/acceleration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The NIC with hardware acceleration/offloading is inserted as an additional card
+into the server. Two ports on this card are then connected together using
+a patch cable as shown in the diagram. Only a single port of the tested NIC is
+set up with DPDK acceleration, while the other is handled by the Linux IP stack,
+allowing for traffic capture. The two NICs are then connected by the vSwitch so the
+original card can forward the processed packets to the traffic generator. The
+ports handled by the Linux IP stack allow for capturing packets, which are then
+analyzed for changes done by both the vSwitch and the NIC with hardware
+acceleration.
+
+.. code-block:: console
+
+ _
+ +------------------------------------------------+ |
+ | | |
+ | +----------------------------------------+ | |
+ | | vSwitch | | |
+ | | +----------------------------------+ | | |
+ | | | | | | |
+ | | | +------------------+ | | | |
+ | | | | | v | | |
+ | +----------------------------------------+ | | Device under Test
+ | ^ | ^ | | |
+ | | | | | | |
+ | | v | v | |
+ | +--------------+ +--------------+ | |
+ | | | | NIC w HW acc | | |
+ | | phy ports | | phy ports | | |
+ +---+--------------+----------+--------------+---+ _|
+ ^ : ^ :
+ | | | |
+ | | +-------+
+ : v Patch Cable
+ +------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +------------------------------------------------+
+
+PROS:
+
+- allows testing hardware offloading on both the ingress and egress side
+- supports testing with all traffic generators
+- relatively easy to use and implement into tests
+
+CONS:
+
+- a more complex setup with two cards
+- if the tested card only has one port, an additional card is needed
+
+An example of a Traffic Capture test for NICs with HW offloading:
+
+.. code-block:: python
+
+ # Capture Example 2 - Setup with 2 NICs, where traffic is captured after it is
+ # processed by the NIC under test (2nd NIC). See the documentation for further details.
+ # This TestCase will strip VLAN headers from traffic sent by the traffic generator.
+ # The removal of VLAN headers is verified by inspection of captured frames.
+ #
+ # NOTE: This setup expects a DUT with two NICs with two ports each. First NIC is
+ # connected to the traffic generator (standard VSPERF setup). Ports of a second NIC
+ # are interconnected by a patch cable. PCI addresses of all four ports have to be
+ # properly configured in the WHITELIST_NICS parameter.
+ {
+ "Name": "capture_p2p2p_strip_vlan_ovs",
+ "Deployment": "clean",
+ "Description": "P2P Continuous Stream",
+ "Parameters" : {
+ "_CAPTURE_P2P2P_OVS_ACTION" : 'strip_vlan',
+ "TRAFFIC" : {
+ "bidir" : "False",
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ 'l2': {
+ 'srcmac': "ca:fe:00:00:00:00",
+ 'dstmac': "00:00:00:00:00:01"
+ },
+ 'vlan': {
+ 'enabled': True,
+ 'id': 8,
+ 'priority': 1,
+ 'cfi': 0,
+ },
+ },
+ # suppress DPDK configuration, so physical interfaces are not bound to DPDK driver
+ 'WHITELIST_NICS' : [],
+ 'NICS' : [],
+ },
+ "TestSteps": _CAPTURE_P2P2P_SETUP + [
+ # capture traffic after processing by the NIC under test (after possible egress HW offloading)
+ ['tools', 'exec_shell_background', 'tcpdump -i $NICS[2]["device"] -c 5 -w capture.pcap '
+ 'ether src $TRAFFIC["l2"]["srcmac"]'],
+ ['trafficgen', 'send_traffic', {}],
+ ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', 'br1'],
+ # there must be 5 captured frames...
+ ['tools', 'exec_shell', 'tcpdump -r capture.pcap | wc -l', '|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ # ...but no vlan headers
+ ['tools', 'exec_shell', 'tcpdump -r capture.pcap vlan | wc -l', '|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 0'],
+ ],
+ },
+
+
+Traffic Capture on the Traffic Generator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using the functionality of the traffic generator makes it possible to configure
+traffic capture on both of its ports. With traffic capture enabled, VSPERF
+instructs the traffic generator to automatically export captured data into
+a pcap file. The captured packets are then sent to VSPERF for analysis and
+verification, monitoring any changes done by both the vSwitch and the NICs.
+
+VSPERF currently supports this functionality only with the **T-Rex** generator.
+
+.. code-block:: console
+
+ _
+ +--------------------------------------------------+ |
+ | | |
+ | +--------------------------+ | |
+ | | | | |
+ | | v | | Host
+ | +--------------+ +--------------+ | |
+ | | phy port | vSwitch | phy port | | |
+ +---+--------------+------------+--------------+---+ _|
+ ^ :
+ | |
+ : v
+ +--------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +--------------------------------------------------+
+
+PROS:
+
+- allows testing hardware offloading on both the ingress and egress side
+- does not require an additional NIC
+
+CONS:
+
+- currently only supported by **T-Rex** traffic generator
+
+An example of a Traffic Capture on the Traffic Generator test:
+
+.. code-block:: python
+
+
+ # Capture Example 3 - Traffic capture by traffic generator.
+ # This TestCase uses an OVS flow to add a VLAN tag with a given ID into every
+ # frame sent by the traffic generator. Correct frame modification is verified by
+ # inspection of the packet capture received by T-Rex.
+ {
+ "Name": "capture_p2p_add_vlan_ovs_trex",
+ "Deployment": "clean",
+ "Description": "OVS: Test VLAN tag modification and verify it by traffic capture",
+ "vSwitch" : "OvsDpdkVhost", # works also for Vanilla OVS
+ "Parameters" : {
+ "TRAFFICGEN" : "Trex",
+ "TRAFFICGEN_DURATION" : 5,
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ # enable capture of five RX frames
+ 'capture': {
+ 'enabled': True,
+ 'tx_ports' : [],
+ 'rx_ports' : [1],
+ 'count' : 5,
+ },
+ },
+ },
+ "TestSteps" : STEP_VSWITCH_P2P_INIT + [
+ # replace standard L2 flows by flows, which will add VLAN tag with ID 3
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '1', 'actions': ['mod_vlan_vid:3','output:2']}],
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '2', 'actions': ['mod_vlan_vid:3','output:1']}],
+ ['vswitch', 'dump_flows', 'int_br0'],
+ ['trafficgen', 'send_traffic', {}],
+ ['trafficgen', 'get_results'],
+ # verify that captured frames have vlan tag with ID 3
+ ['tools', 'exec_shell', 'tcpdump -qer $RESULTS_PATH/#STEP[-1][0]["capture_rx"] vlan 3 '
+ '2>/dev/null | wc -l', '|^(\d+)$'],
+ # number of received frames with expected VLAN id must match the number of captured frames
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ ] + STEP_VSWITCH_P2P_FINIT,
+ },
+
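+The T-Rex generator is already selected by the testcase definition itself, so
+(again assuming a standard VSPERF setup) this example can be executed with:
+
+.. code-block:: console
+
+    $ ./vsperf --integration capture_p2p_add_vlan_ovs_trex
+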
diff --git a/requirements.txt b/requirements.txt
index 33bee1bf..3a366d70 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-# Copyright (c) 2015-2017 Intel corporation.
+# Copyright (c) 2015-2018 Intel corporation, Spirent Communications
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -14,3 +14,7 @@ netaddr==0.7.18
scapy-python3==0.18
pyzmq==14.5.0
distro
+stcrestclient
+matplotlib
+numpy
+pycrypto
diff --git a/src/package-list.mk b/src/package-list.mk
index cf2ff57f..7b82ee6f 100644
--- a/src/package-list.mk
+++ b/src/package-list.mk
@@ -13,19 +13,19 @@
# dpdk section
# DPDK_URL ?= git://dpdk.org/dpdk
DPDK_URL ?= http://dpdk.org/git/dpdk
-DPDK_TAG ?= v17.02
+DPDK_TAG ?= v17.08
# OVS section
OVS_URL ?= https://github.com/openvswitch/ovs
-OVS_TAG ?= cc3a32f3b6891168cee98812e8f5e3d8a5a52c98
+OVS_TAG ?= v2.8.1
# VPP section
VPP_URL ?= https://git.fd.io/vpp
-VPP_TAG ?= v17.04
+VPP_TAG ?= v17.07
# QEMU section
QEMU_URL ?= https://github.com/qemu/qemu.git
-QEMU_TAG ?= v2.5.0
+QEMU_TAG ?= v2.9.1
# TREX section
TREX_URL ?= https://github.com/cisco-system-traffic-generator/trex-core.git
diff --git a/systems/centos/build_base_machine.sh b/systems/centos/build_base_machine.sh
index f2efb541..a45b0c3d 100755
--- a/systems/centos/build_base_machine.sh
+++ b/systems/centos/build_base_machine.sh
@@ -60,6 +60,8 @@ pixman-devel
socat
numactl
numactl-devel
+libpng-devel
+freetype-devel
# install gvim
vim-X11
@@ -68,13 +70,13 @@ vim-X11
epel-release
" | grep -v ^#)
-# install SCL for python33
-sudo yum -y install centos-release-scl
+# install SCL for python34
+sudo yum -y install centos-release-scl-rh
-# install python33 packages and git-review tool
+# install python34 packages and git-review tool
yum -y install $(echo "
-python33
-python33-python-tkinter
+rh-python34
+rh-python34-python-tkinter
git-review
" | grep -v ^#)
# prevent ovs vanilla from building from source due to kernel incompatibilities
diff --git a/systems/centos/prepare_python_env.sh b/systems/centos/prepare_python_env.sh
index 5777448b..ac7ccba4 100755
--- a/systems/centos/prepare_python_env.sh
+++ b/systems/centos/prepare_python_env.sh
@@ -2,7 +2,7 @@
#
# Prepare Python 3 environment for vsperf execution
#
-# Copyright 2015 OPNFV
+# Copyright 2015-2017 OPNFV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable python33 "
-virtualenv "$VSPERFENV_DIR"
+scl enable rh-python34 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/fedora/24/prepare_python_env.sh b/systems/fedora/24/prepare_python_env.sh
index b099df5e..920604c2 100644
--- a/systems/fedora/24/prepare_python_env.sh
+++ b/systems/fedora/24/prepare_python_env.sh
@@ -23,7 +23,7 @@ fi
# enable virtual environment in a subshell, so QEMU build can use python 2.7
-(virtualenv-3.5 "$VSPERFENV_DIR"
+(virtualenv-3.5 "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint)
diff --git a/systems/fedora/25/prepare_python_env.sh b/systems/fedora/25/prepare_python_env.sh
index 4a85eb35..c4613ca4 100644
--- a/systems/fedora/25/prepare_python_env.sh
+++ b/systems/fedora/25/prepare_python_env.sh
@@ -23,7 +23,7 @@ fi
# enable virtual environment in a subshell, so QEMU build can use python 2.7
-(virtualenv-3.5 "$VSPERFENV_DIR"
+(virtualenv-3.5 "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/fedora/26/prepare_python_env.sh b/systems/fedora/26/prepare_python_env.sh
index 33615cbd..05eedfd9 100644
--- a/systems/fedora/26/prepare_python_env.sh
+++ b/systems/fedora/26/prepare_python_env.sh
@@ -23,7 +23,7 @@ fi
# enable virtual environment in a subshell, so QEMU build can use python 2.7
-(virtualenv-3.6 "$VSPERFENV_DIR"
+(virtualenv-3.6 "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/opensuse/42.2/prepare_python_env.sh b/systems/opensuse/42.2/prepare_python_env.sh
index 66f94cf7..ab668ca4 100755
--- a/systems/opensuse/42.2/prepare_python_env.sh
+++ b/systems/opensuse/42.2/prepare_python_env.sh
@@ -21,7 +21,7 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-virtualenv "$VSPERFENV_DIR"
+virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/opensuse/42.3/prepare_python_env.sh b/systems/opensuse/42.3/prepare_python_env.sh
index 66f94cf7..ab668ca4 100755
--- a/systems/opensuse/42.3/prepare_python_env.sh
+++ b/systems/opensuse/42.3/prepare_python_env.sh
@@ -21,7 +21,7 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-virtualenv "$VSPERFENV_DIR"
+virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/opensuse/build_base_machine.sh b/systems/opensuse/build_base_machine.sh
new file mode 100755
index 00000000..8b26440c
--- /dev/null
+++ b/systems/opensuse/build_base_machine.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Build a base machine for openSUSE Tumbleweed systems
+#
+# Copyright (c) 2017 SUSE LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Jose Lausuch, SUSE LINUX GmbH
+
+zypper -q -n dup
+zypper -q -n in -y $(echo "
+# compiler, tools and dependencies
+make
+automake
+gcc
+gcc-c++
+glibc
+glibc-devel
+fuse
+fuse-devel
+glib2-devel
+zlib-devel
+ncurses-devel
+kernel-default
+kernel-default-devel
+pkg-config
+findutils-locate
+curl
+automake
+autoconf
+vim
+wget
+git
+pciutils
+cifs-utils
+socat
+sysstat
+java-1_8_0-openjdk
+git-review
+mlocate
+
+# python
+python3
+python-pip
+python3-pip
+python3-setuptools
+python3-devel
+python3-tk
+python3-virtualenv
+
+# libraries
+libnuma1
+libnuma-devel
+libpixman-1-0
+libpixman-1-0-devel
+libtool
+libpcap-devel
+libnet9
+libncurses6
+libcurl4
+libcurl-devel
+libxml2
+libfuse2
+libopenssl1_1_0
+libopenssl-devel
+libpython3_6m1_0
+
+" | grep -v ^#)
+
+updatedb
+
+# fix for the Ixia TclClient
+ln -sf $(locate libc.so.6) /lib/libc.so.6
+
+# virtual environment for python
+pip3 install virtualenv
+
+# hugepages setup
+mkdir -p /dev/hugepages
+
+# fix for non-utf8 characters in file
+cp /etc/services /etc/services.bak
+iconv -o /etc/services -f utf-8 -t utf-8 -c /etc/services.bak
diff --git a/systems/opensuse/prepare_python_env.sh b/systems/opensuse/prepare_python_env.sh
new file mode 100755
index 00000000..69871670
--- /dev/null
+++ b/systems/opensuse/prepare_python_env.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Prepare Python environment for vsperf execution on openSUSE Tumbleweed systems
+#
+# Copyright (c) 2017 SUSE LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ -d "$VSPERFENV_DIR" ] ; then
+ echo "Directory $VSPERFENV_DIR already exists. Skipping python virtualenv creation."
+ exit
+fi
+
+virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
+source "$VSPERFENV_DIR"/bin/activate
+pip install -r ../requirements.txt
+pip install pylint
+
diff --git a/systems/rhel/7.2/build_base_machine.sh b/systems/rhel/7.2/build_base_machine.sh
index 9eb8bbd2..858092df 100755
--- a/systems/rhel/7.2/build_base_machine.sh
+++ b/systems/rhel/7.2/build_base_machine.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Build a base machine for RHEL 7.2
+# Build a base machine for RHEL 7.2
#
# Copyright 2016 OPNFV, Intel Corporation & Red Hat Inc.
#
@@ -52,6 +52,7 @@ pkglist=(
wget\
numactl\
numactl-devel\
+ libpng-devel
)
# python tools for proper QEMU, DPDK, and OVS make
@@ -78,28 +79,24 @@ if [ "${#failedinstall[*]}" -gt 0 ]; then
exit 1
fi
-# install SCL for python33 by adding a repo to find its location to install it
-cat <<'EOT' >> /etc/yum.repos.d/python33.repo
-[rhscl-python33-el7]
-name=Copr repo for python33-el7 owned by rhscl
-baseurl=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/epel-7-$basearch/
-type=rpm-md
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/pubkey.gpg
-repo_gpgcheck=0
+# install SCL for python34 by adding a repo to find its location to install it
+cat <<'EOT' >> /etc/yum.repos.d/python34.repo
+[centos-sclo-rh]
+name=CentOS-7 - SCLo rh
+baseurl=http://mirror.centos.org/centos/7/sclo/$basearch/rh/
+gpgcheck=0
enabled=1
-enabled_metadata=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python33 packages and git-review tool
+# install python34 packages and git-review tool
yum -y install $(echo "
-python33
-python33-python-tkinter
+rh-python34
+rh-python34-python-tkinter
" | grep -v ^#)
-# cleanup python 33 repo file
-rm -f /etc/yum.repos.d/python33.repo
+# cleanup python 34 repo file
+rm -f /etc/yum.repos.d/python34.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
diff --git a/systems/rhel/7.2/prepare_python_env.sh b/systems/rhel/7.2/prepare_python_env.sh
index bd468d80..e137aaab 100755
--- a/systems/rhel/7.2/prepare_python_env.sh
+++ b/systems/rhel/7.2/prepare_python_env.sh
@@ -1,8 +1,8 @@
#!/bin/bash
#
-# Prepare Python environment for vsperf execution on Red Hat 7.2 systems.
+# Prepare Python environment for vsperf execution on RHEL 7.2 systems.
#
-# Copyright 2016 OPNFV, Intel Corporation, Red Hat Inc.
+# Copyright 2016-2017 OPNFV, Intel Corporation, Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable python33 "
-virtualenv "$VSPERFENV_DIR"
+scl enable rh-python34 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/rhel/7.3/build_base_machine.sh b/systems/rhel/7.3/build_base_machine.sh
index 5a9b4b2e..58953e23 100755
--- a/systems/rhel/7.3/build_base_machine.sh
+++ b/systems/rhel/7.3/build_base_machine.sh
@@ -52,6 +52,7 @@ pkglist=(
wget\
numactl\
numactl-devel\
+ libpng-devel
)
# python tools for proper QEMU, DPDK, and OVS make
@@ -78,28 +79,24 @@ if [ "${#failedinstall[*]}" -gt 0 ]; then
exit 1
fi
-# install SCL for python33 by adding a repo to find its location to install it
-cat <<'EOT' >> /etc/yum.repos.d/python33.repo
-[rhscl-python33-el7]
-name=Copr repo for python33-el7 owned by rhscl
-baseurl=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/epel-7-$basearch/
-type=rpm-md
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/pubkey.gpg
-repo_gpgcheck=0
+# install SCL for python34 by adding a repo to find its location to install it
+cat <<'EOT' >> /etc/yum.repos.d/python34.repo
+[centos-sclo-rh]
+name=CentOS-7 - SCLo rh
+baseurl=http://mirror.centos.org/centos/7/sclo/$basearch/rh/
+gpgcheck=0
enabled=1
-enabled_metadata=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python33 packages and git-review tool
+# install python34 packages and git-review tool
yum -y install $(echo "
-python33
-python33-python-tkinter
+rh-python34
+rh-python34-python-tkinter
" | grep -v ^#)
-# cleanup python 33 repo file
-rm -f /etc/yum.repos.d/python33.repo
+# cleanup python 34 repo file
+rm -f /etc/yum.repos.d/python34.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
diff --git a/systems/rhel/7.3/prepare_python_env.sh b/systems/rhel/7.3/prepare_python_env.sh
index 3ba049e8..e137aaab 100755
--- a/systems/rhel/7.3/prepare_python_env.sh
+++ b/systems/rhel/7.3/prepare_python_env.sh
@@ -2,7 +2,7 @@
#
# Prepare Python environment for vsperf execution on RHEL 7.3 systems.
#
-# Copyright 2016 OPNFV, Intel Corporation, Red Hat Inc.
+# Copyright 2016-2017 OPNFV, Intel Corporation, Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable python33 "
-virtualenv "$VSPERFENV_DIR"
+scl enable rh-python34 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint
diff --git a/systems/sles/15/build_base_machine.sh b/systems/sles/15/build_base_machine.sh
new file mode 100755
index 00000000..9c161dd7
--- /dev/null
+++ b/systems/sles/15/build_base_machine.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+# Build a base machine for SLES15 systems
+#
+# Copyright (c) 2017 SUSE LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Jose Lausuch, SUSE LINUX GmbH
+
+zypper -q -n dup
+zypper -q -n in -y $(echo "
+# compiler, tools and dependencies
+make
+automake
+gcc
+gcc-c++
+glibc
+glibc-devel
+fuse
+fuse-devel
+glib2-devel
+zlib-devel
+ncurses-devel
+kernel-default
+kernel-default-devel
+pkg-config
+findutils-locate
+curl
+automake
+autoconf
+vim
+wget
+git
+pciutils
+cifs-utils
+socat
+sysstat
+java-9-openjdk
+mlocate
+
+# python
+python3
+python3-pip
+python3-setuptools
+python3-devel
+python3-tk
+
+# libraries
+libnuma1
+libnuma-devel
+libpixman-1-0
+libpixman-1-0-devel
+libtool
+libpcap-devel
+libnet9
+libncurses5
+libcurl4
+libcurl-devel
+libxml2
+libfuse2
+libopenssl1_1_0
+libopenssl-devel
+libpython3_6m1_0
+
+" | grep -v ^#)
+
+updatedb
+
+# fix for the Ixia TclClient
+ln -sf $(locate libc.so.6) /lib/libc.so.6
+
+# virtual environment for python
+pip3 install virtualenv
+
+# hugepages setup
+mkdir -p /dev/hugepages
+
diff --git a/systems/sles/15/prepare_python_env.sh b/systems/sles/15/prepare_python_env.sh
new file mode 100755
index 00000000..12ada3cc
--- /dev/null
+++ b/systems/sles/15/prepare_python_env.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Prepare Python environment for vsperf execution on SLES15 systems
+#
+# Copyright (c) 2017 SUSE LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ -d "$VSPERFENV_DIR" ] ; then
+ echo "Directory $VSPERFENV_DIR already exists. Skipping python virtualenv creation."
+ exit
+fi
+
+virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
+source "$VSPERFENV_DIR"/bin/activate
+pip install -r ../requirements.txt
+pip install pylint
+
diff --git a/systems/ubuntu/14.04/prepare_python_env.sh b/systems/ubuntu/14.04/prepare_python_env.sh
index 6ef8680d..4c98dc42 100755
--- a/systems/ubuntu/14.04/prepare_python_env.sh
+++ b/systems/ubuntu/14.04/prepare_python_env.sh
@@ -2,7 +2,7 @@
#
# Prepare Python environment for vsperf execution on Ubuntu 14.04 systems
#
-# Copyright 2015 OPNFV, Intel Corporation.
+# Copyright 2015-2017 OPNFV, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ fi
# enable virtual environment in a subshell, so QEMU build can use python 2.7
-(virtualenv "$VSPERFENV_DIR"
+(virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
pip install pylint)
diff --git a/testcases/testcase.py b/testcases/testcase.py
index 34861091..ebf1e797 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -36,6 +36,7 @@ from tools import functions
from tools import namespace
from tools import veth
from tools.teststepstools import TestStepsTools
+from tools.llc_management import rmd
CHECK_PREFIX = 'validate_'
@@ -78,6 +79,7 @@ class TestCase(object):
self._step_result_mapping = {}
self._step_status = None
self._step_send_traffic = False # indication if send_traffic was called within test steps
+ self._vnf_list = []
self._testcase_run_time = None
S.setValue('VSWITCH', cfg.get('vSwitch', S.getValue('VSWITCH')))
@@ -168,7 +170,7 @@ class TestCase(object):
self._traffic['l3'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L3')
self._traffic['l4'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L4')
self._traffic['l2']['dstmac'] = S.getValue('NICS')[1]['mac']
- elif len(S.getValue('NICS')) and \
+ elif len(S.getValue('NICS')) >= 2 and \
(S.getValue('NICS')[0]['type'] == 'vf' or
S.getValue('NICS')[1]['type'] == 'vf'):
mac1 = S.getValue('NICS')[0]['mac']
@@ -184,6 +186,10 @@ class TestCase(object):
if step[0].startswith('vnf'):
self._step_vnf_list[step[0]] = None
+ # if llc allocation is required, initialize it.
+ if S.getValue('LLC_ALLOCATION'):
+ self._rmd = rmd.CacheAllocator()
+
def run_initialize(self):
""" Prepare test execution environment
"""
@@ -201,6 +207,8 @@ class TestCase(object):
loader.get_vnf_class(),
len(self._step_vnf_list))
+ self._vnf_list = self._vnf_ctl.get_vnfs()
+
# verify enough hugepages are free to run the testcase
if not self._check_for_enough_hugepages():
raise RuntimeError('Not enough hugepages free to run test.')
@@ -254,6 +262,10 @@ class TestCase(object):
self._step_status = {'status' : True, 'details' : ''}
+ # Perform LLC-allocations
+ if S.getValue('LLC_ALLOCATION'):
+ self._rmd.setup_llc_allocation()
+
self._logger.debug("Setup:")
def run_finalize(self):
@@ -262,6 +274,13 @@ class TestCase(object):
# Stop all VNFs started by TestSteps in case that something went wrong
self.step_stop_vnfs()
+ # Cleanup any LLC-allocations
+ if S.getValue('LLC_ALLOCATION'):
+ self._rmd.cleanup_llc_allocation()
+
+ # Stop all processes executed by testcase
+ tasks.terminate_all_tasks(self._logger)
+
# umount hugepages if mounted
self._umount_hugepages()
@@ -792,10 +811,21 @@ class TestCase(object):
# so it is not sent again after the execution of teststeps
self._step_send_traffic = True
elif step[0].startswith('vnf'):
+ # use vnf started within TestSteps
if not self._step_vnf_list[step[0]]:
# initialize new VM
self._step_vnf_list[step[0]] = loader.get_vnf_class()()
test_object = self._step_vnf_list[step[0]]
+ elif step[0].startswith('VNF'):
+ if step[1] in ('start', 'stop'):
+ raise RuntimeError("Cannot execute start() or stop() method of "
+ "VNF deployed automatically by scenario.")
+ # use vnf started by scenario deployment (e.g. pvp)
+ vnf_index = int(step[0][3:])
+ try:
+ test_object = self._vnf_list[vnf_index]
+ except IndexError:
+ raise RuntimeError("VNF with index {} is not running.".format(vnf_index))
elif step[0] == 'wait':
input(os.linesep + "Step {}: Press Enter to continue with "
"the next step...".format(i) + os.linesep + os.linesep)
diff --git a/tools/collectors/collectd/__init__.py b/tools/collectors/collectd/__init__.py
new file mode 100755
index 00000000..25e2c3c2
--- /dev/null
+++ b/tools/collectors/collectd/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2017 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for Collectd as a collector
+"""
diff --git a/tools/collectors/collectd/collectd.py b/tools/collectors/collectd/collectd.py
new file mode 100644
index 00000000..90df6b04
--- /dev/null
+++ b/tools/collectors/collectd/collectd.py
@@ -0,0 +1,265 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects samples from collectd through collectd_bucky.
+Depending on the policy, it decides whether to keep or discard each sample.
+Plots the values of the stored samples once the test is completed.
+"""
+
+import copy
+import csv
+import logging
+import multiprocessing
+import os
+from collections import OrderedDict
+import queue
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tools.collectors.collectd.collectd_bucky as cb
+from tools.collectors.collector import collector
+from conf import settings
+
+# The y-labels. Keys in this dictionary are used as y-labels.
+YLABELS = {'No/Of Packets': ['dropped', 'packets', 'if_octets', 'errors',
+ 'if_rx_octets', 'if_tx_octets'],
+ 'Jiffies': ['cputime'],
+ 'Bandwidth b/s': ['memory_bandwidth'],
+ 'Bytes': ['bytes.llc']}
+
+
+def get_label(sample):
+ """
+ Returns the y-label for the plot.
+ """
+ for label in YLABELS:
+ if any(r in sample for r in YLABELS[label]):
+ return label
+
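For illustration, a minimal sketch of the label lookup above; the sample
names are hypothetical and depend on the enabled collectd plugins:

    # assumes get_label() from this module is importable
    print(get_label('node1.cpu.0.cputime'))             # Jiffies
    print(get_label('node1.interface.eth0.if_octets'))  # No/Of Packets
    print(get_label('node1.foo.bar'))                   # None (no match)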
+
+def plot_graphs(dict_of_arrays):
+ """
+ Plot the values and store the data used for plotting.
+ """
+ i = 1
+ results_dir = settings.getValue('RESULTS_PATH')
+ for key in dict_of_arrays:
+ tup_list = dict_of_arrays[key]
+ two_lists = list(map(list, zip(*tup_list)))
+ y_axis_list = two_lists[0]
+ x_axis_list = two_lists[1]
+ if np.count_nonzero(y_axis_list) > 0:
+ with open(os.path.join(results_dir,
+ str(key) + '.data'), "w") as pfile:
+ writer = csv.writer(pfile, delimiter='\t')
+ writer.writerows(zip(x_axis_list, y_axis_list))
+ plt.figure(i)
+ plt.plot(x_axis_list, y_axis_list)
+ plt.xlabel("Time (Ticks)")
+ plt.ylabel(get_label(key))
+ plt.savefig(os.path.join(results_dir, str(key) + '.png'))
+ plt.cla()
+ plt.clf()
+ plt.close()
+ i = i + 1
+
+
+def get_results_to_print(dict_of_arrays):
+ """
+ Return a results dictionary for the report tool to
+ print the process-statistics.
+ """
+ presults = OrderedDict()
+ results = OrderedDict()
+ for key in dict_of_arrays:
+ if ('processes' in key and
+ any(proc in key for proc in ['ovs', 'vpp', 'qemu'])):
+ reskey = '.'.join(key.split('.')[2:])
+ preskey = key.split('.')[1] + '_collectd'
+ tup_list = dict_of_arrays[key]
+ two_lists = list(map(list, zip(*tup_list)))
+ y_axis_list = two_lists[0]
+ mean = 0.0
+ if np.count_nonzero(y_axis_list) > 0:
+ mean = np.mean(y_axis_list)
+ results[reskey] = mean
+ presults[preskey] = results
+ return presults
+
+
+class Receiver(multiprocessing.Process):
+ """
+ Wrapper Receiver (of samples) class
+ """
+ def __init__(self, pd_dict, control):
+ """
+ Initialize.
+ A queue will be shared with collectd_bucky
+ """
+ super(Receiver, self).__init__()
+ self.daemon = False
+ self.q_of_samples = multiprocessing.Queue()
+ self.server = cb.get_collectd_server(self.q_of_samples)
+ self.control = control
+ self.pd_dict = pd_dict
+ self.collectd_cpu_keys = settings.getValue('COLLECTD_CPU_KEYS')
+ self.collectd_processes_keys = settings.getValue(
+ 'COLLECTD_PROCESSES_KEYS')
+ self.collectd_iface_keys = settings.getValue(
+ 'COLLECTD_INTERFACE_KEYS')
+ self.collectd_iface_xkeys = settings.getValue(
+ 'COLLECTD_INTERFACE_XKEYS')
+ self.collectd_intelrdt_keys = settings.getValue(
+ 'COLLECTD_INTELRDT_KEYS')
+ self.collectd_ovsstats_keys = settings.getValue(
+ 'COLLECTD_OVSSTAT_KEYS')
+ self.collectd_dpdkstats_keys = settings.getValue(
+ 'COLLECTD_DPDKSTAT_KEYS')
+ self.collectd_intelrdt_xkeys = settings.getValue(
+ 'COLLECTD_INTELRDT_XKEYS')
+ self.exclude_coreids = []
+ # Expand the ranges in the intelrdt-xkeys
+ for xkey in self.collectd_intelrdt_xkeys:
+ if '-' not in xkey:
+ self.exclude_coreids.append(int(xkey))
+ else:
+ left, right = map(int, xkey.split('-'))
+ self.exclude_coreids += range(left, right + 1)
+
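The range expansion above can be exercised standalone; a sketch with a
hypothetical xkey list:

    def expand_xkeys(xkeys):
        """Standalone copy of the expansion loop above (illustration only)."""
        coreids = []
        for xkey in xkeys:
            if '-' not in xkey:
                coreids.append(int(xkey))
            else:
                left, right = map(int, xkey.split('-'))
                coreids += range(left, right + 1)
        return coreids

    print(expand_xkeys(['3', '10-12']))  # [3, 10, 11, 12]
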
+ def run(self):
+ """
+ Start receiving the samples.
+ """
+ while not self.control.value:
+ try:
+ sample = self.q_of_samples.get(True, 1)
+ if not sample:
+ break
+ self.handle(sample)
+ except queue.Empty:
+ pass
+ except IOError:
+ continue
+ except (ValueError, IndexError, KeyError, MemoryError):
+ self.stop()
+ break
+
+ # pylint: disable=too-many-boolean-expressions
+ def handle(self, sample):
+ ''' Store values and names if the name matches one of the following:
+ 1. cpu + keys
+ 2. processes + keys
+ 3. interface + keys + !xkeys
+ 4. ovs_stats + keys
+ 5. dpdkstat + keys
+ 6. intel_rdt + keys + !xkeys
+ sample[1] is the name of the sample, which is . separated strings.
+ The first field in sample[1] is the type - cpu, processes, etc.
+ For intel_rdt, the second field contains the core-id, which is
+ used to make the decision on 'exclusions'
+ sample[0]: Contains the host information - which is not considered.
+ sample[2]: Contains the Value.
+ sample[3]: Contains the Time (in ticks)
+ '''
+ if (('cpu' in sample[1] and
+ any(c in sample[1] for c in self.collectd_cpu_keys)) or
+ ('processes' in sample[1] and
+ any(p in sample[1] for p in self.collectd_processes_keys)) or
+ ('interface' in sample[1] and
+ (any(i in sample[1] for i in self.collectd_iface_keys) and
+ any(x not in sample[1]
+ for x in self.collectd_iface_xkeys))) or
+ ('ovs_stats' in sample[1] and
+ any(o in sample[1] for o in self.collectd_ovsstats_keys)) or
+ ('dpdkstat' in sample[1] and
+ any(d in sample[1] for d in self.collectd_dpdkstats_keys)) or
+ ('intel_rdt' in sample[1] and
+ any(r in sample[1] for r in self.collectd_intelrdt_keys) and
+ (int(sample[1].split('.')[1]) not in self.exclude_coreids))):
+ if sample[1] not in self.pd_dict:
+ self.pd_dict[sample[1]] = list()
+ val = self.pd_dict[sample[1]]
+ val.append((sample[2], sample[3]))
+ self.pd_dict[sample[1]] = val
+
+ def stop(self):
+ """
+ Stop receiving the samples.
+ """
+ self.server.close()
+ self.q_of_samples.put(None)
+ self.control.value = True
+
+
+# inherit from collector.ICollector.
+class Collectd(collector.ICollector):
+ """A collector of system statistics based on collectd
+
+ It starts a UDP server, receives metrics from collectd
+ and plots the results.
+ """
+
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize collection of statistics
+ """
+ self._log = os.path.join(results_dir,
+ settings.getValue('LOG_FILE_COLLECTD') +
+ '_' + test_name + '.log')
+ self.results = {}
+ self.sample_dict = multiprocessing.Manager().dict()
+ self.control = multiprocessing.Value('b', False)
+ self.receiver = Receiver(self.sample_dict, self.control)
+
+ def start(self):
+ """
+ Start receiving samples
+ """
+ self.receiver.server.start()
+ self.receiver.start()
+
+ def stop(self):
+ """
+ Stop receiving samples
+ """
+ self.control.value = True
+ self.receiver.stop()
+ self.receiver.server.join(5)
+ self.receiver.join(5)
+ if self.receiver.server.is_alive():
+ self.receiver.server.terminate()
+ if self.receiver.is_alive():
+ self.receiver.terminate()
+ self.results = copy.deepcopy(self.sample_dict)
+
+ def get_results(self):
+ """
+ Return the results.
+ """
+ return get_results_to_print(self.results)
+
+ def print_results(self):
+ """
+ Plot the results, save the raw data and
+ log the collected statistics.
+ """
+ plot_graphs(self.results)
+ proc_stats = get_results_to_print(self.results)
+ for process in proc_stats:
+ logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+ for(key, value) in proc_stats[process].items():
+ logging.info(" Statistic: " + str(key) +
+ ", Value: " + str(value))
diff --git a/tools/collectors/collectd/collectd_bucky.py b/tools/collectors/collectd/collectd_bucky.py
new file mode 100644
index 00000000..bac24ed7
--- /dev/null
+++ b/tools/collectors/collectd/collectd_bucky.py
@@ -0,0 +1,769 @@
+# Copyright 2014-2018 TRBS, Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This file is a modified version of scripts present in bucky software
+# details of bucky can be found at https://github.com/trbs/bucky
+
+"""
+This module receives the samples from collectd, processes them and
+enqueues them in a format suitable for easy processing.
+It also handles secure communication with collectd.
+"""
+import copy
+import hmac
+import logging
+import multiprocessing
+import os
+import socket
+import struct
+import sys
+from hashlib import sha1, sha256
+
+from Crypto.Cipher import AES
+from conf import settings
+
+logging.basicConfig()
+LOG = logging.getLogger(__name__)
+
+
+class CollectdError(Exception):
+ """
+ Custom error class.
+ """
+ def __init__(self, mesg):
+ super(CollectdError, self).__init__(mesg)
+ self.mesg = mesg
+
+ def __str__(self):
+ return self.mesg
+
+
+class ConnectError(CollectdError):
+ """
+ Custom connect error
+ """
+ pass
+
+
+class ConfigError(CollectdError):
+ """
+ Custom config error
+ """
+ pass
+
+
+class ProtocolError(CollectdError):
+ """
+ Custom protocol error
+ """
+ pass
+
+
+class UDPServer(multiprocessing.Process):
+ """
+ Actual UDP server receiving collectd samples over the network
+ """
+ def __init__(self, ip, port):
+ super(UDPServer, self).__init__()
+ self.daemon = True
+ addrinfo = socket.getaddrinfo(ip, port,
+ socket.AF_UNSPEC, socket.SOCK_DGRAM)
+ afamily, _, _, _, addr = addrinfo[0]
+ ip, port = addr[:2]
+ self.ip_addr = ip
+ self.port = port
+ self.sock = socket.socket(afamily, socket.SOCK_DGRAM)
+ self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ self.sock.bind((ip, port))
+ LOG.info("Bound socket socket %s:%s", ip, port)
+ except socket.error:
+ LOG.exception("Error binding socket %s:%s.", ip, port)
+ sys.exit(1)
+
+ self.sock_recvfrom = self.sock.recvfrom
+
+ def run(self):
+ """
+ Start receiving messages
+ """
+ recvfrom = self.sock_recvfrom
+ while True:
+ try:
+ data, addr = recvfrom(65535)
+ except (IOError, KeyboardInterrupt):
+ continue
+ addr = addr[:2] # for compatibility with longer ipv6 tuples
+ if data == b'EXIT':
+ break
+ if not self.handle(data, addr):
+ break
+ try:
+ self.pre_shutdown()
+ except SystemExit:
+ LOG.exception("Failed pre_shutdown method for %s",
+ self.__class__.__name__)
+
+ def handle(self, data, addr):
+ """
+ Handle the message.
+ """
+ raise NotImplementedError()
+
+ def pre_shutdown(self):
+ """ Pre shutdown hook """
+ pass
+
+ def close(self):
+ """
+ Close the communication
+ """
+ self.send('EXIT')
+
+ def send(self, data):
+ """
+ Send over the network
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ if not isinstance(data, bytes):
+ data = data.encode()
+ sock.sendto(data, 0, (self.ip_addr, self.port))
+
+
+class CPUConverter(object):
+ """
+ Converter for CPU samples from collectd.
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ return ["cpu", sample["plugin_instance"], sample["type_instance"]]
+
+
+class InterfaceConverter(object):
+ """
+ Converter for Interface samples from collectd
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ parts = []
+ parts.append("interface")
+ if sample.get("plugin_instance", ""):
+ parts.append(sample["plugin_instance"].strip())
+ stypei = sample.get("type_instance", "").strip()
+ if stypei:
+ parts.append(stypei)
+ stype = sample.get("type").strip()
+ if stype:
+ parts.append(stype)
+ vname = sample.get("value_name").strip()
+ if vname:
+ parts.append(vname)
+ return parts
+
+
+class MemoryConverter(object):
+ """
+ Converter for Memory samples from collectd
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ return ["memory", sample["type_instance"]]
+
+
+class DefaultConverter(object):
+ """
+ Default converter for samples from collectd
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ parts = []
+ parts.append(sample["plugin"].strip())
+ if sample.get("plugin_instance"):
+ parts.append(sample["plugin_instance"].strip())
+ stype = sample.get("type", "").strip()
+ if stype and stype != "value":
+ parts.append(stype)
+ stypei = sample.get("type_instance", "").strip()
+ if stypei:
+ parts.append(stypei)
+ vname = sample.get("value_name").strip()
+ if vname and vname != "value":
+ parts.append(vname)
+ return parts
+
+
+DEFAULT_CONVERTERS = {
+ "cpu": CPUConverter(),
+ "interface": InterfaceConverter(),
+ "memory": MemoryConverter(),
+ "_default": DefaultConverter(),
+}
+
+
+class CollectDTypes(object):
+ """
+ Class to handle the sample types. The types.db file that comes
+ with collectd usually defines the various types.
+ """
+ def __init__(self, types_dbs=None):
+ if types_dbs is None:
+ types_dbs = []
+ dirs = ["/opt/collectd/share/collectd/types.db",
+ "/usr/local/share/collectd/types.db"]
+ self.types = {}
+ self.type_ranges = {}
+ if not types_dbs:
+ types_dbs = [tdb for tdb in dirs if os.path.exists(tdb)]
+ if not types_dbs:
+ raise ConfigError("Unable to locate types.db")
+ self.types_dbs = types_dbs
+ self._load_types()
+
+ def get(self, name):
+ """
+ Get the name of the type
+ """
+ t_name = self.types.get(name)
+ if t_name is None:
+ raise ProtocolError("Invalid type name: %s" % name)
+ return t_name
+
+ def _load_types(self):
+ """
+ Load all the types from types_db
+ """
+ for types_db in self.types_dbs:
+ with open(types_db) as handle:
+ for line in handle:
+ if line.lstrip()[:1] == "#":
+ continue
+ if not line.strip():
+ continue
+ self._add_type_line(line)
+ LOG.info("Loaded collectd types from %s", types_db)
+
+ def _add_type_line(self, line):
+ """
+ Add types information
+ """
+ types = {
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "DERIVE": 2,
+ "ABSOLUTE": 3
+ }
+ name, spec = line.split(None, 1)
+ self.types[name] = []
+ self.type_ranges[name] = {}
+ vals = spec.split(", ")
+ for val in vals:
+ vname, vtype, minv, maxv = val.strip().split(":")
+ vtype = types.get(vtype)
+ if vtype is None:
+ raise ValueError("Invalid value type: %s" % vtype)
+ minv = None if minv == "U" else float(minv)
+ maxv = None if maxv == "U" else float(maxv)
+ self.types[name].append((vname, vtype))
+ self.type_ranges[name][vname] = (minv, maxv)
+
+
+class CollectDParser(object):
+ """
+ Parser class: Implements the sample parsing operations.
+ The types definition defines the parsing process.
+ """
+ def __init__(self, types_dbs=None, counter_eq_derive=False):
+ if types_dbs is None:
+ types_dbs = []
+ self.types = CollectDTypes(types_dbs=types_dbs)
+ self.counter_eq_derive = counter_eq_derive
+
+ def parse(self, data):
+ """
+ Parse individual samples
+ """
+ for sample in self.parse_samples(data):
+ yield sample
+
+ def parse_samples(self, data):
+ """
+ Extract all the samples from the message.
+ """
+ types = {
+ 0x0000: self._parse_string("host"),
+ 0x0001: self._parse_time("time"),
+ 0x0008: self._parse_time_hires("time"),
+ 0x0002: self._parse_string("plugin"),
+ 0x0003: self._parse_string("plugin_instance"),
+ 0x0004: self._parse_string("type"),
+ 0x0005: self._parse_string("type_instance"),
+ 0x0006: None, # handle specially
+ 0x0007: self._parse_time("interval"),
+ 0x0009: self._parse_time_hires("interval")
+ }
+ sample = {}
+ for (ptype, pdata) in self.parse_data(data):
+ if ptype not in types:
+ LOG.debug("Ignoring part type: 0x%02x", ptype)
+ continue
+ if ptype != 0x0006:
+ types[ptype](sample, pdata)
+ continue
+ for vname, vtype, val in self.parse_values(sample["type"], pdata):
+ sample["value_name"] = vname
+ sample["value_type"] = vtype
+ sample["value"] = val
+ yield copy.deepcopy(sample)
+
+ @staticmethod
+ def parse_data(data):
+ """
+ Parse the message
+ """
+ types = set([
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004,
+ 0x0005, 0x0006, 0x0007, 0x0008, 0x0009,
+ 0x0100, 0x0101, 0x0200, 0x0210
+ ])
+ while data:
+ if len(data) < 4:
+ raise ProtocolError("Truncated header.")
+ (part_type, part_len) = struct.unpack("!HH", data[:4])
+ data = data[4:]
+ if part_type not in types:
+ raise ProtocolError("Invalid part type: 0x%02x" % part_type)
+ part_len -= 4 # includes four header bytes we just parsed
+ if len(data) < part_len:
+ raise ProtocolError("Truncated value.")
+ part_data, data = data[:part_len], data[part_len:]
+ yield (part_type, part_data)
+
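A worked example of the part layout parsed above, using a hypothetical
"host" part (type 0x0000; the length field includes the 4 header bytes):

    import struct

    part = struct.pack("!HH", 0x0000, 4 + 6) + b'node1\0'
    part_type, part_len = struct.unpack("!HH", part[:4])
    print(hex(part_type), part_len)  # 0x0 10
    print(part[4:part_len])          # b'node1\x00'
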
+ def parse_values(self, stype, data):
+ """
+ Parse the value of a particular type
+ """
+ types = {0: "!Q", 1: "<d", 2: "!q", 3: "!Q"}
+ (nvals,) = struct.unpack("!H", data[:2])
+ data = data[2:]
+ if len(data) != 9 * nvals:
+ raise ProtocolError("Invalid value structure length.")
+ vtypes = self.types.get(stype)
+ if nvals != len(vtypes):
+ raise ProtocolError("Values different than types.db info.")
+ for i in range(nvals):
+ vtype = data[i]
+ if vtype != vtypes[i][1]:
+ if self.counter_eq_derive and \
+ (vtype, vtypes[i][1]) in ((0, 2), (2, 0)):
+ # if counter vs derive don't break, assume server is right
+ LOG.debug("Type mismatch (counter/derive) for %s/%s",
+ stype, vtypes[i][0])
+ else:
+ raise ProtocolError("Type mismatch with types.db")
+ data = data[nvals:]
+ for i in range(nvals):
+ vdata, data = data[:8], data[8:]
+ (val,) = struct.unpack(types[vtypes[i][1]], vdata)
+ yield vtypes[i][0], vtypes[i][1], val
+
+ @staticmethod
+ def _parse_string(name):
+ """
+ Parse string value
+ """
+ def _parser(sample, data):
+ """
+ Actual string parser
+ """
+ data = data.decode()
+ if data[-1] != '\0':
+ raise ProtocolError("Invalid string detected.")
+ sample[name] = data[:-1]
+ return _parser
+
+ @staticmethod
+ def _parse_time(name):
+ """
+ Parse time value
+ """
+ def _parser(sample, data):
+ """
+ Actual time parser
+ """
+ if len(data) != 8:
+ raise ProtocolError("Invalid time data length.")
+ (val,) = struct.unpack("!Q", data)
+ sample[name] = float(val)
+ return _parser
+
+ @staticmethod
+ def _parse_time_hires(name):
+ """
+ Parse time hires value
+ """
+ def _parser(sample, data):
+ """
+ Actual time hires parser
+ """
+ if len(data) != 8:
+ raise ProtocolError("Invalid hires time data length.")
+ (val,) = struct.unpack("!Q", data)
+ sample[name] = val * (2 ** -30)
+ return _parser
+
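collectd encodes high-resolution time in units of 2**-30 seconds, hence
the multiplication above; a quick check with a hypothetical on-wire value:

    raw = 1500000000 << 30      # hypothetical hires timestamp
    print(raw * (2 ** -30))     # 1500000000.0 seconds since the epoch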
+
+class CollectDCrypto(object):
+ """
+ Handle the secured communication with the collectd daemon
+ """
+ def __init__(self):
+ sec_level = settings.getValue('COLLECTD_SECURITY_LEVEL')
+ if sec_level in ("sign", "SIGN", "Sign", 1):
+ self.sec_level = 1
+ elif sec_level in ("encrypt", "ENCRYPT", "Encrypt", 2):
+ self.sec_level = 2
+ else:
+ self.sec_level = 0
+ if self.sec_level:
+ self.auth_file = settings.getValue('COLLECTD_AUTH_FILE')
+ self.auth_db = {}
+ if self.auth_file:
+ self.load_auth_file()
+ if not self.auth_file:
+ raise ConfigError("Collectd security level configured but no "
+ "auth file specified in configuration")
+ if not self.auth_db:
+ LOG.warning("Collectd security level configured but no "
+ "user/passwd entries loaded from auth file")
+
+ def load_auth_file(self):
+ """
+ Load the authentication file.
+ """
+ try:
+ fil = open(self.auth_file)
+ except IOError as exc:
+ raise ConfigError("Unable to load collectd's auth file: %r" % exc)
+ self.auth_db.clear()
+ for line in fil:
+ line = line.strip()
+ if not line or line[0] == "#":
+ continue
+ user, passwd = line.split(":", 1)
+ user = user.strip()
+ passwd = passwd.strip()
+ if not user or not passwd:
+ LOG.warning("Found line with missing user or password")
+ continue
+ if user in self.auth_db:
+ LOG.warning("Found multiple entries for single user")
+ self.auth_db[user] = passwd
+ fil.close()
+ LOG.info("Loaded collectd's auth file from %s", self.auth_file)
+
+ def parse(self, data):
+ """
+ Parse the non-encrypted message
+ """
+ if len(data) < 4:
+ raise ProtocolError("Truncated header.")
+ part_type, part_len = struct.unpack("!HH", data[:4])
+ sec_level = {0x0200: 1, 0x0210: 2}.get(part_type, 0)
+ if sec_level < self.sec_level:
+ raise ProtocolError("Packet has lower security level than allowed")
+ if not sec_level:
+ return data
+ if sec_level == 1 and not self.sec_level:
+ return data[part_len:]
+ data = data[4:]
+ part_len -= 4
+ if len(data) < part_len:
+ raise ProtocolError("Truncated part payload.")
+ if sec_level == 1:
+ return self.parse_signed(part_len, data)
+ if sec_level == 2:
+ return self.parse_encrypted(part_len, data)
+
+ def parse_signed(self, part_len, data):
+ """
+ Parse the signed message
+ """
+
+ if part_len <= 32:
+ raise ProtocolError("Truncated signed part.")
+ sig, data = data[:32], data[32:]
+ uname_len = part_len - 32
+ uname = data[:uname_len].decode()
+ if uname not in self.auth_db:
+ raise ProtocolError("Signed packet, unknown user '%s'" % uname)
+ password = self.auth_db[uname].encode()
+ sig2 = hmac.new(password, msg=data, digestmod=sha256).digest()
+ if not self._hashes_match(sig, sig2):
+ raise ProtocolError("Bad signature from user '%s'" % uname)
+ data = data[uname_len:]
+ return data
+
+ def parse_encrypted(self, part_len, data):
+ """
+ Parse the encrypted message
+ """
+ if part_len != len(data):
+ raise ProtocolError("Enc pkt size disaggrees with header.")
+ if len(data) <= 38:
+ raise ProtocolError("Truncated encrypted part.")
+ uname_len, data = struct.unpack("!H", data[:2])[0], data[2:]
+ if len(data) <= uname_len + 36:
+ raise ProtocolError("Truncated encrypted part.")
+ uname, data = data[:uname_len].decode(), data[uname_len:]
+ if uname not in self.auth_db:
+ raise ProtocolError("Couldn't decrypt, unknown user '%s'" % uname)
+ ival, data = data[:16], data[16:]
+ password = self.auth_db[uname].encode()
+ key = sha256(password).digest()
+ pad_bytes = 16 - (len(data) % 16)
+ data += b'\0' * pad_bytes
+ data = AES.new(key, IV=ival, mode=AES.MODE_OFB).decrypt(data)
+ data = data[:-pad_bytes]
+ tag, data = data[:20], data[20:]
+ tag2 = sha1(data).digest()
+ if not self._hashes_match(tag, tag2):
+ raise ProtocolError("Bad checksum on enc pkt for '%s'" % uname)
+ return data
+
+ @staticmethod
+ def _hashes_match(val_a, val_b):
+ """Constant time comparison of bytes """
+ if len(val_a) != len(val_b):
+ return False
+ diff = 0
+ for val_x, val_y in zip(val_a, val_b):
+ diff |= val_x ^ val_y
+ return not diff
+
+
+class CollectDConverter(object):
+ """
+ Handle all conversions.
+ Conversion: convert the sample received from collectd to an
+ appropriate format for easy processing.
+ """
+ def __init__(self):
+ self.converters = dict(DEFAULT_CONVERTERS)
+
+ def convert(self, sample):
+ """
+ Main conversion handling.
+ """
+ default = self.converters["_default"]
+ handler = self.converters.get(sample["plugin"], default)
+ try:
+ name_parts = handler(sample)
+ if name_parts is None:
+ return # treat None as "ignore sample"
+ name = '.'.join(name_parts)
+ except (AttributeError, IndexError, MemoryError, RuntimeError):
+ LOG.exception("Exception in sample handler %s (%s):",
+ sample["plugin"], handler)
+ return
+ host = sample.get("host", "")
+ return (
+ host,
+ name,
+ sample["value_type"],
+ sample["value"],
+ int(sample["time"])
+ )
+
+ def _add_converter(self, name, inst, source="unknown"):
+ """
+ Add new converter types
+ """
+ if name not in self.converters:
+ LOG.info("Converter: %s from %s", name, source)
+ self.converters[name] = inst
+ return
+ kpriority = getattr(inst, "PRIORITY", 0)
+ ipriority = getattr(self.converters[name], "PRIORITY", 0)
+ if kpriority > ipriority:
+ LOG.info("Replacing: %s", name)
+ LOG.info("Converter: %s from %s", name, source)
+ self.converters[name] = inst
+ return
+ LOG.info("Ignoring: %s (%s) from %s (priority: %s vs %s)",
+ name, inst, source, kpriority, ipriority)
+
+
+class CollectDHandler(object):
+ """Wraps all CollectD parsing functionality in a class"""
+
+ def __init__(self):
+ self.crypto = CollectDCrypto()
+ collectd_types = []
+ collectd_counter_eq_derive = False
+ self.parser = CollectDParser(collectd_types,
+ collectd_counter_eq_derive)
+ self.converter = CollectDConverter()
+ self.prev_samples = {}
+ self.last_sample = None
+
+ def parse(self, data):
+ """
+ Parse the samples from collectd
+ """
+ try:
+ data = self.crypto.parse(data)
+ except ProtocolError as error:
+ LOG.error("Protocol error in CollectDCrypto: %s", error)
+ return
+ try:
+ for sample in self.parser.parse(data):
+ self.last_sample = sample
+ stype = sample["type"]
+ vname = sample["value_name"]
+ sample = self.converter.convert(sample)
+ if sample is None:
+ continue
+ host, name, vtype, val, time = sample
+ if not name.strip():
+ continue
+ val = self.calculate(host, name, vtype, val, time)
+ val = self.check_range(stype, vname, val)
+ if val is not None:
+ yield host, name, val, time
+ except ProtocolError as error:
+ LOG.error("Protocol error: %s", error)
+ if self.last_sample is not None:
+ LOG.info("Last sample: %s", self.last_sample)
+
+ def check_range(self, stype, vname, val):
+ """
+ Check the value range
+ """
+ if val is None:
+ return
+ try:
+ vmin, vmax = self.parser.types.type_ranges[stype][vname]
+ except KeyError:
+ LOG.error("Couldn't find vmin, vmax in CollectDTypes")
+ return val
+ if vmin is not None and val < vmin:
+ LOG.debug("Invalid value %s (<%s) for %s", val, vmin, vname)
+ LOG.debug("Last sample: %s", self.last_sample)
+ return
+ if vmax is not None and val > vmax:
+ LOG.debug("Invalid value %s (>%s) for %s", val, vmax, vname)
+ LOG.debug("Last sample: %s", self.last_sample)
+ return
+ return val
+
+ def calculate(self, host, name, vtype, val, time):
+ """
+ Perform calculations for handlers
+ """
+ handlers = {
+ 0: self._calc_counter, # counter
+ 1: lambda _host, _name, v, _time: v, # gauge
+ 2: self._calc_derive, # derive
+ 3: self._calc_absolute # absolute
+ }
+ if vtype not in handlers:
+ LOG.error("Invalid value type %s for %s", vtype, name)
+ LOG.info("Last sample: %s", self.last_sample)
+ return
+ return handlers[vtype](host, name, val, time)
+
+ def _calc_counter(self, host, name, val, time):
+ """
+ Calculating counter values
+ """
+ key = (host, name)
+ if key not in self.prev_samples:
+ self.prev_samples[key] = (val, time)
+ return
+ pval, ptime = self.prev_samples[key]
+ self.prev_samples[key] = (val, time)
+ if time <= ptime:
+ LOG.error("Invalid COUNTER update for: %s:%s", key[0], key[1])
+ LOG.info("Last sample: %s", self.last_sample)
+ return
+ if val < pval:
+ # this is supposed to handle counter wrap around
+ # see https://collectd.org/wiki/index.php/Data_source
+ LOG.debug("COUNTER wrap-around for: %s:%s (%s -> %s)",
+ host, name, pval, val)
+ if pval < 0x100000000:
+ val += 0x100000000 # 2**32
+ else:
+ val += 0x10000000000000000 # 2**64
+ return float(val - pval) / (time - ptime)
+
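A worked example of the wrap-around correction above, assuming a 32-bit
counter sampled 10 seconds apart:

    pval, ptime = 4294967290, 100.0  # previous sample, close to 2**32
    val, now = 6, 110.0              # counter wrapped around
    if val < pval and pval < 0x100000000:
        val += 0x100000000           # same correction as above
    print((val - pval) / (now - ptime))  # 1.2 increments per second
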
+ def _calc_derive(self, host, name, val, time):
+ """
+ Calculating derived values
+ """
+ key = (host, name)
+ if key not in self.prev_samples:
+ self.prev_samples[key] = (val, time)
+ return
+ pval, ptime = self.prev_samples[key]
+ self.prev_samples[key] = (val, time)
+ if time <= ptime:
+ LOG.debug("Invalid DERIVE update for: %s:%s", key[0], key[1])
+ LOG.debug("Last sample: %s", self.last_sample)
+ return
+ return float(abs(val - pval)) / (time - ptime)
+
+ def _calc_absolute(self, host, name, val, time):
+ """
+ Calculating absolute values
+ """
+ key = (host, name)
+ if key not in self.prev_samples:
+ self.prev_samples[key] = (val, time)
+ return
+ _, ptime = self.prev_samples[key]
+ self.prev_samples[key] = (val, time)
+ if time <= ptime:
+ LOG.error("Invalid ABSOLUTE update for: %s:%s", key[0], key[1])
+ LOG.info("Last sample: %s", self.last_sample)
+ return
+ return float(val) / (time - ptime)
+
+
+class CollectDServer(UDPServer):
+ """Single processes CollectDServer"""
+
+ def __init__(self, queue):
+ super(CollectDServer, self).__init__(settings.getValue('COLLECTD_IP'),
+ settings.getValue('COLLECTD_PORT'))
+ self.handler = CollectDHandler()
+ self.queue = queue
+
+ def handle(self, data, addr):
+ for sample in self.handler.parse(data):
+ self.queue.put(sample)
+ return True
+
+ def pre_shutdown(self):
+ LOG.info("Sutting down CollectDServer")
+
+
+def get_collectd_server(queue):
+ """Get the collectd server """
+ server = CollectDServer
+ return server(queue)
diff --git a/tools/functions.py b/tools/functions.py
index c0d1e5f7..d35f1f84 100644
--- a/tools/functions.py
+++ b/tools/functions.py
@@ -190,17 +190,15 @@ def filter_output(output, regex):
"""
result = []
if isinstance(output, str):
- for line in output.split('\n'):
+ for line in output.splitlines():
result += re.findall(regex, line)
- return result
- elif isinstance(output, list) or isinstance(output, tuple):
- tmp_res = []
+ elif isinstance(output, (list, tuple)):
for item in output:
- tmp_res.append(filter_output(item, regex))
- return tmp_res
+ result.append(filter_output(item, regex))
else:
raise RuntimeError('Only strings and lists are supported by filter_output(), '
'but output has type {}'.format(type(output)))
+ return result
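The rewrite keeps the nesting of the input; a behaviour sketch (assumes
filter_output() from this module is importable):

    print(filter_output('eth0 up\neth1 down', r'eth\d'))
    # ['eth0', 'eth1']
    print(filter_output(['eth0 up', 'eth1 down'], r'eth\d'))
    # [['eth0'], ['eth1']] - nesting mirrors the input structure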
def format_description(desc, length):
""" Split description into multiple lines based on given line length.
diff --git a/tools/llc_management/__init__.py b/tools/llc_management/__init__.py
new file mode 100644
index 00000000..4774dc93
--- /dev/null
+++ b/tools/llc_management/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for RMD to perform LLC-Management
+"""
diff --git a/tools/llc_management/rmd.py b/tools/llc_management/rmd.py
new file mode 100644
index 00000000..308dda3c
--- /dev/null
+++ b/tools/llc_management/rmd.py
@@ -0,0 +1,198 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Perform L3-cache allocations for different workloads - VNFs, PMDs, vSwitch,
+etc. - based on user-defined policies. This is done using Intel RMD.
+Details about RMD can be found at: https://github.com/intel/rmd
+"""
+
+
+import itertools
+import json
+import logging
+import math
+import socket
+
+from collections import defaultdict
+from stcrestclient import resthttp
+from conf import settings as S
+
+DEFAULT_PORT = 8888
+DEFAULT_SERVER = '127.0.0.1'
+DEFAULT_VERSION = 'v1'
+
+
+def cpumask2coreids(mask):
+ """
+ Convert a CPU mask in hex-string form to a list of core-IDs
+ """
+ intmask = int(mask, 16)
+ i = 1
+ coreids = []
+ while i <= intmask:
+ if i & intmask:
+ coreids.append(str(math.frexp(i)[1] - 1))
+ i = i << 1
+ return coreids
+
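A worked example of the mask conversion above (0x1E == 0b11110, i.e.
cores 1 to 4):

    print(cpumask2coreids('1E'))  # ['1', '2', '3', '4']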
+
+def get_cos(category):
+ """
+ Obtain the Class of Service for a particular category
+ """
+ return S.getValue(category.upper() + '_COS')
+
+
+def get_minmax(category):
+ """
+ Obtain the min-max values for a particular category
+ """
+ return S.getValue(category.upper() + '_CA')
+
+
+def guest_vm_settings_expanded(cores):
+ """
+ Check if we are running in pv+p mode
+ """
+ for core in cores:
+ if isinstance(core, str) and '#' in core:
+ return False
+ return True
+
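A quick illustration; the '#' character marks an unexpanded macro inside
the core binding (values are hypothetical):

    print(guest_vm_settings_expanded(['2', '3']))           # True
    print(guest_vm_settings_expanded(['2', '#EVAL(2+1)']))  # False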
+
+class IrmdHttp(object):
+ """
+ Intel RMD ReST API wrapper object
+ """
+
+ def __init__(self, server=None, port=None, api_version=None):
+ if not server:
+ server = DEFAULT_SERVER
+ if not port:
+ port = DEFAULT_PORT
+ if not api_version:
+ api_version = DEFAULT_VERSION
+ url = resthttp.RestHttp.url('http', server, port, api_version)
+ rest = resthttp.RestHttp(url, None, None, False, True)
+ try:
+ rest.get_request('workloads')
+ except (socket.error, resthttp.ConnectionError,
+ resthttp.RestHttpError):
+ raise RuntimeError('Cannot connect to RMD server: %s:%s' %
+ (server, port))
+ self._rest = rest
+ self.workloadids = []
+ self._logger = logging.getLogger(__name__)
+
+ def setup_cacheways(self, affinity_map):
+ """
+ Set up the cacheways using RMD APIs.
+ """
+ for cos_cat in affinity_map:
+ if S.getValue('POLICY_TYPE') == 'COS':
+ params = {'core_ids': affinity_map[cos_cat],
+ 'policy': get_cos(cos_cat)}
+ else:
+ minmax = get_minmax(cos_cat)
+ if len(minmax) < 2:
+ return
+ params = {'core_ids': affinity_map[cos_cat],
+ 'min_cache': minmax[0],
+ 'max_cache': minmax[1]}
+ try:
+ _, data = self._rest.post_request('workloads', None,
+ params)
+ if 'id' in data:
+ wl_id = data['id']
+ self.workloadids.append(wl_id)
+
+ except resthttp.RestHttpError as exp:
+ if str(exp).find('already exists') >= 0:
+ raise RuntimeError("The cacheway already exist")
+ else:
+ raise RuntimeError('Failed to connect: ' + str(exp))
+
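For reference, the two payload shapes posted to RMD's 'workloads'
endpoint above; core IDs and values are hypothetical:

    # POLICY_TYPE == 'COS': a named policy is applied to the cores
    params = {'core_ids': ['2', '3'], 'policy': 'gold'}
    # otherwise explicit cache-way bounds are used
    params = {'core_ids': ['2', '3'], 'min_cache': 2, 'max_cache': 4}
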
+ def reset_all_cacheways(self):
+ """
+ Resets the cacheways
+ """
+ try:
+ for wl_id in self.workloadids:
+ self._rest.delete_request('workloads', str(wl_id))
+ except resthttp.RestHttpError as ecp:
+ raise RuntimeError('Failed to connect: ' + str(ecp))
+
+ def log_allocations(self):
+ """
+ Log the current cacheway settings.
+ """
+ try:
+ _, data = self._rest.get_request('workloads')
+ self._logger.info("Current Allocations: %s",
+ json.dumps(data, indent=4, sort_keys=True))
+ except resthttp.RestHttpError as ecp:
+ raise RuntimeError('Failed to connect: ' + str(ecp))
+
+
+class CacheAllocator(object):
+ """
+ This class exposes APIs for VSPERF to perform
+ Cache-allocation management operations.
+ """
+
+ def __init__(self):
+ port = S.getValue('RMD_PORT')
+ api_version = S.getValue('RMD_API_VERSION')
+ server_ip = S.getValue('RMD_SERVER_IP')
+ self.irmd_manager = IrmdHttp(str(server_ip), str(port),
+ str(api_version))
+
+ def setup_llc_allocation(self):
+ """
+ Wrapper for setting up cacheways
+ """
+ cpumap = defaultdict(list)
+ vswitchmask = S.getValue('VSWITCHD_DPDK_CONFIG')['dpdk-lcore-mask']
+ vnfcores = list(itertools.chain.from_iterable(
+ S.getValue('GUEST_CORE_BINDING')))
+ if not guest_vm_settings_expanded(vnfcores):
+ vnfcores = None
+ nncores = None
+ if S.getValue('LOADGEN') == 'StressorVM':
+ nncores = list(itertools.chain.from_iterable(
+ S.getValue('NN_CORE_BINDING')))
+ pmdcores = cpumask2coreids(S.getValue('VSWITCH_PMD_CPU_MASK'))
+ vswitchcores = cpumask2coreids(vswitchmask)
+ if vswitchcores:
+ cpumap['vswitch'] = vswitchcores
+ if vnfcores:
+ cpumap['vnf'] = vnfcores
+ if pmdcores:
+ cpumap['pmd'] = pmdcores
+ if nncores:
+ cpumap['noisevm'] = nncores
+ self.irmd_manager.setup_cacheways(cpumap)
+
+ def cleanup_llc_allocation(self):
+ """
+ Wrapper for cacheway cleanup
+ """
+ self.irmd_manager.reset_all_cacheways()
+
+ def log_allocations(self):
+ """
+ Wrapper for logging cacheway allocations
+ """
+ self.irmd_manager.log_allocations()
diff --git a/tools/pkt_gen/ixia/ixia.py b/tools/pkt_gen/ixia/ixia.py
index e768be06..d4ca56f2 100755
--- a/tools/pkt_gen/ixia/ixia.py
+++ b/tools/pkt_gen/ixia/ixia.py
@@ -111,6 +111,11 @@ def _build_set_cmds(values, prefix='dict set'):
yield subkey
continue
+ if isinstance(value, list):
+ value = '{{{}}}'.format(' '.join(str(x) for x in value))
+ yield ' '.join([prefix, 'set', key, value]).strip()
+ continue
+
# tcl doesn't recognise the strings "True" or "False", only "1"
# or "0". Special case to convert them
if isinstance(value, bool):
@@ -118,6 +123,9 @@ def _build_set_cmds(values, prefix='dict set'):
else:
value = str(value)
+ if isinstance(value, str) and not value:
+ value = '{}'
+
if prefix:
yield ' '.join([prefix, key, value]).strip()
else:
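The new list handling turns a Python list into a Tcl list literal; a
sketch with hypothetical values:

    value = '{{{}}}'.format(' '.join(str(x) for x in [64, 128, 512]))
    print(value)  # {64 128 512}, a Tcl list the Ixia scripts can consume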
diff --git a/tools/pkt_gen/ixnet/ixnet.py b/tools/pkt_gen/ixnet/ixnet.py
index b8fb1879..d1ba9096 100755
--- a/tools/pkt_gen/ixnet/ixnet.py
+++ b/tools/pkt_gen/ixnet/ixnet.py
@@ -127,6 +127,11 @@ def _build_set_cmds(values, prefix='dict set'):
yield subkey
continue
+ if isinstance(value, list):
+ value = '{{{}}}'.format(' '.join(str(x) for x in value))
+ yield ' '.join([prefix, 'set', key, value]).strip()
+ continue
+
# tcl doesn't recognise the strings "True" or "False", only "1"
# or "0". Special case to convert them
if isinstance(value, bool):
@@ -134,6 +139,9 @@ def _build_set_cmds(values, prefix='dict set'):
else:
value = str(value)
+ if isinstance(value, str) and not value:
+ value = '{}'
+
if prefix:
yield ' '.join([prefix, key, value]).strip()
else:
diff --git a/tools/pkt_gen/trex/trex.py b/tools/pkt_gen/trex/trex.py
index acdaf287..cfe54b78 100644
--- a/tools/pkt_gen/trex/trex.py
+++ b/tools/pkt_gen/trex/trex.py
@@ -19,6 +19,8 @@ Trex Traffic Generator Model
import logging
import subprocess
import sys
+import time
+import os
from collections import OrderedDict
# pylint: disable=unused-import
import netaddr
@@ -81,6 +83,7 @@ class Trex(ITrafficGenerator):
settings.getValue('TRAFFICGEN_TREX_BASE_DIR'))
self._trex_user = settings.getValue('TRAFFICGEN_TREX_USER')
self._stlclient = None
+ self._verification_params = None
def connect(self):
'''Connect to Trex traffic generator
@@ -89,11 +92,11 @@ class Trex(ITrafficGenerator):
the configuration file
'''
self._stlclient = STLClient()
- self._logger.info("TREX: In Trex connect method...")
+ self._logger.info("T-Rex: In Trex connect method...")
if self._trex_host_ip_addr:
cmd_ping = "ping -c1 " + self._trex_host_ip_addr
else:
- raise RuntimeError('TREX: Trex host not defined')
+ raise RuntimeError('T-Rex: Trex host not defined')
ping = subprocess.Popen(cmd_ping, shell=True, stderr=subprocess.PIPE)
output, error = ping.communicate()
@@ -101,7 +104,7 @@ class Trex(ITrafficGenerator):
if ping.returncode:
self._logger.error(error)
self._logger.error(output)
- raise RuntimeError('TREX: Cannot ping Trex host at ' + \
+ raise RuntimeError('T-Rex: Cannot ping Trex host at ' + \
self._trex_host_ip_addr)
connect_trex = "ssh " + self._trex_user + \
@@ -120,13 +123,18 @@ class Trex(ITrafficGenerator):
self._logger.error(error)
self._logger.error(output)
raise RuntimeError(
- 'TREX: Cannot locate Trex program at %s within %s' \
+ 'T-Rex: Cannot locate Trex program at %s within %s' \
% (self._trex_host_ip_addr, self._trex_base_dir))
- self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
- verbose_level=0)
- self._stlclient.connect()
- self._logger.info("TREX: Trex host successfully found...")
+ try:
+ self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
+ verbose_level=0)
+ self._stlclient.connect()
+ except STLError:
+ raise RuntimeError('T-Rex: Cannot connect to T-Rex server. Please check if it is '
+ 'running and that the firewall allows connection to TCP port 4501.')
+
+ self._logger.info("T-Rex: Trex host successfully found...")
def disconnect(self):
"""Disconnect from the traffic generator.
@@ -138,7 +146,7 @@ class Trex(ITrafficGenerator):
:returns: None
"""
- self._logger.info("TREX: In trex disconnect method")
+ self._logger.info("T-Rex: In trex disconnect method")
self._stlclient.disconnect(stop_traffic=True, release_ports=True)
@staticmethod
@@ -243,11 +251,16 @@ class Trex(ITrafficGenerator):
return (stream_1, stream_2, stream_1_lat, stream_2_lat)
- def generate_traffic(self, traffic, duration):
+ def generate_traffic(self, traffic, duration, disable_capture=False):
"""The method that generate a stream
"""
my_ports = [0, 1]
+
+ # initialize ports
self._stlclient.reset(my_ports)
+ self._stlclient.remove_all_captures()
+ self._stlclient.set_service_mode(ports=my_ports, enabled=False)
+
ports_info = self._stlclient.get_port_info(my_ports)
# for SR-IOV
if settings.getValue('TRAFFICGEN_TREX_PROMISCUOUS'):
@@ -262,10 +275,35 @@ class Trex(ITrafficGenerator):
self._stlclient.add_streams(stream_1_lat, ports=[0])
self._stlclient.add_streams(stream_2_lat, ports=[1])
+ # enable traffic capture if requested
+ pcap_id = {}
+ if traffic['capture']['enabled'] and not disable_capture:
+ for ports in ['tx_ports', 'rx_ports']:
+ if traffic['capture'][ports]:
+ pcap_dir = ports[:2]
+ self._logger.info("T-Rex starting %s traffic capture", pcap_dir.upper())
+ capture = {ports : traffic['capture'][ports],
+ 'limit' : traffic['capture']['count'],
+ 'bpf_filter' : traffic['capture']['filter']}
+ self._stlclient.set_service_mode(ports=traffic['capture'][ports], enabled=True)
+ pcap_id[pcap_dir] = self._stlclient.start_capture(**capture)
+
self._stlclient.clear_stats()
- self._stlclient.start(ports=[0, 1], force=True, duration=duration)
- self._stlclient.wait_on_traffic(ports=[0, 1])
+ self._stlclient.start(ports=my_ports, force=True, duration=duration)
+ self._stlclient.wait_on_traffic(ports=my_ports)
stats = self._stlclient.get_stats(sync_now=True)
+
+ # export captured data into pcap file if possible
+ if pcap_id:
+ for pcap_dir in pcap_id:
+ pcap_file = 'capture_{}.pcap'.format(pcap_dir)
+ self._stlclient.stop_capture(pcap_id[pcap_dir]['id'],
+ os.path.join(settings.getValue('RESULTS_PATH'), pcap_file))
+ stats['capture_{}'.format(pcap_dir)] = pcap_file
+ self._logger.info("T-Rex writing %s traffic capture into %s", pcap_dir.upper(), pcap_file)
+ # disable service mode for all ports used by Trex
+ self._stlclient.set_service_mode(ports=my_ports, enabled=False)
+
return stats
@staticmethod
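For reference, a sketch of the 'capture' subsection of the traffic
dictionary consumed by generate_traffic() above; the keys match the code,
the values are hypothetical:

    traffic = {
        'capture': {
            'enabled': True,   # turn capturing on
            'tx_ports': [0],   # capture frames sent by port 0
            'rx_ports': [1],   # capture frames received by port 1
            'count': 100,      # maximum number of captured frames
            'filter': 'udp',   # BPF filter applied to the capture
        },
    }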
@@ -323,8 +361,78 @@ class Trex(ITrafficGenerator):
result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
result[ResultsConstants.MAX_LATENCY_NS] = 'Unknown'
result[ResultsConstants.AVG_LATENCY_NS] = 'Unknown'
+
+ if 'capture_tx' in stats:
+ result[ResultsConstants.CAPTURE_TX] = stats['capture_tx']
+ if 'capture_rx' in stats:
+ result[ResultsConstants.CAPTURE_RX] = stats['capture_rx']
return result
+ def learning_packets(self, traffic):
+ """
+ Send learning packets before testing
+ :param traffic: traffic structure as per send_cont_traffic guidelines
+ :return: None
+ """
+ self._logger.info("T-Rex sending learning packets")
+ learning_thresh_traffic = copy.deepcopy(traffic)
+ learning_thresh_traffic["frame_rate"] = 1
+ self.generate_traffic(learning_thresh_traffic,
+ settings.getValue("TRAFFICGEN_TREX_LEARNING_DURATION"),
+ disable_capture=True)
+ self._logger.info("T-Rex finished learning packets")
+ time.sleep(3) # allow packets to complete before starting test traffic
+
+ def run_trials(self, traffic, boundaries, duration, lossrate):
+ """
+ Run rfc2544 trial loop
+ :param traffic: traffic profile dictionary
+ :param boundaries: A dictionary with keys left, right and center to dictate
+ the lowest, highest and starting points of the binary search.
+ Values are percentages of line rate for each key.
+ :param duration: length in seconds for trials
+ :param lossrate: lowest loss rate percentage calculated from
+ a comparison between received and sent packets
+ :return: passing stats as dictionary
+ """
+ threshold = settings.getValue('TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD')
+ stats_ok = _EMPTY_STATS
+ new_params = copy.deepcopy(traffic)
+ iteration = 1
+ left = boundaries['left']
+ right = boundaries['right']
+ center = boundaries['center']
+ self._logger.info('Starting RFC2544 trials')
+ while (right - left) > threshold:
+ stats = self.generate_traffic(new_params, duration)
+ test_lossrate = ((stats["total"]["opackets"] - stats[
+ "total"]["ipackets"]) * 100) / stats["total"]["opackets"]
+ if stats["total"]["ipackets"] == 0:
+ self._logger.error('No packets received. Test failed')
+ return _EMPTY_STATS
+ if settings.getValue('TRAFFICGEN_TREX_VERIFICATION_MODE'):
+ if test_lossrate <= lossrate:
+ # save the last passing trial for verification
+ self._verification_params = copy.deepcopy(new_params)
+ self._logger.debug("Iteration: %s, frame rate: %s, throughput_rx_fps: %s, frame_loss_percent: %s",
+ iteration, "{:.3f}".format(new_params['frame_rate']), stats['total']['rx_pps'],
+ "{:.3f}".format(test_lossrate))
+ if test_lossrate == 0.0 and new_params['frame_rate'] == traffic['frame_rate']:
+ return copy.deepcopy(stats)
+ elif test_lossrate > lossrate:
+ right = center
+ center = (left + right) / 2
+ new_params = copy.deepcopy(traffic)
+ new_params['frame_rate'] = center
+ else:
+ stats_ok = copy.deepcopy(stats)
+ left = center
+ center = (left + right) / 2
+ new_params = copy.deepcopy(traffic)
+ new_params['frame_rate'] = center
+ iteration += 1
+ return stats_ok
+
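A sketch of the binary-search convergence on its own (no traffic is sent;
assumes a hypothetical device that is loss-free up to 42% of line rate):

    left, right, center = 0.0, 100.0, 100.0
    threshold = 0.05               # stand-in for the configured threshold
    while (right - left) > threshold:
        if center > 42.0:          # stand-in for "trial exceeded lossrate"
            right = center
        else:
            left = center
        center = (left + right) / 2
    print(round(center, 2))        # converges towards 42.0
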
def send_cont_traffic(self, traffic=None, duration=30):
"""See ITrafficGenerator for description
"""
@@ -336,6 +444,9 @@ class Trex(ITrafficGenerator):
self._params['traffic'] = merge_spec(
self._params['traffic'], traffic)
+ if settings.getValue('TRAFFICGEN_TREX_LEARNING_MODE'):
+ self.learning_packets(traffic)
+ self._logger.info("T-Rex sending traffic")
stats = self.generate_traffic(traffic, duration)
return self.calculate_results(stats)
@@ -356,45 +467,51 @@ class Trex(ITrafficGenerator):
"""
self._logger.info("In Trex send_rfc2544_throughput method")
self._params.clear()
- threshold = settings.getValue('TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD')
- test_lossrate = 0
- left = 0
- iteration = 1
- stats_ok = _EMPTY_STATS
self._params['traffic'] = self.traffic_defaults.copy()
if traffic:
self._params['traffic'] = merge_spec(
self._params['traffic'], traffic)
- new_params = copy.deepcopy(traffic)
- stats = self.generate_traffic(traffic, duration)
- right = traffic['frame_rate']
- center = traffic['frame_rate']
+ if settings.getValue('TRAFFICGEN_TREX_LEARNING_MODE'):
+ self.learning_packets(traffic)
+ self._verification_params = copy.deepcopy(traffic)
- # Loops until the preconfigured difference between frame rate
+ binary_bounds = {'right' : traffic['frame_rate'],
+ 'left' : 0,
+ 'center': traffic['frame_rate'],}
+
+ # Loops until the preconfigured difference between frame rate
# of successful and unsuccessful iterations is reached
- while (right - left) > threshold:
- test_lossrate = ((stats["total"]["opackets"] - stats["total"]
- ["ipackets"]) * 100) / stats["total"]["opackets"]
- self._logger.debug("Iteration: %s, frame rate: %s, throughput_rx_fps: %s, frame_loss_percent: %s",
- iteration, "{:.3f}".format(new_params['frame_rate']), stats['total']['rx_pps'],
- "{:.3f}".format(test_lossrate))
- if test_lossrate == 0.0 and new_params['frame_rate'] == traffic['frame_rate']:
- stats_ok = copy.deepcopy(stats)
- break
- elif test_lossrate > lossrate:
- right = center
- center = (left+right) / 2
- new_params = copy.deepcopy(traffic)
- new_params['frame_rate'] = center
- stats = self.generate_traffic(new_params, duration)
+ stats_ok = self.run_trials(boundaries=binary_bounds, duration=duration,
+ lossrate=lossrate, traffic=traffic)
+ if settings.getValue('TRAFFICGEN_TREX_VERIFICATION_MODE'):
+ verification_iterations = 1
+ while verification_iterations <= settings.getValue('TRAFFICGEN_TREX_MAXIMUM_VERIFICATION_TRIALS'):
+ self._logger.info('Starting Trex Verification trial for %s seconds at frame rate %s',
+ settings.getValue('TRAFFICGEN_TREX_VERIFICATION_DURATION'),
+ self._verification_params['frame_rate'])
+ stats = self.generate_traffic(self._verification_params,
+ settings.getValue('TRAFFICGEN_TREX_VERIFICATION_DURATION'))
+ verification_lossrate = ((stats["total"]["opackets"] - stats[
+ "total"]["ipackets"]) * 100) / stats["total"]["opackets"]
+ if verification_lossrate <= lossrate:
+ self._logger.info('Trex Verification passed, %s packets were lost',
+ stats["total"]["opackets"] - stats["total"]["ipackets"])
+ stats_ok = copy.deepcopy(stats)
+ break
+ else:
+ self._logger.info('Trex Verification failed, %s packets were lost',
+ stats["total"]["opackets"] - stats["total"]["ipackets"])
+ new_right = self._verification_params['frame_rate'] - settings.getValue(
+ 'TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD')
+ self._verification_params['frame_rate'] = new_right
+ binary_bounds = {'right': new_right,
+ 'left': 0,
+ 'center': new_right,}
+ stats_ok = self.run_trials(boundaries=binary_bounds, duration=duration,
+ lossrate=lossrate, traffic=self._verification_params)
+ verification_iterations += 1
else:
- stats_ok = copy.deepcopy(stats)
- left = center
- center = (left+right) / 2
- new_params = copy.deepcopy(traffic)
- new_params['frame_rate'] = center
- stats = self.generate_traffic(new_params, duration)
- iteration += 1
+ self._logger.error('Could not pass Trex Verification. Test failed')
return self.calculate_results(stats_ok)
def start_rfc2544_throughput(self, traffic=None, tests=1, duration=60,
diff --git a/tools/tasks.py b/tools/tasks.py
index 4179291f..4e03f85e 100644
--- a/tools/tasks.py
+++ b/tools/tasks.py
@@ -114,6 +114,13 @@ def run_task(cmd, logger, msg=None, check_error=False):
return ('\n'.join(sout.decode(my_encoding).strip() for sout in stdout),
('\n'.join(sout.decode(my_encoding).strip() for sout in stderr)))
+def update_pids(pid):
+ """update list of running pids, so they can be terminated at the end
+ """
+ pids = settings.getValue('_EXECUTED_PIDS')
+ pids.append(pid)
+ settings.setValue('_EXECUTED_PIDS', pids)
+
def run_background_task(cmd, logger, msg):
"""Run task in background and log when started.
@@ -132,6 +139,8 @@ def run_background_task(cmd, logger, msg):
proc = subprocess.Popen(map(os.path.expanduser, cmd), stdout=_get_stdout(), bufsize=0)
+ update_pids(proc.pid)
+
return proc.pid
@@ -174,14 +183,13 @@ def terminate_task_subtree(pid, signal='-15', sleep=10, logger=None):
:param logger: Logger to write details to
"""
try:
- output = subprocess.check_output("pgrep -P " + str(pid), shell=True).decode().rstrip('\n')
+ children = subprocess.check_output("pgrep -P " + str(pid), shell=True).decode().rstrip('\n').split()
except subprocess.CalledProcessError:
- output = ""
+ children = []
terminate_task(pid, signal, sleep, logger)
# just for case children were kept alive
- children = output.split('\n')
for child in children:
terminate_task(child, signal, sleep, logger)
@@ -208,6 +216,22 @@ def terminate_task(pid, signal='-15', sleep=10, logger=None):
if signal.lstrip('-').upper() not in ('9', 'KILL', 'SIGKILL') and systeminfo.pid_isalive(pid):
terminate_task(pid, '-9', sleep, logger)
+ pids = settings.getValue('_EXECUTED_PIDS')
+ if pid in pids:
+ pids.remove(pid)
+ settings.setValue('_EXECUTED_PIDS', pids)
+
+def terminate_all_tasks(logger):
+ """Terminate all processes executed by vsperf, just for case they were not
+ terminated by standard means.
+ """
+ pids = settings.getValue('_EXECUTED_PIDS')
+ if pids:
+ logger.debug('Following processes will be terminated: %s', pids)
+ for pid in pids:
+ terminate_task_subtree(pid, logger=logger)
+ settings.setValue('_EXECUTED_PIDS', [])
+
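A usage sketch; every PID registered via update_pids() is terminated
together with its child processes (SIGTERM first, SIGKILL as a fallback):

    import logging

    logger = logging.getLogger(__name__)
    terminate_all_tasks(logger)  # _EXECUTED_PIDS is empty afterwards
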
class Process(object):
"""Control an instance of a long-running process.
diff --git a/tools/teststepstools.py b/tools/teststepstools.py
index 639e3437..33db8f79 100644
--- a/tools/teststepstools.py
+++ b/tools/teststepstools.py
@@ -19,6 +19,7 @@ import logging
import subprocess
import locale
from tools.functions import filter_output
+from tools.tasks import run_background_task
_LOGGER = logging.getLogger(__name__)
@@ -102,3 +103,19 @@ class TestStepsTools(object):
""" validate result of shell `command' execution
"""
return result is not None
+
+ @staticmethod
+ def Exec_Shell_Background(command):
+ """ Execute a shell `command' at the background and return its PID id
+ """
+ try:
+ pid = run_background_task(command.split(), _LOGGER, "Background task: {}".format(command))
+ return pid
+ except OSError:
+ return None
+
+ @staticmethod
+ def validate_Exec_Shell_Background(result, dummy_command, dummy_regex=None):
+ """ validate result of shell `command' execution on the background
+ """
+ return result is not None
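A usage sketch with a hypothetical command; the returned PID can later be
handed to the task termination helpers:

    pid = TestStepsTools.Exec_Shell_Background('ping -c 60 10.0.0.1')
    if pid is None:
        print('background command could not be started')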
diff --git a/vswitches/ovs_dpdk_vhost.py b/vswitches/ovs_dpdk_vhost.py
index 11b32c88..6deb0c25 100644
--- a/vswitches/ovs_dpdk_vhost.py
+++ b/vswitches/ovs_dpdk_vhost.py
@@ -114,12 +114,15 @@ class OvsDpdkVhost(IVSwitchOvs):
Creates a port of type dpdk.
The new port is named dpdk<n> where n is an integer starting from 0.
"""
+ _nics = S.getValue('NICS')
bridge = self._bridges[switch_name]
dpdk_count = self._get_port_count('type=dpdk')
+ if dpdk_count == len(_nics):
+ raise RuntimeError("Can't add phy port! There are only {} ports defined "
+ "by WHITELIST_NICS parameter!".format(len(_nics)))
port_name = 'dpdk' + str(dpdk_count)
# PCI info. Please note there must be no blank space, eg must be
# like 'options:dpdk-devargs=0000:06:00.0'
- _nics = S.getValue('NICS')
nic_pci = 'options:dpdk-devargs=' + _nics[dpdk_count]['pci']
params = ['--', 'set', 'Interface', port_name, 'type=dpdk', nic_pci]
# multi-queue enable
diff --git a/vswitches/ovs_vanilla.py b/vswitches/ovs_vanilla.py
index cfde3b45..83c52050 100644
--- a/vswitches/ovs_vanilla.py
+++ b/vswitches/ovs_vanilla.py
@@ -16,6 +16,7 @@
"""
import logging
+import time
from conf import settings
from vswitches.ovs import IVSwitchOvs
from src.ovs import DPCtl
@@ -57,10 +58,14 @@ class OvsVanilla(IVSwitchOvs):
tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)
self._vport_id = 0
- super(OvsVanilla, self).stop()
+ # remove datapath before vswitch shutdown
dpctl = DPCtl()
dpctl.del_dp()
+ super(OvsVanilla, self).stop()
+
+ # give vswitch time to terminate before modules are removed
+ time.sleep(5)
self._module_manager.remove_modules()
def add_phy_port(self, switch_name):
@@ -70,10 +75,8 @@ class OvsVanilla(IVSwitchOvs):
See IVswitch for general description
"""
if self._current_id == len(self._ports):
- self._logger.error("Can't add port! There are only " +
- len(self._ports) + " ports " +
- "defined in config!")
- raise RuntimeError('Failed to add phy port')
+ raise RuntimeError("Can't add phy port! There are only {} ports defined "
+ "by WHITELIST_NICS parameter!".format(len(self._ports)))
if not self._ports[self._current_id]:
self._logger.error("Can't detect device name for NIC %s", self._current_id)
raise ValueError("Invalid device name for %s" % self._current_id)
diff --git a/vswitches/vpp_dpdk_vhost.py b/vswitches/vpp_dpdk_vhost.py
index c62e28d4..58d6bf51 100644
--- a/vswitches/vpp_dpdk_vhost.py
+++ b/vswitches/vpp_dpdk_vhost.py
@@ -225,7 +225,8 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
vpp_nics = self._get_nic_info(key='Pci')
# check if there are any NICs left
if len(self._phy_ports) >= len(S.getValue('NICS')):
- raise RuntimeError('All available NICs are already configured!')
+ raise RuntimeError("Can't add phy port! There are only {} ports defined "
+ "by WHITELIST_NICS parameter!".format(len(S.getValue('NICS'))))
nic = S.getValue('NICS')[len(self._phy_ports)]
if not nic['pci'] in vpp_nics: