-rwxr-xr-x  3rd_party/ixia/pass_fail.tcl | 28
-rwxr-xr-x  conf/01_testcases.conf | 12
-rw-r--r--  conf/02_vswitch.conf | 5
-rw-r--r--  conf/03_traffic.conf | 52
-rw-r--r--  conf/__init__.py | 10
-rw-r--r--  conf/integration/01_testcases.conf | 5
-rw-r--r--  core/component_factory.py | 1
-rw-r--r--  core/loader/loader_servant.py | 4
-rw-r--r--  core/pktfwd_controller.py | 8
-rw-r--r--  core/traffic_controller.py | 9
-rw-r--r--  core/traffic_controller_rfc2544.py | 5
-rw-r--r--  core/traffic_controller_rfc2889.py | 2
-rw-r--r--  core/vnf_controller.py | 3
-rw-r--r--  core/vswitch_controller_clean.py | 6
-rw-r--r--  core/vswitch_controller_op2p.py | 12
-rw-r--r--  core/vswitch_controller_p2p.py | 8
-rw-r--r--  core/vswitch_controller_ptunp.py | 12
-rw-r--r--  core/vswitch_controller_pxp.py | 8
-rw-r--r--  docs/release/release-notes/release-notes.rst | 67
-rw-r--r--  docs/testing/developer/devguide/design/trafficgen_integration_guide.rst | 17
-rw-r--r--  docs/testing/developer/devguide/design/vswitchperf_design.rst | 40
-rw-r--r--  docs/testing/user/configguide/index.rst | 1
-rw-r--r--  docs/testing/user/configguide/tools.rst | 177
-rw-r--r--  docs/testing/user/configguide/trafficgen.rst | 99
-rw-r--r--  docs/testing/user/userguide/testusage.rst | 6
-rw-r--r--  pylintrc | 2
-rw-r--r--  requirements.txt | 2
-rw-r--r--  src/__init__.py | 1
-rw-r--r--  src/dpdk/dpdk.py | 4
-rw-r--r--  src/ovs/dpctl.py | 2
-rw-r--r--  src/ovs/ofctl.py | 22
-rwxr-xr-x  systems/opensuse/42.2/build_base_machine.sh | 1
-rwxr-xr-x  systems/opensuse/42.3/build_base_machine.sh | 1
-rwxr-xr-x  systems/sles/15/build_base_machine.sh | 10
-rw-r--r--  testcases/integration.py | 2
-rw-r--r--  testcases/performance.py | 2
-rw-r--r--  testcases/testcase.py | 17
-rw-r--r--  tools/collectors/collectd/collectd.py | 4
-rw-r--r--  tools/collectors/collectd/collectd_bucky.py | 25
-rw-r--r--  tools/collectors/sysmetrics/pidstat.py | 52
-rw-r--r--  tools/functions.py | 2
-rw-r--r--  tools/load_gen/stress_ng/stress_ng.py | 3
-rw-r--r--  tools/load_gen/stressorvm/stressor_vm.py | 5
-rw-r--r--  tools/module_manager.py | 2
-rw-r--r--  tools/namespace.py | 9
-rw-r--r--  tools/networkcard.py | 2
-rwxr-xr-x  tools/pkt_gen/dummy/dummy.py | 20
-rwxr-xr-x  tools/pkt_gen/ixia/ixia.py | 33
-rwxr-xr-x  tools/pkt_gen/ixnet/ixnet.py | 4
-rw-r--r--  tools/pkt_gen/moongen/moongen.py | 45
-rw-r--r--  tools/pkt_gen/testcenter/testcenter.py | 22
-rwxr-xr-x  tools/pkt_gen/trafficgen/trafficgen.py | 5
-rw-r--r--  tools/pkt_gen/trex/trex.py | 223
-rw-r--r--  tools/pkt_gen/xena/XenaDriver.py | 5
-rw-r--r--  tools/pkt_gen/xena/json/xena_json.py | 18
-rwxr-xr-x  tools/pkt_gen/xena/xena.py | 14
-rw-r--r--  tools/report/report.py | 1
-rw-r--r--  tools/report/report_rst.jinja | 4
-rw-r--r--  tools/systeminfo.py | 4
-rw-r--r--  tools/teststepstools.py | 10
-rw-r--r--  tools/veth.py | 7
-rw-r--r--  vnfs/__init__.py | 1
-rw-r--r--  vnfs/qemu/__init__.py | 1
-rw-r--r--  vnfs/qemu/qemu.py | 2
-rw-r--r--  vnfs/vnf/__init__.py | 2
-rw-r--r--  vnfs/vnf/vnf.py | 4
-rwxr-xr-x  vsperf | 19
-rw-r--r--  vswitches/__init__.py | 1
-rw-r--r--  vswitches/ovs.py | 28
-rw-r--r--  vswitches/vpp_dpdk_vhost.py | 39
70 files changed, 948 insertions, 331 deletions
diff --git a/3rd_party/ixia/pass_fail.tcl b/3rd_party/ixia/pass_fail.tcl
index 0a5a7677..bf1fb556 100755
--- a/3rd_party/ixia/pass_fail.tcl
+++ b/3rd_party/ixia/pass_fail.tcl
@@ -431,7 +431,7 @@ proc sendTraffic { flowSpec trafficSpec } {
if {[udp set $::chassis $::card $::port1]} {
errorMsg "Error setting udp on port $::chassis.$::card.$::port1"
}
- errorMsg "frameSize: $frameSize, packetSize: $packetSize, srcMac: $srcMac, dstMac: $dstMac, srcPort: $srcPort, dstPort: $dstPort"
+ errorMsg "frameSize: $frameSize, packetSize: $packetSize, srcMac: $srcMac, dstMac: $dstMac, srcPort: $srcPort, dstPort: $dstPort, framerate: $frameRate %"
if {[info exists protocolPad]} {
errorMsg "protocolPad: $protocolPad, protocolPadBytes: $protocolPadBytes"
}
@@ -544,8 +544,8 @@ proc sendTraffic { flowSpec trafficSpec } {
} else {
errorMsg "Too many packets for capture."
}
-
- set result [list $framesSent $framesRecv $bytesSent $bytesRecv $payError $seqError]
+ lappend result $payError
+ lappend result $seqError
return $result
} else {
errorMsg "streamtype is not supported: '$streamType'"
@@ -638,6 +638,9 @@ proc stopTraffic {} {
logMsg "Frame Rate Sent: $sendRate"
logMsg "Frame Rate Recv: $recvRate\n"
+ logMsg "Bytes Rate Sent: $sendRateBytes"
+ logMsg "Bytes Rate Recv: $recvRateBytes\n"
+
set result [list $framesSent $framesRecv $bytesSent $bytesRecv $sendRate $recvRate $sendRateBytes $recvRateBytes]
return $result
@@ -728,13 +731,6 @@ proc rfcThroughputTest { testSpec trafficSpec } {
set framesDroppedRate 100
}
- # check if we've already found the rate before 10 iterations, i.e.
- # 'percentRate = idealValue'. This is as accurate as we can get with
- # integer values.
- if {[expr "$max - $min"] <= 0.5 } {
- break
- }
-
# handle 'percentRate <= idealValue' case
if {$framesDroppedRate <= $lossRate} {
logMsg "Frame sendRate of '$sendRate' pps succeeded ('$framesDropped' frames dropped)"
@@ -754,6 +750,18 @@ proc rfcThroughputTest { testSpec trafficSpec } {
set max $percentRate
set percentRate [expr "$percentRate - ([expr "$max - $min"] * 0.5)"]
}
+
+ # check if we've already found the rate before 10 iterations, i.e.
+ # 'percentRate = idealValue'. This is as accurate as we can get with
+ # integer values.
+ if {[expr "$max - $min"] <= 0.5 } {
+ logMsg "End of search condition for framerate is met: $max % - $min % <= 0.5 %"
+ break
+ }
+
+ logMsg "Waiting 2000 ms"
+ # wait to process delayed frames
+ after 2000
}
set bestRate [lindex $result 4]
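The rfcThroughputTest changes above move the end-of-search check after the rate adjustment and add a 2000 ms pause between iterations so that delayed frames are counted before the next trial. A minimal Python sketch of the same RFC 2544 binary-search structure, for illustration only (the real logic is the Tcl procedure above; measure_drop_rate is a hypothetical stand-in for one trial transmission at the given rate):

    import time

    def rfc2544_search(measure_drop_rate, loss_rate=0.0, max_iterations=10):
        """Binary search for the highest frame rate (in %) with acceptable loss."""
        minimum, maximum, rate = 0.0, 100.0, 100.0
        best = 0.0
        for _ in range(max_iterations):
            dropped = measure_drop_rate(rate)     # run one trial at 'rate' percent
            if dropped <= loss_rate:
                best = max(best, rate)            # rate succeeded, search upwards
                minimum = rate
                rate += (maximum - minimum) * 0.5
            else:
                maximum = rate                    # rate failed, search downwards
                rate -= (maximum - minimum) * 0.5
            if maximum - minimum <= 0.5:          # checked *after* adjusting the bounds
                break
            time.sleep(2)                         # let delayed frames drain
        return best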
diff --git a/conf/01_testcases.conf b/conf/01_testcases.conf
index c1e82163..4a68ab3f 100755
--- a/conf/01_testcases.conf
+++ b/conf/01_testcases.conf
@@ -256,6 +256,18 @@ PERFORMANCE_TESTS = [
},
},
{
+ "Name": "phy2phy_burst",
+ "Deployment": "p2p",
+ "Description": "Phy2Phy single burst of 1000 frames at 100% frame rate",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "burst",
+ "frame_rate" : 100,
+ "burst_size" : 1000,
+ },
+ },
+ },
+ {
"Name": "pvp_cont",
"Deployment": "pvp",
"Description": "PVP Continuous Stream",
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index 6a830a05..4d0fc46b 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -201,6 +201,11 @@ VSWITCH = "OvsDpdkVhost"
VSWITCH_JUMBO_FRAMES_ENABLED = False
VSWITCH_JUMBO_FRAMES_SIZE = 9000
+# default arguments of OVS ctl tools
+OVS_VSCTL_ARGS = []
+OVS_OFCTL_ARGS = ['-O', 'OpenFlow13'] # backward compatible default value
+OVS_APPCTL_ARGS = []
+
#########################
## VPP
#########################
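The three new OVS_*_ARGS options above are appended to the ovs-vsctl, ovs-ofctl and ovs-appctl command lines (see the src/ovs/ofctl.py hunks further below). A hedged example of overriding them from a custom configuration file such as 10_custom.conf (the values shown are illustrative, not recommended defaults):

    # select a different OpenFlow version for ovs-ofctl calls
    OVS_OFCTL_ARGS = ['-O', 'OpenFlow14']
    # keep ovs-vsctl and ovs-appctl at their defaults
    OVS_VSCTL_ARGS = []
    OVS_APPCTL_ARGS = []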
diff --git a/conf/03_traffic.conf b/conf/03_traffic.conf
index 8aff2e35..f043b4ca 100644
--- a/conf/03_traffic.conf
+++ b/conf/03_traffic.conf
@@ -23,8 +23,8 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# Detailed description of TRAFFIC dictionary items follows:
#
# 'traffic_type' - One of the supported traffic types.
-# E.g. rfc2544_throughput, rfc2544_back2back
-# or rfc2544_continuous
+# E.g. rfc2544_throughput, rfc2544_back2back,
+# rfc2544_continuous or burst
# Data type: str
# Default value: "rfc2544_throughput".
# 'bidir' - Specifies if generated traffic will be full-duplex (True)
@@ -36,6 +36,12 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# continuous stream tests.
# Data type: int
# Default value: 100.
+# 'burst_size' - Defines a number of frames in the single burst, which is sent
+# by burst traffic type. Burst size is applied for each direction,
+# i.e. the total number of tx frames will be 2*burst_size in case of
+# bidirectional traffic.
+# Data type: int
+# Default value: 100.
# 'multistream' - Defines number of flows simulated by traffic generator.
# Value 0 disables multistream feature
# Data type: int
@@ -112,7 +118,7 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# NOTE: It can be modified by vsperf in some scenarios.
# Data type: str
# Default value: "90.90.90.90".
-# 'proto' - Specifies deflaut protocol type.
+# 'proto' - Specifies protocol type.
# Please check particular traffic generator implementation
# for supported protocol types.
# Data type: str
@@ -171,9 +177,38 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# details.
# Data type: str
# Default value: ''
+# 'scapy' - A dictionary with definition of a frame content for both traffic
+# directions. The frame content is defined by a SCAPY notation.
+# NOTE: It is supported only by the T-Rex traffic generator.
+# Following keywords can be used to refer to the related parts of
+# the TRAFFIC dictionary:
+# Ether_src - refers to TRAFFIC['l2']['srcmac']
+# Ether_dst - refers to TRAFFIC['l2']['dstmac']
+# IP_proto - refers to TRAFFIC['l3']['proto']
+# IP_PROTO - refers to upper case version of TRAFFIC['l3']['proto']
+# IP_src - refers to TRAFFIC['l3']['srcip']
+# IP_dst - refers to TRAFFIC['l3']['dstip']
+# IP_PROTO_sport - refers to TRAFFIC['l4']['srcport']
+# IP_PROTO_dport - refers to TRAFFIC['l4']['dstport']
+# Dot1Q_prio - refers to TRAFFIC['vlan']['priority']
+# Dot1Q_id - refers to TRAFFIC['vlan']['cfi']
+# Dot1Q_vlan - refers to TRAFFIC['vlan']['id']
+# '0' - A string with the frame definition for the 1st direction.
+# Data type: str
+# Default value: 'Ether(src={Ether_src}, dst={Ether_dst})/'
+# 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+# 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+# '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})'
+# '1' - A string with the frame definition for the 2nd direction.
+# Data type: str
+# Default value: 'Ether(src={Ether_dst}, dst={Ether_src})/'
+# 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+# 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+# '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
TRAFFIC = {
'traffic_type' : 'rfc2544_throughput',
'frame_rate' : 100,
+ 'burst_size' : 100,
'bidir' : 'True', # will be passed as string in title format to tgen
'multistream' : 0,
'stream_type' : 'L4',
@@ -210,6 +245,17 @@ TRAFFIC = {
'count': 1,
'filter': '',
},
+ 'scapy': {
+ 'enabled': False,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+ }
}
#path to traffic generators directory.
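The placeholders in the scapy strings above follow Python's str.format() syntax. A rough sketch of how they could be expanded from the TRAFFIC dictionary (the helper below is an illustrative assumption; the actual substitution is performed inside the T-Rex driver):

    def expand_scapy_definition(traffic, direction='0'):
        """Fill the {keyword} placeholders of TRAFFIC['scapy'][direction]."""
        fields = {
            'Ether_src': traffic['l2']['srcmac'],
            'Ether_dst': traffic['l2']['dstmac'],
            'IP_proto': traffic['l3']['proto'],
            'IP_PROTO': traffic['l3']['proto'].upper(),
            'IP_src': traffic['l3']['srcip'],
            'IP_dst': traffic['l3']['dstip'],
            'IP_PROTO_sport': traffic['l4']['srcport'],
            'IP_PROTO_dport': traffic['l4']['dstport'],
            'Dot1Q_prio': traffic['vlan']['priority'],
            'Dot1Q_id': traffic['vlan']['cfi'],
            'Dot1Q_vlan': traffic['vlan']['id'],
        }
        return traffic['scapy'][direction].format(**fields)

    # e.g. expand_scapy_definition(TRAFFIC, '0') returns the fully expanded
    # 'Ether(src=..., dst=...)/Dot1Q(...)/IP(...)/UDP(...)' string for direction 0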
diff --git a/conf/__init__.py b/conf/__init__.py
index d5d26757..83c5475f 100644
--- a/conf/__init__.py
+++ b/conf/__init__.py
@@ -70,7 +70,7 @@ class Settings(object):
except AttributeError:
pass
return param
- elif isinstance(param, list) or isinstance(param, tuple):
+ elif isinstance(param, (list, tuple)):
tmp_list = []
for item in param:
tmp_list.append(self._eval_param(item))
@@ -229,7 +229,7 @@ class Settings(object):
if key not in self.__dict__ and key not in _EXTRA_TEST_PARAMS:
unknown_keys.append(key)
- if len(unknown_keys):
+ if unknown_keys:
raise RuntimeError('Test parameters contain unknown configuration '
'parameter(s): {}'.format(', '.join(unknown_keys)))
@@ -270,7 +270,7 @@ class Settings(object):
for vmindex in range(vm_number):
value = master_value_str.replace('#VMINDEX', str(vmindex))
for macro, args, param, _, step in re.findall(_PARSE_PATTERN, value):
- multi = int(step) if len(step) and int(step) else 1
+ multi = int(step) if step and int(step) else 1
if macro == '#EVAL':
# pylint: disable=eval-used
tmp_result = str(eval(param))
@@ -325,13 +325,13 @@ class Settings(object):
assert result == self.getValue(attr)
return True
- def validate_setValue(self, dummy_result, name, value):
+ def validate_setValue(self, _dummy_result, name, value):
"""Verifies, that value was correctly set
"""
assert value == self.__dict__[name]
return True
- def validate_resetValue(self, dummy_result, attr):
+ def validate_resetValue(self, _dummy_result, attr):
"""Verifies, that value was correctly reset
"""
return 'TEST_PARAMS' not in self.__dict__ or \
diff --git a/conf/integration/01_testcases.conf b/conf/integration/01_testcases.conf
index bb2809b8..8c013d2a 100644
--- a/conf/integration/01_testcases.conf
+++ b/conf/integration/01_testcases.conf
@@ -1129,10 +1129,11 @@ INTEGRATION_TESTS += [
"vSwitch" : "OvsDpdkVhost", # works also for Vanilla OVS
"Parameters" : {
"TRAFFICGEN" : "Trex",
- "TRAFFICGEN_DURATION" : 5,
+ "TRAFFICGEN_TREX_LEARNING_MODE" : True,
"TRAFFIC" : {
- "traffic_type" : "rfc2544_continuous",
+ "traffic_type" : "burst",
"frame_rate" : 100,
+ "burst_size" : 5,
# enable capture of five RX frames
'capture': {
'enabled': True,
diff --git a/core/component_factory.py b/core/component_factory.py
index bd9a1019..b6bd2677 100644
--- a/core/component_factory.py
+++ b/core/component_factory.py
@@ -121,7 +121,6 @@ def create_loadgen(loadgen_class, loadgen_cfg):
:param loadgen_cfg: Configuration for the loadgen
:return: A new ILoadGenerator class
"""
- # pylint: disable=too-many-function-args
return loadgen_class(loadgen_cfg)
def create_pktfwd(deployment, pktfwd_class):
diff --git a/core/loader/loader_servant.py b/core/loader/loader_servant.py
index 8bad9ab9..6db8e0f2 100644
--- a/core/loader/loader_servant.py
+++ b/core/loader/loader_servant.py
@@ -120,7 +120,7 @@ class LoaderServant(object):
if class_name in results:
logging.info(
- "Class found: " + class_name + ".")
+ "Class found: %s.", class_name)
return results.get(class_name)
return None
@@ -180,7 +180,7 @@ class LoaderServant(object):
mod = imp.load_module(
modname, *imp.find_module(modname, [root]))
except ImportError:
- logging.error('Could not import file ' + filename)
+ logging.error('Could not import file %s', filename)
raise
mods.append((modname, mod))
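The logging changes in this and many of the following hunks replace string concatenation with %-style lazy formatting, which defers message construction until the record is actually emitted. A short illustrative comparison:

    import logging

    class_name = 'Trex'
    # before: the full message string is built even when INFO is disabled
    logging.info("Class found: " + class_name + ".")
    # after: arguments are interpolated only if the record is emitted
    logging.info("Class found: %s.", class_name)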
diff --git a/core/pktfwd_controller.py b/core/pktfwd_controller.py
index b38aefa5..bdc91822 100644
--- a/core/pktfwd_controller.py
+++ b/core/pktfwd_controller.py
@@ -35,12 +35,12 @@ class PktFwdController(object):
self._pktfwd_class = pktfwd_class
self._pktfwd = pktfwd_class(guest=True if deployment == "pvp" and
settings.getValue('VNF') != "QemuPciPassthrough" else False)
- self._logger.debug('Creation using ' + str(self._pktfwd_class))
+ self._logger.debug('Creation using %s', str(self._pktfwd_class))
def setup(self):
"""Sets up the packet forwarder for p2p.
"""
- self._logger.debug('Setup using ' + str(self._pktfwd_class))
+ self._logger.debug('Setup using %s', str(self._pktfwd_class))
try:
self._pktfwd.start()
@@ -56,7 +56,7 @@ class PktFwdController(object):
def setup_for_guest(self):
"""Sets up the packet forwarder for pvp.
"""
- self._logger.debug('Setup using ' + str(self._pktfwd_class))
+ self._logger.debug('Setup using %s', str(self._pktfwd_class))
try:
self._pktfwd.start_for_guest()
@@ -67,7 +67,7 @@ class PktFwdController(object):
def stop(self):
"""Tears down the packet forwarder created in setup().
"""
- self._logger.debug('Stop using ' + str(self._pktfwd_class))
+ self._logger.debug('Stop using %s', str(self._pktfwd_class))
self._pktfwd.stop()
def __enter__(self):
diff --git a/core/traffic_controller.py b/core/traffic_controller.py
index de82dddf..1f21e57d 100644
--- a/core/traffic_controller.py
+++ b/core/traffic_controller.py
@@ -125,7 +125,7 @@ class TrafficController(object):
:param traffic: A dictionary describing the traffic to send.
"""
- self._logger.debug('send_traffic with ' +
+ self._logger.debug('send_traffic with %s',
str(self._traffic_gen_class))
self.configure(traffic)
@@ -144,7 +144,8 @@ class TrafficController(object):
If this function requires more than one argument, all should be
should be passed using the args list and appropriately handled.
"""
- self._logger.debug('send_traffic_async with ' +
+ # pylint: disable=unused-argument
+ self._logger.debug('send_traffic_async with %s',
str(self._traffic_gen_class))
self.configure(traffic)
@@ -158,7 +159,7 @@ class TrafficController(object):
"""
counter = 0
for item in self._results:
- logging.info("Record: " + str(counter))
+ logging.info("Record: %s", str(counter))
counter += 1
for(key, value) in list(item.items()):
logging.info(" Key: " + str(key) +
@@ -169,7 +170,7 @@ class TrafficController(object):
"""
return self._results
- def validate_send_traffic(self, dummy_result, dummy_traffic):
+ def validate_send_traffic(self, _dummy_result, _dummy_traffic):
"""Verify that send traffic has succeeded
"""
if self._results:
diff --git a/core/traffic_controller_rfc2544.py b/core/traffic_controller_rfc2544.py
index 488dde6f..2bb30fec 100644
--- a/core/traffic_controller_rfc2544.py
+++ b/core/traffic_controller_rfc2544.py
@@ -62,6 +62,9 @@ class TrafficControllerRFC2544(TrafficController, IResults):
elif traffic['traffic_type'] == 'rfc2544_continuous':
result = self._traffic_gen_class.send_cont_traffic(
traffic, duration=self._duration)
+ elif traffic['traffic_type'] == 'burst':
+ result = self._traffic_gen_class.send_burst_traffic(
+ traffic, duration=self._duration)
elif traffic['traffic_type'] == 'rfc2544_throughput':
result = self._traffic_gen_class.send_rfc2544_throughput(
traffic, tests=self._tests, duration=self._duration, lossrate=self._lossrate)
@@ -87,7 +90,7 @@ class TrafficControllerRFC2544(TrafficController, IResults):
tests=self._tests,
duration=self._duration)
self._traffic_started = True
- if len(function['args']) > 0:
+ if function['args']:
function['function'](function['args'])
else:
function['function']()

diff --git a/core/traffic_controller_rfc2889.py b/core/traffic_controller_rfc2889.py
index 64ab0ba6..316202c9 100644
--- a/core/traffic_controller_rfc2889.py
+++ b/core/traffic_controller_rfc2889.py
@@ -84,7 +84,7 @@ class TrafficControllerRFC2889(TrafficController, IResults):
trials=self._trials,
duration=self._duration)
self._traffic_started = True
- if len(function['args']) > 0:
+ if function['args']:
function['function'](function['args'])
else:
function['function']()
diff --git a/core/vnf_controller.py b/core/vnf_controller.py
index 78a29258..cbf59b79 100644
--- a/core/vnf_controller.py
+++ b/core/vnf_controller.py
@@ -93,8 +93,7 @@ class VnfController(object):
def get_vnfs_number(self):
"""Returns a number of vnfs controlled by this controller.
"""
- self._logger.debug('get_vnfs_number ' + str(len(self._vnfs)) +
- ' VNF[s]')
+ self._logger.debug('get_vnfs_number %s VNF[s]', str(len(self._vnfs)))
return len(self._vnfs)
def start(self):
diff --git a/core/vswitch_controller_clean.py b/core/vswitch_controller_clean.py
index 61724b9b..432406a7 100644
--- a/core/vswitch_controller_clean.py
+++ b/core/vswitch_controller_clean.py
@@ -37,13 +37,13 @@ class VswitchControllerClean(IVswitchController):
self._vswitch_class = vswitch_class
self._vswitch = vswitch_class()
self._deployment_scenario = "Clean"
- self._logger.debug('Creation using ' + str(self._vswitch_class))
+ self._logger.debug('Creation using %s', str(self._vswitch_class))
self._traffic = traffic.copy()
def setup(self):
"""Sets up the switch for Clean.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -54,7 +54,7 @@ class VswitchControllerClean(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
def __enter__(self):
diff --git a/core/vswitch_controller_op2p.py b/core/vswitch_controller_op2p.py
index 85bf79bd..3f879f9f 100644
--- a/core/vswitch_controller_op2p.py
+++ b/core/vswitch_controller_op2p.py
@@ -46,12 +46,12 @@ class VswitchControllerOP2P(IVswitchController):
self._deployment_scenario = "OP2P"
self._traffic = traffic.copy()
self._tunnel_operation = tunnel_operation
- self._logger.debug('Creation using ' + str(self._vswitch_class))
+ self._logger.debug('Creation using %s', str(self._vswitch_class))
def setup(self):
""" Sets up the switch for overlay P2P (tunnel encap or decap)
"""
- self._logger.debug('Setting up ' + str(self._tunnel_operation))
+ self._logger.debug('Setting up %s', str(self._tunnel_operation))
if self._tunnel_operation == "encapsulation":
self._setup_encap()
else:
@@ -66,7 +66,7 @@ class VswitchControllerOP2P(IVswitchController):
Create 2 bridges br0 (integration bridge) and br-ext and a VXLAN port
for encapsulation.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -129,7 +129,7 @@ class VswitchControllerOP2P(IVswitchController):
def _setup_decap(self):
""" Sets up the switch for overlay P2P decapsulation test
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -189,7 +189,7 @@ class VswitchControllerOP2P(IVswitchController):
def _setup_decap_vanilla(self):
""" Sets up the switch for overlay P2P decapsulation test
"""
- self._logger.debug('Setup decap vanilla ' + str(self._vswitch_class))
+ self._logger.debug('Setup decap vanilla %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -261,7 +261,7 @@ class VswitchControllerOP2P(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
def __enter__(self):
diff --git a/core/vswitch_controller_p2p.py b/core/vswitch_controller_p2p.py
index 0d41b145..eb1f57f0 100644
--- a/core/vswitch_controller_p2p.py
+++ b/core/vswitch_controller_p2p.py
@@ -46,13 +46,13 @@ class VswitchControllerP2P(IVswitchController):
self._vswitch_class = vswitch_class
self._vswitch = vswitch_class()
self._deployment_scenario = "P2P"
- self._logger.debug('Creation using ' + str(self._vswitch_class))
+ self._logger.debug('Creation using %s', str(self._vswitch_class))
self._traffic = traffic.copy()
def setup(self):
"""Sets up the switch for p2p.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -109,7 +109,7 @@ class VswitchControllerP2P(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
def __enter__(self):
@@ -126,7 +126,7 @@ class VswitchControllerP2P(IVswitchController):
def get_ports_info(self):
"""See IVswitchController for description
"""
- self._logger.debug('get_ports_info using ' + str(self._vswitch_class))
+ self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
return self._vswitch.get_ports(settings.getValue('VSWITCH_BRIDGE_NAME'))
def dump_vswitch_flows(self):
diff --git a/core/vswitch_controller_ptunp.py b/core/vswitch_controller_ptunp.py
index 27d26789..853c7d5c 100644
--- a/core/vswitch_controller_ptunp.py
+++ b/core/vswitch_controller_ptunp.py
@@ -59,13 +59,13 @@ class VswitchControllerPtunP(IVswitchController):
self.br_mod_ip1 = settings.getValue('TUNNEL_MODIFY_BRIDGE_IP1')
self.br_mod_ip2 = settings.getValue('TUNNEL_MODIFY_BRIDGE_IP2')
self.tunnel_type = settings.getValue('TUNNEL_TYPE')
- self._logger.debug('Creation using ' + str(self._vswitch_class))
+ self._logger.debug('Creation using %s', str(self._vswitch_class))
def setup(self):
""" Sets up the switch for VxLAN overlay PTUNP (tunnel encap or decap)
"""
self._logger.debug('Setting up phy-tun-phy tunneling scenario')
- if self.tunnel_type is 'vxlan':
+ if self.tunnel_type == 'vxlan':
self._setup_vxlan_encap_decap()
else:
self._logger.error("Only VxLAN is supported for now")
@@ -78,7 +78,7 @@ class VswitchControllerPtunP(IVswitchController):
physical ports. Two more bridges br-mod1 and br-mod2 to mangle
and redirect the packets from one tunnel port to other.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
self._vswitch.add_switch(self.bridge_phy1)
@@ -204,7 +204,7 @@ class VswitchControllerPtunP(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
def __enter__(self):
@@ -221,7 +221,7 @@ class VswitchControllerPtunP(IVswitchController):
def get_ports_info(self):
"""See IVswitchController for description
"""
- self._logger.debug('get_ports_info using ' + str(self._vswitch_class))
+ self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
ports = self._vswitch.get_ports(self.bridge_phy1) +\
self._vswitch.get_ports(self.bridge_mod1) +\
self._vswitch.get_ports(self.bridge_phy2) +\
@@ -231,7 +231,7 @@ class VswitchControllerPtunP(IVswitchController):
def dump_vswitch_flows(self):
"""See IVswitchController for description
"""
- self._logger.debug('dump_flows using ' + str(self._vswitch_class))
+ self._logger.debug('dump_flows using %s', str(self._vswitch_class))
self._vswitch.dump_flows(self.bridge_phy1)
self._vswitch.dump_flows(self.bridge_mod1)
self._vswitch.dump_flows(self.bridge_phy2)
diff --git a/core/vswitch_controller_pxp.py b/core/vswitch_controller_pxp.py
index d4d1e764..e3c208a3 100644
--- a/core/vswitch_controller_pxp.py
+++ b/core/vswitch_controller_pxp.py
@@ -57,13 +57,13 @@ class VswitchControllerPXP(IVswitchController):
self._traffic = traffic.copy()
self._bidir = True if self._traffic['bidir'] == 'True' else False
- self._logger.debug('Creation using ' + str(self._vswitch_class))
+ self._logger.debug('Creation using %s', str(self._vswitch_class))
self._bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
def setup(self):
""" Sets up the switch for PXP
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -182,7 +182,7 @@ class VswitchControllerPXP(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
def _add_flow(self, flow, port1, port2, reverse_flow=False):
@@ -212,7 +212,7 @@ class VswitchControllerPXP(IVswitchController):
def get_ports_info(self):
"""See IVswitchController for description
"""
- self._logger.debug('get_ports_info using ' + str(self._vswitch_class))
+ self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
return self._vswitch.get_ports(self._bridge)
def dump_vswitch_flows(self):
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 860cca77..46eb74c0 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -2,6 +2,73 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Intel Corporation, AT&T and others.
+OPNFV Fraser Release
+====================
+
+* Supported Versions - DPDK:17.08, OVS:2.8.1, VPP:17.07, QEMU:2.9.1
+* Pylint 1.8.2 code conformity
+* Python virtualenv moved to python-3.
+* LTD: Requirements specification for Soak/Long Duration Tests
+* Performance Matrix functionality support
+* Several bugfixes and minor improvements
+
+* Documentation
+
+ * Configuration and installation of additional tools.
+ * Xena install document update.
+ * Installation prerequisites update
+ * Traffic Capture methods explained
+
+* Virtual-Switches
+
+ * OVS: Configurable arguments for ovs-\*ctl
+ * OVS: Fix vswitch shutdown process
+ * VPP: Define vppctl socket name
+ * VPP: Multiqueue support for VPP
+ * OVS and VPP: Improve add_phy_port error messages
+ * OVS and VPP: Updated to recent version
+
+* Tools
+
+ * Support for Stressor-VMs as a Loadgen
+ * Support for collectd as one of the collectors
+ * Support for LLC management with Intel RMD
+
+* Traffic Generators
+
+ * All Traffic-Gens: Postponed call of connect operation.
+ * Ixia: Added support of LISTs in TRAFFIC
+ * T-Rex: Version v2.38 support added.
+ * T-Rex: Support for T-Rex Traffic generator in a VM.
+ * T-Rex: Add logic for dealing with high speed cards.
+ * T-Rex: Improve error handling.
+ * T-Rex: Added support for traffic capture.
+ * T-Rex: RFC2544 verification functionality included.
+ * T-Rex: Added learning packet option.
+ * T-Rex: Added packet counts for reporting
+ * T-Rex: Added multistream support
+ * T-Rex: Added promiscuous option for SRIOV tests
+ * T-Rex: RFC2544 Throughput bugfixing
+
+* Tests
+
+ * Tests with T-Rex in VM
+ * Improvements of step driven Testcases
+ * OVS/DPDK regression tests
+ * Traffic Capture testcases added.
+
+* Installation Scripts
+
+ * Support for SLES15 and openSuse Tumbleweed
+ * Fedora installation script update
+ * rhel_path_fix: Fix pathing issue introduced by another commit
+ * Updated build scripts for Centos and RHEL to python34
+
+* CI
+
+ * Update hugepages configuration
+ * Support disabling VPP tests, if required
+
OPNFV Euphrates Release
=======================
diff --git a/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst b/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst
index c88b80ed..671c7fd8 100644
--- a/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst
+++ b/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst
@@ -199,13 +199,20 @@ functions:
Note: There are parameters specific to testing of tunnelling protocols,
which are discussed in detail at :ref:`integration-tests` userguide.
+ Note: A detailed description of the ``TRAFFIC`` dictionary can be found at
+ :ref:`configuration-of-traffic-dictionary`.
+
* param **traffic_type**: One of the supported traffic types,
- e.g. **rfc2544_throughput**, **rfc2544_continuous**
- or **rfc2544_back2back**.
- * param **frame_rate**: Defines desired percentage of frame
- rate used during continuous stream tests.
+ e.g. **rfc2544_throughput**, **rfc2544_continuous**,
+ **rfc2544_back2back** or **burst**.
* param **bidir**: Specifies if generated traffic will be full-duplex
(true) or half-duplex (false).
+ * param **frame_rate**: Defines desired percentage of frame
+ rate used during continuous stream tests.
+ * param **burst_size**: Defines a number of frames in the single burst,
+ which is sent by burst traffic type. Burst size is applied for each
+ direction, i.e. the total number of tx frames will be 2*burst_size
+ in case of bidirectional traffic.
* param **multistream**: Defines number of flows simulated by traffic
generator. Value 0 disables MultiStream feature.
* param **stream_type**: Stream Type defines ISO OSI network layer
@@ -224,6 +231,8 @@ functions:
**dstport** and l4 on/off switch **enabled**.
* param **vlan**: A dictionary with vlan specific parameters,
e.g. **priority**, **cfi**, **id** and vlan on/off switch **enabled**.
+ * param **scapy**: A dictionary with definition of the frame content for both traffic
+ directions. The frame content is defined by a SCAPY notation.
* param **tests**: Number of times the test is executed.
* param **duration**: Duration of continuous test or per iteration duration
diff --git a/docs/testing/developer/devguide/design/vswitchperf_design.rst b/docs/testing/developer/devguide/design/vswitchperf_design.rst
index 96ffcf62..7fbde886 100644
--- a/docs/testing/developer/devguide/design/vswitchperf_design.rst
+++ b/docs/testing/developer/devguide/design/vswitchperf_design.rst
@@ -291,8 +291,8 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
.. code-block:: console
'traffic_type' - One of the supported traffic types.
- E.g. rfc2544_throughput, rfc2544_back2back
- or rfc2544_continuous
+ E.g. rfc2544_throughput, rfc2544_back2back,
+ rfc2544_continuous or burst
Data type: str
Default value: "rfc2544_throughput".
'bidir' - Specifies if generated traffic will be full-duplex (True)
@@ -304,6 +304,12 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
continuous stream tests.
Data type: int
Default value: 100.
+ 'burst_size' - Defines a number of frames in the single burst, which is sent
+ by burst traffic type. Burst size is applied for each direction,
+ i.e. the total number of tx frames will be 2*burst_size in case of
+ bidirectional traffic.
+ Data type: int
+ Default value: 100.
'multistream' - Defines number of flows simulated by traffic generator.
Value 0 disables multistream feature
Data type: int
@@ -439,6 +445,34 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
details.
Data type: str
Default value: ''
+ 'scapy' - A dictionary with definition of a frame content for both traffic
+ directions. The frame content is defined by a SCAPY notation.
+ NOTE: It is supported only by the T-Rex traffic generator.
+ Following keywords can be used to refer to the related parts of
+ the TRAFFIC dictionary:
+ Ether_src - refers to TRAFFIC['l2']['srcmac']
+ Ether_dst - refers to TRAFFIC['l2']['dstmac']
+ IP_proto - refers to TRAFFIC['l3']['proto']
+ IP_PROTO - refers to upper case version of TRAFFIC['l3']['proto']
+ IP_src - refers to TRAFFIC['l3']['srcip']
+ IP_dst - refers to TRAFFIC['l3']['dstip']
+ IP_PROTO_sport - refers to TRAFFIC['l4']['srcport']
+ IP_PROTO_dport - refers to TRAFFIC['l4']['dstport']
+ Dot1Q_prio - refers to TRAFFIC['vlan']['priority']
+ Dot1Q_id - refers to TRAFFIC['vlan']['cfi']
+ Dot1Q_vlan - refers to TRAFFIC['vlan']['id']
+ '0' - A string with the frame definition for the 1st direction.
+ Data type: str
+ Default value: 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})'
+ '1' - A string with the frame definition for the 2nd direction.
+ Data type: str
+ Default value: 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
.. _configuration-of-guest-options:
@@ -786,7 +820,7 @@ ITrafficGenerator
connect()
disconnect()
- send_burst_traffic(traffic, numpkts, time, framerate)
+ send_burst_traffic(traffic, time)
send_cont_traffic(traffic, time, framerate)
start_cont_traffic(traffic, time, framerate)
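With the reworked interface above, burst size and frame rate are read from the TRAFFIC dictionary instead of being passed as separate arguments. A minimal sketch of a generator honouring the new send_burst_traffic() signature (the class name and the returned fields are illustrative, not the project's actual Dummy or T-Rex implementation):

    from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator

    class ExampleTrafficGen(ITrafficGenerator):
        """Illustrative generator implementing the reworked burst API."""
        def send_burst_traffic(self, traffic=None, duration=20):
            burst_size = traffic['burst_size']   # frames per direction
            frame_rate = traffic['frame_rate']   # percentage of line rate
            # ... drive the real generator here for 'duration' seconds ...
            return {'frames tx': burst_size, 'frames rx': burst_size}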
diff --git a/docs/testing/user/configguide/index.rst b/docs/testing/user/configguide/index.rst
index 83908a97..75a2082d 100644
--- a/docs/testing/user/configguide/index.rst
+++ b/docs/testing/user/configguide/index.rst
@@ -48,6 +48,7 @@ VSPERF Install and Configuration
./installation.rst
./upgrade.rst
./trafficgen.rst
+ ./tools.rst
=================
VSPERF Test Guide
diff --git a/docs/testing/user/configguide/tools.rst b/docs/testing/user/configguide/tools.rst
new file mode 100644
index 00000000..907e86d2
--- /dev/null
+++ b/docs/testing/user/configguide/tools.rst
@@ -0,0 +1,177 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation, Spirent, AT&T and others.
+
+.. _additional-tools-configuration:
+
+=============================================
+'vsperf' Additional Tools Configuration Guide
+=============================================
+
+Overview
+--------
+
+VSPERF supports the following categories of additional tools:
+
+ * `Infrastructure Metrics Collectors`_
+ * `Load Generators`_
+ * `L3 Cache Management`_
+
+Under each category, there are one or more tools supported by VSPERF.
+This guide provides the details of how to install (if required)
+and configure the above mentioned tools.
+
+.. _`Infrastructure Metrics Collectors`:
+
+Infrastructure Metrics Collection
+---------------------------------
+
+VSPERF supports the following two tools for collecting and reporting the metrics:
+
+* pidstat
+* collectd
+
+*pidstat* is a command on Linux systems, which is used for monitoring individual
+tasks currently being managed by the Linux kernel. In VSPERF this command is used to
+monitor the *ovs-vswitchd*, *ovsdb-server* and *kvm* processes.
+
+*collectd* is a Linux application that collects, stores and transfers various system
+metrics. For every category of metrics, there is a separate plugin in collectd. For
+example, the CPU plugin and the Interface plugin provide all the CPU metrics and interface
+metrics, respectively. CPU metrics may include user-time, system-time, etc., whereas
+interface metrics may include received-packets, dropped-packets, etc.
+
+Installation
+^^^^^^^^^^^^
+
+No installation is required for *pidstat*, whereas collectd has to be installed
+separately. For the installation of collectd, we recommend following the process described
+in the *OPNFV-Barometer* project, which can be found here `Barometer-Euphrates <http://docs.opnfv.org/en/stable-euphrates/submodules/barometer/docs/release/userguide/feature.userguide.html#building-all-barometer-upstreamed-plugins-from-scratch>`_ or in the most
+recent release.
+
+VSPERF assumes that collectd is installed and configured to send metrics over localhost.
+The metrics sent should be for the following categories: CPU, Processes, Interface,
+OVS, DPDK, Intel-RDT.
+
+Configuration
+^^^^^^^^^^^^^
+
+The configuration file for the collectors can be found in **conf/05_collector.conf**.
+*pidstat* specific configuration includes:
+
+* ``PIDSTAT_MONITOR`` - processes to be monitored by pidstat
+* ``PIDSTAT_OPTIONS`` - options which will be passed to pidstat command
+* ``PIDSTAT_SAMPLE_INTERVAL`` - sampling interval used by pidstat to collect statistics
+* ``LOG_FILE_PIDSTAT`` - prefix of pidstat's log file
+
+The *collectd* configuration option includes:
+
+* ``COLLECTD_IP`` - IP address where collectd is running
+* ``COLLECTD_PORT`` - Port number over which collectd is sending the metrics
+* ``COLLECTD_SECURITY_LEVEL`` - Security level for receiving metrics
+* ``COLLECTD_AUTH_FILE`` - Authentication file for receiving metrics
+* ``LOG_FILE_COLLECTD`` - Prefix for collectd's log file.
+* ``COLLECTD_CPU_KEYS`` - Interesting metrics from CPU
+* ``COLLECTD_PROCESSES_KEYS`` - Interesting metrics from processes
+* ``COLLECTD_INTERFACE_KEYS`` - Interesting metrics from interface
+* ``COLLECTD_OVSSTAT_KEYS`` - Interesting metrics from OVS
+* ``COLLECTD_DPDKSTAT_KEYS`` - Interesting metrics from DPDK.
+* ``COLLECTD_INTELRDT_KEYS`` - Interesting metrics from Intel-RDT
+* ``COLLECTD_INTERFACE_XKEYS`` - Metrics to exclude from Interface
+* ``COLLECTD_INTELRDT_XKEYS`` - Metrics to exclude from Intel-RDT
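A hedged example of overriding a few of these collector options in a custom configuration file (all values are placeholders, not recommended settings):

    PIDSTAT_MONITOR = ['ovs-vswitchd', 'ovsdb-server', 'kvm']
    PIDSTAT_SAMPLE_INTERVAL = 1
    COLLECTD_IP = '127.0.0.1'
    COLLECTD_PORT = 25826
    COLLECTD_SECURITY_LEVEL = 0
    LOG_FILE_COLLECTD = 'collectd'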
+
+
+.. _`Load Generators`:
+
+
+Load Generation
+---------------
+
+In VSPERF, load generation refers to creating background CPU and memory loads to
+study the impact of these loads on the system under test. There are two options to
+create loads in VSPERF. These options are used for different use-cases. The options are:
+
+* stress or stress-ng
+* Stressor-VMs
+
+*stress* and *stress-ng* are Linux tools to stress the system in various ways.
+They can stress different subsystems such as CPU and memory. *stress-ng* is an
+improved version of *stress*. StressorVMs are custom-built virtual machines
+for noisy-neighbor use-cases.
+
+Installation
+^^^^^^^^^^^^
+
+stress and stress-ng can be installed through the standard Linux package installation process.
+Information about stress-ng, including the steps for installing can be found
+here: `stress-ng <https://github.com/ColinIanKing/stress-ng>`_
+
+There are two options for StressorVMs - one is a VM based on stress-ng and the second
+is a VM based on Spirent's cloudstress. VMs based on stress-ng can be found at this
+`link <https://github.com/opensource-tnbt/stressng-images>`_ . Spirent's cloudstress
+based VM can be downloaded from this `site <https://github.com/spirent/cloudstress>`_
+
+These StressorVMs are OSv-based VMs, which are very small in size. Download
+these VMs and place them in an appropriate location; this location is then used in
+the configuration - as mentioned below.
+
+Configuration
+^^^^^^^^^^^^^
+
+The configuration file for loadgens can be found in **conf/07_loadgen.conf**.
+There are no specific configuration options for load generation based on the stress
+and stress-ng commands. However, for StressorVMs, the following configuration options apply:
+
+* ``NN_COUNT`` - Number of stressor VMs required.
+* ``NN_MEMORY`` - Comma separated memory configuration for each VM
+* ``NN_SMP`` - Comma separated configuration for each VM
+* ``NN_IMAGE`` - Comma separated list of Paths for each VM image
+* ``NN_SHARED_DRIVE_TYPE`` - Comma separated list of shared drive types for each VM
+* ``NN_BOOT_DRIVE_TYPE`` - Comma separated list of boot drive type for each VM
+* ``NN_CORE_BINDING`` - Comma separated list of lists specifying the cores associated with each VM.
+* ``NN_NICS_NR`` - Comma separated list of the number of NICs for each VM
+* ``NN_BASE_VNC_PORT`` - Base VNC port Index.
+* ``NN_LOG_FILE`` - Name of the log file
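A sketch of what a two-VM StressorVM configuration could look like (all paths and values below are illustrative assumptions):

    NN_COUNT = 2
    NN_MEMORY = ['2048', '2048']            # memory per VM in MB
    NN_SMP = ['2', '2']                     # vCPUs per VM
    NN_IMAGE = ['/tmp/stressng-vm-1.img', '/tmp/stressng-vm-2.img']
    NN_SHARED_DRIVE_TYPE = ['raw', 'raw']
    NN_BOOT_DRIVE_TYPE = ['raw', 'raw']
    NN_CORE_BINDING = [['11'], ['12']]
    NN_NICS_NR = ['2', '2']
    NN_BASE_VNC_PORT = 2
    NN_LOG_FILE = 'stressorvm.log'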
+
+.. _`L3 Cache Management`:
+
+Last Level Cache Management
+---------------------------
+
+VSPERF supports last-level cache management using Intel's RDT tool(s) - the
+relevant ones are `Intel CAT-CMT <https://github.com/intel/intel-cmt-cat>`_ and
+`Intel RMD <https://github.com/intel/rmd>`_. RMD is a Linux daemon that runs on
+individual hosts and provides a REST API for the control/orchestration layer to
+request LLC for VMs/Containers/Applications. RMD receives a resource policy
+from the orchestration layer - in this case, from VSPERF - and enforces it on the host.
+It achieves this enforcement via kernel interfaces such as resctrlfs and libpqos.
+The resource here refers to the last-level cache. Users can configure policies to
+define how much cache a CPU can get. The policy configuration is described below.
+
+Installation
+^^^^^^^^^^^^
+
+To install the RMD tool, please install CAT-CMT first and then install RMD.
+The details of installation can be found here: `Intel CAT-CMT <https://github.com/intel/intel-cmt-cat>`_
+and `Intel RMD <https://github.com/intel/rmd>`_
+
+Configuration
+^^^^^^^^^^^^^
+
+The configuration file for cache management can be found in **conf/08_llcmanagement.conf**.
+
+VSPERF provides the following configuration options for the user to define and enforce policies via RMD.
+
+* ``LLC_ALLOCATION`` - Enable or Disable LLC management.
+* ``RMD_PORT`` - RMD port (port number on which API server is listening)
+* ``RMD_SERVER_IP`` - IP address where RMD is running. Currently only localhost.
+* ``RMD_API_VERSION`` - RMD version. Currently it is 'v1'
+* ``POLICY_TYPE`` - Specify how the policy is defined - either COS or CUSTOM
+* ``VSWITCH_COS`` - Class of service (CoS) for vSwitch. CoS can be gold, silver-bf or bronze-shared.
+* ``VNF_COS`` - Class of service for VNF
+* ``PMD_COS`` - Class of service for PMD
+* ``NOISEVM_COS`` - Class of service of Noisy VM.
+* ``VSWITCH_CA`` - [min-cache-value, max-cache-value] for vswitch
+* ``VNF_CA`` - [min-cache-value, max-cache-value] for VNF
+* ``PMD_CA`` - [min-cache-value, max-cache-value] for PMD
+* ``NOISEVM_CA`` - [min-cache-value, max-cache-value] for Noisy VM
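A hedged example of an LLC management policy built from the options above (the port number, CoS names and values are illustrative; check the RMD documentation for the values valid on a given host):

    LLC_ALLOCATION = True
    RMD_PORT = 8081
    RMD_SERVER_IP = '127.0.0.1'
    RMD_API_VERSION = 'v1'
    POLICY_TYPE = 'COS'
    VSWITCH_COS = 'gold'
    VNF_COS = 'silver-bf'
    PMD_COS = 'gold'
    NOISEVM_COS = 'bronze-shared'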
diff --git a/docs/testing/user/configguide/trafficgen.rst b/docs/testing/user/configguide/trafficgen.rst
index 52b1b4a5..f9e2db11 100644
--- a/docs/testing/user/configguide/trafficgen.rst
+++ b/docs/testing/user/configguide/trafficgen.rst
@@ -39,6 +39,7 @@ and is configured as follows:
TRAFFIC = {
'traffic_type' : 'rfc2544_throughput',
'frame_rate' : 100,
+ 'burst_size' : 100,
'bidir' : 'True', # will be passed as string in title format to tgen
'multistream' : 0,
'stream_type' : 'L4',
@@ -75,8 +76,22 @@ and is configured as follows:
'count': 1,
'filter': '',
},
+ 'scapy': {
+ 'enabled': False,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+ }
}
+A detailed description of the ``TRAFFIC`` dictionary can be found at
+:ref:`configuration-of-traffic-dictionary`.
+
The framesize parameter can be overridden from the configuration
files by adding the following to your custom configuration file
``10_custom.conf``:
@@ -857,6 +872,21 @@ place. This can be adjusted with the following configurations:
TRAFFICGEN_TREX_LEARNING_MODE=True
TRAFFICGEN_TREX_LEARNING_DURATION=5
+Latency measurements have an impact on T-Rex performance. Thus vswitchperf uses a separate
+latency stream for each direction with limited speed. This workaround is used for the RFC2544
+**Throughput** and **Continuous** traffic types. In case of the **Burst** traffic type,
+the latency statistics are measured for all frames in the burst. Collection of latency
+statistics is driven by the configuration option ``TRAFFICGEN_TREX_LATENCY_PPS`` as follows:
+
+ * value ``0`` - disables latency measurements
+ * non zero integer value - enables latency measurements; in case of the Throughput
+ and Continuous traffic types, it specifies the speed of the latency specific stream
+ in PPS. In case of the burst traffic type, it enables latency measurements for all frames.
+
+.. code-block:: console
+
+ TRAFFICGEN_TREX_LATENCY_PPS = 1000
+
SR-IOV and Multistream layer 2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
T-Rex by default only accepts packets on the receive side if the destination mac matches the
@@ -907,3 +937,72 @@ The duration and maximum number of attempted verification trials can be set to c
behavior of this step. If the verification step fails, it will resume the binary search
with new values where the maximum output will be the last attempted frame rate minus the
current set thresh hold.
+
+Scapy frame definition
+~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to use a SCAPY frame definition to generate various network protocols
+with the **T-Rex** traffic generator. In case a particular network protocol layer
+is disabled in the TRAFFIC dictionary (e.g. TRAFFIC['vlan']['enabled'] = False),
+the disabled layer will be removed from the scapy format definition by VSPERF.
+
+The scapy frame definition can refer to values defined by the TRAFFIC dictionary
+via the following keywords. These keywords are used in the examples below.
+
+* ``Ether_src`` - refers to ``TRAFFIC['l2']['srcmac']``
+* ``Ether_dst`` - refers to ``TRAFFIC['l2']['dstmac']``
+* ``IP_proto`` - refers to ``TRAFFIC['l3']['proto']``
+* ``IP_PROTO`` - refers to upper case version of ``TRAFFIC['l3']['proto']``
+* ``IP_src`` - refers to ``TRAFFIC['l3']['srcip']``
+* ``IP_dst`` - refers to ``TRAFFIC['l3']['dstip']``
+* ``IP_PROTO_sport`` - refers to ``TRAFFIC['l4']['srcport']``
+* ``IP_PROTO_dport`` - refers to ``TRAFFIC['l4']['dstport']``
+* ``Dot1Q_prio`` - refers to ``TRAFFIC['vlan']['priority']``
+* ``Dot1Q_id`` - refers to ``TRAFFIC['vlan']['cfi']``
+* ``Dot1Q_vlan`` - refers to ``TRAFFIC['vlan']['id']``
+
+In the following examples of SCAPY frame definitions, only the relevant parts of the TRAFFIC
+dictionary are shown. The rest of the TRAFFIC dictionary is set to the default values
+as they are defined in ``conf/03_traffic.conf``.
+
+Please check the official documentation of the SCAPY project for details about SCAPY frame
+definition and supported network layers at: http://www.secdev.org/projects/scapy
+
+#. Generate ICMP frames:
+
+ .. code-block:: console
+
+ 'scapy': {
+ 'enabled': True,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/ICMP()',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/ICMP()',
+ }
+
+#. Generate IPv6 ICMP Echo Request
+
+ .. code-block:: console
+
+ 'l3' : {
+ 'srcip': 'feed::01',
+ 'dstip': 'feed::02',
+ },
+ 'scapy': {
+ 'enabled': True,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/IPv6(src={IP_src}, dst={IP_dst})/ICMPv6EchoRequest()',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/IPv6(src={IP_dst}, dst={IP_src})/ICMPv6EchoRequest()',
+ }
+
+#. Generate SCTP frames:
+
+ This example uses the default SCAPY frame definition, which can reflect the ``TRAFFIC['l3']['proto']`` settings. The same
+ approach can be used to generate other protocols, e.g. TCP.
+
+ .. code-block:: console
+
+ 'l3' : {
+ 'proto' : 'sctp',
+ },
+ 'scapy': {
+ 'enabled': True,
+ }
+
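The last example above works because vsperf strips layers that are disabled in the TRAFFIC dictionary from the scapy definition before handing it to T-Rex, as described at the top of this section. A rough Python sketch of that idea (the function and the regular expression are illustrative assumptions, not the actual T-Rex driver code):

    import re

    def strip_disabled_layers(scapy_def, traffic):
        """Remove protocol layers that are disabled in the TRAFFIC dictionary."""
        layers = []
        if not traffic['vlan']['enabled']:
            layers.append('Dot1Q')
        if not traffic['l4']['enabled']:
            layers.append('{IP_PROTO}')
        for layer in layers:
            # drop e.g. 'Dot1Q(...)/' in the middle or a trailing '/{IP_PROTO}(...)'
            scapy_def = re.sub(r'(/)?' + re.escape(layer) + r'\([^)]*\)(/)?',
                               lambda m: '/' if m.group(1) and m.group(2) else '',
                               scapy_def)
        return scapy_def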
diff --git a/docs/testing/user/userguide/testusage.rst b/docs/testing/user/userguide/testusage.rst
index f679566e..9b331869 100644
--- a/docs/testing/user/userguide/testusage.rst
+++ b/docs/testing/user/userguide/testusage.rst
@@ -216,6 +216,12 @@ A Kernel Module that provides OSI Layer 2 Ipv4 termination or forwarding with
support for Destination Network Address Translation (DNAT) for both the MAC and
IP addresses. l2fwd can be found in <vswitchperf_dir>/src/l2fwd
+Additional Tools Setup
+^^^^^^^^^^^^^^^^^^^^^^
+
+Follow the :ref:`Additional tools instructions <additional-tools-configuration>` to
+install and configure additional tools such as collectors and loadgens.
+
Executing tests
^^^^^^^^^^^^^^^
diff --git a/pylintrc b/pylintrc
index d35114e1..3e7e9645 100644
--- a/pylintrc
+++ b/pylintrc
@@ -76,7 +76,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-disable=E1602,E1603,E1601,E1606,E1607,E1604,E1605,E1608,W0401,W1604,W1605,W1606,W1607,W1601,W1602,W1603,W1622,W1623,W1620,W1621,W1608,W1609,W1624,W1625,W1618,W1626,W1627,I0021,I0020,W0704,R0903,W1613,W1638,W1611,W1610,W1617,W1616,W1615,W1614,W1630,W1619,W1632,W1635,W1634,W1637,W1636,W1639,W1612,W1628,W1633,W1629,I0011,W1640
+disable=E1602,E1603,E1601,E1606,E1607,E1604,E1605,E1608,W0401,W1604,W1605,W1606,W1607,W1601,W1602,W1603,W1622,W1623,W1620,W1621,W1608,W1609,W1624,W1625,W1618,W1626,W1627,I0021,I0020,W0704,R0903,W1613,W1638,W1611,W1610,W1617,W1616,W1615,W1614,W1630,W1619,W1632,W1635,W1634,W1637,W1636,W1639,W1612,W1628,W1633,W1629,I0011,W1640,R1705
[REPORTS]
diff --git a/requirements.txt b/requirements.txt
index 894c1cc5..fdcff13b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@ xmlrunner==1.7.7
requests==2.8.1
netaddr==0.7.18
scapy-python3==0.18
-pylint==1.5.6
+pylint==1.8.2
pyzmq==14.5.0
distro
stcrestclient
diff --git a/src/__init__.py b/src/__init__.py
index 9293b4f8..c784ea3c 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -18,4 +18,3 @@ No functionality is expected for this package and its purpose is just to
keep Python package structure intact without extra requirements for
PYTHONPATH.
"""
-
diff --git a/src/dpdk/dpdk.py b/src/dpdk/dpdk.py
index 2f120129..c2e656ef 100644
--- a/src/dpdk/dpdk.py
+++ b/src/dpdk/dpdk.py
@@ -140,7 +140,7 @@ def _vhost_user_cleanup():
def _bind_nics():
"""Bind NICs using the bind tool specified in the configuration.
"""
- if not len(_NICS_PCI):
+ if not _NICS_PCI:
_LOGGER.info('NICs are not configured - nothing to bind')
return
try:
@@ -171,7 +171,7 @@ def _bind_nics():
def _unbind_nics():
"""Unbind NICs using the bind tool specified in the configuration.
"""
- if not len(_NICS_PCI):
+ if not _NICS_PCI:
_LOGGER.info('NICs are not configured - nothing to unbind')
return
try:
diff --git a/src/ovs/dpctl.py b/src/ovs/dpctl.py
index 015fb38c..5030223e 100644
--- a/src/ovs/dpctl.py
+++ b/src/ovs/dpctl.py
@@ -60,5 +60,5 @@ class DPCtl(object):
:return: None
"""
- self.logger.debug('delete datapath ' + dp_name)
+ self.logger.debug('delete datapath %s', dp_name)
self.run_dpctl(['del-dp', dp_name])
diff --git a/src/ovs/ofctl.py b/src/ovs/ofctl.py
index 64d54466..b023e080 100644
--- a/src/ovs/ofctl.py
+++ b/src/ovs/ofctl.py
@@ -25,10 +25,10 @@ import re
import netaddr
from tools import tasks
-from conf import settings
+from conf import settings as S
-_OVS_BRIDGE_NAME = settings.getValue('VSWITCH_BRIDGE_NAME')
-_OVS_CMD_TIMEOUT = settings.getValue('OVS_CMD_TIMEOUT')
+_OVS_BRIDGE_NAME = S.getValue('VSWITCH_BRIDGE_NAME')
+_OVS_CMD_TIMEOUT = S.getValue('OVS_CMD_TIMEOUT')
_CACHE_FILE_NAME = '/tmp/vsperf_flows_cache'
@@ -62,9 +62,11 @@ class OFBase(object):
:return: None
"""
if self.timeout == -1:
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-vsctl'], '--no-wait'] + args
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-vsctl'], '--no-wait'] + \
+ S.getValue('OVS_VSCTL_ARGS') + args
else:
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-vsctl'], '--timeout', str(self.timeout)] + args
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-vsctl'], '--timeout',
+ str(self.timeout)] + S.getValue('OVS_VSCTL_ARGS') + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-vsctl...', check_error)
@@ -77,9 +79,9 @@ class OFBase(object):
:return: None
"""
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-appctl'],
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-appctl'],
'--timeout',
- str(self.timeout)] + args
+ str(self.timeout)] + S.getValue('OVS_APPCTL_ARGS') + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-appctl...', check_error)
@@ -180,8 +182,8 @@ class OFBridge(OFBase):
:return: None
"""
tmp_timeout = self.timeout if timeout is None else timeout
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-ofctl'], '-O',
- 'OpenFlow13', '--timeout', str(tmp_timeout)] + args
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-ofctl'], '--timeout',
+ str(tmp_timeout)] + S.getValue('OVS_OFCTL_ARGS') + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-ofctl...', check_error)
@@ -467,4 +469,4 @@ def flow_match(flow_dump, flow_src):
for rule in flow_src_list:
if rule in flow_dump_list:
flow_src_ctrl.remove(rule)
- return True if not len(flow_src_ctrl) else False
+ return True if not flow_src_ctrl else False
diff --git a/systems/opensuse/42.2/build_base_machine.sh b/systems/opensuse/42.2/build_base_machine.sh
index 44d6f02b..9915b634 100755
--- a/systems/opensuse/42.2/build_base_machine.sh
+++ b/systems/opensuse/42.2/build_base_machine.sh
@@ -49,6 +49,7 @@ socat
sysstat
java-1_8_0-openjdk
git-review
+sshpass
# python
python3
diff --git a/systems/opensuse/42.3/build_base_machine.sh b/systems/opensuse/42.3/build_base_machine.sh
index cc9f24ef..2124e6cb 100755
--- a/systems/opensuse/42.3/build_base_machine.sh
+++ b/systems/opensuse/42.3/build_base_machine.sh
@@ -50,6 +50,7 @@ sysstat
java-1_8_0-openjdk
git-review
mlocate
+sshpass
# python
python3
diff --git a/systems/sles/15/build_base_machine.sh b/systems/sles/15/build_base_machine.sh
index 9c161dd7..166fe649 100755
--- a/systems/sles/15/build_base_machine.sh
+++ b/systems/sles/15/build_base_machine.sh
@@ -20,6 +20,7 @@
# Jose Lausuch, SUSE LINUX GmbH
zypper -q -n dup
+zypper ar -G http://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/ backports
zypper -q -n in -y $(echo "
# compiler, tools and dependencies
make
@@ -32,7 +33,6 @@ fuse
fuse-devel
glib2-devel
zlib-devel
-ncurses-devel
kernel-default
kernel-default-devel
pkg-config
@@ -47,7 +47,7 @@ pciutils
cifs-utils
socat
sysstat
-java-9-openjdk
+java-10-openjdk
mlocate
# python
@@ -65,14 +65,16 @@ libpixman-1-0-devel
libtool
libpcap-devel
libnet9
-libncurses5
+libncurses6
libcurl4
libcurl-devel
libxml2
libfuse2
-libopenssl1_1_0
+libopenssl1_1
libopenssl-devel
libpython3_6m1_0
+libzmq5
+sshpass
" | grep -v ^#)
diff --git a/testcases/integration.py b/testcases/integration.py
index f87a8ee2..8cfe5af5 100644
--- a/testcases/integration.py
+++ b/testcases/integration.py
@@ -17,7 +17,7 @@
import logging
from collections import OrderedDict
-from testcases import TestCase
+from testcases.testcase import TestCase
class IntegrationTestCase(TestCase):
"""IntegrationTestCase class
diff --git a/testcases/performance.py b/testcases/performance.py
index a82b5d1c..1b67911e 100644
--- a/testcases/performance.py
+++ b/testcases/performance.py
@@ -16,7 +16,7 @@
import logging
-from testcases import TestCase
+from testcases.testcase import TestCase
from tools.report import report
class PerformanceTestCase(TestCase):
diff --git a/testcases/testcase.py b/testcases/testcase.py
index 18c3186c..f28519fa 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -287,7 +287,7 @@ class TestCase(object):
# cleanup any namespaces created
if os.path.isdir('/tmp/namespaces'):
namespace_list = os.listdir('/tmp/namespaces')
- if len(namespace_list):
+ if namespace_list:
self._logger.info('Cleaning up namespaces')
for name in namespace_list:
namespace.delete_namespace(name)
@@ -295,7 +295,7 @@ class TestCase(object):
# cleanup any veth ports created
if os.path.isdir('/tmp/veth'):
veth_list = os.listdir('/tmp/veth')
- if len(veth_list):
+ if veth_list:
self._logger.info('Cleaning up veth ports')
for eth in veth_list:
port1, port2 = eth.split('-')
@@ -322,8 +322,8 @@ class TestCase(object):
if len(self._tc_results) < len(results):
if len(self._tc_results) > 1:
raise RuntimeError('Testcase results do not match:'
-                                       'results: {}\n'
-                                       'trafficgen results: {}\n',
-                                       self._tc_results,
-                                       results)
+                                       'results: %s\n'
+                                       'trafficgen results: %s\n'
+                                       % (self._tc_results, results))
else:
@@ -379,7 +379,7 @@ class TestCase(object):
self._testcase_run_time = time.strftime("%H:%M:%S",
time.gmtime(self._testcase_stop_time -
self._testcase_start_time))
- logging.info("Testcase execution time: " + self._testcase_run_time)
+ logging.info("Testcase execution time: %s", self._testcase_run_time)
# report test results
self.run_report()
@@ -562,7 +562,7 @@ class TestCase(object):
"""
with open(output, 'a') as csvfile:
- logging.info("Write results to file: " + output)
+ logging.info("Write results to file: %s", output)
fieldnames = TestCase._get_unique_keys(results)
writer = csv.DictWriter(csvfile, fieldnames)
@@ -720,7 +720,7 @@ class TestCase(object):
self._logger.debug("Skipping %s as it isn't a configuration "
"parameter.", '${}'.format(macro[0]))
return param
- elif isinstance(param, list) or isinstance(param, tuple):
+ elif isinstance(param, (list, tuple)):
tmp_list = []
for item in param:
tmp_list.append(self.step_eval_param(item, step_result))
@@ -758,9 +758,6 @@ class TestCase(object):
# initialize list with results
self._step_result = [None] * len(self.test)
- # We have to suppress pylint report, because test_object has to be set according
- # to the test step definition
- # pylint: disable=redefined-variable-type
# run test step by step...
for i, step in enumerate(self.test):
step_ok = not self._step_check
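
Several of the hunks above swap string concatenation inside logging calls for lazy %-style arguments. The following standalone sketch (illustrative values, not VSPERF code) shows the difference the linter is after:

    # Illustrative sketch: passing the value as an argument lets the logging
    # module defer string formatting until the record is actually emitted,
    # which is what pylint's logging-not-lazy check expects.
    import logging

    logging.basicConfig(level=logging.INFO)
    run_time = "00:03:27"

    # eager: the message string is built even when INFO is disabled
    logging.info("Testcase execution time: " + run_time)

    # lazy: formatting happens only if the message is really logged
    logging.info("Testcase execution time: %s", run_time)
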
diff --git a/tools/collectors/collectd/collectd.py b/tools/collectors/collectd/collectd.py
index 90df6b04..700aef47 100644
--- a/tools/collectors/collectd/collectd.py
+++ b/tools/collectors/collectd/collectd.py
@@ -47,7 +47,7 @@ def get_label(sample):
for label in YLABELS:
if any(r in sample for r in YLABELS[label]):
return label
-
+ return None
def plot_graphs(dict_of_arrays):
"""
@@ -259,7 +259,7 @@ class Collectd(collector.ICollector):
plot_graphs(self.results)
proc_stats = get_results_to_print(self.results)
for process in proc_stats:
- logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+ logging.info("Process: %s", '_'.join(process.split('_')[:-1]))
for(key, value) in proc_stats[process].items():
logging.info(" Statistic: " + str(key) +
", Value: " + str(value))
diff --git a/tools/collectors/collectd/collectd_bucky.py b/tools/collectors/collectd/collectd_bucky.py
index bac24ed7..f6061c55 100644
--- a/tools/collectors/collectd/collectd_bucky.py
+++ b/tools/collectors/collectd/collectd_bucky.py
@@ -498,6 +498,7 @@ class CollectDCrypto(object):
return self.parse_signed(part_len, data)
if sec_level == 2:
return self.parse_encrypted(part_len, data)
+ return None
def parse_signed(self, part_len, data):
"""
@@ -574,12 +575,12 @@ class CollectDConverter(object):
try:
name_parts = handler(sample)
if name_parts is None:
- return # treat None as "ignore sample"
+ return None # treat None as "ignore sample"
name = '.'.join(name_parts)
except (AttributeError, IndexError, MemoryError, RuntimeError):
LOG.exception("Exception in sample handler %s (%s):",
sample["plugin"], handler)
- return
+ return None
host = sample.get("host", "")
return (
host,
@@ -655,7 +656,7 @@ class CollectDHandler(object):
Check the value range
"""
if val is None:
- return
+ return None
try:
vmin, vmax = self.parser.types.type_ranges[stype][vname]
except KeyError:
@@ -664,11 +665,11 @@ class CollectDHandler(object):
if vmin is not None and val < vmin:
LOG.debug("Invalid value %s (<%s) for %s", val, vmin, vname)
LOG.debug("Last sample: %s", self.last_sample)
- return
+ return None
if vmax is not None and val > vmax:
LOG.debug("Invalid value %s (>%s) for %s", val, vmax, vname)
LOG.debug("Last sample: %s", self.last_sample)
- return
+ return None
return val
def calculate(self, host, name, vtype, val, time):
@@ -684,7 +685,7 @@ class CollectDHandler(object):
if vtype not in handlers:
LOG.error("Invalid value type %s for %s", vtype, name)
LOG.info("Last sample: %s", self.last_sample)
- return
+ return None
return handlers[vtype](host, name, val, time)
def _calc_counter(self, host, name, val, time):
@@ -694,13 +695,13 @@ class CollectDHandler(object):
key = (host, name)
if key not in self.prev_samples:
self.prev_samples[key] = (val, time)
- return
+ return None
pval, ptime = self.prev_samples[key]
self.prev_samples[key] = (val, time)
if time <= ptime:
LOG.error("Invalid COUNTER update for: %s:%s", key[0], key[1])
LOG.info("Last sample: %s", self.last_sample)
- return
+ return None
if val < pval:
# this is supposed to handle counter wrap around
# see https://collectd.org/wiki/index.php/Data_source
@@ -719,13 +720,13 @@ class CollectDHandler(object):
key = (host, name)
if key not in self.prev_samples:
self.prev_samples[key] = (val, time)
- return
+ return None
pval, ptime = self.prev_samples[key]
self.prev_samples[key] = (val, time)
if time <= ptime:
LOG.debug("Invalid DERIVE update for: %s:%s", key[0], key[1])
LOG.debug("Last sample: %s", self.last_sample)
- return
+ return None
return float(abs(val - pval)) / (time - ptime)
def _calc_absolute(self, host, name, val, time):
@@ -735,13 +736,13 @@ class CollectDHandler(object):
key = (host, name)
if key not in self.prev_samples:
self.prev_samples[key] = (val, time)
- return
+ return None
_, ptime = self.prev_samples[key]
self.prev_samples[key] = (val, time)
if time <= ptime:
LOG.error("Invalid ABSOLUTE update for: %s:%s", key[0], key[1])
LOG.info("Last sample: %s", self.last_sample)
- return
+ return None
return float(val) / (time - ptime)
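
The COUNTER/DERIVE/ABSOLUTE helpers shown above all follow the same scheme: cache the previous (value, time) pair per key, return None for the first sample, and emit a per-second rate afterwards. A condensed, standalone sketch of the DERIVE case (simplified, not the bucky handler class itself):

    # The first sample for a key only seeds the cache and yields None;
    # later samples are converted to a per-second rate.
    _prev = {}

    def calc_derive(key, val, timestamp):
        """Return the rate of change for ``key``, or None for the first sample."""
        if key not in _prev:
            _prev[key] = (val, timestamp)
            return None
        pval, ptime = _prev[key]
        _prev[key] = (val, timestamp)
        if timestamp <= ptime:
            return None                     # out-of-order or duplicate sample
        return float(abs(val - pval)) / (timestamp - ptime)

    assert calc_derive(('host1', 'if_octets'), 1000, 10.0) is None
    assert calc_derive(('host1', 'if_octets'), 4000, 20.0) == 300.0
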
diff --git a/tools/collectors/sysmetrics/pidstat.py b/tools/collectors/sysmetrics/pidstat.py
index 99341ccf..277fdb11 100644
--- a/tools/collectors/sysmetrics/pidstat.py
+++ b/tools/collectors/sysmetrics/pidstat.py
@@ -70,13 +70,13 @@ class Pidstat(collector.ICollector):
into the file in directory with test results
"""
monitor = settings.getValue('PIDSTAT_MONITOR')
- self._logger.info('Statistics are requested for: ' + ', '.join(monitor))
+ self._logger.info('Statistics are requested for: %s', ', '.join(monitor))
pids = systeminfo.get_pids(monitor)
if pids:
with open(self._log, 'w') as logfile:
cmd = ['sudo', 'LC_ALL=' + settings.getValue('DEFAULT_CMD_LOCALE'),
'pidstat', settings.getValue('PIDSTAT_OPTIONS'),
- '-p', ','.join(pids),
+ '-t', '-p', ','.join(pids),
str(settings.getValue('PIDSTAT_SAMPLE_INTERVAL'))]
self._logger.debug('%s', ' '.join(cmd))
self._pid = subprocess.Popen(cmd, stdout=logfile, bufsize=0).pid
@@ -116,16 +116,48 @@ class Pidstat(collector.ICollector):
# combine stored header fields with actual values
tmp_res = OrderedDict(zip(tmp_header,
line[8:].split()))
- # use process's name and its pid as unique key
- key = tmp_res.pop('Command') + '_' + tmp_res['PID']
- # store values for given command into results dict
- if key in self._results:
- self._results[key].update(tmp_res)
- else:
- self._results[key] = tmp_res
+ cmd = tmp_res.pop('Command')
+ # remove unused fields (given by option '-t')
+ tmp_res.pop('UID')
+ tmp_res.pop('TID')
+ if '|_' not in cmd: # main process
+ # use process's name and its pid as unique key
+ tmp_pid = tmp_res.pop('TGID')
+ tmp_key = "%s_%s" % (cmd, tmp_pid)
+ # do not trust cpu usage of pid
+ # see VSPERF-569 for more details
+ if 'CPU' not in tmp_header:
+ self.update_results(tmp_key, tmp_res, False)
+ else: # thread
+ # accumulate cpu usage of all threads
+ if 'CPU' in tmp_header:
+ tmp_res.pop('TGID')
+ self.update_results(tmp_key, tmp_res, True)
line = logfile.readline()
+ def update_results(self, key, result, accumulate=False):
+ """
+ Update final results dictionary. If ``accumulate`` param is set to
+ ``True``, try to accumulate existing values.
+ """
+ # store values for given command into results dict
+ if key not in self._results:
+ self._results[key] = result
+ elif accumulate:
+ for field in result:
+ if field not in self._results[key]:
+ self._results[key][field] = result[field]
+ else:
+ try:
+ val = float(self._results[key][field]) + float(result[field])
+ self._results[key][field] = '{0:.2f}'.format(val)
+ except ValueError:
+                            # cannot cast to float, so store the latest sampled value as is
+ self._results[key][field] = result[field]
+ else:
+ self._results[key].update(result)
+
def get_results(self):
"""Returns collected statistics.
"""
@@ -135,7 +167,7 @@ class Pidstat(collector.ICollector):
"""Logs collected statistics.
"""
for process in self._results:
- logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+ logging.info("Process: %s", '_'.join(process.split('_')[:-1]))
for(key, value) in self._results[process].items():
logging.info(" Statistic: " + str(key) +
", Value: " + str(value))
diff --git a/tools/functions.py b/tools/functions.py
index d35f1f84..65c9978b 100644
--- a/tools/functions.py
+++ b/tools/functions.py
@@ -127,7 +127,7 @@ def settings_update_paths():
# expand OS wildcards in paths if needed
if glob.has_magic(tmp_tool):
tmp_glob = glob.glob(tmp_tool)
- if len(tmp_glob) == 0:
+ if not tmp_glob:
raise RuntimeError('Path to the {} is not valid: {}.'.format(tool, tmp_tool))
elif len(tmp_glob) > 1:
raise RuntimeError('Path to the {} is ambiguous {}'.format(tool, tmp_glob))
diff --git a/tools/load_gen/stress_ng/stress_ng.py b/tools/load_gen/stress_ng/stress_ng.py
index c2592dd1..41bfe990 100644
--- a/tools/load_gen/stress_ng/stress_ng.py
+++ b/tools/load_gen/stress_ng/stress_ng.py
@@ -30,6 +30,3 @@ class StressNg(Stress):
'name': 'stress-ng'
}
_logger = logging.getLogger(__name__)
-
- def __init__(self, stress_config):
- super(StressNg, self).__init__(stress_config)
diff --git a/tools/load_gen/stressorvm/stressor_vm.py b/tools/load_gen/stressorvm/stressor_vm.py
index 410f10e3..f4936743 100644
--- a/tools/load_gen/stressorvm/stressor_vm.py
+++ b/tools/load_gen/stressorvm/stressor_vm.py
@@ -45,7 +45,7 @@ class QemuVM(tasks.Process):
try:
os.makedirs(self._shared_dir)
except OSError as exp:
- raise OSError("Failed to create shared directory %s: %s",
+ raise OSError("Failed to create shared directory %s: %s" %
self._shared_dir, exp)
self.nics_nr = S.getValue('NN_NICS_NR')[self._number]
@@ -96,8 +96,7 @@ class StressorVM(ILoadGenerator):
"""
Wrapper Class for Load-Generation through stressor-vm
"""
- # pylint: disable=unused-argument
- def __init__(self, config):
+ def __init__(self, _config):
self.qvm_list = []
for vmindex in range(int(S.getValue('NN_COUNT'))):
qvm = QemuVM(vmindex)
diff --git a/tools/module_manager.py b/tools/module_manager.py
index dd1d92be..943399ba 100644
--- a/tools/module_manager.py
+++ b/tools/module_manager.py
@@ -160,7 +160,7 @@ class ModuleManager(object):
             self._logger.info('Unable to get list of dependencies for module \'%s\'.', module)
             # ...and try to continue, just in case the dependencies are already loaded
- if len(deps):
+ if deps:
return deps.split(',')
else:
return []
diff --git a/tools/namespace.py b/tools/namespace.py
index 9131398f..50374b95 100644
--- a/tools/namespace.py
+++ b/tools/namespace.py
@@ -135,9 +135,8 @@ def reset_port_to_root(port, name):
port, name), False)
-# pylint: disable=unused-argument
# pylint: disable=invalid-name
-def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
+def validate_add_ip_to_namespace_eth(_result, port, name, ip_addr, cidr):
"""
Validation function for integration testcases
"""
@@ -147,7 +146,7 @@ def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
_LOGGER, 'Validating ip address in namespace...', False))
-def validate_assign_port_to_namespace(result, port, name, port_up=False):
+def validate_assign_port_to_namespace(_result, port, name, _port_up=False):
"""
Validation function for integration testcases
"""
@@ -157,14 +156,14 @@ def validate_assign_port_to_namespace(result, port, name, port_up=False):
_LOGGER, 'Validating port in namespace...'))
-def validate_create_namespace(result, name):
+def validate_create_namespace(_result, name):
"""
Validation function for integration testcases
"""
return name in get_system_namespace_list()
-def validate_delete_namespace(result, name):
+def validate_delete_namespace(_result, name):
"""
Validation function for integration testcases
"""
diff --git a/tools/networkcard.py b/tools/networkcard.py
index 2cd296fb..758010d2 100644
--- a/tools/networkcard.py
+++ b/tools/networkcard.py
@@ -191,7 +191,7 @@ def get_mac(pci_handle):
"""
mac_path = glob.glob(os.path.join(_PCI_DIR, _PCI_NET, '*', 'address').format(pci_handle))
# kernel driver is loaded and MAC can be read
- if len(mac_path) and os.path.isfile(mac_path[0]):
+ if mac_path and os.path.isfile(mac_path[0]):
with open(mac_path[0], 'r') as _file:
return _file.readline().rstrip('\n')
diff --git a/tools/pkt_gen/dummy/dummy.py b/tools/pkt_gen/dummy/dummy.py
index 3dc5448e..ef4b37d9 100755
--- a/tools/pkt_gen/dummy/dummy.py
+++ b/tools/pkt_gen/dummy/dummy.py
@@ -25,6 +25,7 @@ own.
import json
+from collections import OrderedDict
from conf import settings
from conf import merge_spec
from tools.pkt_gen import trafficgen
@@ -108,41 +109,41 @@ class Dummy(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""
Send a burst of traffic.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
results = get_user_traffic(
'burst',
- '%dpkts, %dmS' % (numpkts, duration),
+ '%dpkts, %dmS' % (traffic['burst_size'], duration),
traffic_,
('frames rx', 'payload errors', 'sequence errors'))
# builds results by using user-supplied values where possible
# and guessing remainder using available info
- result[ResultsConstants.TX_FRAMES] = numpkts
+ result[ResultsConstants.TX_FRAMES] = traffic['burst_size']
result[ResultsConstants.RX_FRAMES] = results[0]
result[ResultsConstants.TX_BYTES] = traffic_['l2']['framesize'] \
- * numpkts
+ * traffic['burst_size']
result[ResultsConstants.RX_BYTES] = traffic_['l2']['framesize'] \
* results[0]
result[ResultsConstants.PAYLOAD_ERR] = results[1]
result[ResultsConstants.SEQ_ERR] = results[2]
- return results
+ return result
def send_cont_traffic(self, traffic=None, duration=30):
"""
Send a continuous flow of traffic.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -179,7 +180,7 @@ class Dummy(trafficgen.ITrafficGenerator):
Send traffic per RFC2544 throughput test specifications.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -216,7 +217,7 @@ class Dummy(trafficgen.ITrafficGenerator):
Send traffic per RFC2544 back2back test specifications.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -273,4 +274,5 @@ if __name__ == '__main__':
print(dev.send_cont_traffic(traffic=TRAFFIC))
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
print(dev.send_rfc2544_back2back(traffic=TRAFFIC))
+ # pylint: disable=no-member
print(dev.send_rfc(traffic=TRAFFIC))
diff --git a/tools/pkt_gen/ixia/ixia.py b/tools/pkt_gen/ixia/ixia.py
index d4ca56f2..31f51246 100755
--- a/tools/pkt_gen/ixia/ixia.py
+++ b/tools/pkt_gen/ixia/ixia.py
@@ -157,8 +157,8 @@ class Ixia(trafficgen.ITrafficGenerator):
return NotImplementedError(
'Ixia start back2back traffic not implemented')
- def send_rfc2544_back2back(self, traffic=None, duration=60,
- lossrate=0.0, tests=1):
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
return NotImplementedError(
'Ixia send back2back traffic not implemented')
@@ -242,11 +242,11 @@ class Ixia(trafficgen.ITrafficGenerator):
return result
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""See ITrafficGenerator for description
"""
flow = {
- 'numpkts': numpkts,
+ 'numpkts': traffic['burst_size'],
'duration': duration,
'type': 'stopStream',
'framerate': traffic['frame_rate'],
@@ -254,9 +254,9 @@ class Ixia(trafficgen.ITrafficGenerator):
result = self._send_traffic(flow, traffic)
- assert len(result) == 6 # fail-fast if underlying Tcl code changes
+ assert len(result) == 10 # fail-fast if underlying Tcl code changes
- #NOTE - implement Burst results setting via TrafficgenResults.
+ return Ixia._create_result(result)
def send_cont_traffic(self, traffic=None, duration=30):
"""See ITrafficGenerator for description
@@ -317,20 +317,25 @@ class Ixia(trafficgen.ITrafficGenerator):
:returns: dictionary strings representing results from
traffic generator.
"""
- assert len(result) == 8 # fail-fast if underlying Tcl code changes
+        assert len(result) in (8, 10) # fail-fast if underlying Tcl code changes
+
+ # content of result common for all tests
+ # [framesSent, framesRecv, bytesSent, bytesRecv, sendRate, recvRate, sendRateBytes, recvRateBytes]
+ # the burst test has two additional values at the end: payError, seqError
if float(result[0]) == 0:
loss_rate = 100
else:
- loss_rate = (float(result[0]) - float(result[1])) / float(result[0]) * 100
+ loss_rate = round((float(result[0]) - float(result[1])) / float(result[0]) * 100, 5)
result_dict = OrderedDict()
- # drop the first 4 elements as we don't use/need them. In
- # addition, IxExplorer does not support latency or % line rate
+ # IxExplorer does not support latency or % line rate
# metrics so we have to return dummy values for these metrics
- result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = result[4]
- result_dict[ResultsConstants.TX_RATE_FPS] = result[5]
- result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = str(round(int(result[6]) / 1000000, 3))
- result_dict[ResultsConstants.TX_RATE_MBPS] = str(round(int(result[7]) / 1000000, 3))
+ result_dict[ResultsConstants.TX_FRAMES] = result[0]
+ result_dict[ResultsConstants.RX_FRAMES] = result[1]
+ result_dict[ResultsConstants.TX_RATE_FPS] = result[4]
+ result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = result[5]
+ result_dict[ResultsConstants.TX_RATE_MBPS] = str(round(int(result[6]) * 8 / 1e6, 3))
+ result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = str(round(int(result[7]) * 8 / 1e6, 3))
result_dict[ResultsConstants.FRAME_LOSS_PERCENT] = loss_rate
result_dict[ResultsConstants.TX_RATE_PERCENT] = \
ResultsConstants.UNKNOWN_VALUE
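
The reworked _create_result() treats result[6] and result[7] as byte rates (sendRateBytes/recvRateBytes), hence the '* 8 / 1e6' conversion to Mbps and the rounding of the loss rate. A small worked example with made-up numbers:

    # IxExplorer reports the rates in bytes per second, so the value is
    # multiplied by 8 and divided by 1e6 to obtain Mbit/s.
    recv_rate_bytes = 1250000               # 1.25 MB/s reported by the chassis
    recv_rate_mbps = round(int(recv_rate_bytes) * 8 / 1e6, 3)
    assert recv_rate_mbps == 10.0           # 1 250 000 B/s == 10 Mbit/s

    # the loss rate is rounded to 5 decimal places to keep reports readable
    tx_frames, rx_frames = 1000000, 999999
    loss = round((float(tx_frames) - float(rx_frames)) / float(tx_frames) * 100, 5)
    print(loss)                             # 0.0001 (percent)
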
diff --git a/tools/pkt_gen/ixnet/ixnet.py b/tools/pkt_gen/ixnet/ixnet.py
index d1ba9096..87fb2c65 100755
--- a/tools/pkt_gen/ixnet/ixnet.py
+++ b/tools/pkt_gen/ixnet/ixnet.py
@@ -370,7 +370,7 @@ class IxNet(trafficgen.ITrafficGenerator):
next(reader)
for row in reader:
#Replace null entries added by Ixia with 0s.
- row = [entry if len(entry) > 0 else '0' for entry in row]
+ row = [entry if entry else '0' for entry in row]
# tx_fps and tx_mps cannot be reliably calculated
# as the DUT may be modifying the frame size
@@ -528,7 +528,7 @@ class IxNet(trafficgen.ITrafficGenerator):
return parse_ixnet_rfc_results(parse_result_string(output[0]))
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
return NotImplementedError('IxNet does not implement send_burst_traffic')
if __name__ == '__main__':
diff --git a/tools/pkt_gen/moongen/moongen.py b/tools/pkt_gen/moongen/moongen.py
index 570720e8..b7d55c4d 100644
--- a/tools/pkt_gen/moongen/moongen.py
+++ b/tools/pkt_gen/moongen/moongen.py
@@ -64,46 +64,46 @@ class Moongen(ITrafficGenerator):
:param one_shot: No RFC 2544 binary search,
just packet flow at traffic specifics
"""
- logging.debug("traffic['frame_rate'] = " + \
+ logging.debug("traffic['frame_rate'] = %s", \
str(traffic['frame_rate']))
- logging.debug("traffic['multistream'] = " + \
+ logging.debug("traffic['multistream'] = %s", \
str(traffic['multistream']))
- logging.debug("traffic['stream_type'] = " + \
+ logging.debug("traffic['stream_type'] = %s", \
str(traffic['stream_type']))
- logging.debug("traffic['l2']['srcmac'] = " + \
+ logging.debug("traffic['l2']['srcmac'] = %s", \
str(traffic['l2']['srcmac']))
- logging.debug("traffic['l2']['dstmac'] = " + \
+ logging.debug("traffic['l2']['dstmac'] = %s", \
str(traffic['l2']['dstmac']))
- logging.debug("traffic['l3']['proto'] = " + \
+ logging.debug("traffic['l3']['proto'] = %s", \
str(traffic['l3']['proto']))
- logging.debug("traffic['l3']['srcip'] = " + \
+ logging.debug("traffic['l3']['srcip'] = %s", \
str(traffic['l3']['srcip']))
- logging.debug("traffic['l3']['dstip'] = " + \
+ logging.debug("traffic['l3']['dstip'] = %s", \
str(traffic['l3']['dstip']))
- logging.debug("traffic['l4']['srcport'] = " + \
+ logging.debug("traffic['l4']['srcport'] = %s", \
str(traffic['l4']['srcport']))
- logging.debug("traffic['l4']['dstport'] = " + \
+ logging.debug("traffic['l4']['dstport'] = %s", \
str(traffic['l4']['dstport']))
- logging.debug("traffic['vlan']['enabled'] = " + \
+ logging.debug("traffic['vlan']['enabled'] = %s", \
str(traffic['vlan']['enabled']))
- logging.debug("traffic['vlan']['id'] = " + \
+ logging.debug("traffic['vlan']['id'] = %s", \
str(traffic['vlan']['id']))
- logging.debug("traffic['vlan']['priority'] = " + \
+ logging.debug("traffic['vlan']['priority'] = %s", \
str(traffic['vlan']['priority']))
- logging.debug("traffic['vlan']['cfi'] = " + \
+ logging.debug("traffic['vlan']['cfi'] = %s", \
str(traffic['vlan']['cfi']))
logging.debug(traffic['l2']['framesize'])
@@ -160,9 +160,9 @@ class Moongen(ITrafficGenerator):
(traffic['frame_rate'] / 100) * (self._moongen_line_speed / \
(8 * (traffic['l2']['framesize'] + 20)) / math.pow(10, 6)))
- logging.debug("startRate = " + start_rate)
+ logging.debug("startRate = %s", start_rate)
- out_file.write("startRate = " + \
+ out_file.write("startRate = %s" % \
start_rate + "\n")
out_file.write("}" + "\n")
@@ -240,14 +240,13 @@ class Moongen(ITrafficGenerator):
"""
self._logger.info("MOONGEN: In moongen disconnect method")
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
- Send a ``numpkts`` packets of traffic, using ``traffic``
+        Send ``traffic['burst_size']`` packets of traffic, using ``traffic``
configuration, with a timeout of ``time``.
:param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
- :param numpkts: Number of packets to send
:param duration: Time to wait to receive packets
:returns: dictionary of strings with following data:
@@ -508,8 +507,8 @@ class Moongen(ITrafficGenerator):
return moongen_results
- def send_rfc2544_throughput(self, traffic=None, duration=20,
- lossrate=0.0, tests=1):
+ def send_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
#
# Send traffic per RFC2544 throughput test specifications.
#
@@ -631,8 +630,8 @@ class Moongen(ITrafficGenerator):
"""
self._logger.info('In moongen wait_rfc2544_throughput')
- def send_rfc2544_back2back(self, traffic=None, duration=60,
- lossrate=0.0, tests=1):
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
"""Send traffic per RFC2544 back2back test specifications.
Send packets at a fixed rate, using ``traffic``
diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py
index 9980ae7c..487566bf 100644
--- a/tools/pkt_gen/testcenter/testcenter.py
+++ b/tools/pkt_gen/testcenter/testcenter.py
@@ -182,7 +182,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""
Do nothing.
"""
@@ -246,8 +246,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
row["ForwardingRate(fps)"])
return result
- # pylint: disable=unused-argument
- def send_rfc2889_forwarding(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_forwarding(self, traffic=None, tests=1, _duration=20):
"""
Send traffic per RFC2889 Forwarding test specifications.
"""
@@ -257,7 +256,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
framesize = traffic['l2']['framesize']
args = get_rfc2889_common_settings(framesize, tests,
traffic['traffic_type'])
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -273,7 +272,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_forwarding_results(filec)
- def send_rfc2889_caching(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_caching(self, traffic=None, tests=1, _duration=20):
"""
Send as per RFC2889 Addr-Caching test specifications.
"""
@@ -286,7 +285,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom_args = get_rfc2889_custom_settings()
args = common_args + custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -302,7 +301,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_addr_caching_results(filec)
- def send_rfc2889_learning(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_learning(self, traffic=None, tests=1, _duration=20):
"""
Send traffic per RFC2889 Addr-Learning test specifications.
"""
@@ -315,7 +314,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom_args = get_rfc2889_custom_settings()
args = common_args + custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -387,7 +386,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom, 1)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -420,7 +419,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
tests)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -453,7 +452,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
tests)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.info("Arguments used to call test: %s", args)
@@ -498,4 +497,5 @@ if __name__ == '__main__':
}
with TestCenter() as dev:
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
+ # pylint: disable=no-member
print(dev.send_rfc2544_backtoback(traffic=TRAFFIC))
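
The repeated change from 'is "True"' to '== "True"' above fixes a classic Python pitfall: 'is' compares object identity, so it only appears to work for interned literals. A tiny, generator-agnostic illustration:

    # 'is' tests object identity, not value; it breaks for strings read from
    # configuration, files or the environment, while '==' compares the text.
    verbose_setting = "".join(["Tr", "ue"])    # equal to "True", new object

    print(verbose_setting == "True")           # True  - value comparison
    result = verbose_setting is "True"         # CPython 3.8+ also warns here
    print(result)                              # False - identity comparison
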
diff --git a/tools/pkt_gen/trafficgen/trafficgen.py b/tools/pkt_gen/trafficgen/trafficgen.py
index 262df71d..a6f7edcc 100755
--- a/tools/pkt_gen/trafficgen/trafficgen.py
+++ b/tools/pkt_gen/trafficgen/trafficgen.py
@@ -81,15 +81,14 @@ class ITrafficGenerator(object):
"""
raise NotImplementedError('Please call an implementation.')
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
- Send a ``numpkts`` packets of traffic, using ``traffic``
+        Send ``traffic['burst_size']`` packets of traffic, using ``traffic``
configuration, for ``duration`` seconds.
Attributes:
:param traffic: Detailed "traffic" spec, see design docs for details
- :param numpkts: Number of packets to send
:param duration: Time to wait to receive packets
:returns: dictionary of strings with following data:
diff --git a/tools/pkt_gen/trex/trex.py b/tools/pkt_gen/trex/trex.py
index e0ce4c48..94b793d6 100644
--- a/tools/pkt_gen/trex/trex.py
+++ b/tools/pkt_gen/trex/trex.py
@@ -15,12 +15,14 @@
"""
Trex Traffic Generator Model
"""
+
# pylint: disable=undefined-variable
import logging
import subprocess
import sys
import time
import os
+import re
from collections import OrderedDict
# pylint: disable=unused-import
import netaddr
@@ -69,6 +71,20 @@ _EMPTY_STATS = {
'tx_pps': 0.0,
'tx_util': 0.0,}}
+# Default frame definition, which can be overridden by TRAFFIC['scapy'].
+# The content of the frame and its network layers are driven by TRAFFIC
+# dictionary, i.e. 'l2', 'l3', 'l4' and 'vlan' parts.
+_SCAPY_FRAME = {
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+}
+
class Trex(ITrafficGenerator):
"""Trex Traffic generator wrapper."""
@@ -165,35 +181,77 @@ class Trex(ITrafficGenerator):
self._logger.info("T-Rex: In trex disconnect method")
self._stlclient.disconnect(stop_traffic=True, release_ports=True)
- @staticmethod
- def create_packets(traffic, ports_info):
+ def create_packets(self, traffic, ports_info):
"""Create base packet according to traffic specification.
        If traffic hasn't specified srcmac and dstmac fields
-        packet will be create with mac address of trex server.
+        the packet will be created with the MAC addresses of the T-Rex server ports.
"""
- mac_add = [li['hw_mac'] for li in ports_info]
-
- if traffic and traffic['l2']['framesize'] > 0:
- if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
- traffic['l2']['srcmac'] == '00:00:00:00:00:00':
- base_pkt_a = Ether(src=mac_add[0], dst=mac_add[1])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['srcip'],
- dst=traffic['l3']['dstip'])/ \
- UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
- base_pkt_b = Ether(src=mac_add[1], dst=mac_add[0])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
- else:
- base_pkt_a = Ether(src=traffic['l2']['srcmac'], dst=traffic['l2']['dstmac'])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
+ if not traffic or traffic['l2']['framesize'] <= 0:
+ return (None, None)
- base_pkt_b = Ether(src=traffic['l2']['dstmac'], dst=traffic['l2']['srcmac'])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
+ if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
+ traffic['l2']['srcmac'] == '00:00:00:00:00:00':
+
+ mac_add = [li['hw_mac'] for li in ports_info]
+ src_mac = mac_add[0]
+ dst_mac = mac_add[1]
+ else:
+ src_mac = traffic['l2']['srcmac']
+ dst_mac = traffic['l2']['dstmac']
+
+ if traffic['scapy']['enabled']:
+ base_pkt_a = traffic['scapy']['0']
+ base_pkt_b = traffic['scapy']['1']
+ else:
+ base_pkt_a = _SCAPY_FRAME['0']
+ base_pkt_b = _SCAPY_FRAME['1']
+
+ # check and remove network layers disabled by TRAFFIC dictionary
+ # Note: In general, it is possible to remove layers from scapy object by
+ # e.g. del base_pkt_a['IP']. However it doesn't work for all layers
+ # (e.g. Dot1Q). Thus it is safer to modify the string with the scapy frame
+ # definition directly, before it is converted to the real scapy object.
+ if not traffic['vlan']['enabled']:
+ self._logger.info('VLAN headers are disabled by TRAFFIC')
+ base_pkt_a = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', base_pkt_b)
+ if not traffic['l3']['enabled']:
+ self._logger.info('IP headers are disabled by TRAFFIC')
+ base_pkt_a = re.sub(r'(^|\/)IP(v6)?\([^\)]*\)', '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)IP(v6)?\([^\)]*\)', '', base_pkt_b)
+ if not traffic['l4']['enabled']:
+ self._logger.info('%s headers are disabled by TRAFFIC',
+ traffic['l3']['proto'].upper())
+ base_pkt_a = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format(
+ traffic['l3']['proto'].upper()), '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format(
+ traffic['l3']['proto'].upper()), '', base_pkt_b)
+
+ # pylint: disable=eval-used
+ base_pkt_a = eval(base_pkt_a.format(
+ Ether_src=repr(src_mac),
+ Ether_dst=repr(dst_mac),
+ Dot1Q_prio=traffic['vlan']['priority'],
+ Dot1Q_id=traffic['vlan']['cfi'],
+ Dot1Q_vlan=traffic['vlan']['id'],
+ IP_proto=repr(traffic['l3']['proto']),
+ IP_PROTO=traffic['l3']['proto'].upper(),
+ IP_src=repr(traffic['l3']['srcip']),
+ IP_dst=repr(traffic['l3']['dstip']),
+ IP_PROTO_sport=traffic['l4']['srcport'],
+ IP_PROTO_dport=traffic['l4']['dstport']))
+ base_pkt_b = eval(base_pkt_b.format(
+ Ether_src=repr(src_mac),
+ Ether_dst=repr(dst_mac),
+ Dot1Q_prio=traffic['vlan']['priority'],
+ Dot1Q_id=traffic['vlan']['cfi'],
+ Dot1Q_vlan=traffic['vlan']['id'],
+ IP_proto=repr(traffic['l3']['proto']),
+ IP_PROTO=traffic['l3']['proto'].upper(),
+ IP_src=repr(traffic['l3']['srcip']),
+ IP_dst=repr(traffic['l3']['dstip']),
+ IP_PROTO_sport=traffic['l4']['srcport'],
+ IP_PROTO_dport=traffic['l4']['dstport']))
return (base_pkt_a, base_pkt_b)
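
create_packets() now keeps the frame as a text template, strips optional layers from the string with regular expressions, and only then turns it into a scapy object via format() and eval(). A simplified standalone sketch of that flow (not the Trex class; field values are made up and scapy must be installed):

    import re
    from scapy.all import Ether, Dot1Q, IP, UDP    # noqa: F401 (used via eval)

    TEMPLATE = ('Ether(src={Ether_src}, dst={Ether_dst})/'
                'Dot1Q(vlan={Dot1Q_vlan})/'
                'IP(src={IP_src}, dst={IP_dst})/'
                'UDP(sport={sport}, dport={dport})')

    def build_packet(template, vlan_enabled=True):
        if not vlan_enabled:
            # drop the whole Dot1Q(...) layer from the textual definition
            template = re.sub(r'(^|\/)Dot1Q\([^\)]*\)', '', template)
        # repr() keeps the quotes around string values inside the template
        frame = template.format(Ether_src=repr('aa:bb:cc:dd:ee:ff'),
                                Ether_dst=repr('11:22:33:44:55:66'),
                                Dot1Q_vlan=10,
                                IP_src=repr('1.1.1.1'),
                                IP_dst=repr('90.90.90.90'),
                                sport=3000, dport=3001)
        # pylint: disable=eval-used
        return eval(frame)

    print(build_packet(TEMPLATE).summary())                      # VLAN tagged
    print(build_packet(TEMPLATE, vlan_enabled=False).summary())  # VLAN removed
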
@@ -248,22 +306,48 @@ class Trex(ITrafficGenerator):
pkt_a = STLPktBuilder(pkt=base_pkt_a / payload_a)
pkt_b = STLPktBuilder(pkt=base_pkt_b / payload_b)
- stream_1 = STLStream(packet=pkt_a,
- name='stream_1',
- mode=STLTXCont(percentage=traffic['frame_rate']))
- stream_2 = STLStream(packet=pkt_b,
- name='stream_2',
- mode=STLTXCont(percentage=traffic['frame_rate']))
lat_pps = settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS')
- if lat_pps > 0:
- stream_1_lat = STLStream(packet=pkt_a,
+ if traffic['traffic_type'] == 'burst':
+ if lat_pps > 0:
+ # latency statistics are requested; in case of frame burst we can enable
+ # statistics for all frames
+ stream_1 = STLStream(packet=pkt_a,
flow_stats=STLFlowLatencyStats(pg_id=0),
- name='stream_1_lat',
- mode=STLTXCont(pps=lat_pps))
- stream_2_lat = STLStream(packet=pkt_b,
+ name='stream_1',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ stream_2 = STLStream(packet=pkt_b,
flow_stats=STLFlowLatencyStats(pg_id=1),
- name='stream_2_lat',
- mode=STLTXCont(pps=lat_pps))
+ name='stream_2',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ else:
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ else:
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ # workaround for latency statistics, which can't be enabled for streams
+ # with high framerate due to the huge performance impact
+ if lat_pps > 0:
+ stream_1_lat = STLStream(packet=pkt_a,
+ flow_stats=STLFlowLatencyStats(pg_id=0),
+ name='stream_1_lat',
+ mode=STLTXCont(pps=lat_pps))
+ stream_2_lat = STLStream(packet=pkt_b,
+ flow_stats=STLFlowLatencyStats(pg_id=1),
+ name='stream_2_lat',
+ mode=STLTXCont(pps=lat_pps))
return (stream_1, stream_2, stream_1_lat, stream_2_lat)
@@ -293,13 +377,13 @@ class Trex(ITrafficGenerator):
# since we can only control both ports at once take the lower of the two
max_speed = min(max_speed_1, max_speed_2)
gbps_speed = (max_speed / 1000) * (float(traffic['frame_rate']) / 100.0)
- self._logger.debug('Starting traffic at %s Gpbs speed', gbps_speed)
+ self._logger.debug('Starting traffic at %s Gbps speed', gbps_speed)
# for SR-IOV
if settings.getValue('TRAFFICGEN_TREX_PROMISCUOUS'):
self._stlclient.set_port_attr(my_ports, promiscuous=True)
- packet_1, packet_2 = Trex.create_packets(traffic, ports_info)
+ packet_1, packet_2 = self.create_packets(traffic, ports_info)
self.show_packet_info(packet_1, packet_2)
stream_1, stream_2, stream_1_lat, stream_2_lat = Trex.create_streams(packet_1, packet_2, traffic)
self._stlclient.add_streams(stream_1, ports=[0])
@@ -382,20 +466,29 @@ class Trex(ITrafficGenerator):
result[ResultsConstants.FRAME_LOSS_PERCENT] = 100
if settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS') > 0 and stats['latency']:
- result[ResultsConstants.MIN_LATENCY_NS] = (
- '{:.3f}'.format(
- (float(min(stats["latency"][0]["latency"]["total_min"],
- stats["latency"][1]["latency"]["total_min"])))))
-
- result[ResultsConstants.MAX_LATENCY_NS] = (
- '{:.3f}'.format(
- (float(max(stats["latency"][0]["latency"]["total_max"],
- stats["latency"][1]["latency"]["total_max"])))))
-
- result[ResultsConstants.AVG_LATENCY_NS] = (
- '{:.3f}'.format(
- float((stats["latency"][0]["latency"]["average"]+
- stats["latency"][1]["latency"]["average"])/2)))
+ try:
+ result[ResultsConstants.MIN_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(min(stats["latency"][0]["latency"]["total_min"],
+ stats["latency"][1]["latency"]["total_min"])))))
+ except TypeError:
+ result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
+
+ try:
+ result[ResultsConstants.MAX_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(max(stats["latency"][0]["latency"]["total_max"],
+ stats["latency"][1]["latency"]["total_max"])))))
+ except TypeError:
+ result[ResultsConstants.MAX_LATENCY_NS] = 'Unknown'
+
+ try:
+ result[ResultsConstants.AVG_LATENCY_NS] = (
+ '{:.3f}'.format(
+ float((stats["latency"][0]["latency"]["average"]+
+ stats["latency"][1]["latency"]["average"])/2)))
+ except TypeError:
+ result[ResultsConstants.AVG_LATENCY_NS] = 'Unknown'
else:
result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
@@ -568,9 +661,25 @@ class Trex(ITrafficGenerator):
raise NotImplementedError(
'Trex wait rfc2544 throughput not implemented')
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=5):
- raise NotImplementedError(
- 'Trex send burst traffic not implemented')
+ def send_burst_traffic(self, traffic=None, duration=20):
+ """See ITrafficGenerator for description
+ """
+ self._logger.info("In Trex send_burst_traffic method")
+ self._params.clear()
+
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(
+ self._params['traffic'], traffic)
+
+ if settings.getValue('TRAFFICGEN_TREX_LEARNING_MODE'):
+ self.learning_packets(traffic)
+ self._logger.info("T-Rex sending traffic")
+ stats = self.generate_traffic(traffic, duration)
+
+ time.sleep(3) # allow packets to complete before reading stats
+
+ return self.calculate_results(stats)
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=30,
lossrate=0.0):
diff --git a/tools/pkt_gen/xena/XenaDriver.py b/tools/pkt_gen/xena/XenaDriver.py
index 6e39e47a..cdc82838 100644
--- a/tools/pkt_gen/xena/XenaDriver.py
+++ b/tools/pkt_gen/xena/XenaDriver.py
@@ -170,8 +170,7 @@ class KeepAliveThread(threading.Thread):
self.finished = threading.Event()
self.setDaemon(True)
_LOGGER.debug(
- 'Xena Socket keep alive thread initiated, interval ' +
- '{} seconds'.format(self.interval))
+ 'Xena Socket keep alive thread initiated, interval %s seconds', self.interval)
def stop(self):
""" Thread stop. See python thread docs for more info
@@ -904,7 +903,7 @@ class XenaRXStats(object):
statdict[entry_id] = self._pack_stats(param, 3)
elif param[1] == 'PR_TPLDS':
tid_list = self._pack_tplds_stats(param, 2)
- if len(tid_list):
+ if tid_list:
statdict['pr_tplds'] = tid_list
elif param[1] == 'PR_TPLDTRAFFIC':
if 'pr_tpldstraffic' in statdict:
diff --git a/tools/pkt_gen/xena/json/xena_json.py b/tools/pkt_gen/xena/json/xena_json.py
index b1eed720..df2aa55f 100644
--- a/tools/pkt_gen/xena/json/xena_json.py
+++ b/tools/pkt_gen/xena/json/xena_json.py
@@ -28,8 +28,6 @@ import locale
import logging
import os
-import scapy.layers.inet as inet
-
from tools.pkt_gen.xena.json import json_utilities
_LOGGER = logging.getLogger(__name__)
@@ -279,6 +277,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage.
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer2'] = [
inet.Ether(dst=dst_mac, src=src_mac, **kwargs),
inet.Ether(dst=src_mac, src=dst_mac, **kwargs)]
@@ -293,6 +295,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer3'] = [
inet.IP(src=src_ip, dst=dst_ip, proto=protocol.lower(), **kwargs),
inet.IP(src=dst_ip, dst=src_ip, proto=protocol.lower(), **kwargs)]
@@ -305,6 +311,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer4'] = [
inet.UDP(sport=source_port, dport=destination_port, **kwargs),
inet.UDP(sport=source_port, dport=destination_port, **kwargs)]
@@ -316,6 +326,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['vlan'] = [
inet.Dot1Q(vlan=vlan_id, **kwargs),
inet.Dot1Q(vlan=vlan_id, **kwargs)]
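
The repeated comment above explains why the scapy import moved inside the methods: a module-level import would clash with the customized scapy bundled with T-Rex. A minimal illustration of the deferred-import pattern (generic example, not the XenaJSON class):

    def build_l3_header(src_ip, dst_ip):
        """Build an IP header only when the Xena code path is actually used."""
        # deferred on purpose: merely loading this module must not pull in a
        # scapy copy that could clash with the one bundled by T-Rex
        import scapy.layers.inet as inet
        return inet.IP(src=src_ip, dst=dst_ip)

    print(build_l3_header('1.1.1.1', '90.90.90.90').summary())
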
diff --git a/tools/pkt_gen/xena/xena.py b/tools/pkt_gen/xena/xena.py
index 19b44f0b..33864079 100755
--- a/tools/pkt_gen/xena/xena.py
+++ b/tools/pkt_gen/xena/xena.py
@@ -32,8 +32,6 @@ import xml.etree.ElementTree as ET
from collections import OrderedDict
from time import sleep
-import scapy.layers.inet as inet
-
from conf import merge_spec
from conf import settings
from core.results.results_constants import ResultsConstants
@@ -149,6 +147,10 @@ class Xena(ITrafficGenerator):
:param reverse: Swap source and destination info when building header
:return: packet header in hex
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
srcmac = self._params['traffic']['l2'][
'srcmac'] if not reverse else self._params['traffic']['l2'][
'dstmac']
@@ -274,10 +276,6 @@ class Xena(ITrafficGenerator):
enable the pairs topology
:return: None
"""
- # set duplex mode, this code is valid, pylint complaining with a
- # warning that many have complained about online.
- # pylint: disable=redefined-variable-type
-
try:
if self._params['traffic']['bidir'] == "True":
j_file = XenaJSONMesh()
@@ -568,7 +566,7 @@ class Xena(ITrafficGenerator):
self._xsocket.disconnect()
self._xsocket = None
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
See ITrafficGenerator for description
@@ -579,7 +577,7 @@ class Xena(ITrafficGenerator):
if traffic:
self._params['traffic'] = merge_spec(self._params['traffic'],
traffic)
- self._start_traffic_api(numpkts)
+ self._start_traffic_api(traffic['burst_size'])
return self._stop_api_traffic()
def send_cont_traffic(self, traffic=None, duration=20):
diff --git a/tools/report/report.py b/tools/report/report.py
index b3f15c1b..5d05e7ad 100644
--- a/tools/report/report.py
+++ b/tools/report/report.py
@@ -137,7 +137,6 @@ def generate(testcase):
'tests': tests,
}
i = 0
- # pylint: disable=no-member
for output_file in output_files:
template = template_env.get_template(_TEMPLATE_FILES[i])
output_text = template.render(template_vars)
diff --git a/tools/report/report_rst.jinja b/tools/report/report_rst.jinja
index eda0c01e..6b51807a 100644
--- a/tools/report/report_rst.jinja
+++ b/tools/report/report_rst.jinja
@@ -90,7 +90,9 @@ Testing Activities/Events
~~~~~~~~~~~~~~~~~~~~~~~~~
pidstat is used to collect the process statistics, as such some values such as
%CPU and %USER may be > 100% as the values are summed across multiple cores. For
-more info on pidstat please see: http://linux.die.net/man/1/pidstat.
+more info on pidstat please see: http://linux.die.net/man/1/pidstat. Please
+note that vsperf recalculates the CPU consumption of a process by aggregating
+the CPU usage of each thread.
Known issues: Some reported metrics have the value "unknown". These values are
marked unknown as they are not values retrieved from the external tester
diff --git a/tools/systeminfo.py b/tools/systeminfo.py
index f34bcce6..6020d0e2 100644
--- a/tools/systeminfo.py
+++ b/tools/systeminfo.py
@@ -191,7 +191,7 @@ def get_bin_version(binary, regex):
return None
versions = re.findall(regex, output)
- if len(versions):
+ if versions:
return versions[0]
else:
return None
@@ -297,7 +297,7 @@ def get_version(app_name):
if not '16' in release:
tmp_ver[2] += line.rstrip('\n').split(' ')[2]
- if len(tmp_ver[0]):
+ if tmp_ver[0]:
app_version = '.'.join(tmp_ver)
app_git_tag = get_git_tag(S.getValue('TOOLS')['dpdk_src'])
elif app_name.lower().startswith('qemu'):
diff --git a/tools/teststepstools.py b/tools/teststepstools.py
index 33db8f79..db2d53e6 100644
--- a/tools/teststepstools.py
+++ b/tools/teststepstools.py
@@ -43,7 +43,7 @@ class TestStepsTools(object):
return True
@staticmethod
- def validate_Assert(result, dummy_condition):
+ def validate_Assert(result, _dummy_condition):
""" Validate evaluation of given `condition'
"""
return result
@@ -56,7 +56,7 @@ class TestStepsTools(object):
return eval(expression)
@staticmethod
- def validate_Eval(result, dummy_expression):
+ def validate_Eval(result, _dummy_expression):
""" Validate result of python `expression' evaluation
"""
return result is not None
@@ -76,7 +76,7 @@ class TestStepsTools(object):
return True
@staticmethod
- def validate_Exec_Python(result, dummy_code):
+ def validate_Exec_Python(result, _dummy_code):
""" Validate result of python `code' execution
"""
return result
@@ -99,7 +99,7 @@ class TestStepsTools(object):
return output
@staticmethod
- def validate_Exec_Shell(result, dummy_command, dummy_regex=None):
+ def validate_Exec_Shell(result, _dummy_command, _dummy_regex=None):
""" validate result of shell `command' execution
"""
return result is not None
@@ -115,7 +115,7 @@ class TestStepsTools(object):
return None
@staticmethod
- def validate_Exec_Shell_Background(result, dummy_command, dummy_regex=None):
+ def validate_Exec_Shell_Background(result, _dummy_command, _dummy_regex=None):
""" validate result of shell `command' execution on the background
"""
return result is not None
diff --git a/tools/veth.py b/tools/veth.py
index 6418d11a..6d7c9962 100644
--- a/tools/veth.py
+++ b/tools/veth.py
@@ -84,8 +84,7 @@ def del_veth_port(port, peer_port):
port, peer_port), False)
-# pylint: disable=unused-argument
-def validate_add_veth_port(result, port, peer_port):
+def validate_add_veth_port(_result, port, peer_port):
"""
Validation function for integration testcases
"""
@@ -93,7 +92,7 @@ def validate_add_veth_port(result, port, peer_port):
return all([port in devs, peer_port in devs])
-def validate_bring_up_eth_port(result, eth_port, namespace=None):
+def validate_bring_up_eth_port(_result, eth_port, namespace=None):
"""
Validation function for integration testcases
"""
@@ -110,7 +109,7 @@ def validate_bring_up_eth_port(result, eth_port, namespace=None):
return True
-def validate_del_veth_port(result, port, peer_port):
+def validate_del_veth_port(_result, port, peer_port):
"""
Validation function for integration testcases
"""
diff --git a/vnfs/__init__.py b/vnfs/__init__.py
index 34cacf4f..1743faf8 100644
--- a/vnfs/__init__.py
+++ b/vnfs/__init__.py
@@ -17,4 +17,3 @@
This package contains an interface the VSPERF core uses for controlling
VNFs and VNF-specific implementation modules of this interface.
"""
-
diff --git a/vnfs/qemu/__init__.py b/vnfs/qemu/__init__.py
index 82f32eb9..6ed326dd 100644
--- a/vnfs/qemu/__init__.py
+++ b/vnfs/qemu/__init__.py
@@ -17,4 +17,3 @@
This package contains an implementation of the interface the VSPERF core
uses for controlling VNFs using QEMU and DPDK's testpmd application.
"""
-
diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py
index a0128f44..7ba58c05 100644
--- a/vnfs/qemu/qemu.py
+++ b/vnfs/qemu/qemu.py
@@ -286,7 +286,7 @@ class IVnfQemu(IVnf):
elif self._guest_loopback == 'linux_bridge':
self._configure_linux_bridge()
elif self._guest_loopback != 'clean':
- raise RuntimeError('Unsupported guest loopback method "%s" was specified.',
+ raise RuntimeError('Unsupported guest loopback method "%s" was specified.' %
self._guest_loopback)
def wait(self, prompt=None, timeout=30):
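
Unlike the logging fixes elsewhere in this patch, exception constructors never apply %-formatting to their arguments, which is why the message here is interpolated explicitly. A short standalone illustration:

    guest_loopback = 'bridge'

    # logging-style lazy arguments: the placeholder is never filled in,
    # the tuple is just stored as the exception args
    err = RuntimeError('Unsupported guest loopback method "%s" was specified.',
                       guest_loopback)
    print(err.args)    # ('Unsupported guest loopback method "%s" ...', 'bridge')

    # explicit interpolation produces the intended message
    err = RuntimeError('Unsupported guest loopback method "%s" was specified.'
                       % guest_loopback)
    print(err)         # Unsupported guest loopback method "bridge" was specified.
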
diff --git a/vnfs/vnf/__init__.py b/vnfs/vnf/__init__.py
index b7c43217..6a7a1547 100644
--- a/vnfs/vnf/__init__.py
+++ b/vnfs/vnf/__init__.py
@@ -15,4 +15,4 @@
"""VNF interface and helpers.
"""
-from vnfs import *
+import vnfs
diff --git a/vnfs/vnf/vnf.py b/vnfs/vnf/vnf.py
index 5ac2ada3..3ad1dcda 100644
--- a/vnfs/vnf/vnf.py
+++ b/vnfs/vnf/vnf.py
@@ -138,7 +138,7 @@ class IVnf(tasks.Process):
self.execute(cmd)
return self.wait(prompt=prompt, timeout=timeout)
- def validate_start(self, dummyresult):
+ def validate_start(self, _dummyresult):
""" Validate call of VNF start()
"""
if self._child and self._child.isalive():
@@ -152,7 +152,7 @@ class IVnf(tasks.Process):
return not self.validate_start(result)
@staticmethod
- def validate_execute_and_wait(result, dummy_cmd, dummy_timeout=30, dummy_prompt=''):
+ def validate_execute_and_wait(result, _dummy_cmd, _dummy_timeout=30, _dummy_prompt=''):
""" Validate command execution within VNF
"""
return len(result) > 0
diff --git a/vsperf b/vsperf
index d205ad1f..a1417420 100755
--- a/vsperf
+++ b/vsperf
@@ -116,7 +116,6 @@ def parse_arguments():
e.g. --test-params "['x=z; y=(a,b)','x=z']"
"""
def __call__(self, parser, namespace, values, option_string=None):
-
if values[0] == '[':
input_list = ast.literal_eval(values)
parameter_list = []
@@ -327,7 +326,7 @@ def get_vswitch_names(rst_files):
""" Function will return a list of vSwitches detected in given ``rst_files``.
"""
vswitch_names = set()
- if len(rst_files):
+ if rst_files:
try:
output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
for line in output:
@@ -335,7 +334,7 @@ def get_vswitch_names(rst_files):
if match:
vswitch_names.add(match.group(1))
- if len(vswitch_names):
+ if vswitch_names:
return list(vswitch_names)
except subprocess.CalledProcessError:
@@ -366,7 +365,7 @@ def generate_final_report():
# check if there are any results in rst format
rst_results = glob.glob(os.path.join(path, 'result*rst'))
pkt_processors = get_vswitch_names(rst_results)
- if len(rst_results):
+ if rst_results:
try:
test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
# create report caption directly - it is not worth to execute jinja machinery
@@ -474,7 +473,7 @@ def enable_sriov(nic_list):
sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
# sriov is required for some NICs
- if len(sriov_nic):
+ if sriov_nic:
for nic in sriov_nic:
# check if SRIOV is supported and enough virt interfaces are available
if not networkcard.is_sriov_supported(nic) \
@@ -590,7 +589,7 @@ def vsperf_finalize():
if os.path.exists(results_path):
files_list = os.listdir(results_path)
if files_list == []:
- _LOGGER.info("Removing empty result directory: " + results_path)
+ _LOGGER.info("Removing empty result directory: %s", results_path)
shutil.rmtree(results_path)
except AttributeError:
# skip it if parameter doesn't exist
@@ -758,7 +757,7 @@ def main():
# create results directory
if not os.path.exists(results_path):
- _LOGGER.info("Creating result directory: " + results_path)
+ _LOGGER.info("Creating result directory: %s", results_path)
os.makedirs(results_path)
# pylint: disable=too-many-nested-blocks
if settings.getValue('mode') == 'trafficgen':
@@ -802,15 +801,11 @@ def main():
# Default - run all tests
selected_tests = testcases
- if not len(selected_tests):
+ if not selected_tests:
_LOGGER.error("No tests matched --tests option or positional args. Done.")
vsperf_finalize()
sys.exit(1)
- # run tests
- # Add pylint exception: Redefinition of test type from
- # testcases.integration.IntegrationTestCase to testcases.performance.PerformanceTestCase
- # pylint: disable=redefined-variable-type
suite = unittest.TestSuite()
settings_snapshot = copy.deepcopy(settings.__dict__)
diff --git a/vswitches/__init__.py b/vswitches/__init__.py
index a34475be..20a715e0 100644
--- a/vswitches/__init__.py
+++ b/vswitches/__init__.py
@@ -17,4 +17,3 @@
This package contains an interface the VSPERF core uses for controlling
vSwitches and vSwitch-specific implementation modules of this interface.
"""
-
diff --git a/vswitches/ovs.py b/vswitches/ovs.py
index 76cabb0d..9e49b2ee 100644
--- a/vswitches/ovs.py
+++ b/vswitches/ovs.py
@@ -166,7 +166,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""
if switch_name is None or remote_switch_name is None:
- return
+ return None
bridge = self._bridges[switch_name]
remote_bridge = self._bridges[remote_switch_name]
@@ -382,7 +382,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
with open(self._ovsdb_pidfile_path, "r") as pidfile:
ovsdb_pid = pidfile.read().strip()
- self._logger.info("Killing ovsdb with pid: " + ovsdb_pid)
+ self._logger.info("Killing ovsdb with pid: %s", ovsdb_pid)
if ovsdb_pid:
tasks.terminate_task(ovsdb_pid, logger=self._logger)
@@ -409,7 +409,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
#
# validate methods required for integration testcases
#
- def validate_add_switch(self, dummy_result, switch_name, dummy_params=None):
+ def validate_add_switch(self, _dummy_result, switch_name, _dummy_params=None):
"""Validate - Create a new logical switch with no ports
"""
bridge = self._bridges[switch_name]
@@ -420,7 +420,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
# Method could be a function
# pylint: disable=no-self-use
- def validate_del_switch(self, dummy_result, switch_name):
+ def validate_del_switch(self, _dummy_result, switch_name):
"""Validate removal of switch
"""
bridge = OFBridge('tmp')
@@ -444,7 +444,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""
return self.validate_add_phy_port(result, switch_name)
- def validate_del_port(self, dummy_result, switch_name, port_name):
+ def validate_del_port(self, _dummy_result, switch_name, port_name):
""" Validate that port_name was removed from bridge.
"""
bridge = self._bridges[switch_name]
@@ -453,7 +453,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
assert 'Port "%s"' % port_name not in output[0]
return True
- def validate_add_flow(self, dummy_result, switch_name, flow, dummy_cache='off'):
+ def validate_add_flow(self, _dummy_result, switch_name, flow, _dummy_cache='off'):
""" Validate insertion of the flow into the switch
"""
@@ -474,44 +474,44 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
return True
return False
- def validate_del_flow(self, dummy_result, switch_name, flow=None):
+ def validate_del_flow(self, _dummy_result, switch_name, flow=None):
""" Validate removal of the flow
"""
if not flow:
# what else we can do?
return True
- return not self.validate_add_flow(dummy_result, switch_name, flow)
+ return not self.validate_add_flow(_dummy_result, switch_name, flow)
- def validate_dump_flows(self, dummy_result, dummy_switch_name):
+ def validate_dump_flows(self, _dummy_result, _dummy_switch_name):
""" Validate call of flow dump
"""
return True
- def validate_disable_rstp(self, dummy_result, switch_name):
+ def validate_disable_rstp(self, _dummy_result, switch_name):
""" Validate rstp disable
"""
bridge = self._bridges[switch_name]
return 'rstp_enable : false' in ''.join(bridge.bridge_info())
- def validate_enable_rstp(self, dummy_result, switch_name):
+ def validate_enable_rstp(self, _dummy_result, switch_name):
""" Validate rstp enable
"""
bridge = self._bridges[switch_name]
return 'rstp_enable : true' in ''.join(bridge.bridge_info())
- def validate_disable_stp(self, dummy_result, switch_name):
+ def validate_disable_stp(self, _dummy_result, switch_name):
""" Validate stp disable
"""
bridge = self._bridges[switch_name]
return 'stp_enable : false' in ''.join(bridge.bridge_info())
- def validate_enable_stp(self, dummy_result, switch_name):
+ def validate_enable_stp(self, _dummy_result, switch_name):
""" Validate stp enable
"""
bridge = self._bridges[switch_name]
return 'stp_enable : true' in ''.join(bridge.bridge_info())
- def validate_restart(self, dummy_result):
+ def validate_restart(self, _dummy_result):
""" Validate restart
"""
return True
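
The ovs.py hunks above are pylint-driven cleanups: unused positional arguments gain a leading underscore (_dummy_*) so the dummy-variables pattern suppresses unused-argument warnings, the logger call switches from string concatenation to lazy %-style arguments, and the guard clause returns an explicit None so return statements stay consistent. A minimal sketch of the same pattern follows; the ExampleSwitch class and validate_example method are hypothetical and exist only to illustrate the convention, they are not part of vswitchperf:

    import logging

    _LOGGER = logging.getLogger(__name__)

    class ExampleSwitch(object):
        """Hypothetical switch used only to illustrate the ovs.py cleanup."""

        def validate_example(self, _dummy_result, switch_name=None):
            # the leading underscore marks the argument as intentionally unused
            if switch_name is None:
                return None             # explicit None keeps returns consistent
            # lazy %-style arguments: the message is interpolated by the logging
            # framework only if the INFO level is actually enabled
            _LOGGER.info("Validating switch: %s", switch_name)
            return True

Passing the value as a separate argument instead of concatenating it into the string is the behaviour pylint's lazy-logging checks encourage, since no string building happens for suppressed log levels.
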
diff --git a/vswitches/vpp_dpdk_vhost.py b/vswitches/vpp_dpdk_vhost.py
index 58d6bf51..bdb5ff81 100644
--- a/vswitches/vpp_dpdk_vhost.py
+++ b/vswitches/vpp_dpdk_vhost.py
@@ -205,6 +205,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def add_switch(self, switch_name, dummy_params=None):
"""See IVswitch for general description
"""
+ # pylint: disable=unused-argument
if switch_name in self._switches:
self._logger.warning("switch %s already exists...", switch_name)
else:
@@ -221,6 +222,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
"""See IVswitch for general description
:raises: RuntimeError
"""
+ # pylint: disable=unused-argument
# get list of physical interfaces with PCI addresses
vpp_nics = self._get_nic_info(key='Pci')
# check if there are any NICs left
@@ -239,6 +241,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def add_vport(self, dummy_switch_name):
"""See IVswitch for general description
"""
+ # pylint: disable=unused-argument
socket_name = S.getValue('TOOLS')['ovs_var_tmp'] + 'dpdkvhostuser' + str(len(self._virt_ports))
if S.getValue('VSWITCH_VHOSTUSER_SERVER_MODE'):
mode = ['server']
@@ -280,7 +283,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
if bidir:
self.run_vppctl(['set', 'interface', 'l2', 'xconnect', port2, port1])
- def add_bridge(self, switch_name, port1, port2, dummy_bidir=False):
+ def add_bridge(self, switch_name, port1, port2, _dummy_bidir=False):
"""Add given ports to bridge ``switch_name``
"""
self.run_vppctl(['set', 'interface', 'l2', 'bridge', port1,
@@ -301,7 +304,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
elif mode == 'bridge':
self.add_bridge(switch_name, port1, port2)
else:
- raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
+ raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
def del_l2patch(self, port1, port2, bidir=False):
"""Remove l2patch connection between given ports
@@ -314,12 +317,12 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
if bidir:
self.run_vppctl(['test', 'l2patch', 'rx', port2, 'tx', port1, 'del'])
- def del_xconnect(self, dummy_port1, dummy_port2, dummy_bidir=False):
+ def del_xconnect(self, _dummy_port1, _dummy_port2, _dummy_bidir=False):
"""Remove xconnect connection between given ports
"""
self._logger.warning('VPP: Removal of l2 xconnect is not implemented.')
- def del_bridge(self, dummy_switch_name, dummy_port1, dummy_port2):
+ def del_bridge(self, _dummy_switch_name, _dummy_port1, _dummy_port2):
"""Remove given ports from the bridge
"""
self._logger.warning('VPP: Removal of interfaces from bridge is not implemented.')
@@ -337,7 +340,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
elif mode == 'bridge':
self.del_bridge(switch_name, port1, port2)
else:
- raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
+ raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
def dump_l2patch(self):
"""Dump l2patch connections
@@ -369,7 +372,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
elif mode == 'bridge':
self.dump_bridge(switch_name)
else:
- raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
+ raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
def run_vppctl(self, args, check_error=False):
"""Run ``vppctl`` with supplied arguments.
@@ -385,50 +388,50 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
#
# Validate methods
#
- def validate_add_switch(self, dummy_result, switch_name, dummy_params=None):
+ def validate_add_switch(self, _dummy_result, switch_name, _dummy_params=None):
"""Validate - Create a new logical switch with no ports
"""
return switch_name in self._switches
- def validate_del_switch(self, dummy_result, switch_name):
+ def validate_del_switch(self, _dummy_result, switch_name):
"""Validate removal of switch
"""
- return not self.validate_add_switch(dummy_result, switch_name)
+ return not self.validate_add_switch(_dummy_result, switch_name)
- def validate_add_phy_port(self, result, dummy_switch_name):
+ def validate_add_phy_port(self, result, _dummy_switch_name):
""" Validate that physical port was added to bridge.
"""
return result[0] in self._phy_ports
- def validate_add_vport(self, result, dummy_switch_name):
+ def validate_add_vport(self, result, _dummy_switch_name):
""" Validate that virtual port was added to bridge.
"""
return result[0] in self._virt_ports
- def validate_del_port(self, dummy_result, dummy_switch_name, port_name):
+ def validate_del_port(self, _dummy_result, _dummy_switch_name, port_name):
""" Validate that port_name was removed from bridge.
"""
return not (port_name in self._phy_ports or port_name in self._virt_ports)
# pylint: disable=no-self-use
- def validate_add_connection(self, dummy_result, dummy_switch_name, dummy_port1,
- dummy_port2, dummy_bidir=False):
+ def validate_add_connection(self, _dummy_result, _dummy_switch_name, _dummy_port1,
+ _dummy_port2, _dummy_bidir=False):
""" Validate that connection was added
"""
return True
- def validate_del_connection(self, dummy_result, dummy_switch_name, dummy_port1,
- dummy_port2, dummy_bidir=False):
+ def validate_del_connection(self, _dummy_result, _dummy_switch_name, _dummy_port1,
+ _dummy_port2, _dummy_bidir=False):
""" Validate that connection was deleted
"""
return True
- def validate_dump_connections(self, dummy_result, dummy_switch_name):
+ def validate_dump_connections(self, _dummy_result, _dummy_switch_name):
""" Validate dump connections call
"""
return True
- def validate_run_vppctl(self, result, dummy_args, dummy_check_error=False):
+ def validate_run_vppctl(self, result, _dummy_args, _dummy_check_error=False):
"""validate execution of ``vppctl`` with supplied arguments.
"""
# there shouldn't be any stderr
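
The RuntimeError changes in vpp_dpdk_vhost.py fix a mix-up between logging-style lazy arguments and exception messages: RuntimeError('... %s', mode) never interpolates mode, it simply stores a two-element args tuple, so the message must be formatted with % (or an f-string) before the exception is raised. A small standalone illustration, using a hypothetical mode value:

    mode = 'xconnect'

    # Broken form: the exception keeps ('... %s', 'xconnect') as its args tuple,
    # so the placeholder is never filled in.
    try:
        raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
    except RuntimeError as exc:
        print(exc)  # ('VPP: Unsupported l2 connection mode detected %s', 'xconnect')

    # Fixed form (as in the patch): build the message before raising.
    try:
        raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
    except RuntimeError as exc:
        print(exc)  # VPP: Unsupported l2 connection mode detected xconnect
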