Diffstat (limited to 'tools')
-rw-r--r--  tools/collectors/collectd/collectd.py        |   4
-rw-r--r--  tools/collectors/collectd/collectd_bucky.py  |  25
-rw-r--r--  tools/collectors/sysmetrics/pidstat.py       |  52
-rw-r--r--  tools/functions.py                           |   2
-rw-r--r--  tools/load_gen/stress_ng/stress_ng.py        |   3
-rw-r--r--  tools/load_gen/stressorvm/stressor_vm.py     |   5
-rw-r--r--  tools/module_manager.py                      |   2
-rw-r--r--  tools/namespace.py                           |   9
-rw-r--r--  tools/networkcard.py                         |   2
-rwxr-xr-x  tools/pkt_gen/dummy/dummy.py                 |  20
-rwxr-xr-x  tools/pkt_gen/ixia/ixia.py                   |  33
-rwxr-xr-x  tools/pkt_gen/ixnet/ixnet.py                 |   4
-rw-r--r--  tools/pkt_gen/moongen/moongen.py             |  45
-rw-r--r--  tools/pkt_gen/testcenter/testcenter.py       |  22
-rwxr-xr-x  tools/pkt_gen/trafficgen/trafficgen.py       |   5
-rw-r--r--  tools/pkt_gen/trex/trex.py                   | 223
-rw-r--r--  tools/pkt_gen/xena/XenaDriver.py             |   5
-rw-r--r--  tools/pkt_gen/xena/json/xena_json.py         |  18
-rwxr-xr-x  tools/pkt_gen/xena/xena.py                   |  14
-rw-r--r--  tools/report/report.py                       |   1
-rw-r--r--  tools/report/report_rst.jinja                |   4
-rw-r--r--  tools/systeminfo.py                          |   4
-rw-r--r--  tools/teststepstools.py                      |  10
-rw-r--r--  tools/veth.py                                |   7
24 files changed, 336 insertions, 183 deletions
diff --git a/tools/collectors/collectd/collectd.py b/tools/collectors/collectd/collectd.py
index 90df6b04..700aef47 100644
--- a/tools/collectors/collectd/collectd.py
+++ b/tools/collectors/collectd/collectd.py
@@ -47,7 +47,7 @@ def get_label(sample):
for label in YLABELS:
if any(r in sample for r in YLABELS[label]):
return label
-
+ return None
def plot_graphs(dict_of_arrays):
"""
@@ -259,7 +259,7 @@ class Collectd(collector.ICollector):
plot_graphs(self.results)
proc_stats = get_results_to_print(self.results)
for process in proc_stats:
- logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+ logging.info("Process: %s", '_'.join(process.split('_')[:-1]))
for(key, value) in proc_stats[process].items():
logging.info(" Statistic: " + str(key) +
", Value: " + str(value))
diff --git a/tools/collectors/collectd/collectd_bucky.py b/tools/collectors/collectd/collectd_bucky.py
index bac24ed7..f6061c55 100644
--- a/tools/collectors/collectd/collectd_bucky.py
+++ b/tools/collectors/collectd/collectd_bucky.py
@@ -498,6 +498,7 @@ class CollectDCrypto(object):
return self.parse_signed(part_len, data)
if sec_level == 2:
return self.parse_encrypted(part_len, data)
+ return None
def parse_signed(self, part_len, data):
"""
@@ -574,12 +575,12 @@ class CollectDConverter(object):
try:
name_parts = handler(sample)
if name_parts is None:
- return # treat None as "ignore sample"
+ return None # treat None as "ignore sample"
name = '.'.join(name_parts)
except (AttributeError, IndexError, MemoryError, RuntimeError):
LOG.exception("Exception in sample handler %s (%s):",
sample["plugin"], handler)
- return
+ return None
host = sample.get("host", "")
return (
host,
@@ -655,7 +656,7 @@ class CollectDHandler(object):
Check the value range
"""
if val is None:
- return
+ return None
try:
vmin, vmax = self.parser.types.type_ranges[stype][vname]
except KeyError:
@@ -664,11 +665,11 @@ class CollectDHandler(object):
if vmin is not None and val < vmin:
LOG.debug("Invalid value %s (<%s) for %s", val, vmin, vname)
LOG.debug("Last sample: %s", self.last_sample)
- return
+ return None
if vmax is not None and val > vmax:
LOG.debug("Invalid value %s (>%s) for %s", val, vmax, vname)
LOG.debug("Last sample: %s", self.last_sample)
- return
+ return None
return val
def calculate(self, host, name, vtype, val, time):
@@ -684,7 +685,7 @@ class CollectDHandler(object):
if vtype not in handlers:
LOG.error("Invalid value type %s for %s", vtype, name)
LOG.info("Last sample: %s", self.last_sample)
- return
+ return None
return handlers[vtype](host, name, val, time)
def _calc_counter(self, host, name, val, time):
@@ -694,13 +695,13 @@ class CollectDHandler(object):
key = (host, name)
if key not in self.prev_samples:
self.prev_samples[key] = (val, time)
- return
+ return None
pval, ptime = self.prev_samples[key]
self.prev_samples[key] = (val, time)
if time <= ptime:
LOG.error("Invalid COUNTER update for: %s:%s", key[0], key[1])
LOG.info("Last sample: %s", self.last_sample)
- return
+ return None
if val < pval:
# this is supposed to handle counter wrap around
# see https://collectd.org/wiki/index.php/Data_source
@@ -719,13 +720,13 @@ class CollectDHandler(object):
key = (host, name)
if key not in self.prev_samples:
self.prev_samples[key] = (val, time)
- return
+ return None
pval, ptime = self.prev_samples[key]
self.prev_samples[key] = (val, time)
if time <= ptime:
LOG.debug("Invalid DERIVE update for: %s:%s", key[0], key[1])
LOG.debug("Last sample: %s", self.last_sample)
- return
+ return None
return float(abs(val - pval)) / (time - ptime)
def _calc_absolute(self, host, name, val, time):
@@ -735,13 +736,13 @@ class CollectDHandler(object):
key = (host, name)
if key not in self.prev_samples:
self.prev_samples[key] = (val, time)
- return
+ return None
_, ptime = self.prev_samples[key]
self.prev_samples[key] = (val, time)
if time <= ptime:
LOG.error("Invalid ABSOLUTE update for: %s:%s", key[0], key[1])
LOG.info("Last sample: %s", self.last_sample)
- return
+ return None
return float(val) / (time - ptime)
diff --git a/tools/collectors/sysmetrics/pidstat.py b/tools/collectors/sysmetrics/pidstat.py
index 99341ccf..277fdb11 100644
--- a/tools/collectors/sysmetrics/pidstat.py
+++ b/tools/collectors/sysmetrics/pidstat.py
@@ -70,13 +70,13 @@ class Pidstat(collector.ICollector):
into the file in directory with test results
"""
monitor = settings.getValue('PIDSTAT_MONITOR')
- self._logger.info('Statistics are requested for: ' + ', '.join(monitor))
+ self._logger.info('Statistics are requested for: %s', ', '.join(monitor))
pids = systeminfo.get_pids(monitor)
if pids:
with open(self._log, 'w') as logfile:
cmd = ['sudo', 'LC_ALL=' + settings.getValue('DEFAULT_CMD_LOCALE'),
'pidstat', settings.getValue('PIDSTAT_OPTIONS'),
- '-p', ','.join(pids),
+ '-t', '-p', ','.join(pids),
str(settings.getValue('PIDSTAT_SAMPLE_INTERVAL'))]
self._logger.debug('%s', ' '.join(cmd))
self._pid = subprocess.Popen(cmd, stdout=logfile, bufsize=0).pid
@@ -116,16 +116,48 @@ class Pidstat(collector.ICollector):
# combine stored header fields with actual values
tmp_res = OrderedDict(zip(tmp_header,
line[8:].split()))
- # use process's name and its pid as unique key
- key = tmp_res.pop('Command') + '_' + tmp_res['PID']
- # store values for given command into results dict
- if key in self._results:
- self._results[key].update(tmp_res)
- else:
- self._results[key] = tmp_res
+ cmd = tmp_res.pop('Command')
+ # remove unused fields (given by option '-t')
+ tmp_res.pop('UID')
+ tmp_res.pop('TID')
+ if '|_' not in cmd: # main process
+ # use process's name and its pid as unique key
+ tmp_pid = tmp_res.pop('TGID')
+ tmp_key = "%s_%s" % (cmd, tmp_pid)
+ # do not trust cpu usage of pid
+ # see VSPERF-569 for more details
+ if 'CPU' not in tmp_header:
+ self.update_results(tmp_key, tmp_res, False)
+ else: # thread
+ # accumulate cpu usage of all threads
+ if 'CPU' in tmp_header:
+ tmp_res.pop('TGID')
+ self.update_results(tmp_key, tmp_res, True)
line = logfile.readline()
+ def update_results(self, key, result, accumulate=False):
+ """
+ Update final results dictionary. If ``accumulate`` param is set to
+ ``True``, try to accumulate existing values.
+ """
+ # store values for given command into results dict
+ if key not in self._results:
+ self._results[key] = result
+ elif accumulate:
+ for field in result:
+ if field not in self._results[key]:
+ self._results[key][field] = result[field]
+ else:
+ try:
+ val = float(self._results[key][field]) + float(result[field])
+ self._results[key][field] = '{0:.2f}'.format(val)
+ except ValueError:
+ # cannot cast to float, let's update with the previous value
+ self._results[key][field] = result[field]
+ else:
+ self._results[key].update(result)
+
def get_results(self):
"""Returns collected statistics.
"""
@@ -135,7 +167,7 @@ class Pidstat(collector.ICollector):
"""Logs collected statistics.
"""
for process in self._results:
- logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+ logging.info("Process: %s", '_'.join(process.split('_')[:-1]))
for(key, value) in self._results[process].items():
logging.info(" Statistic: " + str(key) +
", Value: " + str(value))
diff --git a/tools/functions.py b/tools/functions.py
index d35f1f84..65c9978b 100644
--- a/tools/functions.py
+++ b/tools/functions.py
@@ -127,7 +127,7 @@ def settings_update_paths():
# expand OS wildcards in paths if needed
if glob.has_magic(tmp_tool):
tmp_glob = glob.glob(tmp_tool)
- if len(tmp_glob) == 0:
+ if not tmp_glob:
raise RuntimeError('Path to the {} is not valid: {}.'.format(tool, tmp_tool))
elif len(tmp_glob) > 1:
raise RuntimeError('Path to the {} is ambiguous {}'.format(tool, tmp_glob))
diff --git a/tools/load_gen/stress_ng/stress_ng.py b/tools/load_gen/stress_ng/stress_ng.py
index c2592dd1..41bfe990 100644
--- a/tools/load_gen/stress_ng/stress_ng.py
+++ b/tools/load_gen/stress_ng/stress_ng.py
@@ -30,6 +30,3 @@ class StressNg(Stress):
'name': 'stress-ng'
}
_logger = logging.getLogger(__name__)
-
- def __init__(self, stress_config):
- super(StressNg, self).__init__(stress_config)
diff --git a/tools/load_gen/stressorvm/stressor_vm.py b/tools/load_gen/stressorvm/stressor_vm.py
index 410f10e3..f4936743 100644
--- a/tools/load_gen/stressorvm/stressor_vm.py
+++ b/tools/load_gen/stressorvm/stressor_vm.py
@@ -45,7 +45,7 @@ class QemuVM(tasks.Process):
try:
os.makedirs(self._shared_dir)
except OSError as exp:
- raise OSError("Failed to create shared directory %s: %s",
+ raise OSError("Failed to create shared directory %s: %s" %
self._shared_dir, exp)
self.nics_nr = S.getValue('NN_NICS_NR')[self._number]
@@ -96,8 +96,7 @@ class StressorVM(ILoadGenerator):
"""
Wrapper Class for Load-Generation through stressor-vm
"""
- # pylint: disable=unused-argument
- def __init__(self, config):
+ def __init__(self, _config):
self.qvm_list = []
for vmindex in range(int(S.getValue('NN_COUNT'))):
qvm = QemuVM(vmindex)
diff --git a/tools/module_manager.py b/tools/module_manager.py
index dd1d92be..943399ba 100644
--- a/tools/module_manager.py
+++ b/tools/module_manager.py
@@ -160,7 +160,7 @@ class ModuleManager(object):
self._logger.info('Unable to get list of dependecies for module \'%s\'.', module)
# ...and try to continue, just for case that dependecies are already loaded
- if len(deps):
+ if deps:
return deps.split(',')
else:
return []
diff --git a/tools/namespace.py b/tools/namespace.py
index 9131398f..50374b95 100644
--- a/tools/namespace.py
+++ b/tools/namespace.py
@@ -135,9 +135,8 @@ def reset_port_to_root(port, name):
port, name), False)
-# pylint: disable=unused-argument
# pylint: disable=invalid-name
-def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
+def validate_add_ip_to_namespace_eth(_result, port, name, ip_addr, cidr):
"""
Validation function for integration testcases
"""
@@ -147,7 +146,7 @@ def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
_LOGGER, 'Validating ip address in namespace...', False))
-def validate_assign_port_to_namespace(result, port, name, port_up=False):
+def validate_assign_port_to_namespace(_result, port, name, _port_up=False):
"""
Validation function for integration testcases
"""
@@ -157,14 +156,14 @@ def validate_assign_port_to_namespace(result, port, name, port_up=False):
_LOGGER, 'Validating port in namespace...'))
-def validate_create_namespace(result, name):
+def validate_create_namespace(_result, name):
"""
Validation function for integration testcases
"""
return name in get_system_namespace_list()
-def validate_delete_namespace(result, name):
+def validate_delete_namespace(_result, name):
"""
Validation function for integration testcases
"""
diff --git a/tools/networkcard.py b/tools/networkcard.py
index 2cd296fb..758010d2 100644
--- a/tools/networkcard.py
+++ b/tools/networkcard.py
@@ -191,7 +191,7 @@ def get_mac(pci_handle):
"""
mac_path = glob.glob(os.path.join(_PCI_DIR, _PCI_NET, '*', 'address').format(pci_handle))
# kernel driver is loaded and MAC can be read
- if len(mac_path) and os.path.isfile(mac_path[0]):
+ if mac_path and os.path.isfile(mac_path[0]):
with open(mac_path[0], 'r') as _file:
return _file.readline().rstrip('\n')
diff --git a/tools/pkt_gen/dummy/dummy.py b/tools/pkt_gen/dummy/dummy.py
index 3dc5448e..ef4b37d9 100755
--- a/tools/pkt_gen/dummy/dummy.py
+++ b/tools/pkt_gen/dummy/dummy.py
@@ -25,6 +25,7 @@ own.
import json
+from collections import OrderedDict
from conf import settings
from conf import merge_spec
from tools.pkt_gen import trafficgen
@@ -108,41 +109,41 @@ class Dummy(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""
Send a burst of traffic.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
results = get_user_traffic(
'burst',
- '%dpkts, %dmS' % (numpkts, duration),
+ '%dpkts, %dmS' % (traffic['burst_size'], duration),
traffic_,
('frames rx', 'payload errors', 'sequence errors'))
# builds results by using user-supplied values where possible
# and guessing remainder using available info
- result[ResultsConstants.TX_FRAMES] = numpkts
+ result[ResultsConstants.TX_FRAMES] = traffic['burst_size']
result[ResultsConstants.RX_FRAMES] = results[0]
result[ResultsConstants.TX_BYTES] = traffic_['l2']['framesize'] \
- * numpkts
+ * traffic['burst_size']
result[ResultsConstants.RX_BYTES] = traffic_['l2']['framesize'] \
* results[0]
result[ResultsConstants.PAYLOAD_ERR] = results[1]
result[ResultsConstants.SEQ_ERR] = results[2]
- return results
+ return result
def send_cont_traffic(self, traffic=None, duration=30):
"""
Send a continuous flow of traffic.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -179,7 +180,7 @@ class Dummy(trafficgen.ITrafficGenerator):
Send traffic per RFC2544 throughput test specifications.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -216,7 +217,7 @@ class Dummy(trafficgen.ITrafficGenerator):
Send traffic per RFC2544 back2back test specifications.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -273,4 +274,5 @@ if __name__ == '__main__':
print(dev.send_cont_traffic(traffic=TRAFFIC))
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
print(dev.send_rfc2544_back2back(traffic=TRAFFIC))
+ # pylint: disable=no-member
print(dev.send_rfc(traffic=TRAFFIC))
diff --git a/tools/pkt_gen/ixia/ixia.py b/tools/pkt_gen/ixia/ixia.py
index d4ca56f2..31f51246 100755
--- a/tools/pkt_gen/ixia/ixia.py
+++ b/tools/pkt_gen/ixia/ixia.py
@@ -157,8 +157,8 @@ class Ixia(trafficgen.ITrafficGenerator):
return NotImplementedError(
'Ixia start back2back traffic not implemented')
- def send_rfc2544_back2back(self, traffic=None, duration=60,
- lossrate=0.0, tests=1):
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
return NotImplementedError(
'Ixia send back2back traffic not implemented')
@@ -242,11 +242,11 @@ class Ixia(trafficgen.ITrafficGenerator):
return result
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""See ITrafficGenerator for description
"""
flow = {
- 'numpkts': numpkts,
+ 'numpkts': traffic['burst_size'],
'duration': duration,
'type': 'stopStream',
'framerate': traffic['frame_rate'],
@@ -254,9 +254,9 @@ class Ixia(trafficgen.ITrafficGenerator):
result = self._send_traffic(flow, traffic)
- assert len(result) == 6 # fail-fast if underlying Tcl code changes
+ assert len(result) == 10 # fail-fast if underlying Tcl code changes
- #NOTE - implement Burst results setting via TrafficgenResults.
+ return Ixia._create_result(result)
def send_cont_traffic(self, traffic=None, duration=30):
"""See ITrafficGenerator for description
@@ -317,20 +317,25 @@ class Ixia(trafficgen.ITrafficGenerator):
:returns: dictionary strings representing results from
traffic generator.
"""
- assert len(result) == 8 # fail-fast if underlying Tcl code changes
+ assert len(result) == 8 or len(result) == 10 # fail-fast if underlying Tcl code changes
+
+ # content of result common for all tests
+ # [framesSent, framesRecv, bytesSent, bytesRecv, sendRate, recvRate, sendRateBytes, recvRateBytes]
+ # burst test has additional two values at the end: payError, seqError
if float(result[0]) == 0:
loss_rate = 100
else:
- loss_rate = (float(result[0]) - float(result[1])) / float(result[0]) * 100
+ loss_rate = round((float(result[0]) - float(result[1])) / float(result[0]) * 100, 5)
result_dict = OrderedDict()
- # drop the first 4 elements as we don't use/need them. In
- # addition, IxExplorer does not support latency or % line rate
+ # IxExplorer does not support latency or % line rate
# metrics so we have to return dummy values for these metrics
- result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = result[4]
- result_dict[ResultsConstants.TX_RATE_FPS] = result[5]
- result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = str(round(int(result[6]) / 1000000, 3))
- result_dict[ResultsConstants.TX_RATE_MBPS] = str(round(int(result[7]) / 1000000, 3))
+ result_dict[ResultsConstants.TX_FRAMES] = result[0]
+ result_dict[ResultsConstants.RX_FRAMES] = result[1]
+ result_dict[ResultsConstants.TX_RATE_FPS] = result[4]
+ result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = result[5]
+ result_dict[ResultsConstants.TX_RATE_MBPS] = str(round(int(result[6]) * 8 / 1e6, 3))
+ result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = str(round(int(result[7]) * 8 / 1e6, 3))
result_dict[ResultsConstants.FRAME_LOSS_PERCENT] = loss_rate
result_dict[ResultsConstants.TX_RATE_PERCENT] = \
ResultsConstants.UNKNOWN_VALUE
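
For illustration, a hedged sketch of the conversion performed by the reworked _create_result() above: the Tcl layer is assumed to return [framesSent, framesRecv, bytesSent, bytesRecv, sendRate, recvRate, sendRateBytes, recvRateBytes], with payError and seqError appended for burst tests; byte rates are converted to Mbit/s and the loss rate is rounded to five decimals. The plain dictionary keys below stand in for the ResultsConstants fields used by the real code:

# Hypothetical helper mirroring the conversion above; keys are simplified
# placeholders for the ResultsConstants fields.
def convert_ixia_result(result):
    assert len(result) in (8, 10)   # burst tests append payError and seqError
    sent, recv = float(result[0]), float(result[1])
    loss_rate = 100 if sent == 0 else round((sent - recv) / sent * 100, 5)
    return {
        'tx_frames': result[0],
        'rx_frames': result[1],
        'tx_rate_fps': result[4],
        'throughput_rx_fps': result[5],
        # sendRateBytes/recvRateBytes are bytes per second -> Mbit/s
        'tx_rate_mbps': str(round(int(result[6]) * 8 / 1e6, 3)),
        'throughput_rx_mbps': str(round(int(result[7]) * 8 / 1e6, 3)),
        'frame_loss_percent': loss_rate,
    }

print(convert_ixia_result(['1000', '999', '64000', '63936',
                           '10000', '9990', '640000', '639360']))
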
diff --git a/tools/pkt_gen/ixnet/ixnet.py b/tools/pkt_gen/ixnet/ixnet.py
index d1ba9096..87fb2c65 100755
--- a/tools/pkt_gen/ixnet/ixnet.py
+++ b/tools/pkt_gen/ixnet/ixnet.py
@@ -370,7 +370,7 @@ class IxNet(trafficgen.ITrafficGenerator):
next(reader)
for row in reader:
#Replace null entries added by Ixia with 0s.
- row = [entry if len(entry) > 0 else '0' for entry in row]
+ row = [entry if entry else '0' for entry in row]
# tx_fps and tx_mps cannot be reliably calculated
# as the DUT may be modifying the frame size
@@ -528,7 +528,7 @@ class IxNet(trafficgen.ITrafficGenerator):
return parse_ixnet_rfc_results(parse_result_string(output[0]))
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
return NotImplementedError('IxNet does not implement send_burst_traffic')
if __name__ == '__main__':
diff --git a/tools/pkt_gen/moongen/moongen.py b/tools/pkt_gen/moongen/moongen.py
index 570720e8..b7d55c4d 100644
--- a/tools/pkt_gen/moongen/moongen.py
+++ b/tools/pkt_gen/moongen/moongen.py
@@ -64,46 +64,46 @@ class Moongen(ITrafficGenerator):
:param one_shot: No RFC 2544 binary search,
just packet flow at traffic specifics
"""
- logging.debug("traffic['frame_rate'] = " + \
+ logging.debug("traffic['frame_rate'] = %s", \
str(traffic['frame_rate']))
- logging.debug("traffic['multistream'] = " + \
+ logging.debug("traffic['multistream'] = %s", \
str(traffic['multistream']))
- logging.debug("traffic['stream_type'] = " + \
+ logging.debug("traffic['stream_type'] = %s", \
str(traffic['stream_type']))
- logging.debug("traffic['l2']['srcmac'] = " + \
+ logging.debug("traffic['l2']['srcmac'] = %s", \
str(traffic['l2']['srcmac']))
- logging.debug("traffic['l2']['dstmac'] = " + \
+ logging.debug("traffic['l2']['dstmac'] = %s", \
str(traffic['l2']['dstmac']))
- logging.debug("traffic['l3']['proto'] = " + \
+ logging.debug("traffic['l3']['proto'] = %s", \
str(traffic['l3']['proto']))
- logging.debug("traffic['l3']['srcip'] = " + \
+ logging.debug("traffic['l3']['srcip'] = %s", \
str(traffic['l3']['srcip']))
- logging.debug("traffic['l3']['dstip'] = " + \
+ logging.debug("traffic['l3']['dstip'] = %s", \
str(traffic['l3']['dstip']))
- logging.debug("traffic['l4']['srcport'] = " + \
+ logging.debug("traffic['l4']['srcport'] = %s", \
str(traffic['l4']['srcport']))
- logging.debug("traffic['l4']['dstport'] = " + \
+ logging.debug("traffic['l4']['dstport'] = %s", \
str(traffic['l4']['dstport']))
- logging.debug("traffic['vlan']['enabled'] = " + \
+ logging.debug("traffic['vlan']['enabled'] = %s", \
str(traffic['vlan']['enabled']))
- logging.debug("traffic['vlan']['id'] = " + \
+ logging.debug("traffic['vlan']['id'] = %s", \
str(traffic['vlan']['id']))
- logging.debug("traffic['vlan']['priority'] = " + \
+ logging.debug("traffic['vlan']['priority'] = %s", \
str(traffic['vlan']['priority']))
- logging.debug("traffic['vlan']['cfi'] = " + \
+ logging.debug("traffic['vlan']['cfi'] = %s", \
str(traffic['vlan']['cfi']))
logging.debug(traffic['l2']['framesize'])
@@ -160,9 +160,9 @@ class Moongen(ITrafficGenerator):
(traffic['frame_rate'] / 100) * (self._moongen_line_speed / \
(8 * (traffic['l2']['framesize'] + 20)) / math.pow(10, 6)))
- logging.debug("startRate = " + start_rate)
+ logging.debug("startRate = %s", start_rate)
- out_file.write("startRate = " + \
+ out_file.write("startRate = %s" % \
start_rate + "\n")
out_file.write("}" + "\n")
@@ -240,14 +240,13 @@ class Moongen(ITrafficGenerator):
"""
self._logger.info("MOONGEN: In moongen disconnect method")
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
- Send a ``numpkts`` packets of traffic, using ``traffic``
+ Send a ``traffic['burst_traffic']`` packets of traffic, using ``traffic``
configuration, with a timeout of ``time``.
:param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
- :param numpkts: Number of packets to send
:param duration: Time to wait to receive packets
:returns: dictionary of strings with following data:
@@ -508,8 +507,8 @@ class Moongen(ITrafficGenerator):
return moongen_results
- def send_rfc2544_throughput(self, traffic=None, duration=20,
- lossrate=0.0, tests=1):
+ def send_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
#
# Send traffic per RFC2544 throughput test specifications.
#
@@ -631,8 +630,8 @@ class Moongen(ITrafficGenerator):
"""
self._logger.info('In moongen wait_rfc2544_throughput')
- def send_rfc2544_back2back(self, traffic=None, duration=60,
- lossrate=0.0, tests=1):
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
"""Send traffic per RFC2544 back2back test specifications.
Send packets at a fixed rate, using ``traffic``
diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py
index 9980ae7c..487566bf 100644
--- a/tools/pkt_gen/testcenter/testcenter.py
+++ b/tools/pkt_gen/testcenter/testcenter.py
@@ -182,7 +182,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""
Do nothing.
"""
@@ -246,8 +246,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
row["ForwardingRate(fps)"])
return result
- # pylint: disable=unused-argument
- def send_rfc2889_forwarding(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_forwarding(self, traffic=None, tests=1, _duration=20):
"""
Send traffic per RFC2889 Forwarding test specifications.
"""
@@ -257,7 +256,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
framesize = traffic['l2']['framesize']
args = get_rfc2889_common_settings(framesize, tests,
traffic['traffic_type'])
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -273,7 +272,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_forwarding_results(filec)
- def send_rfc2889_caching(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_caching(self, traffic=None, tests=1, _duration=20):
"""
Send as per RFC2889 Addr-Caching test specifications.
"""
@@ -286,7 +285,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom_args = get_rfc2889_custom_settings()
args = common_args + custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -302,7 +301,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_addr_caching_results(filec)
- def send_rfc2889_learning(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_learning(self, traffic=None, tests=1, _duration=20):
"""
Send traffic per RFC2889 Addr-Learning test specifications.
"""
@@ -315,7 +314,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom_args = get_rfc2889_custom_settings()
args = common_args + custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -387,7 +386,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom, 1)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -420,7 +419,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
tests)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -453,7 +452,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
tests)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.info("Arguments used to call test: %s", args)
@@ -498,4 +497,5 @@ if __name__ == '__main__':
}
with TestCenter() as dev:
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
+ # pylint: disable=no-member
print(dev.send_rfc2544_backtoback(traffic=TRAFFIC))
diff --git a/tools/pkt_gen/trafficgen/trafficgen.py b/tools/pkt_gen/trafficgen/trafficgen.py
index 262df71d..a6f7edcc 100755
--- a/tools/pkt_gen/trafficgen/trafficgen.py
+++ b/tools/pkt_gen/trafficgen/trafficgen.py
@@ -81,15 +81,14 @@ class ITrafficGenerator(object):
"""
raise NotImplementedError('Please call an implementation.')
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
- Send a ``numpkts`` packets of traffic, using ``traffic``
+ Send a ``traffic['burst_size']`` packets of traffic, using ``traffic``
configuration, for ``duration`` seconds.
Attributes:
:param traffic: Detailed "traffic" spec, see design docs for details
- :param numpkts: Number of packets to send
:param duration: Time to wait to receive packets
:returns: dictionary of strings with following data:
diff --git a/tools/pkt_gen/trex/trex.py b/tools/pkt_gen/trex/trex.py
index e0ce4c48..94b793d6 100644
--- a/tools/pkt_gen/trex/trex.py
+++ b/tools/pkt_gen/trex/trex.py
@@ -15,12 +15,14 @@
"""
Trex Traffic Generator Model
"""
+
# pylint: disable=undefined-variable
import logging
import subprocess
import sys
import time
import os
+import re
from collections import OrderedDict
# pylint: disable=unused-import
import netaddr
@@ -69,6 +71,20 @@ _EMPTY_STATS = {
'tx_pps': 0.0,
'tx_util': 0.0,}}
+# Default frame definition, which can be overridden by TRAFFIC['scapy'].
+# The content of the frame and its network layers are driven by TRAFFIC
+# dictionary, i.e. 'l2', 'l3, 'l4' and 'vlan' parts.
+_SCAPY_FRAME = {
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+}
+
class Trex(ITrafficGenerator):
"""Trex Traffic generator wrapper."""
@@ -165,35 +181,77 @@ class Trex(ITrafficGenerator):
self._logger.info("T-Rex: In trex disconnect method")
self._stlclient.disconnect(stop_traffic=True, release_ports=True)
- @staticmethod
- def create_packets(traffic, ports_info):
+ def create_packets(self, traffic, ports_info):
"""Create base packet according to traffic specification.
If traffic haven't specified srcmac and dstmac fields
- packet will be create with mac address of trex server.
+ packet will be created with mac address of trex server.
"""
- mac_add = [li['hw_mac'] for li in ports_info]
-
- if traffic and traffic['l2']['framesize'] > 0:
- if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
- traffic['l2']['srcmac'] == '00:00:00:00:00:00':
- base_pkt_a = Ether(src=mac_add[0], dst=mac_add[1])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['srcip'],
- dst=traffic['l3']['dstip'])/ \
- UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
- base_pkt_b = Ether(src=mac_add[1], dst=mac_add[0])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
- else:
- base_pkt_a = Ether(src=traffic['l2']['srcmac'], dst=traffic['l2']['dstmac'])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
+ if not traffic or traffic['l2']['framesize'] <= 0:
+ return (None, None)
- base_pkt_b = Ether(src=traffic['l2']['dstmac'], dst=traffic['l2']['srcmac'])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
+ if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
+ traffic['l2']['srcmac'] == '00:00:00:00:00:00':
+
+ mac_add = [li['hw_mac'] for li in ports_info]
+ src_mac = mac_add[0]
+ dst_mac = mac_add[1]
+ else:
+ src_mac = traffic['l2']['srcmac']
+ dst_mac = traffic['l2']['dstmac']
+
+ if traffic['scapy']['enabled']:
+ base_pkt_a = traffic['scapy']['0']
+ base_pkt_b = traffic['scapy']['1']
+ else:
+ base_pkt_a = _SCAPY_FRAME['0']
+ base_pkt_b = _SCAPY_FRAME['1']
+
+ # check and remove network layers disabled by TRAFFIC dictionary
+ # Note: In general, it is possible to remove layers from scapy object by
+ # e.g. del base_pkt_a['IP']. However it doesn't work for all layers
+ # (e.g. Dot1Q). Thus it is safer to modify string with scapy frame definition
+ # directly, before it is converted to the real scapy object.
+ if not traffic['vlan']['enabled']:
+ self._logger.info('VLAN headers are disabled by TRAFFIC')
+ base_pkt_a = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', base_pkt_b)
+ if not traffic['l3']['enabled']:
+ self._logger.info('IP headers are disabled by TRAFFIC')
+ base_pkt_a = re.sub(r'(^|\/)IP(v6)?\([^\)]*\)', '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)IP(v6)?\([^\)]*\)', '', base_pkt_b)
+ if not traffic['l4']['enabled']:
+ self._logger.info('%s headers are disabled by TRAFFIC',
+ traffic['l3']['proto'].upper())
+ base_pkt_a = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format(
+ traffic['l3']['proto'].upper()), '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format(
+ traffic['l3']['proto'].upper()), '', base_pkt_b)
+
+ # pylint: disable=eval-used
+ base_pkt_a = eval(base_pkt_a.format(
+ Ether_src=repr(src_mac),
+ Ether_dst=repr(dst_mac),
+ Dot1Q_prio=traffic['vlan']['priority'],
+ Dot1Q_id=traffic['vlan']['cfi'],
+ Dot1Q_vlan=traffic['vlan']['id'],
+ IP_proto=repr(traffic['l3']['proto']),
+ IP_PROTO=traffic['l3']['proto'].upper(),
+ IP_src=repr(traffic['l3']['srcip']),
+ IP_dst=repr(traffic['l3']['dstip']),
+ IP_PROTO_sport=traffic['l4']['srcport'],
+ IP_PROTO_dport=traffic['l4']['dstport']))
+ base_pkt_b = eval(base_pkt_b.format(
+ Ether_src=repr(src_mac),
+ Ether_dst=repr(dst_mac),
+ Dot1Q_prio=traffic['vlan']['priority'],
+ Dot1Q_id=traffic['vlan']['cfi'],
+ Dot1Q_vlan=traffic['vlan']['id'],
+ IP_proto=repr(traffic['l3']['proto']),
+ IP_PROTO=traffic['l3']['proto'].upper(),
+ IP_src=repr(traffic['l3']['srcip']),
+ IP_dst=repr(traffic['l3']['dstip']),
+ IP_PROTO_sport=traffic['l4']['srcport'],
+ IP_PROTO_dport=traffic['l4']['dstport']))
return (base_pkt_a, base_pkt_b)
@@ -248,22 +306,48 @@ class Trex(ITrafficGenerator):
pkt_a = STLPktBuilder(pkt=base_pkt_a / payload_a)
pkt_b = STLPktBuilder(pkt=base_pkt_b / payload_b)
- stream_1 = STLStream(packet=pkt_a,
- name='stream_1',
- mode=STLTXCont(percentage=traffic['frame_rate']))
- stream_2 = STLStream(packet=pkt_b,
- name='stream_2',
- mode=STLTXCont(percentage=traffic['frame_rate']))
lat_pps = settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS')
- if lat_pps > 0:
- stream_1_lat = STLStream(packet=pkt_a,
+ if traffic['traffic_type'] == 'burst':
+ if lat_pps > 0:
+ # latency statistics are requested; in case of frame burst we can enable
+ # statistics for all frames
+ stream_1 = STLStream(packet=pkt_a,
flow_stats=STLFlowLatencyStats(pg_id=0),
- name='stream_1_lat',
- mode=STLTXCont(pps=lat_pps))
- stream_2_lat = STLStream(packet=pkt_b,
+ name='stream_1',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ stream_2 = STLStream(packet=pkt_b,
flow_stats=STLFlowLatencyStats(pg_id=1),
- name='stream_2_lat',
- mode=STLTXCont(pps=lat_pps))
+ name='stream_2',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ else:
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ else:
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ # workaround for latency statistics, which can't be enabled for streams
+ # with high framerate due to the huge performance impact
+ if lat_pps > 0:
+ stream_1_lat = STLStream(packet=pkt_a,
+ flow_stats=STLFlowLatencyStats(pg_id=0),
+ name='stream_1_lat',
+ mode=STLTXCont(pps=lat_pps))
+ stream_2_lat = STLStream(packet=pkt_b,
+ flow_stats=STLFlowLatencyStats(pg_id=1),
+ name='stream_2_lat',
+ mode=STLTXCont(pps=lat_pps))
return (stream_1, stream_2, stream_1_lat, stream_2_lat)
@@ -293,13 +377,13 @@ class Trex(ITrafficGenerator):
# since we can only control both ports at once take the lower of the two
max_speed = min(max_speed_1, max_speed_2)
gbps_speed = (max_speed / 1000) * (float(traffic['frame_rate']) / 100.0)
- self._logger.debug('Starting traffic at %s Gpbs speed', gbps_speed)
+ self._logger.debug('Starting traffic at %s Gbps speed', gbps_speed)
# for SR-IOV
if settings.getValue('TRAFFICGEN_TREX_PROMISCUOUS'):
self._stlclient.set_port_attr(my_ports, promiscuous=True)
- packet_1, packet_2 = Trex.create_packets(traffic, ports_info)
+ packet_1, packet_2 = self.create_packets(traffic, ports_info)
self.show_packet_info(packet_1, packet_2)
stream_1, stream_2, stream_1_lat, stream_2_lat = Trex.create_streams(packet_1, packet_2, traffic)
self._stlclient.add_streams(stream_1, ports=[0])
@@ -382,20 +466,29 @@ class Trex(ITrafficGenerator):
result[ResultsConstants.FRAME_LOSS_PERCENT] = 100
if settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS') > 0 and stats['latency']:
- result[ResultsConstants.MIN_LATENCY_NS] = (
- '{:.3f}'.format(
- (float(min(stats["latency"][0]["latency"]["total_min"],
- stats["latency"][1]["latency"]["total_min"])))))
-
- result[ResultsConstants.MAX_LATENCY_NS] = (
- '{:.3f}'.format(
- (float(max(stats["latency"][0]["latency"]["total_max"],
- stats["latency"][1]["latency"]["total_max"])))))
-
- result[ResultsConstants.AVG_LATENCY_NS] = (
- '{:.3f}'.format(
- float((stats["latency"][0]["latency"]["average"]+
- stats["latency"][1]["latency"]["average"])/2)))
+ try:
+ result[ResultsConstants.MIN_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(min(stats["latency"][0]["latency"]["total_min"],
+ stats["latency"][1]["latency"]["total_min"])))))
+ except TypeError:
+ result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
+
+ try:
+ result[ResultsConstants.MAX_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(max(stats["latency"][0]["latency"]["total_max"],
+ stats["latency"][1]["latency"]["total_max"])))))
+ except TypeError:
+ result[ResultsConstants.MAX_LATENCY_NS] = 'Unknown'
+
+ try:
+ result[ResultsConstants.AVG_LATENCY_NS] = (
+ '{:.3f}'.format(
+ float((stats["latency"][0]["latency"]["average"]+
+ stats["latency"][1]["latency"]["average"])/2)))
+ except TypeError:
+ result[ResultsConstants.AVG_LATENCY_NS] = 'Unknown'
else:
result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
@@ -568,9 +661,25 @@ class Trex(ITrafficGenerator):
raise NotImplementedError(
'Trex wait rfc2544 throughput not implemented')
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=5):
- raise NotImplementedError(
- 'Trex send burst traffic not implemented')
+ def send_burst_traffic(self, traffic=None, duration=20):
+ """See ITrafficGenerator for description
+ """
+ self._logger.info("In Trex send_burst_traffic method")
+ self._params.clear()
+
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(
+ self._params['traffic'], traffic)
+
+ if settings.getValue('TRAFFICGEN_TREX_LEARNING_MODE'):
+ self.learning_packets(traffic)
+ self._logger.info("T-Rex sending traffic")
+ stats = self.generate_traffic(traffic, duration)
+
+ time.sleep(3) # allow packets to complete before reading stats
+
+ return self.calculate_results(stats)
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=30,
lossrate=0.0):
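
The create_packets() rework above keeps the frame as a text template and strips disabled layers with regular expressions before the string is evaluated into a scapy packet, since deleting layers such as Dot1Q from an already built scapy object is not reliable. A short standalone sketch of that stripping step follows; the template string is copied from _SCAPY_FRAME and the hard-coded 'UDP' stands in for traffic['l3']['proto'].upper():

# Standalone demonstration of the layer stripping used in create_packets().
import re

frame = ('Ether(src={Ether_src}, dst={Ether_dst})/'
         'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
         'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
         '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})')

# VLAN disabled in TRAFFIC -> drop the Dot1Q layer from the template
frame = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', frame)
# L4 disabled in TRAFFIC -> drop the UDP/TCP/SCTP layer from the template
frame = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format('UDP'),
               '', frame)

print(frame)
# -> Ether(src={Ether_src}, dst={Ether_dst})/IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})
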
diff --git a/tools/pkt_gen/xena/XenaDriver.py b/tools/pkt_gen/xena/XenaDriver.py
index 6e39e47a..cdc82838 100644
--- a/tools/pkt_gen/xena/XenaDriver.py
+++ b/tools/pkt_gen/xena/XenaDriver.py
@@ -170,8 +170,7 @@ class KeepAliveThread(threading.Thread):
self.finished = threading.Event()
self.setDaemon(True)
_LOGGER.debug(
- 'Xena Socket keep alive thread initiated, interval ' +
- '{} seconds'.format(self.interval))
+ 'Xena Socket keep alive thread initiated, interval %s seconds', self.interval)
def stop(self):
""" Thread stop. See python thread docs for more info
@@ -904,7 +903,7 @@ class XenaRXStats(object):
statdict[entry_id] = self._pack_stats(param, 3)
elif param[1] == 'PR_TPLDS':
tid_list = self._pack_tplds_stats(param, 2)
- if len(tid_list):
+ if tid_list:
statdict['pr_tplds'] = tid_list
elif param[1] == 'PR_TPLDTRAFFIC':
if 'pr_tpldstraffic' in statdict:
diff --git a/tools/pkt_gen/xena/json/xena_json.py b/tools/pkt_gen/xena/json/xena_json.py
index b1eed720..df2aa55f 100644
--- a/tools/pkt_gen/xena/json/xena_json.py
+++ b/tools/pkt_gen/xena/json/xena_json.py
@@ -28,8 +28,6 @@ import locale
import logging
import os
-import scapy.layers.inet as inet
-
from tools.pkt_gen.xena.json import json_utilities
_LOGGER = logging.getLogger(__name__)
@@ -279,6 +277,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage.
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer2'] = [
inet.Ether(dst=dst_mac, src=src_mac, **kwargs),
inet.Ether(dst=src_mac, src=dst_mac, **kwargs)]
@@ -293,6 +295,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer3'] = [
inet.IP(src=src_ip, dst=dst_ip, proto=protocol.lower(), **kwargs),
inet.IP(src=dst_ip, dst=src_ip, proto=protocol.lower(), **kwargs)]
@@ -305,6 +311,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer4'] = [
inet.UDP(sport=source_port, dport=destination_port, **kwargs),
inet.UDP(sport=source_port, dport=destination_port, **kwargs)]
@@ -316,6 +326,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['vlan'] = [
inet.Dot1Q(vlan=vlan_id, **kwargs),
inet.Dot1Q(vlan=vlan_id, **kwargs)]
diff --git a/tools/pkt_gen/xena/xena.py b/tools/pkt_gen/xena/xena.py
index 19b44f0b..33864079 100755
--- a/tools/pkt_gen/xena/xena.py
+++ b/tools/pkt_gen/xena/xena.py
@@ -32,8 +32,6 @@ import xml.etree.ElementTree as ET
from collections import OrderedDict
from time import sleep
-import scapy.layers.inet as inet
-
from conf import merge_spec
from conf import settings
from core.results.results_constants import ResultsConstants
@@ -149,6 +147,10 @@ class Xena(ITrafficGenerator):
:param reverse: Swap source and destination info when building header
:return: packet header in hex
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
srcmac = self._params['traffic']['l2'][
'srcmac'] if not reverse else self._params['traffic']['l2'][
'dstmac']
@@ -274,10 +276,6 @@ class Xena(ITrafficGenerator):
enable the pairs topology
:return: None
"""
- # set duplex mode, this code is valid, pylint complaining with a
- # warning that many have complained about online.
- # pylint: disable=redefined-variable-type
-
try:
if self._params['traffic']['bidir'] == "True":
j_file = XenaJSONMesh()
@@ -568,7 +566,7 @@ class Xena(ITrafficGenerator):
self._xsocket.disconnect()
self._xsocket = None
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
See ITrafficGenerator for description
@@ -579,7 +577,7 @@ class Xena(ITrafficGenerator):
if traffic:
self._params['traffic'] = merge_spec(self._params['traffic'],
traffic)
- self._start_traffic_api(numpkts)
+ self._start_traffic_api(traffic['burst_size'])
return self._stop_api_traffic()
def send_cont_traffic(self, traffic=None, duration=20):
diff --git a/tools/report/report.py b/tools/report/report.py
index b3f15c1b..5d05e7ad 100644
--- a/tools/report/report.py
+++ b/tools/report/report.py
@@ -137,7 +137,6 @@ def generate(testcase):
'tests': tests,
}
i = 0
- # pylint: disable=no-member
for output_file in output_files:
template = template_env.get_template(_TEMPLATE_FILES[i])
output_text = template.render(template_vars)
diff --git a/tools/report/report_rst.jinja b/tools/report/report_rst.jinja
index eda0c01e..6b51807a 100644
--- a/tools/report/report_rst.jinja
+++ b/tools/report/report_rst.jinja
@@ -90,7 +90,9 @@ Testing Activities/Events
~~~~~~~~~~~~~~~~~~~~~~~~~
pidstat is used to collect the process statistics, as such some values such as
%CPU and %USER maybe > 100% as the values are summed across multiple cores. For
-more info on pidstat please see: http://linux.die.net/man/1/pidstat.
+more info on pidstat please see: http://linux.die.net/man/1/pidstat. Please
+note that vsperf recalculates the CPU consumption of a process by aggregating
+the CPU usage of each thread.
Known issues: Some reported metrics have the value "unkown". These values are
marked unknown as they are not values retrieved from the external tester
diff --git a/tools/systeminfo.py b/tools/systeminfo.py
index f34bcce6..6020d0e2 100644
--- a/tools/systeminfo.py
+++ b/tools/systeminfo.py
@@ -191,7 +191,7 @@ def get_bin_version(binary, regex):
return None
versions = re.findall(regex, output)
- if len(versions):
+ if versions:
return versions[0]
else:
return None
@@ -297,7 +297,7 @@ def get_version(app_name):
if not '16' in release:
tmp_ver[2] += line.rstrip('\n').split(' ')[2]
- if len(tmp_ver[0]):
+ if tmp_ver[0]:
app_version = '.'.join(tmp_ver)
app_git_tag = get_git_tag(S.getValue('TOOLS')['dpdk_src'])
elif app_name.lower().startswith('qemu'):
diff --git a/tools/teststepstools.py b/tools/teststepstools.py
index 33db8f79..db2d53e6 100644
--- a/tools/teststepstools.py
+++ b/tools/teststepstools.py
@@ -43,7 +43,7 @@ class TestStepsTools(object):
return True
@staticmethod
- def validate_Assert(result, dummy_condition):
+ def validate_Assert(result, _dummy_condition):
""" Validate evaluation of given `condition'
"""
return result
@@ -56,7 +56,7 @@ class TestStepsTools(object):
return eval(expression)
@staticmethod
- def validate_Eval(result, dummy_expression):
+ def validate_Eval(result, _dummy_expression):
""" Validate result of python `expression' evaluation
"""
return result is not None
@@ -76,7 +76,7 @@ class TestStepsTools(object):
return True
@staticmethod
- def validate_Exec_Python(result, dummy_code):
+ def validate_Exec_Python(result, _dummy_code):
""" Validate result of python `code' execution
"""
return result
@@ -99,7 +99,7 @@ class TestStepsTools(object):
return output
@staticmethod
- def validate_Exec_Shell(result, dummy_command, dummy_regex=None):
+ def validate_Exec_Shell(result, _dummy_command, _dummy_regex=None):
""" validate result of shell `command' execution
"""
return result is not None
@@ -115,7 +115,7 @@ class TestStepsTools(object):
return None
@staticmethod
- def validate_Exec_Shell_Background(result, dummy_command, dummy_regex=None):
+ def validate_Exec_Shell_Background(result, _dummy_command, _dummy_regex=None):
""" validate result of shell `command' execution on the background
"""
return result is not None
diff --git a/tools/veth.py b/tools/veth.py
index 6418d11a..6d7c9962 100644
--- a/tools/veth.py
+++ b/tools/veth.py
@@ -84,8 +84,7 @@ def del_veth_port(port, peer_port):
port, peer_port), False)
-# pylint: disable=unused-argument
-def validate_add_veth_port(result, port, peer_port):
+def validate_add_veth_port(_result, port, peer_port):
"""
Validation function for integration testcases
"""
@@ -93,7 +92,7 @@ def validate_add_veth_port(result, port, peer_port):
return all([port in devs, peer_port in devs])
-def validate_bring_up_eth_port(result, eth_port, namespace=None):
+def validate_bring_up_eth_port(_result, eth_port, namespace=None):
"""
Validation function for integration testcases
"""
@@ -110,7 +109,7 @@ def validate_bring_up_eth_port(result, eth_port, namespace=None):
return True
-def validate_del_veth_port(result, port, peer_port):
+def validate_del_veth_port(_result, port, peer_port):
"""
Validation function for integration testcases
"""