-rw-r--r--   nfvbench/nfvbench.py                    28
-rw-r--r--   nfvbench/nfvbenchd.py                    5
-rwxr-xr-x   nfvbench/traffic_client.py               3
-rw-r--r--   nfvbench/traffic_gen/dummy.py            4
-rw-r--r--   nfvbench/traffic_gen/traffic_utils.py    6
-rw-r--r--   nfvbench/traffic_gen/trex_gen.py         6
-rw-r--r--   nfvbench/utils.py                       27
7 files changed, 51 insertions, 28 deletions
diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py
index 50b96b6..06ca19d 100644
--- a/nfvbench/nfvbench.py
+++ b/nfvbench/nfvbench.py
@@ -24,6 +24,7 @@ import sys
import traceback
from attrdict import AttrDict
+from logging import FileHandler
import pbr.version
from pkg_resources import resource_string
@@ -173,6 +174,24 @@ class NFVBench(object):
        Sanity check on the config is done here as well.
        """
        self.config = AttrDict(dict(self.base_config))
+        # Update log file handler if needed after a config update (REST mode)
+        if 'log_file' in opts:
+            if opts['log_file']:
+                (path, _filename) = os.path.split(opts['log_file'])
+                if not os.path.exists(path):
+                    LOG.warning(
+                        'Path %s does not exist. Please verify root path is shared with host. Path '
+                        'will be created.', path)
+                    os.makedirs(path)
+                    LOG.info('%s is created.', path)
+                for h in log.getLogger().handlers:
+                    if isinstance(h, FileHandler) and h.baseFilename != opts['log_file']:
+                        # clean log file handler
+                        log.getLogger().removeHandler(h)
+                # add a handler if not already present, to avoid duplicate handlers
+                if len(log.getLogger().handlers) == 1:
+                    log.add_file_logger(opts['log_file'])
+
        self.config.update(opts)
        config = self.config
@@ -184,6 +203,15 @@ class NFVBench(object):
            config.service_chain = ChainType.EXT
            config.no_arp = True
            LOG.info('Running L2 loopback: using EXT chain/no ARP')
+
+        # traffic profile override options
+        if 'frame_sizes' in opts:
+            unidir = False
+            if 'unidir' in opts:
+                unidir = opts['unidir']
+            override_custom_traffic(config, opts['frame_sizes'], unidir)
+            LOG.info("Frame size has been set to %s for current configuration", opts['frame_sizes'])
+
        config.flow_count = utils.parse_flow_count(config.flow_count)
        required_flow_count = config.service_chain_count * 2
        if config.flow_count < required_flow_count:
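
The handler swap above keeps exactly one FileHandler attached when the REST API changes log_file between runs. Below is a minimal standalone sketch of the same idea, using the stdlib logging module directly rather than nfvbench's log wrapper (log.getLogger / log.add_file_logger); the helper name is illustrative only.

    import logging
    import os
    from logging import FileHandler

    def switch_log_file(logger, new_log_file):
        # Sketch only: detach any FileHandler that points at another file,
        # then attach one for the new file if none is present yet.
        path = os.path.dirname(new_log_file)
        if path and not os.path.exists(path):
            os.makedirs(path)
        for handler in list(logger.handlers):
            # logging.FileHandler stores baseFilename as an absolute path
            if isinstance(handler, FileHandler) and \
                    handler.baseFilename != os.path.abspath(new_log_file):
                logger.removeHandler(handler)
        if not any(isinstance(h, FileHandler) for h in logger.handlers):
            logger.addHandler(FileHandler(new_log_file))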
diff --git a/nfvbench/nfvbenchd.py b/nfvbench/nfvbenchd.py
index 73e1342..07f1eea 100644
--- a/nfvbench/nfvbenchd.py
+++ b/nfvbench/nfvbenchd.py
@@ -26,7 +26,6 @@ from flask import request
from .summarizer import NFVBenchSummarizer
from .log import LOG
-from .utils import byteify
from .utils import RunLock
from .__init__ import __version__
@@ -48,7 +47,7 @@ def result_json(status, message, request_id=None):
def load_json(data):
-    return json.loads(json.dumps(data), object_hook=byteify)
+    return json.loads(json.dumps(data))
def get_uuid():
@@ -211,6 +210,8 @@ class WebServer(object):
            try:
                summary = NFVBenchSummarizer(results['result'], self.fluent_logger)
                LOG.info(str(summary))
+                if 'json' in config and 'result' in results and results['status']:
+                    self.nfvbench_runner.save(results['result'])
            except KeyError:
                # in case of error, 'result' might be missing
                if 'error_message' in results:
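
Dropping the byteify hook matches Python 3 behavior: json.loads already returns str (never bytes) for keys and values, so the dumps/loads round trip only normalizes the request payload into plain dict/list/str/int types. A small sketch of the simplified helper, for illustration:

    import json

    def load_json(data):
        # Python 3: no object_hook needed, json.loads already yields str
        return json.loads(json.dumps(data))

    print(load_json({'rate': '10Gbps', 'duration_sec': 60}))
    # {'rate': '10Gbps', 'duration_sec': 60}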
diff --git a/nfvbench/traffic_client.py b/nfvbench/traffic_client.py
index f26a747..0247857 100755
--- a/nfvbench/traffic_client.py
+++ b/nfvbench/traffic_client.py
@@ -923,7 +923,8 @@ class TrafficClient(object):
    def get_stats(self):
        """Collect final stats for previous run."""
        stats = self.gen.get_stats()
-        retDict = {'total_tx_rate': stats['total_tx_rate']}
+        retDict = {'total_tx_rate': stats['total_tx_rate'],
+                   'offered_tx_rate_bps': stats['offered_tx_rate_bps']}
        tx_keys = ['total_pkts', 'total_pkt_bytes', 'pkt_rate', 'pkt_bit_rate']
        rx_keys = tx_keys + ['dropped_pkts']
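
With the extra key, callers of get_stats() see the offered load in both packet and bit terms. A hypothetical consumer (format_offered_load and the sample values are illustrative, not part of nfvbench):

    def format_offered_load(stats):
        # stats is the dict returned by TrafficClient.get_stats()
        return 'offered: %.3f Gbps (%.0f pps)' % (
            stats['offered_tx_rate_bps'] / 1e9, stats['total_tx_rate'])

    print(format_offered_load({'total_tx_rate': 1487000,
                               'offered_tx_rate_bps': 999264000.0}))
    # offered: 0.999 Gbps (1487000 pps)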
diff --git a/nfvbench/traffic_gen/dummy.py b/nfvbench/traffic_gen/dummy.py
index 7fd3fdb..272990a 100644
--- a/nfvbench/traffic_gen/dummy.py
+++ b/nfvbench/traffic_gen/dummy.py
@@ -147,6 +147,10 @@ class DummyTG(AbstractTrafficGenerator):
            total_tx_pps += tx_pps
        # actual total tx rate in pps
        result['total_tx_rate'] = total_tx_pps
+        # actual offered tx rate in bps
+        avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
+        total_tx_bps = utils.pps_to_bps(total_tx_pps, avg_packet_size)
+        result['offered_tx_rate_bps'] = total_tx_bps
        return result
    def get_stream_stats(self, tg_stats, if_stats, latencies, chain_idx):
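
The offered bit rate is derived from the packet rate via the average L2 frame size. A rough, hypothetical sketch of what that conversion can look like; the helper bodies and the 20 bytes of L1 overhead per frame (preamble, start-of-frame delimiter, inter-frame gap) are assumptions here, not read from the diff:

    IMIX_L2_SIZES = [64, 594, 1518]
    IMIX_RATIOS = [7, 4, 1]
    IMIX_AVG_L2_FRAME_SIZE = (sum(s * r for s, r in zip(IMIX_L2_SIZES, IMIX_RATIOS))
                              / sum(IMIX_RATIOS))

    def get_average_packet_size(l2frame_size):
        # 'IMIX' maps to the weighted average, anything else is a plain number
        if str(l2frame_size).upper() == 'IMIX':
            return IMIX_AVG_L2_FRAME_SIZE
        return float(l2frame_size)

    def pps_to_bps(pps, avg_packet_size):
        # assumption: 20 bytes of L1 framing overhead per packet
        return pps * (avg_packet_size + 20.0) * 8

    # 1 Mpps of 64-byte frames -> 672 Mbit/s offered on the wire
    print(pps_to_bps(1_000_000, get_average_packet_size(64)))  # 672000000.0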
diff --git a/nfvbench/traffic_gen/traffic_utils.py b/nfvbench/traffic_gen/traffic_utils.py
index c875a5d..4366a6c 100644
--- a/nfvbench/traffic_gen/traffic_utils.py
+++ b/nfvbench/traffic_gen/traffic_utils.py
@@ -14,7 +14,6 @@
import bitmath
-from nfvbench.utils import multiplier_map
# IMIX frame size including the 4-byte FCS field
IMIX_L2_SIZES = [64, 594, 1518]
@@ -23,6 +22,11 @@ IMIX_RATIOS = [7, 4, 1]
IMIX_AVG_L2_FRAME_SIZE = sum(
    [1.0 * imix[0] * imix[1] for imix in zip(IMIX_L2_SIZES, IMIX_RATIOS)]) / sum(IMIX_RATIOS)
+multiplier_map = {
+    'K': 1000,
+    'M': 1000000,
+    'G': 1000000000
+}
def convert_rates(l2frame_size, rate, intf_speed):
    """Convert a given rate unit into the other rate units.
diff --git a/nfvbench/traffic_gen/trex_gen.py b/nfvbench/traffic_gen/trex_gen.py
index de9500a..a4f992d 100644
--- a/nfvbench/traffic_gen/trex_gen.py
+++ b/nfvbench/traffic_gen/trex_gen.py
@@ -95,6 +95,7 @@ class TRex(AbstractTrafficGenerator):
        self.rates = []
        self.capture_id = None
        self.packet_list = []
+        self.l2_frame_size = 0
    def get_version(self):
        """Get the Trex version."""
@@ -151,6 +152,10 @@ class TRex(AbstractTrafficGenerator):
        total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
        result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
+        # actual offered tx rate in bps
+        avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
+        total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
+        result['offered_tx_rate_bps'] = total_tx_bps
        result["flow_stats"] = in_stats["flow_stats"]
        result["latency"] = in_stats["latency"]
        return result
@@ -812,6 +817,7 @@ class TRex(AbstractTrafficGenerator):
                        .format(pps=r['rate_pps'],
                                bps=r['rate_bps'],
                                load=r['rate_percent']))
+        self.l2_frame_size = l2frame_size
        # a dict of list of streams indexed by port#
        # in case of fixed size, has self.chain_count * 2 * 2 streams
        # (1 normal + 1 latency stream per direction per chain)
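
Caching l2_frame_size at stream-creation time is what lets get_stats() derive offered_tx_rate_bps later without re-reading the traffic profile. A toy sketch of that pattern (class and method names are illustrative, not the real TRex driver structure):

    class GeneratorSketch(object):
        def __init__(self):
            self.l2_frame_size = 0.0

        def create_streams(self, l2frame_size):
            # ... build the streams for this frame size ...
            self.l2_frame_size = float(l2frame_size)

        def get_stats(self, total_tx_pps):
            # same pps -> bps idea as above, assuming 20 bytes of L1 overhead
            bps = total_tx_pps * (self.l2_frame_size + 20.0) * 8
            return {'total_tx_rate': total_tx_pps,
                    'offered_tx_rate_bps': bps}

    gen = GeneratorSketch()
    gen.create_streams(64)
    print(gen.get_stats(1_000_000))  # offered_tx_rate_bps: 672000000.0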
diff --git a/nfvbench/utils.py b/nfvbench/utils.py
index d4482fd..b92c75e 100644
--- a/nfvbench/utils.py
+++ b/nfvbench/utils.py
@@ -24,7 +24,7 @@ import fcntl
from functools import wraps
import json
from .log import LOG
-
+from nfvbench.traffic_gen.traffic_utils import multiplier_map
class TimeoutError(Exception):
    pass
@@ -72,22 +72,6 @@ def save_json_result(result, json_file, std_json_path, service_chain, service_ch
default=lambda obj: obj.to_json())
-def byteify(data, ignore_dicts=False):
-    # if this is a unicode string, return its string representation
-    if isinstance(data, str):
-        return data.encode('utf-8')
-    # if this is a list of values, return list of byteified values
-    if isinstance(data, list):
-        return [byteify(item, ignore_dicts=ignore_dicts) for item in data]
-    # if this is a dictionary, return dictionary of byteified keys and values
-    # but only if we haven't already byteified it
-    if isinstance(data, dict) and not ignore_dicts:
-        return {byteify(key, ignore_dicts=ignore_dicts): byteify(value, ignore_dicts=ignore_dicts)
-                for key, value in list(data.items())}
-    # if it's anything else, return it in its original form
-    return data
-
-
def dict_to_json_dict(record):
    return json.loads(json.dumps(record, default=lambda obj: obj.to_json()))
@@ -170,12 +154,6 @@ def get_intel_pci(nic_slot=None, nic_ports=None):
    return pcis
-multiplier_map = {
-    'K': 1000,
-    'M': 1000000,
-    'G': 1000000000
-}
-
def parse_flow_count(flow_count):
    flow_count = str(flow_count)
@@ -194,7 +172,8 @@ def parse_flow_count(flow_count):
def cast_integer(value):
-    return int(value) if not isnan(value) else value
+    # force a 0 value when TRex returns NaN to avoid errors in JSON result parsing
+    return int(value) if not isnan(value) else 0
class RunLock(object):
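
cast_integer now swallows NaN counters (the diff's comment notes these can come from TRex) instead of letting them leak into the JSON output; json.dumps would otherwise emit the non-standard NaN token, which strict JSON parsers reject. A small self-contained check of that behavior:

    import json
    from math import isnan

    def cast_integer(value):
        # NaN becomes 0 so the serialized result stays valid JSON
        return int(value) if not isnan(value) else 0

    print(json.dumps({'rx_pps': cast_integer(float('nan'))}))  # {"rx_pps": 0}
    # json.dumps({'rx_pps': float('nan')}) would produce '{"rx_pps": NaN}',
    # which is not valid JSON for strict parsers.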