From efc678c9d3843dcfd373b5749a88c51228b0b27c Mon Sep 17 00:00:00 2001 From: ahothan Date: Thu, 21 Dec 2017 17:17:46 -0800 Subject: [NFVBENCH-59] Add Unit Testing of the NDR/PDR convergence algorithm using the dummy traffic gen [NFVBENCH-60] Fix pylint warnings Change-Id: I72deec060bf25774d1be33eaeefc74b42a576483 Signed-off-by: ahothan --- docs/development/design/ndrpdr.rst | 10 + nfvbench/chain_clients.py | 7 +- nfvbench/chain_managers.py | 2 +- nfvbench/compute.py | 17 +- nfvbench/config.py | 2 +- nfvbench/nfvbench.py | 4 +- nfvbench/summarizer.py | 3 +- nfvbench/traffic_client.py | 21 +- nfvbench/traffic_gen/dummy.py | 98 +++++++- nfvbench/traffic_gen/traffic_utils.py | 32 ++- nfvbench/utils.py | 2 +- test/test_nfvbench.py | 408 +++++++++++----------------------- tox.ini | 2 +- 13 files changed, 292 insertions(+), 316 deletions(-) diff --git a/docs/development/design/ndrpdr.rst b/docs/development/design/ndrpdr.rst index 4f611a0..5361174 100644 --- a/docs/development/design/ndrpdr.rst +++ b/docs/development/design/ndrpdr.rst @@ -28,7 +28,17 @@ The default value of 0.1 indicates for example that the measured NDR and PDR are actual NDR/PDR (e.g. 0.1% of 10Gbps is 10Mbps). It also determines how small the search range must be in the binary search. The recursion narrows down the range by half and stops when: + - the range is smaller than the configured load_epsilon value - or when the search hits 100% or 0% of line rate +One particularity of using a software traffic generator is that the requested Tx rate may not always be met due to +resource limitations (e.g. CPU is not fast enough to generate a very high load). The algorithm should take this into +consideration: + +- always monitor the actual Tx rate achieved +- actual Tx rate is always <= requested Tx rate +- the measured drop rate should always be relative to the actual Tx rate +- if the actual Tx rate is < requested Tx rate and the measured drop rate is already within threshold ( self.duration_sec: self.stop() @@ -102,10 +105,10 @@ class IpBlock(object): '''Reserve a range of count consecutive IP addresses spaced by step ''' if self.next_free + count > self.max_available: - raise IndexError('No more IP addresses next free=%d max_available=%d requested=%d', - self.next_free, - self.max_available, - count) + raise IndexError('No more IP addresses next free=%d max_available=%d requested=%d' % + (self.next_free, + self.max_available, + count)) first_ip = self.get_ip(self.next_free) last_ip = self.get_ip(self.next_free + count - 1) self.next_free += count @@ -393,7 +396,7 @@ class TrafficGeneratorFactory(object): class TrafficClient(object): PORTS = [0, 1] - def __init__(self, config, notifier=None): + def __init__(self, config, notifier=None, skip_sleep=False): generator_factory = TrafficGeneratorFactory(config) self.gen = generator_factory.get_generator_client() self.tool = generator_factory.get_tool() @@ -414,6 +417,8 @@ class TrafficClient(object): self.current_total_rate = {'rate_percent': '10'} if self.config.single_run: self.current_total_rate = utils.parse_rate_str(self.config.rate) + # UT with dummy TG can bypass all sleeps + self.skip_sleep = skip_sleep def set_macs(self): for mac, device in zip(self.gen.get_macs(), self.config.generator_config.devices): @@ -461,7 +466,8 @@ class TrafficClient(object): self.gen.clear_stats() self.gen.start_traffic() LOG.info('Waiting for packets to be received back... 
(%d / %d)', it + 1, retry_count) - time.sleep(self.config.generic_poll_sec) + if not self.skip_sleep: + time.sleep(self.config.generic_poll_sec) self.gen.stop_traffic() stats = self.gen.get_stats() @@ -481,7 +487,8 @@ class TrafficClient(object): LOG.info('End-to-end connectivity ensured') return - time.sleep(self.config.generic_poll_sec) + if not self.skip_sleep: + time.sleep(self.config.generic_poll_sec) raise TrafficClientException('End-to-end connectivity cannot be ensured') diff --git a/nfvbench/traffic_gen/dummy.py b/nfvbench/traffic_gen/dummy.py index d8c01e9..b43030f 100644 --- a/nfvbench/traffic_gen/dummy.py +++ b/nfvbench/traffic_gen/dummy.py @@ -13,6 +13,7 @@ # under the License. from traffic_base import AbstractTrafficGenerator +import traffic_utils as utils class DummyTG(AbstractTrafficGenerator): @@ -22,10 +23,13 @@ class DummyTG(AbstractTrafficGenerator): Useful for unit testing without actually generating any traffic. """ - def __init__(self, runner): - AbstractTrafficGenerator.__init__(self, runner) + def __init__(self, config): + AbstractTrafficGenerator.__init__(self, config) self.port_handle = [] self.rates = [] + self.l2_frame_size = 0 + self.duration_sec = self.config.duration_sec + self.intf_speed = config.generator_config.intf_speed def get_version(self): return "0.1" @@ -33,6 +37,59 @@ class DummyTG(AbstractTrafficGenerator): def init(self): pass + def get_tx_pps_dropped_pps(self, tx_rate): + '''Get actual tx packets based on requested tx rate + + :param tx_rate: requested TX rate with unit ('40%', '1Mbps', '1000pps') + + :return: the actual TX pps and the dropped pps corresponding to the requested TX rate + ''' + dr, tx = self.__get_dr_actual_tx(tx_rate) + actual_tx_bps = utils.load_to_bps(tx, self.intf_speed) + avg_packet_size = utils.get_average_packet_size(self.l2_frame_size) + tx_packets = utils.bps_to_pps(actual_tx_bps, avg_packet_size) + + dropped = tx_packets * dr / 100 + # print '===get_tx_pkts_dropped_pkts req tex=', tx_rate, 'dr=', dr, + # 'actual tx rate=', tx, 'actual tx pkts=', tx_packets, 'dropped=', dropped + return int(tx_packets), int(dropped) + + def set_response_curve(self, lr_dr=0, ndr=100, max_actual_tx=100, max_11_tx=100): + '''Set traffic gen response characteristics + + Specifies the drop rate curve and the actual TX curve + :param float lr_dr: The actual drop rate at TX line rate (in %, 0..100) + :param float ndr: The true NDR (0 packet drop) in % (0..100) of line rate" + :param float max_actual_tx: highest actual TX when requested TX is 100% + :param float max_11_tx: highest requested TX that results in same actual TX + ''' + self.target_ndr = ndr + if ndr < 100: + self.dr_slope = float(lr_dr) / (100 - ndr) + else: + self.dr_slope = 0 + self.max_11_tx = max_11_tx + self.max_actual_tx = max_actual_tx + if max_11_tx < 100: + self.tx_slope = float(max_actual_tx - max_11_tx) / (100 - max_11_tx) + else: + self.tx_slope = 0 + + def __get_dr_actual_tx(self, requested_tx_rate): + '''Get drop rate at given requested tx rate + :param float requested_tx_rate: requested tx rate in % (0..100) + :return: the drop rate and actual tx rate at that requested_tx_rate in % (0..100) + ''' + if requested_tx_rate <= self.max_11_tx: + actual_tx = requested_tx_rate + else: + actual_tx = self.max_11_tx + (requested_tx_rate - self.max_11_tx) * self.tx_slope + if actual_tx <= self.target_ndr: + dr = 0.0 + else: + dr = (actual_tx - self.target_ndr) * self.dr_slope + return dr, actual_tx + def connect(self): ports = list(self.config.generator_config.ports) 
self.port_handle = ports @@ -44,32 +101,57 @@ class DummyTG(AbstractTrafficGenerator): pass def create_traffic(self, l2frame_size, rates, bidirectional, latency=True): - pass + self.rates = [utils.to_rate_str(rate) for rate in rates] + self.l2_frame_size = l2frame_size def clear_streamblock(self): pass def get_stats(self): + '''Get stats from current run. + + The binary search mainly looks at 2 results to make the decision: + actual tx packets + actual rx dropped packets + From the Requested TX rate - we get the Actual TX rate and the RX drop rate + From the Run duration and actual TX rate - we get the actual total tx packets + From the Actual tx packets and RX drop rate - we get the RX dropped packets + ''' result = {} - for ph in self.port_handle: + total_tx_pps = 0 + + # use dummy values for all other result field as the goal is to + # test the ndr/pdr convergence code + for idx, ph in enumerate(self.port_handle): + requested_tx_rate = utils.get_load_from_rate(self.rates[idx]) + tx_pps, dropped_pps = self.get_tx_pps_dropped_pps(requested_tx_rate) + + # total packets sent per direction - used by binary search + total_pkts = tx_pps * self.duration_sec + dropped_pkts = dropped_pps * self.duration_sec + _, tx_pkt_rate = self.__get_dr_actual_tx(requested_tx_rate) result[ph] = { 'tx': { - 'total_pkts': 1000, + 'total_pkts': total_pkts, 'total_pkt_bytes': 100000, - 'pkt_rate': 100, + 'pkt_rate': tx_pkt_rate, 'pkt_bit_rate': 1000000 }, 'rx': { - 'total_pkts': 1000, + # total packets received + 'total_pkts': total_pkts - dropped_pkts, 'total_pkt_bytes': 100000, 'pkt_rate': 100, 'pkt_bit_rate': 1000000, - 'dropped_pkts': 0 + 'dropped_pkts': dropped_pkts } } result[ph]['rx']['max_delay_usec'] = 10.0 result[ph]['rx']['min_delay_usec'] = 1.0 result[ph]['rx']['avg_delay_usec'] = 2.0 + total_tx_pps += tx_pps + # actual total tx rate in pps + result['total_tx_rate'] = total_tx_pps return result def clear_stats(self): diff --git a/nfvbench/traffic_gen/traffic_utils.py b/nfvbench/traffic_gen/traffic_utils.py index e618c28..4a7f855 100644 --- a/nfvbench/traffic_gen/traffic_utils.py +++ b/nfvbench/traffic_gen/traffic_utils.py @@ -75,6 +75,13 @@ def weighted_avg(weight, count): return sum([x[0] * x[1] for x in zip(weight, count)]) / sum(weight) return float('nan') +def _get_bitmath_rate(rate_bps): + rate = rate_bps.replace('ps', '').strip() + bitmath_rate = bitmath.parse_string(rate) + if bitmath_rate.bits <= 0: + raise Exception('%s is out of valid range' % rate_bps) + return bitmath_rate + def parse_rate_str(rate_str): if rate_str.endswith('pps'): rate_pps = rate_str[:-3] @@ -103,6 +110,26 @@ def parse_rate_str(rate_str): else: raise Exception('Unknown rate string format %s' % rate_str) +def get_load_from_rate(rate_str, avg_frame_size=64, line_rate='10Gbps'): + '''From any rate string (with unit) return the corresponding load (in % unit) + + :param str rate_str: the rate to convert - must end with a unit (e.g. 1Mpps, 30%, 1Gbps) + :param int avg_frame_size: average frame size in bytes (needed only if pps is given) + :param str line_rate: line rate ending with bps unit (e.g. 
1Mbps, 10Gbps) is the rate that + corresponds to 100% rate + :return float: the corresponding rate in % of line rate + ''' + rate_dict = parse_rate_str(rate_str) + if 'rate_percent' in rate_dict: + return float(rate_dict['rate_percent']) + lr_bps = _get_bitmath_rate(line_rate).bits + if 'rate_bps' in rate_dict: + bps = int(rate_dict['rate_bps']) + else: + # must be rate_pps + pps = rate_dict['rate_pps'] + bps = pps_to_bps(pps, avg_frame_size) + return bps_to_load(bps, lr_bps) def divide_rate(rate, divisor): if 'rate_pps' in rate: @@ -130,8 +157,9 @@ def to_rate_str(rate): elif 'rate_percent' in rate: load = rate['rate_percent'] return '{}%'.format(load) - else: - assert False + assert False + # avert pylint warning + return None def nan_replace(d): diff --git a/nfvbench/utils.py b/nfvbench/utils.py index 412dfae..20dc588 100644 --- a/nfvbench/utils.py +++ b/nfvbench/utils.py @@ -61,7 +61,7 @@ def save_json_result(result, json_file, std_json_path, service_chain, service_ch if filepaths: for file_path in filepaths: - LOG.info('Saving results in json file: ' + file_path + "...") + LOG.info('Saving results in json file: %s...', file_path) with open(file_path, 'w') as jfp: json.dump(result, jfp, diff --git a/test/test_nfvbench.py b/test/test_nfvbench.py index 2578407..05490e7 100644 --- a/test/test_nfvbench.py +++ b/test/test_nfvbench.py @@ -18,6 +18,8 @@ import logging import os import sys +import pytest + from attrdict import AttrDict from nfvbench.config import config_loads from nfvbench.credentials import Credentials @@ -28,7 +30,6 @@ from nfvbench.network import Network from nfvbench.specs import ChainType from nfvbench.specs import Encaps import nfvbench.traffic_gen.traffic_utils as traffic_utils -import pytest __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) @@ -266,183 +267,6 @@ def test_pvp_chain_run(pvp_chain): assert result == expected_result """ -# ========================================================================= -# PVVP Chain tests -# ========================================================================= - -""" -@pytest.fixture -def pvvp_chain(monkeypatch, openstack_vxlan_spec): - tor_vni1 = Interface('vni-4097', 'n9k', 50, 77) - vsw_vni1 = Interface('vxlan_tunnel0', 'vpp', 77, 48) - vsw_vif1 = Interface('VirtualEthernet0/0/2', 'vpp', 48, 77) - vsw_vif3 = Interface('VirtualEthernet0/0/0', 'vpp', 77, 47) - vsw_vif4 = Interface('VirtualEthernet0/0/1', 'vpp', 45, 77) - vsw_vif2 = Interface('VirtualEthernet0/0/3', 'vpp', 77, 44) - vsw_vni2 = Interface('vxlan_tunnel1', 'vpp', 43, 77) - tor_vni2 = Interface('vni-4098', 'n9k', 77, 40) - - def mock_init(self, *args, **kwargs): - self.vni_ports = [4099, 4100] - self.v2vnet = V2VNetwork() - self.specs = openstack_vxlan_spec - self.clients = { - 'vpp': AttrDict({ - 'get_v2v_network': lambda reverse=None: Network([vsw_vif3, vsw_vif4], reverse), - 'set_interface_counters': lambda pvvp=None: None, - 'set_v2v_counters': lambda: None, - }) - } - self.worker = AttrDict({ - 'run': lambda: None, - }) - - def mock_empty(self, *args, **kwargs): - pass - - def mock_get_network(self, traffic_port, vni_id, reverse=False): - if vni_id == 0: - return Network([tor_vni1, vsw_vni1, vsw_vif1], reverse) - else: - return Network([tor_vni2, vsw_vni2, vsw_vif2], reverse) - - def mock_get_data(self): - return {} - - monkeypatch.setattr(PVVPChain, '_get_network', mock_get_network) - monkeypatch.setattr(PVVPChain, '_get_data', mock_get_data) - monkeypatch.setattr(PVVPChain, '_setup', mock_empty) - 
monkeypatch.setattr(VxLANWorker, '_clear_interfaces', mock_empty) - monkeypatch.setattr(PVVPChain, '_generate_traffic', mock_empty) - monkeypatch.setattr(PVVPChain, '__init__', mock_init) - - return PVVPChain(None, None, {'vm': None, 'vpp': None, 'tor': None, 'traffic': None}, None) - - -def test_pvvp_chain_run(pvvp_chain): - result = pvvp_chain.run() - - expected_result = { - 'raw_data': {}, - 'stats': None, - 'packet_analysis': - {'direction-forward': [ - OrderedDict([ - ('interface', 'vni-4097'), - ('device', 'n9k'), - ('packet_count', 50) - ]), - OrderedDict([ - ('interface', 'vxlan_tunnel0'), - ('device', 'vpp'), - ('packet_count', 48), - ('packet_drop_count', 2), - ('packet_drop_percentage', 4.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/2'), - ('device', 'vpp'), - ('packet_count', 48), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/0'), - ('device', 'vpp'), - ('packet_count', 47), - ('packet_drop_count', 1), - ('packet_drop_percentage', 2.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/1'), - ('device', 'vpp'), - ('packet_count', 45), - ('packet_drop_count', 2), - ('packet_drop_percentage', 4.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/3'), - ('device', 'vpp'), - ('packet_count', 44), - ('packet_drop_count', 1), - ('packet_drop_percentage', 2.0) - ]), - OrderedDict([ - ('interface', 'vxlan_tunnel1'), - ('device', 'vpp'), - ('packet_count', 43), - ('packet_drop_count', 1), - ('packet_drop_percentage', 2.0) - ]), - OrderedDict([ - ('interface', 'vni-4098'), - ('device', 'n9k'), - ('packet_count', 40), - ('packet_drop_count', 3), - ('packet_drop_percentage', 6.0) - ]) - ], - 'direction-reverse': [ - OrderedDict([ - ('interface', 'vni-4098'), - ('device', 'n9k'), - ('packet_count', 77) - ]), - OrderedDict([ - ('interface', 'vxlan_tunnel1'), - ('device', 'vpp'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/3'), - ('device', 'vpp'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/1'), - ('device', 'vpp'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/0'), - ('device', 'vpp'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'VirtualEthernet0/0/2'), - ('device', 'vpp'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'vxlan_tunnel0'), - ('device', 'vpp'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]), - OrderedDict([ - ('interface', 'vni-4097'), - ('device', 'n9k'), - ('packet_count', 77), - ('packet_drop_count', 0), - ('packet_drop_percentage', 0.0) - ]) - ]} - } - assert result == expected_result -""" - - # ========================================================================= # Traffic client tests # ========================================================================= @@ -473,7 +297,7 @@ def test_parse_rate_str(): except Exception: return True else: - assert False + return False assert should_raise_error('101') assert should_raise_error('201%') @@ -500,6 +324,38 @@ def test_rate_conversion(): assert traffic_utils.pps_to_bps(31.6066319896, 1518) == pytest.approx(388888) assert 
traffic_utils.pps_to_bps(3225895.85831, 340.3) == pytest.approx(9298322222) +# pps at 10Gbps line rate for 64 byte frames +LR_64B_PPS = 14880952 +LR_1518B_PPS = 812743 + +def assert_equivalence(reference, value, allowance_pct=1): + '''Asserts if a value is equivalent to a reference value with given margin + + :param float reference: reference value to compare to + :param float value: value to compare to reference + :param float allowance_pct: max allowed percentage of margin + 0 : requires exact match + 1 : must be equal within 1% of the reference value + ... + 100: always true + ''' + if reference == 0: + assert value == 0 + else: + assert abs(value - reference) * 100 / reference <= allowance_pct + +def test_load_from_rate(): + assert traffic_utils.get_load_from_rate('100%') == 100 + assert_equivalence(100, traffic_utils.get_load_from_rate(str(LR_64B_PPS) + 'pps')) + assert_equivalence(50, traffic_utils.get_load_from_rate(str(LR_64B_PPS / 2) + 'pps')) + assert_equivalence(100, traffic_utils.get_load_from_rate('10Gbps')) + assert_equivalence(50, traffic_utils.get_load_from_rate('5000Mbps')) + assert_equivalence(1, traffic_utils.get_load_from_rate('100Mbps')) + assert_equivalence(100, traffic_utils.get_load_from_rate(str(LR_1518B_PPS) + 'pps', + avg_frame_size=1518)) + assert_equivalence(100, traffic_utils.get_load_from_rate(str(LR_1518B_PPS * 2) + 'pps', + avg_frame_size=1518, + line_rate='20Gbps')) """ @pytest.fixture @@ -513,112 +369,14 @@ def traffic_client(monkeypatch): 'rates': [{'rate_percent': '10'}, {'rate_pps': '1'}] } - self.config = AttrDict({ - 'generator_config': { - 'intf_speed': 10000000000 - }, - 'ndr_run': True, - 'pdr_run': True, - 'single_run': False, - 'attempts': 1, - 'measurement': { - 'NDR': 0.0, - 'PDR': 0.1, - 'load_epsilon': 0.1 - } - }) - - self.runner = AttrDict({ - 'time_elapsed': lambda: 30, - 'stop': lambda: None, - 'client': AttrDict({'get_stats': lambda: None}) - }) - - self.current_load = None - self.dummy_stats = { - 50.0: 72.6433562831, - 25.0: 45.6095059858, - 12.5: 0.0, - 18.75: 27.218642979, - 15.625: 12.68585861, - 14.0625: 2.47154392563, - 13.28125: 0.000663797066801, - 12.890625: 0.0, - 13.0859375: 0.0, - 13.18359375: 0.00359387347122, - 13.671875: 0.307939922531, - 13.4765625: 0.0207718516156, - 13.57421875: 0.0661795060969 - } - def mock_modify_load(self, load): self.run_config['rates'][0] = {'rate_percent': str(load)} self.current_load = load - def mock_run_traffic(self): - yield { - 'overall': { - 'drop_rate_percent': self.dummy_stats[self.current_load], - 'rx': { - 'total_pkts': 1, - 'avg_delay_usec': 0.0, - 'max_delay_usec': 0.0, - 'min_delay_usec': 0.0 - } - } - } - monkeypatch.setattr(TrafficClient, '__init__', mock_init) monkeypatch.setattr(TrafficClient, 'modify_load', mock_modify_load) - monkeypatch.setattr(TrafficClient, 'run_traffic', mock_run_traffic) return TrafficClient() - - -def test_ndr_pdr_search(traffic_client): - expected_results = { - 'pdr': { - 'l2frame_size': '64', - 'initial_rate_type': 'rate_percent', - 'stats': { - 'overall': { - 'drop_rate_percent': 0.0661795060969, - 'min_delay_usec': 0.0, - 'avg_delay_usec': 0.0, - 'max_delay_usec': 0.0 - } - }, - 'load_percent_per_direction': 13.57421875, - 'rate_percent': 13.57422547, - 'rate_bps': 1357422547.0, - 'rate_pps': 2019974.0282738095, - 'duration_sec': 30 - }, - 'ndr': { - 'l2frame_size': '64', - 'initial_rate_type': 'rate_percent', - 'stats': { - 'overall': { - 'drop_rate_percent': 0.0, - 'min_delay_usec': 0.0, - 'avg_delay_usec': 0.0, - 'max_delay_usec': 0.0 - } - }, - 
'load_percent_per_direction': 13.0859375, - 'rate_percent': 13.08594422, - 'rate_bps': 1308594422.0, - 'rate_pps': 1947313.1279761905, - 'duration_sec': 30 - } - } - - results = traffic_client.get_ndr_and_pdr() - assert len(results) == 2 - for result in results.values(): - result.pop('timestamp_sec') - result.pop('time_taken_sec') - assert results == expected_results """ @@ -631,7 +389,6 @@ def test_ndr_pdr_search(traffic_client): def setup_module(module): nfvbench.log.setup(mute_stdout=True) - def test_no_credentials(): cred = Credentials('/completely/wrong/path/openrc', None, False) if cred.rc_auth_url: @@ -667,7 +424,8 @@ except ImportError: # pylint: disable=wrong-import-position,ungrouped-imports from nfvbench.traffic_client import Device from nfvbench.traffic_client import IpBlock - +from nfvbench.traffic_client import TrafficClient +from nfvbench.traffic_client import TrafficGeneratorFactory # pylint: enable=wrong-import-position,ungrouped-imports @@ -828,3 +586,95 @@ def test_fluentd(): raise Exception("test") except Exception: logger.exception("got exception") + +def assert_ndr_pdr(stats, ndr, ndr_dr, pdr, pdr_dr): + assert stats['ndr']['rate_percent'] == ndr + assert stats['ndr']['stats']['overall']['drop_percentage'] == ndr_dr + assert_equivalence(pdr, stats['pdr']['rate_percent']) + assert_equivalence(pdr_dr, stats['pdr']['stats']['overall']['drop_percentage']) + +def get_traffic_client(): + config = AttrDict({ + 'traffic_generator': {'host_name': 'nfvbench_tg', + 'default_profile': 'dummy', + 'generator_profile': [{'name': 'dummy', + 'tool': 'dummy', + 'ip': '127.0.0.1', + 'intf_speed': '10Gbps', + 'interfaces': [{'port': 0, 'pci': 0}, + {'port': 1, 'pci': 0}]}], + 'ip_addrs_step': '0.0.0.1', + 'ip_addrs': ['10.0.0.0/8', '20.0.0.0/8'], + 'tg_gateway_ip_addrs': ['1.1.0.100', '2.2.0.100'], + 'tg_gateway_ip_addrs_step': '0.0.0.1', + 'gateway_ip_addrs': ['1.1.0.2', '2.2.0.2'], + 'gateway_ip_addrs_step': '0.0.0.1', + 'udp_src_port': None, + 'udp_dst_port': None}, + 'generator_profile': 'dummy', + 'service_chain': 'PVP', + 'service_chain_count': 1, + 'flow_count': 10, + 'vlan_tagging': True, + 'no_arp': False, + 'duration_sec': 1, + 'interval_sec': 1, + 'single_run': False, + 'ndr_run': True, + 'pdr_run': True, + 'rate': 'ndr_pdr', + 'check_traffic_time_sec': 200, + 'generic_poll_sec': 2, + 'measurement': {'NDR': 0.001, 'PDR': 0.1, 'load_epsilon': 0.1}, + }) + generator_factory = TrafficGeneratorFactory(config) + config.generator_config = generator_factory.get_generator_config(config.generator_profile) + traffic_client = TrafficClient(config, skip_sleep=True) + traffic_client.start_traffic_generator() + traffic_client.set_traffic('64', True) + return traffic_client + +def test_ndr_at_lr(): + traffic_client = get_traffic_client() + tg = traffic_client.gen + + # this is a perfect sut with no loss at LR + tg.set_response_curve(lr_dr=0, ndr=100, max_actual_tx=100, max_11_tx=100) + # tx packets should be line rate for 64B and no drops... 
+ assert tg.get_tx_pps_dropped_pps(100) == (LR_64B_PPS, 0) + # NDR and PDR should be at 100% + traffic_client.ensure_end_to_end() + results = traffic_client.get_ndr_and_pdr() + + assert_ndr_pdr(results, 200.0, 0.0, 200.0, 0.0) + +def test_ndr_at_50(): + traffic_client = get_traffic_client() + tg = traffic_client.gen + # this is a sut with an NDR of 50% and linear drop rate after NDR up to 20% drops at LR + # (meaning that if you send 100% TX, you will only receive 80% RX) + # the tg requested TX/actual TX ratio is 1 up to 50%, after 50% + # it is linear up to 80% actual TX when requesting 100% + tg.set_response_curve(lr_dr=20, ndr=50, max_actual_tx=80, max_11_tx=50) + # tx packets should be half line rate for 64B and no drops... + assert tg.get_tx_pps_dropped_pps(50) == (LR_64B_PPS / 2, 0) + # at 100% TX requested, actual TX is 80% where the drop rate is 3/5 of 20% of the actual TX + assert tg.get_tx_pps_dropped_pps(100) == (int(LR_64B_PPS * 0.8), + int(LR_64B_PPS * 0.8 * 0.6 * 0.2)) + results = traffic_client.get_ndr_and_pdr() + assert_ndr_pdr(results, 100.0, 0.0, 100.781, 0.09374) + +def test_ndr_pdr_low_cpu(): + traffic_client = get_traffic_client() + tg = traffic_client.gen + # This test is for the case where the TG is underpowered and cannot send fast enough for the NDR + # true NDR=40%, actual TX at 50% = 30%, actual measured DR is 0% + # The ndr/pdr should bail out with a warning and a best effort measured NDR of 30% + tg.set_response_curve(lr_dr=50, ndr=40, max_actual_tx=60, max_11_tx=0) + # tx packets should be 30% at requested half line rate for 64B and no drops... + assert tg.get_tx_pps_dropped_pps(50) == (int(LR_64B_PPS * 0.3), 0) + results = traffic_client.get_ndr_and_pdr() + assert results + # import pprint + # pp = pprint.PrettyPrinter(indent=4) + # pp.pprint(results) diff --git a/tox.ini b/tox.ini index 1dab8a7..5aa8997 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = py.test -q -s --basetemp={envtmpdir} {posargs} +commands = py.test -q --basetemp={envtmpdir} {posargs} [testenv:pep8] commands = flake8 {toxinidir} -- cgit 1.2.3-korg
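
A note on the convergence rules documented in ndrpdr.rst above: the sketch below shows one way they fit together, halving the search range until it is narrower than load_epsilon, measuring drops against the actual TX rate, and stopping early with a best-effort result when the generator cannot reach the requested load while already being within the drop threshold. It is only an illustration with made-up names (find_ndr, measure, Measurement); the real logic lives in nfvbench/traffic_client.py.

    # Minimal sketch of the NDR/PDR binary search rules from ndrpdr.rst.
    # Hypothetical names; not the nfvbench implementation.
    from collections import namedtuple

    # drop_pct is always measured relative to the actual TX rate, not the requested one
    Measurement = namedtuple('Measurement', ['requested_pct', 'actual_tx_pct', 'drop_pct'])

    def find_ndr(measure, threshold=0.0, load_epsilon=0.1):
        '''Return the highest load (in % of line rate) whose drop rate is <= threshold.

        measure: callable taking a requested load in % and returning a Measurement
        '''
        left, right = 0.0, 100.0
        best = None
        while right - left > load_epsilon:
            load = (left + right) / 2
            result = measure(load)
            if result.drop_pct <= threshold:
                best = result
                if result.actual_tx_pct < result.requested_pct:
                    # the traffic generator is the bottleneck: requesting more will not
                    # increase the offered load, so stop with a best-effort result
                    break
                left = load     # NDR/PDR is at or above this load
            else:
                right = load    # too many drops, search the lower half
        return best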
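
The DummyTG response curve is what makes these NDR/PDR unit tests deterministic. The following worked example reproduces, in plain Python, the arithmetic behind the values asserted in test_ndr_at_50; the constants are copied from the test, and the formulas mirror set_response_curve()/__get_dr_actual_tx() in traffic_gen/dummy.py rather than calling them.

    # Response curve from test_ndr_at_50: NDR at 50% of line rate, 20% drops at
    # line rate, actual TX saturating at 80% when 100% is requested.
    LR_64B_PPS = 14880952                 # 10Gbps line rate in pps for 64B frames

    lr_dr, ndr, max_actual_tx, max_11_tx = 20.0, 50.0, 80.0, 50.0
    dr_slope = lr_dr / (100 - ndr)                              # 0.4% drop per % of load above NDR
    tx_slope = (max_actual_tx - max_11_tx) / (100 - max_11_tx)  # 0.6

    requested_tx = 100.0
    actual_tx = max_11_tx + (requested_tx - max_11_tx) * tx_slope  # 80.0% of line rate
    drop_rate = (actual_tx - ndr) * dr_slope                       # 12.0%, i.e. 3/5 of the 20% LR drop rate

    tx_pps = int(LR_64B_PPS * actual_tx / 100)      # 11904761 pps actually transmitted
    dropped_pps = int(tx_pps * drop_rate / 100)     # 1428571 pps dropped
    print('%d pps sent, %d pps dropped' % (tx_pps, dropped_pps))
    # these are the two values asserted against get_tx_pps_dropped_pps(100) in the test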
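
The LR_64B_PPS and LR_1518B_PPS constants used by the tests, and the new get_load_from_rate() helper, assume the same wire-level accounting as pps_to_bps(): each Ethernet frame carries 20 extra bytes on the wire (preamble/SFD plus inter-frame gap). A quick sanity check of the 64-byte figure:

    line_rate_bps = 10e9                      # 10Gbps line rate
    wire_bits_per_64b_frame = (64 + 20) * 8   # 672 bits including preamble and inter-frame gap
    lr_64b_pps = line_rate_bps / wire_bits_per_64b_frame
    print(int(lr_64b_pps))                    # 14880952 == LR_64B_PPS in test_nfvbench.py

    # hence a request of '7440476pps' with 64B frames is ~50% of a 10Gbps line rate,
    # which is what get_load_from_rate(str(LR_64B_PPS / 2) + 'pps') is expected to return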