path: root/yardstick/network_services/traffic_profile/rfc2544.py
Diffstat (limited to 'yardstick/network_services/traffic_profile/rfc2544.py')
-rw-r--r--    yardstick/network_services/traffic_profile/rfc2544.py    398
1 file changed, 248 insertions(+), 150 deletions(-)
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 83020c85c..c24e2f65a 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -11,190 +11,288 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" RFC2544 Throughput implemenation """
-from __future__ import absolute_import
-from __future__ import division
import logging
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
+
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-from yardstick.network_services.traffic_profile.trex_traffic_profile \
- import TrexProfile
LOGGING = logging.getLogger(__name__)
+SRC_PORT = 'sport'
+DST_PORT = 'dport'
+
+
+class PortPgIDMap(object):
+ """Port and pg_id mapping class
+
+ "pg_id" is the identification STL library gives to each stream. In the
+ RFC2544Profile class, the traffic has a STLProfile per port, which contains
+ one or several streams, one per packet size defined in the IMIX test case
+ description.
+
+ Example of port <-> pg_id map:
+ self._port_pg_id_map = {
+ 0: [1, 2, 3, 4],
+ 1: [5, 6, 7, 8]
+ }
+ """
+
+ def __init__(self):
+ self._pg_id = 0
+ self._last_port = None
+ self._port_pg_id_map = {}
+
+ def add_port(self, port):
+ self._last_port = port
+ self._port_pg_id_map[port] = []
+
+ def get_pg_ids(self, port):
+ return self._port_pg_id_map.get(port)
+
+ def increase_pg_id(self, port=None):
+ port = self._last_port if not port else port
+ if port is None:
+ return
+ pg_id_list = self._port_pg_id_map.get(port)
+ if not pg_id_list:
+ self.add_port(port)
+ pg_id_list = self._port_pg_id_map[port]
+ self._pg_id += 1
+ pg_id_list.append(self._pg_id)
+ return self._pg_id
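
A minimal usage sketch of PortPgIDMap (illustrative only, not part of the change), reproducing the example map from the class docstring with four streams per port:

    port_pg_id = PortPgIDMap()
    for port in (0, 1):
        port_pg_id.add_port(port)
        for _ in range(4):
            # With no argument, increase_pg_id() targets the last added port.
            port_pg_id.increase_pg_id()
    port_pg_id.get_pg_ids(0)  # -> [1, 2, 3, 4]
    port_pg_id.get_pg_ids(1)  # -> [5, 6, 7, 8]
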
-class RFC2544Profile(TrexProfile):
- """ This class handles rfc2544 implemenation. """
+class RFC2544Profile(trex_traffic_profile.TrexProfile):
+ """TRex RFC2544 traffic profile"""
+
+ TOLERANCE_LIMIT = 0.05
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
self.generator = None
- self.max_rate = None
- self.min_rate = None
- self.ports = None
- self.rate = 100
- self.drop_percent_at_max_tx = None
- self.throughput_max = None
+ self.rate = self.config.frame_rate
+ self.max_rate = self.config.frame_rate
+ self.min_rate = 0
+ self.drop_percent_max = 0
def register_generator(self, generator):
self.generator = generator
- def execute_traffic(self, traffic_generator=None):
- """ Generate the stream and run traffic on the given ports """
+ def stop_traffic(self, traffic_generator=None):
+ """"Stop traffic injection, reset counters and remove streams"""
if traffic_generator is not None and self.generator is None:
self.generator = traffic_generator
- if self.ports is not None:
- return
+ self.generator.client.stop()
+ self.generator.client.reset()
+ self.generator.client.remove_all_streams()
+
+ def execute_traffic(self, traffic_generator=None):
+ """Generate the stream and run traffic on the given ports
+
+ :param traffic_generator: (TrexTrafficGenRFC) traffic generator
+ :return ports: (list of int) indexes of ports
+ port_pg_id: (PortPgIDMap) port indexes and pg_id [1] map
+ [1] https://trex-tgn.cisco.com/trex/doc/cp_stl_docs/api/
+ profile_code.html#stlstream-modes
+ """
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- self.ports = []
+ port_pg_id = PortPgIDMap()
+ ports = []
for vld_id, intfs in sorted(self.generator.networks.items()):
profile_data = self.params.get(vld_id)
- # no profile for this port
if not profile_data:
continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
continue
for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.max_rate = self.rate
- self.min_rate = 0
- self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
- duration=30, force=True)
- self.drop_percent_at_max_tx = 0
- self.throughput_max = 0
-
- def get_multiplier(self):
- """ Get the rate at which next iteration to run """
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def get_drop_percentage(self, generator=None):
- """ Calculate the drop percentage and run the traffic """
- if generator is None:
- generator = self.generator
- run_duration = self.generator.RUN_DURATION
- samples = self.generator.generate_samples(self.ports)
-
- in_packets = sum([value['in_packets'] for value in samples.values()])
- out_packets = sum([value['out_packets'] for value in samples.values()])
-
- packet_drop = abs(out_packets - in_packets)
- drop_percent = 100.0
- try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
- except ZeroDivisionError:
- LOGGING.info('No traffic is flowing')
+ port_num = int(self.generator.port_num(intf))
+ ports.append(port_num)
+ port_pg_id.add_port(port_num)
+ profile = self._create_profile(profile_data,
+ self.rate, port_pg_id)
+ self.generator.client.add_streams(profile, ports=[port_num])
+
+ self.generator.client.start(ports=ports,
+ duration=self.config.duration,
+ force=True)
+ return ports, port_pg_id
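
For context (illustrative only), the call pattern this profile expects is roughly the following; collect_samples() and the surrounding names are hypothetical stand-ins for the sample gathering done by the yardstick TRex traffic-gen helpers:

    # Hypothetical driver loop for the RFC 2544 binary search; the real
    # orchestration lives in the yardstick generic VNF traffic-gen code.
    profile = RFC2544Profile(tp_config)            # tp_config: assumed traffic-profile config
    profile.register_generator(trex_generator)     # trex_generator: assumed TRex client wrapper
    for _ in range(iterations):                    # assumed iteration budget
        ports, port_pg_id = profile.execute_traffic()
        samples = collect_samples(ports, port_pg_id)   # hypothetical helper
        output = profile.get_drop_percentage(
            samples, tol_low=0.0, tol_high=0.05, correlated_traffic=False)
        profile.stop_traffic()
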
+
+ def _create_profile(self, profile_data, rate, port_pg_id):
+ """Create a STL profile (list of streams) for a port"""
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ imix_data = self._create_imix_data(imix)
+ self._create_vm(profile_data[packet_name])
+ _streams = self._create_streams(imix_data, rate, port_pg_id)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _create_imix_data(self, imix):
+ """Generate the IMIX distribution for a STL profile
+
+ The input information is the framesize dictionary in a test case
+ traffic profile definition. E.g.:
+ downlink_0:
+ ipv4:
+ id: 2
+ outer_l2:
+ framesize:
+ 64B: 10
+ 128B: 20
+ ...
+
+ This function normalizes the sum of framesize weights to 100 and
+ returns a dictionary of frame sizes in bytes and weight in percentage.
+ E.g.:
+ imix_count = {64: 25, 128: 75}
+
+ :param imix: (dict) IMIX size and weight
+ """
+ imix_count = {}
+ if not imix:
+ return imix_count
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ imix_count = {64: 100}
+ imix_sum = 100
+
+ weight_normalize = float(imix_sum) / 100
+ return {size: float(weight) / weight_normalize
+ for size, weight in imix_count.items()}
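
A worked example of the normalization above (illustrative only): weights 10 and 30 sum to 40, so each weight is scaled by 100/40. Note the keys stay as strings here; they are cast to int later in _create_streams():

    imix = {'64B': 10, '128B': 30}
    # _create_imix_data(imix) returns:
    # {'64': 25.0, '128': 75.0}
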
+
+ def _create_vm(self, packet_definition):
+ """Create the STL Raw instructions"""
+ self.ether_packet = Pkt.Ether()
+ self.ip_packet = Pkt.IP()
+ self.ip6_packet = None
+ self.udp_packet = Pkt.UDP()
+ self.udp[DST_PORT] = 'UDP.dport'
+ self.udp[SRC_PORT] = 'UDP.sport'
+ self.qinq = False
+ self.vm_flow_vars = []
+ outer_l2 = packet_definition.get('outer_l2')
+ outer_l3v4 = packet_definition.get('outer_l3v4')
+ outer_l3v6 = packet_definition.get('outer_l3v6')
+ outer_l4 = packet_definition.get('outer_l4')
+ if outer_l2:
+ self._set_outer_l2_fields(outer_l2)
+ if outer_l3v4:
+ self._set_outer_l3v4_fields(outer_l3v4)
+ if outer_l3v6:
+ self._set_outer_l3v6_fields(outer_l3v6)
+ if outer_l4:
+ self._set_outer_l4_fields(outer_l4)
+ self.trex_vm = trex_stl_packet_builder_scapy.STLScVmRaw(
+ self.vm_flow_vars)
+
+ def _create_single_packet(self, size=64):
+ size -= 4
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ udp_packet = self.udp_packet
+ if self.qinq:
+ qinq_packet = self.qinq_packet
+ base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
+ else:
+ base_pkt = ether_packet / ip_packet / udp_packet
+ pad = max(0, size - len(base_pkt)) * 'x'
+ return trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / pad, vm=self.trex_vm)
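
A note on the size arithmetic above (illustrative; the 4 subtracted bytes are assumed to account for the Ethernet FCS appended on the wire):

    # For a requested 64-byte frame: Ether()/IP()/UDP() is 14 + 20 + 8 = 42 bytes,
    # so the pad is (64 - 4) - 42 = 18 'x' bytes; the 4-byte FCS added by the
    # hardware brings the on-wire frame back to 64 bytes.
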
+
+ def _create_streams(self, imix_data, rate, port_pg_id):
+ """Create a list of streams per packet size
+
+ The STL TX mode speed of the generated streams will depend on the frame
+ weight and the frame rate. Both the frame weight and the total frame
+ rate are normalized to 100. The STL TX mode speed, defined in
+ percentage, is the combination of both percentages. E.g.:
+ frame weight = 100
+ rate = 90
+ --> STLTXmode percentage = 90 (%)
+
+ frame weight = 80
+ rate = 50
+ --> STLTXmode percentage = 40 (%)
+
+ :param imix_data: (dict) IMIX size and weight
+ :param rate: (float) normalized [0..100] total weight
+ :param port_pg_id: (PortPgIDMap) port / pg_id (list) map
+ """
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ packet = self._create_single_packet(size)
+ pg_id = port_pg_id.increase_pg_id()
+ stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
+ streams.append(trex_stl_client.STLStream(
+ packet=packet, flow_stats=stl_flow, mode=mode))
+ return streams
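
A quick check of the per-stream speed computed above (illustrative only), using the IMIX example from _create_imix_data():

    # imix_data = {'64': 25.0, '128': 75.0}, rate = 50 (%)
    #   64B stream:  percentage = 25.0 * 50 / 100 = 12.5 (%)
    #   128B stream: percentage = 75.0 * 50 / 100 = 37.5 (%)
    # Together the port transmits at 50 % of line rate.
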
+
+ def get_drop_percentage(self, samples, tol_low, tol_high,
+ correlated_traffic):
+ """Calculate the drop percentage and run the traffic"""
+ tx_rate_fps = 0
+ rx_rate_fps = 0
+ for sample in samples:
+ tx_rate_fps += sum(
+ port['tx_throughput_fps'] for port in sample.values())
+ rx_rate_fps += sum(
+ port['rx_throughput_fps'] for port in sample.values())
+ tx_rate_fps = round(float(tx_rate_fps) / len(samples), 2)
+ rx_rate_fps = round(float(rx_rate_fps) / len(samples), 2)
# TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
- tolerance_low = generator.rfc2544_helper.tolerance_low
- tolerance_high = generator.rfc2544_helper.tolerance_high
-
- tx_rate = out_packets / run_duration
- rx_rate = in_packets / run_duration
-
- throughput_max = self.throughput_max
- drop_percent_at_max_tx = self.drop_percent_at_max_tx
+ out_packets = sum(port['out_packets'] for port in samples[-1].values())
+ in_packets = sum(port['in_packets'] for port in samples[-1].values())
+ drop_percent = 100.0
- if self.drop_percent_at_max_tx is None:
- self.rate = tx_rate
- self.first_run = False
+ # https://tools.ietf.org/html/rfc2544#section-26.3
+ if out_packets:
+ drop_percent = round(
+ (float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- if drop_percent > tolerance_high:
- # TODO(esm): why don't we discard results that are out of tolerance?
+ tol_high = tol_high if tol_high > self.TOLERANCE_LIMIT else self.TOLERANCE_LIMIT
+ tol_low = tol_low if tol_low > self.TOLERANCE_LIMIT else self.TOLERANCE_LIMIT
+ if drop_percent > tol_high:
self.max_rate = self.rate
- if throughput_max == 0:
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= tolerance_low:
- # TODO(esm): why do we update the samples dict in this case
- # and not update our tracking values?
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= self.drop_percent_at_max_tx:
- # TODO(esm): why don't we discard results that are out of tolerance?
+ elif drop_percent < tol_low:
self.min_rate = self.rate
- self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
- self.throughput_max = throughput_max = rx_rate
+ # else:
+ # NOTE(ralonsoh): the test should finish here
+ # pass
+ last_rate = self.rate
+ self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
- else:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
+ throughput = rx_rate_fps * 2 if correlated_traffic else rx_rate_fps
- generator.clear_client_stats(self.ports)
- generator.start_client(self.ports, mult=self.get_multiplier(),
- duration=run_duration, force=True)
+ if drop_percent > self.drop_percent_max:
+ self.drop_percent_max = drop_percent
- # if correlated traffic update the Throughput
- if generator.rfc2544_helper.correlated_traffic:
- throughput_max *= 2
+ latency = {port_num: value['latency']
+ for port_num, value in samples[-1].items()}
- samples.update({
- 'TxThroughput': tx_rate,
- 'RxThroughput': rx_rate,
+ output = {
+ 'TxThroughput': tx_rate_fps,
+ 'RxThroughput': rx_rate_fps,
'CurrentDropPercentage': drop_percent,
- 'Throughput': throughput_max,
- 'DropPercentage': drop_percent_at_max_tx,
- })
-
- return samples
-
- def execute_latency(self, generator=None, samples=None):
- if generator is not None and self.generator is None:
- self.generator = generator
-
- if samples is None:
- samples = self.generator.generate_samples()
-
- self.pps, multiplier = self.calculate_pps(samples)
- self.ports = []
- self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
- for vld_id, intfs in sorted(self.generator.networks.items()):
- profile_data = self.params.get(vld_id)
- if not profile_data:
- continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
- continue
- for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.generator.start_client(ports=self.ports, mult=str(multiplier),
- duration=120, force=True)
- self.first_run = False
-
- def calculate_pps(self, samples):
- pps = round(samples['Throughput'] / 2, 2)
- multiplier = round(self.rate / self.pps, 2)
- return pps, multiplier
-
- def create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if pps:
- stl_mode = STLTXCont(pps=pps)
- else:
- stl_mode = STLTXCont(pps=self.pps)
- if self.pg_id:
- LOGGING.debug("pg_id: %s", self.pg_id)
- stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
- flow_stats=stl_flow_stats)
- self.pg_id += 1
- else:
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
- return stream
+ 'Throughput': throughput,
+ 'DropPercentage': self.drop_percent_max,
+ 'Rate': last_rate,
+ 'Latency': latency
+ }
+ return output
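
For reference (illustrative only), the min_rate/max_rate updates above implement a plain binary search over the transmit rate across successive get_drop_percentage() calls; measure_drop() below is a hypothetical stand-in for one traffic run:

    # Hypothetical sketch of the convergence behaviour, not yardstick code.
    min_rate, max_rate, rate = 0.0, 100.0, 100.0
    tol_low = tol_high = 0.05          # the TOLERANCE_LIMIT floor
    for _ in range(10):
        drop = measure_drop(rate)      # hypothetical: % of frames dropped at this rate
        if drop > tol_high:
            max_rate = rate            # too much loss, search the lower half
        elif drop < tol_low:
            min_rate = rate            # within tolerance, search the upper half
        rate = round((max_rate + min_rate) / 2.0, 5)
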