path: root/yardstick/network_services/traffic_profile
Diffstat (limited to 'yardstick/network_services/traffic_profile')
-rw-r--r--  yardstick/network_services/traffic_profile/__init__.py             38
-rw-r--r--  yardstick/network_services/traffic_profile/base.py                 70
-rw-r--r--  yardstick/network_services/traffic_profile/http.py                  4
-rw-r--r--  yardstick/network_services/traffic_profile/http_ixload.py         171
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py        485
-rw-r--r--  yardstick/network_services/traffic_profile/landslide_profile.py    47
-rw-r--r--  yardstick/network_services/traffic_profile/pktgen.py               61
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py      135
-rw-r--r--  yardstick/network_services/traffic_profile/prox_irq.py             48
-rw-r--r--  yardstick/network_services/traffic_profile/prox_profile.py         25
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py             474
-rw-r--r--  yardstick/network_services/traffic_profile/sip.py                  32
-rw-r--r--  yardstick/network_services/traffic_profile/trex_traffic_profile.py (renamed from yardstick/network_services/traffic_profile/traffic_profile.py)  234
-rw-r--r--  yardstick/network_services/traffic_profile/vpp_rfc2544.py         339
14 files changed, 1688 insertions, 475 deletions
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py
index e69de29bb..85b3d54a0 100644
--- a/yardstick/network_services/traffic_profile/__init__.py
+++ b/yardstick/network_services/traffic_profile/__init__.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+
+
+def register_modules():
+ modules = [
+ 'yardstick.network_services.traffic_profile.trex_traffic_profile',
+ 'yardstick.network_services.traffic_profile.fixed',
+ 'yardstick.network_services.traffic_profile.http',
+ 'yardstick.network_services.traffic_profile.http_ixload',
+ 'yardstick.network_services.traffic_profile.ixia_rfc2544',
+ 'yardstick.network_services.traffic_profile.prox_ACL',
+ 'yardstick.network_services.traffic_profile.prox_irq',
+ 'yardstick.network_services.traffic_profile.prox_binsearch',
+ 'yardstick.network_services.traffic_profile.prox_profile',
+ 'yardstick.network_services.traffic_profile.prox_ramp',
+ 'yardstick.network_services.traffic_profile.rfc2544',
+ 'yardstick.network_services.traffic_profile.pktgen',
+ 'yardstick.network_services.traffic_profile.landslide_profile',
+ 'yardstick.network_services.traffic_profile.vpp_rfc2544',
+ 'yardstick.network_services.traffic_profile.sip',
+ ]
+
+ for module in modules:
+ importlib.import_module(module)
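
The new __init__.py exists so that importing the listed modules registers every TrafficProfile subclass before the factory in base.py looks one up by name. A minimal, self-contained sketch of that dynamic-import and subclass-discovery pattern (the class and module names below are illustrative, not yardstick's):

    import importlib

    def itersubclasses(cls):
        # Recursively yield every subclass of 'cls' that has been imported so far.
        for sub in cls.__subclasses__():
            yield sub
            for deeper in itersubclasses(sub):
                yield deeper

    class TrafficProfileBase(object):
        pass

    # In yardstick the subclasses live in the modules listed above; here one is
    # defined inline so the sketch runs on its own.
    class RFC2544LikeProfile(TrafficProfileBase):
        pass

    def get_profile_class(name, modules=()):
        # Importing each module is the "registration": it makes the subclasses
        # defined there visible to __subclasses__().
        for module in modules:
            importlib.import_module(module)
        return next(c for c in itersubclasses(TrafficProfileBase)
                    if c.__name__ == name)

    print(get_profile_class('RFC2544LikeProfile'))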
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index ad256b444..2fdf6ce4a 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -11,10 +11,61 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class for the generic traffic profile implementation """
-from __future__ import absolute_import
-from yardstick.common.utils import import_modules_from_package, itersubclasses
+import re
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+
+
+class TrafficProfileConfig(object):
+ """Class to contain the TrafficProfile class information
+
+ This object will parse and validate the traffic profile information.
+ """
+ DEFAULT_SCHEMA = 'nsb:traffic_profile:0.1'
+ DEFAULT_FRAME_RATE = '100'
+ DEFAULT_DURATION = 30
+ RATE_FPS = 'fps'
+ RATE_PERCENTAGE = '%'
+ RATE_REGEX = re.compile(r'([0-9]*\.[0-9]+|[0-9]+)\s*(fps|%)*(.*)')
+
+ def __init__(self, tp_config):
+ self.schema = tp_config.get('schema', self.DEFAULT_SCHEMA)
+ self.name = tp_config.get('name')
+ self.description = tp_config.get('description')
+ tprofile = tp_config['traffic_profile']
+ self.traffic_type = tprofile.get('traffic_type')
+ self.frame_rate, self.rate_unit = self.parse_rate(
+ tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE))
+ self.test_precision = tprofile.get('test_precision')
+ self.packet_sizes = tprofile.get('packet_sizes')
+ self.duration = tprofile.get('duration', self.DEFAULT_DURATION)
+ self.lower_bound = tprofile.get('lower_bound')
+ self.upper_bound = tprofile.get('upper_bound')
+ self.step_interval = tprofile.get('step_interval')
+ self.enable_latency = tprofile.get('enable_latency', False)
+
+ def parse_rate(self, rate):
+ """Parse traffic profile rate
+
+ The line rate can be defined in fps or percentage over the maximum line
+ rate:
+ - frame_rate = 5000 (by default, unit is 'fps')
+ - frame_rate = 5000fps
+ - frame_rate = 25%
+
+ :param rate: (string, int) line rate in fps or %
+ :return: (tuple: float, string) line rate number and unit
+ """
+ match = self.RATE_REGEX.match(str(rate))
+ if not match:
+ raise exceptions.TrafficProfileRate()
+ rate = float(match.group(1))
+ unit = match.group(2) if match.group(2) else self.RATE_FPS
+ if match.group(3):
+ raise exceptions.TrafficProfileRate()
+ return rate, unit
class TrafficProfile(object):
@@ -33,20 +84,23 @@ class TrafficProfile(object):
:return:
"""
profile_class = tp_config["traffic_profile"]["traffic_type"]
- import_modules_from_package(
- "yardstick.network_services.traffic_profile")
try:
- return next(c for c in itersubclasses(TrafficProfile)
+ return next(c for c in utils.itersubclasses(TrafficProfile)
if c.__name__ == profile_class)(tp_config)
except StopIteration:
- raise RuntimeError("No implementation for %s", profile_class)
+ raise exceptions.TrafficProfileNotImplemented(
+ profile_class=profile_class)
def __init__(self, tp_config):
# e.g. RFC2544 start_ip, stop_ip, drop_rate,
# IMIX = {"10K": 0.1, "100M": 0.5}
self.params = tp_config
+ self.config = TrafficProfileConfig(tp_config)
+
+ def is_ended(self):
+ return False
- def execute_traffic(self, traffic_generator):
+ def execute_traffic(self, traffic_generator, **kwargs):
""" This methods defines the behavior of the traffic generator.
It will be called in a loop until the traffic generator exits.
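
The parse_rate() method added in this hunk accepts a bare number (fps by default), an 'fps' suffix or a '%' suffix, and rejects anything else. A standalone illustration using the same RATE_REGEX (the helper name and ValueError are ours; yardstick raises TrafficProfileRate):

    import re

    RATE_REGEX = re.compile(r'([0-9]*\.[0-9]+|[0-9]+)\s*(fps|%)*(.*)')

    def parse_rate(rate, default_unit='fps'):
        # Returns (value, unit); any trailing text makes the rate invalid.
        match = RATE_REGEX.match(str(rate))
        if not match or match.group(3):
            raise ValueError('invalid rate: %r' % (rate,))
        return float(match.group(1)), match.group(2) or default_unit

    print(parse_rate('100'))      # (100.0, 'fps'), unit defaults to fps
    print(parse_rate('5000fps'))  # (5000.0, 'fps')
    print(parse_rate('25%'))      # (25.0, '%')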
diff --git a/yardstick/network_services/traffic_profile/http.py b/yardstick/network_services/traffic_profile/http.py
index 2d00fb849..31ab17ef7 100644
--- a/yardstick/network_services/traffic_profile/http.py
+++ b/yardstick/network_services/traffic_profile/http.py
@@ -24,6 +24,10 @@ class TrafficProfileGenericHTTP(TrafficProfile):
def __init__(self, TrafficProfile):
super(TrafficProfileGenericHTTP, self).__init__(TrafficProfile)
+ def get_links_param(self):
+ return {k: v for k, v in self.params.items() if
+ "uplink" in k or "downlink" in k}
+
def execute(self, traffic_generator):
''' send run traffic for a selected traffic generator'''
pass
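
The get_links_param() helper added above is just a key filter over the profile parameters; a standalone equivalent with made-up sample keys behaves like this:

    def get_links_param(params):
        # Keep only the per-link sections of the traffic profile.
        return {k: v for k, v in params.items()
                if 'uplink' in k or 'downlink' in k}

    sample = {
        'schema': 'nsb:traffic_profile:0.1',   # dropped
        'uplink_0': {'ip': {}},                # kept
        'downlink_0': {'ip': {}},              # kept
    }
    print(sorted(get_links_param(sample)))     # ['downlink_0', 'uplink_0']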
diff --git a/yardstick/network_services/traffic_profile/http_ixload.py b/yardstick/network_services/traffic_profile/http_ixload.py
index 348056551..ec0762500 100644
--- a/yardstick/network_services/traffic_profile/http_ixload.py
+++ b/yardstick/network_services/traffic_profile/http_ixload.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from __future__ import print_function
-
import sys
import os
import logging
@@ -38,6 +35,10 @@ class ErrorClass(object):
raise AttributeError
+class InvalidRxfFile(Exception):
+ message = 'Loaded rxf file has unexpected format'
+
+
try:
from IxLoad import IxLoad, StatCollectorUtils
except ImportError:
@@ -93,7 +94,7 @@ def validate_non_string_sequence(value, default=None, raise_exc=None):
if isinstance(value, collections.Sequence) and not isinstance(value, str):
return value
if raise_exc:
- raise raise_exc
+ raise raise_exc # pylint: disable=raising-bad-type
return default
@@ -117,8 +118,10 @@ class IXLOADHttpTest(object):
self.chassis = None
self.card = None
self.ports_to_reassign = None
+ self.links_param = None
self.test_input = jsonutils.loads(test_input)
self.parse_run_test()
+ self.test = None
@staticmethod
def format_ports_for_reassignment(ports):
@@ -182,6 +185,145 @@ class IXLOADHttpTest(object):
LOG.error('Error: IxLoad config file not found: %s', config_file)
raise
+ def update_network_address(self, net_traffic, address, gateway, prefix):
+ """Update ip address and gateway for net_traffic object
+
+ This function update field which configure source addresses for
+ traffic which is described by net_traffic object.
+ Do not return anything
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param address: (str) Ipv4 range start address
+ :param gateway: (str) Ipv4 address of gateway
+ :param prefix: (int) subnet prefix
+ :return:
+ """
+ try:
+ ethernet = net_traffic.network.getL1Plugin()
+ ix_net_l2_ethernet_plugin = ethernet.childrenList[0]
+ ix_net_ip_v4_v6_plugin = ix_net_l2_ethernet_plugin.childrenList[0]
+ ix_net_ip_v4_v6_range = ix_net_ip_v4_v6_plugin.rangeList[0]
+
+ ix_net_ip_v4_v6_range.config(
+ prefix=prefix,
+ ipAddress=address,
+ gatewayAddress=gateway)
+ except Exception:
+ raise InvalidRxfFile
+
+ def update_network_mac_address(self, net_traffic, mac):
+ """Update MACaddress for net_traffic object
+
+ This function update field which configure MACaddresses for
+ traffic which is described by net_traffic object.
+ If mac == "auto" then will be configured auto generated mac
+ Do not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param mac: (str) MAC
+ :return:
+ """
+ try:
+ ethernet = net_traffic.network.getL1Plugin()
+ ix_net_l2_ethernet_plugin = ethernet.childrenList[0]
+ ix_net_ip_v4_v6_plugin = ix_net_l2_ethernet_plugin.childrenList[0]
+ ix_net_ip_v4_v6_range = ix_net_ip_v4_v6_plugin.rangeList[0]
+
+ if str(mac).lower() == "auto":
+ ix_net_ip_v4_v6_range.config(autoMacGeneration=True)
+ else:
+ ix_net_ip_v4_v6_range.config(autoMacGeneration=False)
+ mac_range = ix_net_ip_v4_v6_range.getLowerRelatedRange(
+ "MacRange")
+ mac_range.config(mac=mac)
+ except Exception:
+ raise InvalidRxfFile
+
+ def update_network_param(self, net_traffic, param):
+ """Update net_traffic by parameters specified in param"""
+
+ self.update_network_address(net_traffic, param["address"],
+ param["gateway"], param["subnet_prefix"])
+
+ self.update_network_mac_address(net_traffic, param["mac"])
+
+ def update_config(self):
+ """Update some fields by parameters from traffic profile"""
+
+ net_traffics = {}
+ # self.test.communityList is an IxLoadObjectProxy to a tcl object
+ # which contains all net_traffic objects in the scenario.
+ # Each net_traffic item has a name in the format "activity_name@item_name"
+ try:
+ for item in self.test.communityList:
+ net_traffics[item.name.split('@')[1]] = item
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ for name, net_traffic in net_traffics.items():
+ try:
+ param = self.links_param[name]
+ except KeyError:
+ LOG.debug('There is no param for net_traffic %s', name)
+ continue
+
+ self.update_network_param(net_traffic, param["ip"])
+ if "uplink" in name:
+ self.update_http_client_param(net_traffic, param["http_client"])
+
+ def update_http_client_param(self, net_traffic, param):
+ """Update http client object in net_traffic
+
+ Updates the HTTP client object in net_traffic with the parameters
+ specified in param.
+ It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param param: (dict) http_client section from traffic profile
+ :return:
+ """
+ page = param.get("page_object")
+ if page:
+ self.update_page_size(net_traffic, page)
+ users = param.get("simulated_users")
+ if users:
+ self.update_user_count(net_traffic, users)
+
+ def update_page_size(self, net_traffic, page_object):
+ """Update page_object field in http client object in net_traffic
+
+ This function updates the field that configures the page_object
+ to be loaded from the server.
+ It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param page_object: (str) path to object on server e.g. "/4k.html"
+ :return:
+ """
+ try:
+ activity = net_traffic.activityList[0]
+ ix_http_command = activity.agent.actionList[0]
+ ix_http_command.config(pageObject=page_object)
+ except Exception:
+ raise InvalidRxfFile
+
+ def update_user_count(self, net_traffic, user_count):
+ """Update userObjectiveValue field in activity object in net_traffic
+
+ This function updates the field that configures the number of users
+ simulated by the client.
+ It does not return anything.
+
+ :param net_traffic: (IxLoadObjectProxy) proxy obj to tcl net_traffic object
+ :param user_count: (int) number of simulated users
+ :return:
+ """
+ try:
+ activity = net_traffic.activityList[0]
+ activity.config(userObjectiveValue=user_count)
+ except Exception:
+ raise InvalidRxfFile
+
def start_http_test(self):
self.ix_load = IxLoad()
@@ -208,17 +350,19 @@ class IXLOADHttpTest(object):
# Get the first test on the testList
test_name = repository.testList[0].cget("name")
- test = repository.testList.getItem(test_name)
+ self.test = repository.testList.getItem(test_name)
self.set_results_dir(test_controller, self.results_on_windows)
- test.config(statsRequired=1, enableResetPorts=1, csvInterval=2,
- enableForceOwnership=True)
+ self.test.config(statsRequired=1, enableResetPorts=1, csvInterval=2,
+ enableForceOwnership=True)
+
+ self.update_config()
# ---- Remap ports ----
try:
- self.reassign_ports(test, repository, self.ports_to_reassign)
- except Exception:
+ self.reassign_ports(self.test, repository, self.ports_to_reassign)
+ except Exception: # pylint: disable=broad-except
LOG.exception("Exception occurred during reassign_ports")
# -----------------------------------------------------------------------
@@ -257,7 +401,7 @@ class IXLOADHttpTest(object):
self.stat_utils.StartCollector(self.IxL_StatCollectorCommand)
- test_controller.run(test)
+ test_controller.run(self.test)
self.ix_load.waitForTestFinish()
test_controller.releaseConfigWaitFinish()
@@ -269,7 +413,7 @@ class IXLOADHttpTest(object):
test_controller.generateReport(detailedReport=1, format="PDF;HTML")
test_controller.releaseConfigWaitFinish()
- self.ix_load.delete(test)
+ self.ix_load.delete(self.test)
self.ix_load.delete(test_controller)
self.ix_load.delete(logger)
self.ix_load.delete(log_engine)
@@ -307,6 +451,9 @@ class IXLOADHttpTest(object):
LOG.debug("Ports to be reassigned: %s", self.ports_to_reassign)
+ self.links_param = self.test_input["links_param"]
+ LOG.debug("Links param to be applied: %s", self.links_param)
+
def main(args):
# Get the args from cmdline and parse and run the test
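
The links_param block parsed at the end of this file is not shown in the patch; judging only from the keys the new update_* methods read (param["ip"] with address/gateway/subnet_prefix/mac, and param["http_client"] with page_object/simulated_users), a plausible shape, with purely illustrative values, is:

    links_param = {
        'uplink_0': {                       # suffix matches the net_traffic name
            'ip': {
                'address': '10.0.0.2',      # IPv4 range start address
                'gateway': '10.0.0.1',
                'subnet_prefix': 24,
                'mac': 'auto',              # or an explicit MAC string
            },
            'http_client': {
                'page_object': '/4k.html',  # page fetched by the HTTP client
                'simulated_users': 64,      # mapped to userObjectiveValue
            },
        },
        'downlink_0': {
            'ip': {
                'address': '10.0.1.2',
                'gateway': '10.0.1.1',
                'subnet_prefix': 24,
                'mac': 'auto',
            },
        },
    }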
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 7881131a7..ca45b500d 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,83 +12,149 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import logging
+import collections
+
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-from yardstick.network_services.traffic_profile.traffic_profile import \
- TrexProfile
LOG = logging.getLogger(__name__)
-class IXIARFC2544Profile(TrexProfile):
+class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
UPLINK = 'uplink'
DOWNLINK = 'downlink'
+ DROP_PERCENT_ROUND = 6
+ STATUS_SUCCESS = "Success"
+ STATUS_FAIL = "Failure"
+
+ def __init__(self, yaml_data):
+ super(IXIARFC2544Profile, self).__init__(yaml_data)
+ self.rate = self.config.frame_rate
+ self.rate_unit = self.config.rate_unit
+ self.iteration = 0
+ self.full_profile = {}
+
+ def _get_ip_and_mask(self, ip_range):
+ _ip_range = ip_range.split('-')
+ if len(_ip_range) == 1:
+ return _ip_range[0], None
+
+ mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
+ return _ip_range[0], mask
+
+ def _get_fixed_and_mask(self, port_range):
+ _port_range = str(port_range).split('-')
+ if len(_port_range) == 1:
+ return int(_port_range[0]), 0
- def _get_ixia_traffic_profile(self, profile_data, mac=None, xfile=None, static_traffic=None):
- if mac is None:
- mac = {}
+ return int(_port_range[0]), int(_port_range[1])
+ def _get_ixia_traffic_profile(self, profile_data, mac=None):
+ mac = {} if mac is None else mac
result = {}
for traffickey, values in profile_data.items():
if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
continue
+ # values should be single-item dict, so just grab the first item
try:
- # values should be single-item dict, so just grab the first item
- try:
- key, value = next(iter(values.items()))
- except StopIteration:
- result[traffickey] = {}
- continue
-
- port_id = value.get('id', 1)
- port_index = port_id - 1
- try:
- ip = value['outer_l3v6']
- except KeyError:
- ip = value['outer_l3v4']
- src_key, dst_key = 'srcip4', 'dstip4'
- else:
- src_key, dst_key = 'srcip6', 'dstip6'
-
- result[traffickey] = {
- 'bidir': False,
- 'iload': '100',
- 'id': port_id,
- 'outer_l2': {
- 'framesize': value['outer_l2']['framesize'],
- 'framesPerSecond': True,
- 'srcmac': mac['src_mac_{}'.format(port_index)],
- 'dstmac': mac['dst_mac_{}'.format(port_index)],
- },
- 'outer_l3': {
- 'count': ip['count'],
- 'dscp': ip['dscp'],
- 'ttl': ip['ttl'],
- src_key: ip[src_key].split("-")[0],
- dst_key: ip[dst_key].split("-")[0],
- 'type': key,
- 'proto': ip['proto'],
- },
- 'outer_l4': value['outer_l4'],
- }
- except Exception:
+ key, value = next(iter(values.items()))
+ except StopIteration:
+ result[traffickey] = {}
continue
+ port_id = value.get('id', 1)
+ port_index = port_id - 1
+
+ result[traffickey] = {
+ 'bidir': False,
+ 'id': port_id,
+ 'rate': self.rate,
+ 'rate_unit': self.rate_unit,
+ 'outer_l2': {},
+ 'outer_l3': {},
+ 'outer_l4': {},
+ }
+
+ frame_rate = value.get('frame_rate')
+ if frame_rate:
+ flow_rate, flow_rate_unit = self.config.parse_rate(frame_rate)
+ result[traffickey]['rate'] = flow_rate
+ result[traffickey]['rate_unit'] = flow_rate_unit
+
+ outer_l2 = value.get('outer_l2')
+ if outer_l2:
+ result[traffickey]['outer_l2'].update({
+ 'framesize': outer_l2.get('framesize'),
+ 'framesPerSecond': True,
+ 'QinQ': outer_l2.get('QinQ'),
+ 'srcmac': mac.get('src_mac_{}'.format(port_index)),
+ 'dstmac': mac.get('dst_mac_{}'.format(port_index)),
+ })
+
+ if value.get('outer_l3v4'):
+ outer_l3 = value['outer_l3v4']
+ src_key, dst_key = 'srcip4', 'dstip4'
+ else:
+ outer_l3 = value.get('outer_l3v6')
+ src_key, dst_key = 'srcip6', 'dstip6'
+ if outer_l3:
+ srcip = srcmask = dstip = dstmask = None
+ if outer_l3.get(src_key):
+ srcip, srcmask = self._get_ip_and_mask(outer_l3[src_key])
+ if outer_l3.get(dst_key):
+ dstip, dstmask = self._get_ip_and_mask(outer_l3[dst_key])
+
+ result[traffickey]['outer_l3'].update({
+ 'count': outer_l3.get('count', 1),
+ 'dscp': outer_l3.get('dscp'),
+ 'ttl': outer_l3.get('ttl'),
+ 'srcseed': outer_l3.get('srcseed', 1),
+ 'dstseed': outer_l3.get('dstseed', 1),
+ 'srcip': srcip,
+ 'dstip': dstip,
+ 'srcmask': srcmask,
+ 'dstmask': dstmask,
+ 'type': key,
+ 'proto': outer_l3.get('proto'),
+ 'priority': outer_l3.get('priority')
+ })
+
+ outer_l4 = value.get('outer_l4')
+ if outer_l4:
+ src_port = src_port_mask = dst_port = dst_port_mask = None
+ if outer_l4.get('srcport'):
+ src_port, src_port_mask = (
+ self._get_fixed_and_mask(outer_l4['srcport']))
+
+ if outer_l4.get('dstport'):
+ dst_port, dst_port_mask = (
+ self._get_fixed_and_mask(outer_l4['dstport']))
+
+ result[traffickey]['outer_l4'].update({
+ 'srcport': src_port,
+ 'dstport': dst_port,
+ 'srcportmask': src_port_mask,
+ 'dstportmask': dst_port_mask,
+ 'count': outer_l4.get('count', 1),
+ 'seed': outer_l4.get('seed', 1),
+ })
+
return result
- def _ixia_traffic_generate(self, traffic_generator, traffic, ixia_obj):
- for key, value in traffic.items():
- if key.startswith((self.UPLINK, self.DOWNLINK)):
- value["iload"] = str(self.rate)
- ixia_obj.ix_update_frame(traffic)
- ixia_obj.ix_update_ether(traffic)
- ixia_obj.add_ip_header(traffic, 4)
- ixia_obj.ix_start_traffic()
- self.tmp_drop = 0
- self.tmp_throughput = 0
+ def _ixia_traffic_generate(self, traffic, ixia_obj, traffic_gen):
+ ixia_obj.update_frame(traffic, self.config.duration)
+ ixia_obj.update_ip_packet(traffic)
+ ixia_obj.update_l4(traffic)
+ self._update_traffic_tracking_options(traffic_gen)
+ ixia_obj.start_traffic()
+
+ def _update_traffic_tracking_options(self, traffic_gen):
+ traffic_gen.update_tracking_options()
def update_traffic_profile(self, traffic_generator):
def port_generator():
@@ -99,86 +165,261 @@ class IXIARFC2544Profile(TrexProfile):
if not profile_data:
continue
self.profile_data = profile_data
- self.get_streams(self.profile_data)
self.full_profile.update({vld_id: self.profile_data})
for intf in intfs:
yield traffic_generator.vnfd_helper.port_num(intf)
self.ports = [port for port in port_generator()]
- def execute_traffic(self, traffic_generator, ixia_obj, mac=None, xfile=None):
- if mac is None:
- mac = {}
+ def execute_traffic(self, traffic_generator, ixia_obj=None, mac=None):
+ mac = {} if mac is None else mac
+ first_run = self.first_run
if self.first_run:
- self.full_profile = {}
+ self.first_run = False
self.pg_id = 0
- self.update_traffic_profile(traffic_generator)
- traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
self.max_rate = self.rate
- self.min_rate = 0
- self.get_multiplier()
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
-
- def get_multiplier(self):
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def start_ixia_latency(self, traffic_generator, ixia_obj,
- mac=None, xfile=None):
- if mac is None:
- mac = {}
- self.update_traffic_profile(traffic_generator)
- traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
-
- def get_drop_percentage(self, traffic_generator, samples, tol_min,
- tolerance, ixia_obj, mac=None, xfile=None):
- if mac is None:
- mac = {}
- status = 'Running'
+ self.min_rate = 0.0
+ else:
+ self.rate = self._get_next_rate()
+
+ self.iteration = traffic_generator.rfc_helper.iteration.value
+ traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
+ self._ixia_traffic_generate(traffic, ixia_obj, traffic_generator)
+ return first_run
+
+ # pylint: disable=unused-argument
+ def get_drop_percentage(self, samples, tol_min, tolerance, precision,
+ resolution, first_run=False, tc_rfc2544_opts=None):
+ completed = False
+ drop_percent = 100.0
+ num_ifaces = len(samples)
+ duration = self.config.duration
+ in_packets_sum = sum(
+ [samples[iface]['InPackets'] for iface in samples])
+ out_packets_sum = sum(
+ [samples[iface]['OutPackets'] for iface in samples])
+ in_bytes_sum = sum(
+ [samples[iface]['InBytes'] for iface in samples])
+ out_bytes_sum = sum(
+ [samples[iface]['OutBytes'] for iface in samples])
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
+ # Rx throughput in Bps
+ rx_throughput_bps = round(float(in_bytes_sum) / duration, 3)
+ # Tx throughput in Bps
+ tx_throughput_bps = round(float(out_bytes_sum) / duration, 3)
+ packet_drop = abs(out_packets_sum - in_packets_sum)
+
+ try:
+ drop_percent = round(
+ (packet_drop / float(out_packets_sum)) * 100,
+ self.DROP_PERCENT_ROUND)
+ except ZeroDivisionError:
+ LOG.info('No traffic is flowing')
+
+ if first_run:
+ completed = True if drop_percent <= tolerance else False
+ if (first_run and
+ self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
+ self.rate = float(out_packets_sum) / duration / num_ifaces
+
+ if drop_percent > tolerance:
+ self.max_rate = self.rate
+ elif drop_percent < tol_min:
+ self.min_rate = self.rate
+ else:
+ completed = True
+
+ last_rate = self.rate
+ next_rate = self._get_next_rate()
+ if abs(next_rate - self.rate) < resolution:
+ LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate,
+ next_rate, resolution)
+ # stop test if the difference between the rate transmission
+ # in two iterations is smaller than the value of the resolution
+ completed = True
+
+ LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
+ "completed=%s", tolerance, precision, drop_percent,
+ completed)
+
+ latency_ns_avg = float(sum(
+ [samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces
+ latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples])
+ latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples])
+
+ samples['Status'] = self.STATUS_FAIL
+ if round(drop_percent, precision) <= tolerance:
+ samples['Status'] = self.STATUS_SUCCESS
+
+ samples['TxThroughput'] = tx_throughput
+ samples['RxThroughput'] = rx_throughput
+ samples['TxThroughputBps'] = tx_throughput_bps
+ samples['RxThroughputBps'] = rx_throughput_bps
+ samples['DropPercentage'] = drop_percent
+ samples['LatencyAvg'] = latency_ns_avg
+ samples['LatencyMin'] = latency_ns_min
+ samples['LatencyMax'] = latency_ns_max
+ samples['Rate'] = last_rate
+ samples['PktSize'] = self._get_framesize()
+ samples['Iteration'] = self.iteration
+
+ return completed, samples
+
+
+class IXIARFC2544PppoeScenarioProfile(IXIARFC2544Profile):
+ """Class handles BNG PPPoE scenario tests traffic profile"""
+
+ def __init__(self, yaml_data):
+ super(IXIARFC2544PppoeScenarioProfile, self).__init__(yaml_data)
+ self.full_profile = collections.OrderedDict()
+
+ def _get_flow_groups_params(self):
+ flows_data = [key for key in self.params.keys()
+ if key.split('_')[0] in [self.UPLINK, self.DOWNLINK]]
+ for i in range(len(flows_data)):
+ uplink = '_'.join([self.UPLINK, str(i)])
+ downlink = '_'.join([self.DOWNLINK, str(i)])
+ if uplink in flows_data:
+ self.full_profile.update({uplink: self.params[uplink]})
+ if downlink in flows_data:
+ self.full_profile.update({downlink: self.params[downlink]})
+
+ def update_traffic_profile(self, traffic_generator):
+
+ networks = collections.OrderedDict()
+
+ # Sort network interfaces pairs
+ for i in range(len(traffic_generator.networks)):
+ uplink = '_'.join([self.UPLINK, str(i)])
+ downlink = '_'.join([self.DOWNLINK, str(i)])
+ if uplink in traffic_generator.networks:
+ networks[uplink] = traffic_generator.networks[uplink]
+ if downlink in traffic_generator.networks:
+ networks[downlink] = traffic_generator.networks[downlink]
+
+ def port_generator():
+ for intfs in networks.values():
+ for intf in intfs:
+ yield traffic_generator.vnfd_helper.port_num(intf)
+
+ self._get_flow_groups_params()
+ self.ports = [port for port in port_generator()]
+
+ def _get_prio_flows_drop_percentage(self, stats):
drop_percent = 100
- in_packets = sum([samples[iface]['in_packets'] for iface in samples])
- out_packets = sum([samples[iface]['out_packets'] for iface in samples])
- rx_throughput = \
- sum([samples[iface]['RxThroughput'] for iface in samples])
- tx_throughput = \
- sum([samples[iface]['TxThroughput'] for iface in samples])
- packet_drop = abs(out_packets - in_packets)
+ for prio_id in stats:
+ prio_flow = stats[prio_id]
+ sum_packet_drop = abs(prio_flow['OutPackets'] - prio_flow['InPackets'])
+ try:
+ drop_percent = round(
+ (sum_packet_drop / float(prio_flow['OutPackets'])) * 100,
+ self.DROP_PERCENT_ROUND)
+ except ZeroDivisionError:
+ LOG.info('No traffic is flowing')
+ prio_flow['DropPercentage'] = drop_percent
+ return stats
+
+ def _get_summary_pppoe_subs_counters(self, samples):
+ result = {}
+ keys = ['SessionsUp',
+ 'SessionsDown',
+ 'SessionsNotStarted',
+ 'SessionsTotal']
+ for key in keys:
+ result[key] = \
+ sum([samples[port][key] for port in samples
+ if key in samples[port]])
+ return result
+
+ def get_drop_percentage(self, samples, tol_min, tolerance, precision,
+ resolution, first_run=False, tc_rfc2544_opts=None):
+ completed = False
+ sum_drop_percent = 100
+ num_ifaces = len(samples)
+ duration = self.config.duration
+ last_rate = self.rate
+ priority_stats = samples.pop('priority_stats')
+ priority_stats = self._get_prio_flows_drop_percentage(priority_stats)
+ summary_subs_stats = self._get_summary_pppoe_subs_counters(samples)
+ in_packets_sum = sum(
+ [samples[iface]['InPackets'] for iface in samples])
+ out_packets_sum = sum(
+ [samples[iface]['OutPackets'] for iface in samples])
+ in_bytes_sum = sum(
+ [samples[iface]['InBytes'] for iface in samples])
+ out_bytes_sum = sum(
+ [samples[iface]['OutBytes'] for iface in samples])
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
+ # Rx throughput in Bps
+ rx_throughput_bps = round(float(in_bytes_sum) / duration, 3)
+ # Tx throughput in Bps
+ tx_throughput_bps = round(float(out_bytes_sum) / duration, 3)
+ sum_packet_drop = abs(out_packets_sum - in_packets_sum)
+
try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 2)
+ sum_drop_percent = round(
+ (sum_packet_drop / float(out_packets_sum)) * 100,
+ self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
- samples['TxThroughput'] = round(tx_throughput / 1.0, 2)
- samples['RxThroughput'] = round(rx_throughput / 1.0, 2)
- samples['CurrentDropPercentage'] = drop_percent
- samples['Throughput'] = self.tmp_throughput
- samples['DropPercentage'] = self.tmp_drop
- if drop_percent > tolerance and self.tmp_throughput == 0:
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
- if self.first_run:
- max_supported_rate = out_packets / 30.0
- self.rate = max_supported_rate
- self.first_run = False
- if drop_percent <= tolerance:
- status = 'Completed'
+
+ latency_ns_avg = float(sum(
+ [samples[iface]['LatencyAvg'] for iface in samples])) / num_ifaces
+ latency_ns_min = min([samples[iface]['LatencyMin'] for iface in samples])
+ latency_ns_max = max([samples[iface]['LatencyMax'] for iface in samples])
+
+ samples['TxThroughput'] = tx_throughput
+ samples['RxThroughput'] = rx_throughput
+ samples['TxThroughputBps'] = tx_throughput_bps
+ samples['RxThroughputBps'] = rx_throughput_bps
+ samples['DropPercentage'] = sum_drop_percent
+ samples['LatencyAvg'] = latency_ns_avg
+ samples['LatencyMin'] = latency_ns_min
+ samples['LatencyMax'] = latency_ns_max
+ samples['Priority'] = priority_stats
+ samples['Rate'] = last_rate
+ samples['PktSize'] = self._get_framesize()
+ samples['Iteration'] = self.iteration
+ samples.update(summary_subs_stats)
+
+ if tc_rfc2544_opts:
+ priority = tc_rfc2544_opts.get('priority')
+ if priority:
+ drop_percent = samples['Priority'][priority]['DropPercentage']
+ else:
+ drop_percent = sum_drop_percent
+ else:
+ drop_percent = sum_drop_percent
+
+ if first_run:
+ completed = True if drop_percent <= tolerance else False
+ if (first_run and
+ self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
+ self.rate = float(out_packets_sum) / duration / num_ifaces
+
if drop_percent > tolerance:
self.max_rate = self.rate
elif drop_percent < tol_min:
self.min_rate = self.rate
- if drop_percent >= self.tmp_drop:
- self.tmp_drop = drop_percent
- self.tmp_throughput = round((rx_throughput / 1.0), 2)
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
else:
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
- return status, samples
- self.get_multiplier()
- traffic = self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
- return status, samples
+ completed = True
+
+ next_rate = self._get_next_rate()
+ if abs(next_rate - self.rate) < resolution:
+ LOG.debug("rate=%s, next_rate=%s, resolution=%s", self.rate,
+ next_rate, resolution)
+ # stop test if the difference between the rate transmission
+ # in two iterations is smaller than the value of the resolution
+ completed = True
+
+ LOG.debug("tolerance=%s, tolerance_precision=%s drop_percent=%s "
+ "completed=%s", tolerance, precision, drop_percent,
+ completed)
+
+ samples['Status'] = self.STATUS_FAIL
+ if round(drop_percent, precision) <= tolerance:
+ samples['Status'] = self.STATUS_SUCCESS
+
+ return completed, samples
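
The reworked get_drop_percentage() drives a plain RFC 2544 binary search over the transmission rate: the upper bound drops when loss exceeds the tolerance, the lower bound rises when loss falls below the window, and the test stops once the next midpoint moves less than the resolution. A compact sketch of that loop, assuming _get_next_rate() returns the midpoint of the current bounds (per-interface statistics and the first_run handling are omitted):

    def binary_search_rate(measure_drop, max_rate=100.0, min_rate=0.0,
                           tolerance=0.01, tol_min=0.001, resolution=0.1):
        """measure_drop(rate) returns the drop percentage observed at 'rate'."""
        rate = max_rate
        while True:
            drop = measure_drop(rate)
            if drop > tolerance:        # too much loss: lower the upper bound
                max_rate = rate
            elif drop < tol_min:        # loss below the window: raise the lower bound
                min_rate = rate
            else:                       # loss inside [tol_min, tolerance]: done
                return rate
            next_rate = (max_rate + min_rate) / 2.0
            if abs(next_rate - rate) < resolution:
                return rate             # bounds converged within 'resolution'
            rate = next_rate

    # Toy device that starts dropping traffic linearly above 60% of line rate:
    print(round(binary_search_rate(lambda r: max(0.0, r - 60.0)), 2))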
diff --git a/yardstick/network_services/traffic_profile/landslide_profile.py b/yardstick/network_services/traffic_profile/landslide_profile.py
new file mode 100644
index 000000000..f79226fb4
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/landslide_profile.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Spirent Landslide traffic profile definitions """
+
+from yardstick.network_services.traffic_profile import base
+
+
+class LandslideProfile(base.TrafficProfile):
+ """
+ This traffic profile handles attributes of Landslide data stream
+ """
+
+ def __init__(self, tp_config):
+ super(LandslideProfile, self).__init__(tp_config)
+
+ # for backward compatibility support dict and list of dicts
+ if isinstance(tp_config["dmf_config"], dict):
+ self.dmf_config = [tp_config["dmf_config"]]
+ else:
+ self.dmf_config = tp_config["dmf_config"]
+
+ def execute(self, traffic_generator):
+ pass
+
+ def update_dmf(self, options):
+ if 'dmf' in options:
+ if isinstance(options['dmf'], dict):
+ _dmfs = [options['dmf']]
+ else:
+ _dmfs = options['dmf']
+
+ for index, _dmf in enumerate(_dmfs):
+ try:
+ self.dmf_config[index].update(_dmf)
+ except IndexError:
+ pass
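
update_dmf() above merges runtime overrides into the stored DMF configuration and, like the constructor, accepts either a single dict or a list of dicts for backward compatibility. A standalone sketch of that merge (the DMF option names are illustrative):

    def normalize_dmf_config(dmf_config):
        # The constructor accepts either a single dict or a list of dicts.
        return [dmf_config] if isinstance(dmf_config, dict) else dmf_config

    def update_dmf(dmf_config, options):
        # Merge per-index 'dmf' overrides into the stored DMF configuration;
        # overrides without a matching index are silently ignored.
        if 'dmf' not in options:
            return dmf_config
        dmfs = options['dmf']
        dmfs = [dmfs] if isinstance(dmfs, dict) else dmfs
        for index, dmf in enumerate(dmfs):
            try:
                dmf_config[index].update(dmf)
            except IndexError:
                pass
        return dmf_config

    config = normalize_dmf_config({'transactionRate': 100})
    print(update_dmf(config, {'dmf': {'transactionRate': 500, 'packetSize': 512}}))
    # [{'transactionRate': 500, 'packetSize': 512}]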
diff --git a/yardstick/network_services/traffic_profile/pktgen.py b/yardstick/network_services/traffic_profile/pktgen.py
new file mode 100644
index 000000000..30f81b794
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/pktgen.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+class PktgenTrafficProfile(tp_base.TrafficProfile):
+ """This class handles Pktgen Trex Traffic profile execution"""
+
+ def __init__(self, tp_config): # pragma: no cover
+ super(PktgenTrafficProfile, self).__init__(tp_config)
+ self._host = None
+ self._port = None
+
+ def init(self, host, port): # pragma: no cover
+ """Initialize control parameters
+
+ :param host: (str) ip or host name
+ :param port: (int) TCP socket port number for Lua commands
+ """
+ self._host = host
+ self._port = port
+
+ def start(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.start("0")') != 0:
+ raise exceptions.PktgenActionError(action='start')
+
+ def stop(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.stop("0")') != 0:
+ raise exceptions.PktgenActionError(action='stop')
+
+ def rate(self, rate):
+ command = 'pktgen.set("0", "rate", ' + str(rate) + ')'
+ if utils.send_socket_command(self._host, self._port, command) != 0:
+ raise exceptions.PktgenActionError(action='rate')
+
+ def clear_all_stats(self):
+ if utils.send_socket_command(self._host, self._port, 'clr') != 0:
+ raise exceptions.PktgenActionError(action='clear all stats')
+
+ def help(self):
+ if utils.send_socket_command(self._host, self._port, 'help') != 0:
+ raise exceptions.PktgenActionError(action='help')
+
+ def execute_traffic(self, *args, **kwargs): # pragma: no cover
+ pass
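
The pktgen profile controls DPDK pktgen purely through short Lua command strings sent over a TCP socket via utils.send_socket_command(host, port, command). A minimal stand-in with the same calling shape, shown only to illustrate the command strings used above (host and port are placeholders, and this is not yardstick's implementation):

    import socket

    def send_socket_command(host, port, command):
        """Send one Lua command line to pktgen's control socket; 0 means success."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        try:
            sock.connect((host, port))
            sock.sendall((command + '\r\n').encode('utf-8'))
            return 0
        except socket.error:
            return 1
        finally:
            sock.close()

    # Same command strings as the profile above; the host and port are placeholders.
    HOST, PORT = 'pktgen-host', 22022
    send_socket_command(HOST, PORT, 'clr')                      # clear all stats
    send_socket_command(HOST, PORT, 'pktgen.set("0", "rate", 50)')
    send_socket_command(HOST, PORT, 'pktgen.start("0")')
    send_socket_command(HOST, PORT, 'pktgen.stop("0")')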
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 1fd6ec41a..402bf741c 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -16,11 +16,23 @@
from __future__ import absolute_import
import logging
+import datetime
+import time
from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+from yardstick.network_services import constants
+from yardstick.common import constants as overall_constants
LOG = logging.getLogger(__name__)
+STATUS_SUCCESS = "Success"
+STATUS_FAIL = "Failure"
+STATUS_RESULT = "Result"
+STEP_CONFIRM = "Confirm retry"
+STEP_INCREASE_LOWER = "Increase lower"
+STEP_DECREASE_LOWER = "Decrease lower"
+STEP_DECREASE_UPPER = "Decrease upper"
+
class ProxBinSearchProfile(ProxProfile):
"""
@@ -54,6 +66,9 @@ class ProxBinSearchProfile(ProxProfile):
yield test_value
test_value = self.mid_point
+ def is_ended(self):
+ return self.done.is_set()
+
def run_test_with_pkt_size(self, traffic_gen, pkt_size, duration):
"""Run the test for a single packet size.
@@ -81,19 +96,111 @@ class ProxBinSearchProfile(ProxProfile):
# success, the binary search will complete on an integer multiple
# of the precision, rather than on a fraction of it.
+ theor_max_thruput = 0.0
+
+ result_samples = {}
+
+ test_data = {
+ "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
+ "test_precision": self.params["traffic_profile"]["test_precision"],
+ "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
+ "duration": duration
+ }
+ self.prev_time = time.time()
+
# throughput and packet loss from the most recent successful test
successful_pkt_loss = 0.0
- for test_value in self.bounds_iterator(LOG):
- result, port_samples = self._profile_helper.run_test(pkt_size, duration,
- test_value, self.tolerated_loss)
-
- if result.success:
- LOG.debug("Success! Increasing lower bound")
- self.current_lower = test_value
- successful_pkt_loss = result.pkt_loss
- else:
- LOG.debug("Failure... Decreasing upper bound")
- self.current_upper = test_value
-
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- self.queue.put(samples)
+ line_speed = traffic_gen.scenario_helper.all_options.get(
+ "interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
+
+ ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
+ for step_id, test_value in enumerate(self.bounds_iterator(LOG)):
+ pos_retry = 0
+ neg_retry = 0
+ total_retry = 0
+
+ LOG.info("Checking MAX %s MIN %s TEST %s", self.current_upper,
+ self.lower_bound, test_value)
+
+ while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
+
+ total_retry = total_retry + 1
+
+ result, port_samples = self._profile_helper.run_test(pkt_size, duration,
+ test_value,
+ self.tolerated_loss,
+ line_speed)
+
+ if (total_retry > (ok_retry * 3)) and (ok_retry != 0):
+ status = STATUS_FAIL
+ next_step = STEP_DECREASE_LOWER
+ successful_pkt_loss = result.pkt_loss
+ self.current_upper = test_value
+ neg_retry = total_retry
+ elif result.success:
+ if (pos_retry < ok_retry) and (ok_retry != 0):
+ status = STATUS_SUCCESS
+ next_step = STEP_CONFIRM
+ successful_pkt_loss = result.pkt_loss
+ neg_retry = 0
+ else:
+ status = STATUS_SUCCESS
+ next_step = STEP_INCREASE_LOWER
+ self.current_lower = test_value
+ successful_pkt_loss = result.pkt_loss
+
+ pos_retry = pos_retry + 1
+
+ else:
+ if (neg_retry < ok_retry) and (ok_retry != 0):
+ status = STATUS_FAIL
+ next_step = STEP_CONFIRM
+ pos_retry = 0
+ else:
+ status = STATUS_FAIL
+ next_step = STEP_DECREASE_UPPER
+ self.current_upper = test_value
+
+ neg_retry = neg_retry + 1
+
+ LOG.info(
+ "Status = '%s' Next_Step = '%s'", status, next_step)
+
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ if theor_max_thruput < samples["RequestedTxThroughput"]:
+ theor_max_thruput = samples['RequestedTxThroughput']
+ samples['theor_max_throughput'] = theor_max_thruput
+
+ samples["rx_total"] = int(result.rx_total)
+ samples["tx_total"] = int(result.tx_total)
+ samples["can_be_lost"] = int(result.can_be_lost)
+ samples["drop_total"] = int(result.drop_total)
+ samples["RxThroughput_gbps"] = \
+ (samples["RxThroughput"] / 1000) * ((pkt_size + 20) * 8)
+ samples['Status'] = status
+ samples['Next_Step'] = next_step
+ samples["MAX_Rate"] = self.current_upper
+ samples["MIN_Rate"] = self.current_lower
+ samples["Test_Rate"] = test_value
+ samples["Step_Id"] = step_id
+ samples["Confirmation_Retry"] = total_retry
+
+ samples.update(test_data)
+
+ if status == STATUS_SUCCESS and next_step == STEP_INCREASE_LOWER:
+ # Store success samples for result samples
+ result_samples = samples
+
+ LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+
+ self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
+
+ LOG.info(
+ ">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
+ pkt_size, theor_max_thruput, result_samples.get("RxThroughput", 0.0))
+ result_samples["Status"] = STATUS_RESULT
+ result_samples["Next_Step"] = ""
+ result_samples["Actual_throughput"] = result_samples.get("RxThroughput", 0.0)
+ result_samples["theor_max_throughput"] = theor_max_thruput
+ self.queue.put(result_samples)
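
The confirmation-retry handling added to run_test_with_pkt_size() is dense; stripped of the PROX specifics, the per-rate decision it encodes is roughly the following sketch, assuming ok_retry is the runner's "confirmation" count and run_once() returns True when the rate passes at the tolerated loss:

    def classify(run_once, ok_retry):
        """Roughly the retry decision in run_test_with_pkt_size() above.

        ok_retry == 0 disables confirmation and keeps the first result.
        """
        pos_retry = neg_retry = total_retry = 0
        while pos_retry <= ok_retry and neg_retry <= ok_retry:
            total_retry += 1
            success = run_once()
            if ok_retry and total_retry > ok_retry * 3:
                return 'Failure'         # flapping results: give up on this rate
            if success:
                if ok_retry and pos_retry < ok_retry:
                    neg_retry = 0        # success still needs confirmation
                pos_retry += 1
            else:
                if ok_retry and neg_retry < ok_retry:
                    pos_retry = 0        # failure still needs confirmation
                neg_retry += 1
        return 'Success' if pos_retry > ok_retry else 'Failure'

    # Two confirmations required; a rate that always passes succeeds after 3 runs.
    print(classify(lambda: True, ok_retry=2))   # Success
    print(classify(lambda: False, ok_retry=2))  # Failure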
diff --git a/yardstick/network_services/traffic_profile/prox_irq.py b/yardstick/network_services/traffic_profile/prox_irq.py
new file mode 100644
index 000000000..0ea294914
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/prox_irq.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2016-2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Fixed traffic profile definitions """
+
+import logging
+import time
+
+from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+
+LOG = logging.getLogger(__name__)
+
+
+class ProxIrqProfile(ProxProfile):
+ """
+ This profile adds a single stream at the beginning of the traffic session
+ """
+
+ def __init__(self, tp_config):
+ super(ProxIrqProfile, self).__init__(tp_config)
+
+ def init(self, queue):
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def execute_traffic(self, traffic_generator):
+ LOG.debug("Prox_IRQ Execute Traffic....")
+ time.sleep(5)
+
+ def is_ended(self):
+ return False
+
+ def run_test(self):
+ """Run the test
+ """
+
+ LOG.info("Prox_IRQ ....")
diff --git a/yardstick/network_services/traffic_profile/prox_profile.py b/yardstick/network_services/traffic_profile/prox_profile.py
index 170dfd96f..be450c9f7 100644
--- a/yardstick/network_services/traffic_profile/prox_profile.py
+++ b/yardstick/network_services/traffic_profile/prox_profile.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@
from __future__ import absolute_import
import logging
+import multiprocessing
+import time
from yardstick.network_services.traffic_profile.base import TrafficProfile
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper
@@ -29,8 +31,22 @@ class ProxProfile(TrafficProfile):
"""
@staticmethod
+ def sort_vpci(traffic_gen):
+ """Return the list of external interfaces ordered by vpci and name
+
+ :param traffic_gen: (ProxTrafficGen) traffic generator
+ :return: list of ordered interfaces
+ """
+ def key_func(interface):
+ return interface['virtual-interface']['vpci'], interface['name']
+
+ return sorted(traffic_gen.vnfd_helper['vdu'][0]['external-interface'],
+ key=key_func)
+
+ @staticmethod
def fill_samples(samples, traffic_gen):
- for vpci_idx, intf in enumerate(traffic_gen.vpci_if_name_ascending):
+ vpci_if_name_ascending = ProxProfile.sort_vpci(traffic_gen)
+ for vpci_idx, intf in enumerate(vpci_if_name_ascending):
name = intf[1]
# TODO: VNFDs KPIs values needs to be mapped to TRex structure
xe_port = traffic_gen.resource_helper.sut.port_stats([vpci_idx])
@@ -42,7 +58,7 @@ class ProxProfile(TrafficProfile):
def __init__(self, tp_config):
super(ProxProfile, self).__init__(tp_config)
self.queue = None
- self.done = False
+ self.done = multiprocessing.Event()
self.results = []
# TODO: get init values from tp_config
@@ -102,7 +118,8 @@ class ProxProfile(TrafficProfile):
try:
pkt_size = next(self.pkt_size_iterator)
except StopIteration:
- self.done = True
+ time.sleep(5)
+ self.done.set()
return
# Adjust packet size upwards if it's less than the minimum
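
The sort_vpci() helper introduced above replaces the generator's vpci_if_name_ascending attribute with an explicit sort of the external interfaces by (vpci, name). A standalone illustration with made-up interface entries:

    interfaces = [
        {'name': 'xe1', 'virtual-interface': {'vpci': '0000:00:07.0'}},
        {'name': 'xe0', 'virtual-interface': {'vpci': '0000:00:05.0'}},
        {'name': 'xe2', 'virtual-interface': {'vpci': '0000:00:05.0'}},
    ]

    def sort_vpci(interfaces):
        # Sort primarily by PCI address, then by interface name.
        def key_func(interface):
            return interface['virtual-interface']['vpci'], interface['name']
        return sorted(interfaces, key=key_func)

    print([i['name'] for i in sort_vpci(interfaces)])  # ['xe0', 'xe2', 'xe1']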
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index b1ca8a345..aaa491b75 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -11,190 +11,352 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" RFC2544 Throughput implemenation """
-from __future__ import absolute_import
-from __future__ import division
import logging
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
-from yardstick.network_services.traffic_profile.traffic_profile \
- import TrexProfile
+from yardstick.common import constants
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-LOGGING = logging.getLogger(__name__)
+LOG = logging.getLogger(__name__)
+SRC_PORT = 'sport'
+DST_PORT = 'dport'
-class RFC2544Profile(TrexProfile):
- """ This class handles rfc2544 implemenation. """
- def __init__(self, traffic_generator):
- super(RFC2544Profile, self).__init__(traffic_generator)
- self.generator = None
- self.max_rate = None
- self.min_rate = None
- self.ports = None
- self.rate = 100
- self.drop_percent_at_max_tx = None
- self.throughput_max = None
+class PortPgIDMap(object):
+ """Port and pg_id mapping class
- def register_generator(self, generator):
- self.generator = generator
+ "pg_id" is the identification STL library gives to each stream. In the
+ RFC2544Profile class, the traffic has a STLProfile per port, which contains
+ one or several streams, one per packet size defined in the IMIX test case
+ description.
- def execute_traffic(self, traffic_generator=None):
- """ Generate the stream and run traffic on the given ports """
- if traffic_generator is not None and self.generator is None:
- self.generator = traffic_generator
+ Example of port <-> pg_id map:
+ self._port_pg_id_map = {
+ 0: [1, 2, 3, 4],
+ 1: [5, 6, 7, 8]
+ }
+ """
- if self.ports is not None:
- return
+ def __init__(self):
+ self._pg_id = 0
+ self._last_port = None
+ self._port_pg_id_map = {}
- self.ports = []
- for vld_id, intfs in sorted(self.generator.networks.items()):
- profile_data = self.params.get(vld_id)
- # no profile for this port
- if not profile_data:
- continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
- continue
- for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.max_rate = self.rate
- self.min_rate = 0
- self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
- duration=30, force=True)
- self.drop_percent_at_max_tx = 0
- self.throughput_max = 0
-
- def get_multiplier(self):
- """ Get the rate at which next iteration to run """
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def get_drop_percentage(self, generator=None):
- """ Calculate the drop percentage and run the traffic """
- if generator is None:
- generator = self.generator
- run_duration = self.generator.RUN_DURATION
- samples = self.generator.generate_samples(self.ports)
-
- in_packets = sum([value['in_packets'] for value in samples.values()])
- out_packets = sum([value['out_packets'] for value in samples.values()])
-
- packet_drop = abs(out_packets - in_packets)
- drop_percent = 100.0
- try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
- except ZeroDivisionError:
- LOGGING.info('No traffic is flowing')
+ def add_port(self, port):
+ self._last_port = port
+ self._port_pg_id_map[port] = []
- # TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
- tolerance_low = generator.rfc2544_helper.tolerance_low
- tolerance_high = generator.rfc2544_helper.tolerance_high
+ def get_pg_ids(self, port):
+ return self._port_pg_id_map.get(port, [])
- tx_rate = out_packets / run_duration
- rx_rate = in_packets / run_duration
+ def increase_pg_id(self, port=None):
+ port = self._last_port if not port else port
+ if port is None:
+ return
+ pg_id_list = self._port_pg_id_map.get(port)
+ if not pg_id_list:
+ self.add_port(port)
+ pg_id_list = self._port_pg_id_map[port]
+ self._pg_id += 1
+ pg_id_list.append(self._pg_id)
+ return self._pg_id
- throughput_max = self.throughput_max
- drop_percent_at_max_tx = self.drop_percent_at_max_tx
- if self.drop_percent_at_max_tx is None:
- self.rate = tx_rate
- self.first_run = False
+class RFC2544Profile(trex_traffic_profile.TrexProfile):
+ """TRex RFC2544 traffic profile"""
- if drop_percent > tolerance_high:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.max_rate = self.rate
- if throughput_max == 0:
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= tolerance_low:
- # TODO(esm): why do we update the samples dict in this case
- # and not update our tracking values?
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= self.drop_percent_at_max_tx:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
- self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
- self.throughput_max = throughput_max = rx_rate
+ TOLERANCE_LIMIT = 0.01
+ STATUS_SUCCESS = "Success"
+ STATUS_FAIL = "Failure"
- else:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
-
- generator.clear_client_stats(self.ports)
- generator.start_client(self.ports, mult=self.get_multiplier(),
- duration=run_duration, force=True)
-
- # if correlated traffic update the Throughput
- if generator.rfc2544_helper.correlated_traffic:
- throughput_max *= 2
+ def __init__(self, traffic_generator):
+ super(RFC2544Profile, self).__init__(traffic_generator)
+ self.generator = None
+ self.iteration = 0
+ self.rate = self.config.frame_rate
+ self.max_rate = self.config.frame_rate
+ self.min_rate = 0
- samples.update({
- 'TxThroughput': tx_rate,
- 'RxThroughput': rx_rate,
- 'CurrentDropPercentage': drop_percent,
- 'Throughput': throughput_max,
- 'DropPercentage': drop_percent_at_max_tx,
- })
+ def register_generator(self, generator):
+ self.generator = generator
- return samples
+ def stop_traffic(self, traffic_generator=None):
+ """"Stop traffic injection, reset counters and remove streams"""
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- def execute_latency(self, generator=None, samples=None):
- if generator is not None and self.generator is None:
- self.generator = generator
+ self.generator.client.stop()
+ self.generator.client.reset()
+ self.generator.client.remove_all_streams()
- if samples is None:
- samples = self.generator.generate_samples()
+ def execute_traffic(self, traffic_generator=None):
+ """Generate the stream and run traffic on the given ports
+
+ :param traffic_generator: (TrexTrafficGenRFC) traffic generator
+ :return ports: (list of int) indexes of ports
+ port_pg_id: (dict) port indexes and pg_id [1] map
+ [1] https://trex-tgn.cisco.com/trex/doc/cp_stl_docs/api/
+ profile_code.html#stlstream-modes
+ """
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- self.pps, multiplier = self.calculate_pps(samples)
- self.ports = []
- self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
+ port_pg_id = PortPgIDMap()
+ ports = []
for vld_id, intfs in sorted(self.generator.networks.items()):
profile_data = self.params.get(vld_id)
if not profile_data:
continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
continue
for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.generator.start_client(ports=self.ports, mult=str(multiplier),
- duration=120, force=True)
- self.first_run = False
-
- def calculate_pps(self, samples):
- pps = round(samples['Throughput'] / 2, 2)
- multiplier = round(self.rate / self.pps, 2)
- return pps, multiplier
-
- def create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if pps:
- stl_mode = STLTXCont(pps=pps)
+ port_num = int(self.generator.port_num(intf))
+ ports.append(port_num)
+ port_pg_id.add_port(port_num)
+ profile = self._create_profile(profile_data,
+ self.rate, port_pg_id,
+ self.config.enable_latency)
+ self.generator.client.add_streams(profile, ports=[port_num])
+
+ self.generator.client.start(ports=ports,
+ duration=self.config.duration,
+ force=True)
+ self.iteration = self.generator.rfc2544_helper.iteration.value
+ return ports, port_pg_id
+
+ def _create_profile(self, profile_data, rate, port_pg_id, enable_latency):
+ """Create a STL profile (list of streams) for a port"""
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ imix_data = self._create_imix_data(imix)
+ self._create_vm(profile_data[packet_name])
+ _streams = self._create_streams(imix_data, rate, port_pg_id,
+ enable_latency)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _create_imix_data(self, imix,
+ weight_mode=constants.DISTRIBUTION_IN_BYTES):
+ """Generate the IMIX distribution for a STL profile
+
+ The input information is the framesize dictionary in a test case
+ traffic profile definition. E.g.:
+ downlink_0:
+ ipv4:
+ id: 2
+ outer_l2:
+ framesize:
+ 64B: 10
+ 128B: 20
+ ...
+
+ This function normalizes the sum of framesize weights to 100 and
+ returns a dictionary of frame sizes in bytes and weight in percentage.
+ E.g.:
+ imix_count = {64: 25, 128: 75}
+
+ The weight mode is described in [1]. There are two ways to describe the
+ weight of the packets:
+ - Distribution in packets: the weight defines the percentage of
+ packets sent per packet size. IXIA uses this definition.
+ - Distribution in bytes: the weight defines the percentage of bytes
+ sent per packet size.
+
+ Packet size # packets D. in packets Bytes D. in bytes
+ 40 7 58.33% 280 7%
+ 576 4 33.33% 2304 56%
+ 1500 1 8.33% 1500 37%
+
+ [1] https://en.wikipedia.org/wiki/Internet_Mix
+
+ :param imix: (dict) IMIX size and weight
+ """
+ imix_count = {}
+ if not imix:
+ return imix_count
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ imix_count = {64: 100}
+ imix_sum = 100
+
+ weight_normalize = float(imix_sum) / 100
+ imix_dip = {size: float(weight) / weight_normalize
+ for size, weight in imix_count.items()}
+
+ if weight_mode == constants.DISTRIBUTION_IN_PACKETS:
+ return imix_dip
+
+ byte_total = sum([int(size) * weight
+ for size, weight in imix_count.items()])
+ return {size: float(int(size) * weight * 100) / byte_total
+ for size, weight in imix_count.items()}
+
+ def _create_vm(self, packet_definition):
+ """Create the STL Raw instructions"""
+ self.ether_packet = Pkt.Ether()
+ self.ip_packet = Pkt.IP()
+ self.ip6_packet = None
+ self.udp_packet = Pkt.UDP()
+ self.udp[DST_PORT] = 'UDP.dport'
+ self.udp[SRC_PORT] = 'UDP.sport'
+ self.qinq = False
+ self.vm_flow_vars = []
+ outer_l2 = packet_definition.get('outer_l2')
+ outer_l3v4 = packet_definition.get('outer_l3v4')
+ outer_l3v6 = packet_definition.get('outer_l3v6')
+ outer_l4 = packet_definition.get('outer_l4')
+ if outer_l2:
+ self._set_outer_l2_fields(outer_l2)
+ if outer_l3v4:
+ self._set_outer_l3v4_fields(outer_l3v4)
+ if outer_l3v6:
+ self._set_outer_l3v6_fields(outer_l3v6)
+ if outer_l4:
+ self._set_outer_l4_fields(outer_l4)
+ self.trex_vm = trex_stl_packet_builder_scapy.STLScVmRaw(
+ self.vm_flow_vars)
+
+ def _create_single_packet(self, size=64):
+ size -= 4
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ udp_packet = self.udp_packet
+ if self.qinq:
+ qinq_packet = self.qinq_packet
+ base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
else:
- stl_mode = STLTXCont(pps=self.pps)
- if self.pg_id:
- LOGGING.debug("pg_id: %s", self.pg_id)
- stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
- flow_stats=stl_flow_stats)
- self.pg_id += 1
+ base_pkt = ether_packet / ip_packet / udp_packet
+ pad = max(0, size - len(base_pkt)) * 'x'
+ return trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / pad, vm=self.trex_vm)
+
+ def _create_streams(self, imix_data, rate, port_pg_id, enable_latency):
+ """Create a list of streams per packet size
+
+ The STL TX mode speed of the generated streams will depend on the frame
+ weight and the frame rate. Both the frame weight and the total frame
+ rate are normalized to 100. The STL TX mode speed, defined in
+ percentage, is the combination of both percentages. E.g.:
+ frame weight = 100
+ rate = 90
+ --> STLTXmode percentage = 90 (%)
+
+ frame weight = 80
+ rate = 50
+ --> STLTXmode percentage = 40 (%)
+
+ :param imix_data: (dict) IMIX size and weight
+ :param rate: (float) normalized [0..100] total weight
+ :param port_pg_id: (PortPgIDMap) port / pg_id (list) map
+ """
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ packet = self._create_single_packet(size)
+ pg_id = port_pg_id.increase_pg_id()
+ stl_flow = (trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id) if
+ enable_latency else None)
+ mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
+ streams.append(trex_stl_client.STLStream(
+ packet=packet, flow_stats=stl_flow, mode=mode))
+ return streams
+
+ def get_drop_percentage(self, samples, tol_low, tol_high,
+ correlated_traffic, resolution): # pylint: disable=unused-argument
+ """Calculate the drop percentage and run the traffic"""
+ completed = False
+ status = self.STATUS_FAIL
+ out_pkt_end = sum(port['out_packets'] for port in samples[-1].values())
+ in_pkt_end = sum(port['in_packets'] for port in samples[-1].values())
+ out_pkt_ini = sum(port['out_packets'] for port in samples[0].values())
+ in_pkt_ini = sum(port['in_packets'] for port in samples[0].values())
+ in_bytes_ini = sum(port['in_bytes'] for port in samples[0].values())
+ out_bytes_ini = sum(port['out_bytes'] for port in samples[0].values())
+ in_bytes_end = sum(port['in_bytes'] for port in samples[-1].values())
+ out_bytes_end = sum(port['out_bytes'] for port in samples[-1].values())
+ time_diff = (list(samples[-1].values())[0]['timestamp'] -
+ list(samples[0].values())[0]['timestamp']).total_seconds()
+ out_packets = out_pkt_end - out_pkt_ini
+ in_packets = in_pkt_end - in_pkt_ini
+ out_bytes = out_bytes_end - out_bytes_ini
+ in_bytes = in_bytes_end - in_bytes_ini
+ tx_rate_fps = float(out_packets) / time_diff
+ rx_rate_fps = float(in_packets) / time_diff
+ drop_percent = 100.0
+
+ # https://tools.ietf.org/html/rfc2544#section-26.3
+ if out_packets:
+ drop_percent = round(
+ (float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
+
+ tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+ tol_low = min(tol_low, self.TOLERANCE_LIMIT)
+ if drop_percent > tol_high:
+ self.max_rate = self.rate
+ elif drop_percent < tol_low:
+ self.min_rate = self.rate
else:
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
- return stream
+ status = self.STATUS_SUCCESS
+ completed = True
+
+ last_rate = self.rate
+ self.rate = self._get_next_rate()
+ if abs(last_rate - self.rate) < resolution:
+ # stop test if the difference between the rate transmission
+ # in two iterations is smaller than the value of the resolution
+ completed = True
+ LOG.debug("rate=%s, next_rate=%s, resolution=%s, completed=%s",
+ last_rate, self.rate, resolution, completed)
+
+ ports = samples[-1].keys()
+ num_ports = len(ports)
+
+ output = {}
+ for port in ports:
+ output[port] = {}
+ first = samples[0][port]
+ last = samples[-1][port]
+ output[port]['InPackets'] = last['in_packets'] - first['in_packets']
+ output[port]['OutPackets'] = last['out_packets'] - first['out_packets']
+ output[port]['InBytes'] = last['in_bytes'] - first['in_bytes']
+ output[port]['OutBytes'] = last['out_bytes'] - first['out_bytes']
+ if self.config.enable_latency:
+ output[port]['LatencyAvg'] = float(sum(
+ [last['latency'][id]['average'] for id in
+ last['latency']]) * 1000) / len(last['latency'])
+ output[port]['LatencyMin'] = min(
+ [last['latency'][id]['total_min'] for id in
+ last['latency']]) * 1000
+ output[port]['LatencyMax'] = max(
+ [last['latency'][id]['total_max'] for id in
+ last['latency']]) * 1000
+
+ output['TxThroughput'] = tx_rate_fps
+ output['RxThroughput'] = rx_rate_fps
+ output['RxThroughputBps'] = round(float(in_bytes) / time_diff, 3)
+ output['TxThroughputBps'] = round(float(out_bytes) / time_diff, 3)
+ output['DropPercentage'] = drop_percent
+ output['Rate'] = last_rate
+ output['PktSize'] = self._get_framesize()
+ output['Iteration'] = self.iteration
+ output['Status'] = status
+
+ if self.config.enable_latency:
+ output['LatencyAvg'] = float(
+ sum([output[port]['LatencyAvg'] for port in ports])) / num_ports
+ output['LatencyMin'] = min([output[port]['LatencyMin'] for port in ports])
+ output['LatencyMax'] = max([output[port]['LatencyMax'] for port in ports])
+
+ return completed, output
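
The two calculations documented in the hunk above, the IMIX weight normalization of _create_imix_data and the RFC 2544 drop percentage of get_drop_percentage, can be reproduced in isolation. The following is a minimal sketch in plain Python with no TRex dependencies; the helper names normalize_imix and drop_percentage are illustrative and do not appear in the patch.

    # Illustrative helpers only; the names are not part of this patch.
    def normalize_imix(imix, in_packets=True):
        """Normalize a {'64B': 10, ...} framesize dict so weights sum to 100."""
        counts = {int(size.upper().replace('B', '')): int(weight)
                  for size, weight in imix.items()}
        total = sum(counts.values())
        if total <= 0:
            counts, total = {64: 100}, 100
        if in_packets:
            # Distribution in packets: share of frames sent per size (IXIA style).
            return {size: weight * 100.0 / total
                    for size, weight in counts.items()}
        # Distribution in bytes: share of bytes sent per size.
        byte_total = sum(size * weight for size, weight in counts.items())
        return {size: size * weight * 100.0 / byte_total
                for size, weight in counts.items()}

    def drop_percentage(out_packets, in_packets):
        """RFC 2544 section 26.3: loss as a percentage of transmitted frames."""
        if not out_packets:
            return 100.0
        return round(abs(out_packets - in_packets) * 100.0 / out_packets, 5)

    # normalize_imix({'64B': 10, '128B': 20})                   -> {64: 33.33.., 128: 66.66..}
    # normalize_imix({'64B': 10, '128B': 20}, in_packets=False) -> {64: 20.0, 128: 80.0}
    # drop_percentage(1000000, 999990)                          -> 0.001
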
diff --git a/yardstick/network_services/traffic_profile/sip.py b/yardstick/network_services/traffic_profile/sip.py
new file mode 100644
index 000000000..d18574090
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/sip.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.network_services.traffic_profile import base
+
+
+class SipProfile(base.TrafficProfile):
+ """ Sipp Traffic profile """
+
+ def __init__(self, yaml_data):
+ super(SipProfile, self).__init__(yaml_data)
+ self.generator = None
+
+ def execute_traffic(self, traffic_generator=None):
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
+
+ def is_ended(self):
+ if self.generator is not None:
+ return self.generator.is_ended()
+ return False
diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
index 2f97945c0..cf538d488 100644
--- a/yardstick/network_services/traffic_profile/traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
@@ -11,29 +11,24 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex Traffic Profile definitions """
-from __future__ import absolute_import
import struct
import socket
import logging
from random import SystemRandom
-import six
import ipaddress
-from yardstick.network_services.traffic_profile.base import TrafficProfile
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
-from trex_stl_lib.trex_stl_streams import STLProfile
+import six
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
from trex_stl_lib import api as Pkt
+from yardstick.common import exceptions as y_exc
+from yardstick.network_services.traffic_profile import base
+
+
SRC = 'src'
DST = 'dst'
ETHERNET = 'Ethernet'
@@ -48,7 +43,7 @@ TYPE_OF_SERVICE = 'tos'
LOG = logging.getLogger(__name__)
-class TrexProfile(TrafficProfile):
+class TrexProfile(base.TrafficProfile):
""" This class handles Trex Traffic profile generation and execution """
PROTO_MAP = {
@@ -57,6 +52,7 @@ class TrexProfile(TrafficProfile):
IPv6: ('ip6_packet', Pkt.IPv6),
UDP: ('udp_packet', Pkt.UDP),
}
+ RATE_ROUND = 5
def _general_single_action_partial(self, protocol):
def f(field):
@@ -70,6 +66,7 @@ class TrexProfile(TrafficProfile):
def _ethernet_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="mac_{}".format(direction),
min_value=1,
max_value=30,
@@ -77,30 +74,32 @@ class TrexProfile(TrafficProfile):
op='inc',
step=1)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_{}'.format(direction),
- pkt_offset='Ether.{}'.format(direction))
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='mac_{}'.format(direction),
+ pkt_offset='Ether.{}'.format(direction))
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
def _ip_range_action_partial(self, direction, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
- ip1 = int(ipaddress.IPv4Address(min_value))
- ip2 = int(ipaddress.IPv4Address(max_value))
- actual_count = (ip2 - ip1)
+ _, _, actual_count = self._count_ip(min_value, max_value)
if not actual_count:
count = 1
elif actual_count < int(count):
count = actual_count
- stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_{}".format(direction),
- min_value=min_value,
- max_value=max_value,
- size=4,
- limit=int(count),
- seed=0x1235)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(
+ name="ip4_{}".format(direction),
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_{}'.format(direction),
- pkt_offset='IP.{}'.format(direction))
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='ip4_{}'.format(direction),
+ pkt_offset='IP.{}'.format(direction))
self.vm_flow_vars.append(stl_vm_wr_flow_var)
stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
self.vm_flow_vars.append(stl_vm_fix_ipv4)
@@ -108,7 +107,8 @@ class TrexProfile(TrafficProfile):
def _ip6_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
- min_value, max_value = self._get_start_end_ipv6(min_value, max_value)
+ # pylint: disable=unused-argument
+ min_value, max_value, _ = self._count_ip(min_value, max_value)
stl_vm_flow_var = STLVmFlowVar(name="ip6_{}".format(direction),
min_value=min_value,
max_value=max_value,
@@ -116,14 +116,16 @@ class TrexProfile(TrafficProfile):
op='random',
step=1)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip6_{}'.format(direction),
- pkt_offset='IPv6.{}'.format(direction),
- offset_fixup=8)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='ip6_{}'.format(direction),
+ pkt_offset='IPv6.{}'.format(direction),
+ offset_fixup=8)
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
- def _dscp_range_action_partial(self, *_):
+ def _dscp_range_action_partial(self, *args):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="dscp",
min_value=min_value,
max_value=max_value,
@@ -134,8 +136,10 @@ class TrexProfile(TrafficProfile):
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dscp',
pkt_offset='IP.tos')
self.vm_flow_vars.append(stl_vm_wr_flow_var)
+ return partial
def _udp_range_action_partial(self, field, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
actual_count = int(max_value) - int(min_value)
if not actual_count:
@@ -143,15 +147,17 @@ class TrexProfile(TrafficProfile):
elif int(count) > actual_count:
count = actual_count
- stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_{}".format(field),
- min_value=min_value,
- max_value=max_value,
- size=2,
- limit=int(count),
- seed=0x1235)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(
+ name="port_{}".format(field),
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_{}'.format(field),
- pkt_offset=self.udp[field])
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='port_{}'.format(field),
+ pkt_offset=self.udp[field])
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
@@ -181,6 +187,8 @@ class TrexProfile(TrafficProfile):
self.qinq = False
self.vm_flow_vars = []
self.packets = []
+ self.max_rate = 0
+ self.min_rate = 0
self._map_proto_actions = {
# the tuple is (single value function, range value function, if the values should be
@@ -332,130 +340,38 @@ class TrexProfile(TrafficProfile):
if 'dstport' in outer_l4:
self._set_proto_addr(UDP, DST_PORT, outer_l4['dstport'], outer_l4['count'])
- def generate_imix_data(self, packet_definition):
- """ generate packet size for a given traffic profile """
- imix_count = {}
- imix_data = {}
- if not packet_definition:
- return imix_count
- imix = packet_definition.get('framesize')
- if imix:
- for size in imix:
- data = imix[size]
- imix_data[int(size[:-1])] = int(data)
- imix_sum = sum(imix_data.values())
- if imix_sum > 100:
- raise SystemExit("Error in IMIX data")
- elif imix_sum < 100:
- imix_data[64] = imix_data.get(64, 0) + (100 - imix_sum)
-
- avg_size = 0.0
- for size in imix_data:
- count = int(imix_data[size])
- if count:
- avg_size += round(size * count / 100, 2)
- pps = round(self.pps * count / 100, 0)
- imix_count[size] = pps
- self.rate = round(1342177280 / avg_size, 0) * 2
- logging.debug("Imax: %s rate: %s", imix_count, self.rate)
- return imix_count
-
- def get_streams(self, profile_data):
- """ generate trex stream
- :param profile_data:
- :type profile_data:
- """
- self.streams = []
- self.pps = self.params['traffic_profile'].get('frame_rate', 100)
- for packet_name in profile_data:
- outer_l2 = profile_data[packet_name].get('outer_l2')
- imix_data = self.generate_imix_data(outer_l2)
- if not imix_data:
- imix_data = {64: self.pps}
- self.generate_vm(profile_data[packet_name])
- for size in imix_data:
- self._generate_streams(size, imix_data[size])
- self._generate_profile()
- return self.profile
-
- def generate_vm(self, packet_definition):
- """ generate trex vm with flows setup """
- self.ether_packet = Pkt.Ether()
- self.ip_packet = Pkt.IP()
- self.ip6_packet = None
- self.udp_packet = Pkt.UDP()
- self.udp[DST_PORT] = 'UDP.dport'
- self.udp[SRC_PORT] = 'UDP.sport'
- self.qinq = False
- self.vm_flow_vars = []
- outer_l2 = packet_definition.get('outer_l2', None)
- outer_l3v4 = packet_definition.get('outer_l3v4', None)
- outer_l3v6 = packet_definition.get('outer_l3v6', None)
- outer_l4 = packet_definition.get('outer_l4', None)
- if outer_l2:
- self._set_outer_l2_fields(outer_l2)
- if outer_l3v4:
- self._set_outer_l3v4_fields(outer_l3v4)
- if outer_l3v6:
- self._set_outer_l3v6_fields(outer_l3v6)
- if outer_l4:
- self._set_outer_l4_fields(outer_l4)
- self.trex_vm = STLScVmRaw(self.vm_flow_vars)
-
- def generate_packets(self):
- """ generate packets from trex TG """
- base_pkt = self.base_pkt
- size = self.fsize - 4
- pad = max(0, size - len(base_pkt)) * 'x'
- self.packets = [STLPktBuilder(pkt=base_pkt / pad,
- vm=vm) for vm in self.vms]
-
- def _create_single_packet(self, size=64):
- size = size - 4
- ether_packet = self.ether_packet
- ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
- udp_packet = self.udp_packet
- if self.qinq:
- qinq_packet = self.qinq_packet
- base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
- else:
- base_pkt = ether_packet / ip_packet / udp_packet
- pad = max(0, size - len(base_pkt)) * 'x'
- packet = STLPktBuilder(pkt=base_pkt / pad, vm=self.trex_vm)
- return packet
-
- def _create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if self.pg_id:
- self.pg_id += 1
- stl_flow = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps),
- flow_stats=stl_flow)
- else:
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps))
- return stream
-
- def _generate_streams(self, packet_size, pps):
- self.streams.append(self._create_single_stream(packet_size, pps))
-
- def _generate_profile(self):
- self.profile = STLProfile(self.streams)
+ def _get_next_rate(self):
+ rate = round(float(self.max_rate + self.min_rate)/2.0, self.RATE_ROUND)
+ return rate
+
+ def _get_framesize(self):
+ framesizes = []
+ for traffickey, value in self.params.items():
+ if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
+ continue
+ for _, data in value.items():
+ framesize = data['outer_l2']['framesize']
+ for size in (s for s, w in framesize.items() if int(w) != 0):
+ framesizes.append(size)
+ if len(set(framesizes)) == 0:
+ return ''
+ elif len(set(framesizes)) == 1:
+ return framesizes[0]
+ return 'IMIX'
@classmethod
- def _get_start_end_ipv6(cls, start_ip, end_ip):
- try:
- ip1 = socket.inet_pton(socket.AF_INET6, start_ip)
- ip2 = socket.inet_pton(socket.AF_INET6, end_ip)
- hi1, lo1 = struct.unpack('!QQ', ip1)
- hi2, lo2 = struct.unpack('!QQ', ip2)
- if ((hi1 << 64) | lo1) > ((hi2 << 64) | lo2):
- raise SystemExit("IPv6: start_ip is greater then end_ip")
- max_p1 = abs(int(lo1) - int(lo2))
- base_p1 = lo1
- except Exception as ex_error:
- raise SystemExit(ex_error)
- else:
- return base_p1, max_p1 + base_p1
+ def _count_ip(cls, start_ip, end_ip):
+ start = ipaddress.ip_address(six.u(start_ip))
+ end = ipaddress.ip_address(six.u(end_ip))
+ if start.version == 4:
+ return start, end, int(end) - int(start)
+ elif start.version == 6:
+ if int(start) > int(end):
+ raise y_exc.IPv6RangeError(start_ip=str(start),
+ end_ip=str(end))
+ _, lo1 = struct.unpack('!QQ', start.packed)
+ _, lo2 = struct.unpack('!QQ', end.packed)
+ return lo1, lo2, lo2 - lo1
@classmethod
def _get_random_value(cls, min_port, max_port):
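
The _get_start_end_ipv6 helper removed above is replaced by _count_ip, which handles both address families. Below is a standalone Python 3 sketch of the same range-counting logic; count_ip is an illustrative name, and the real method additionally wraps the inputs with six.u for Python 2 compatibility and raises y_exc.IPv6RangeError instead of ValueError.

    import ipaddress
    import struct

    def count_ip(start_ip, end_ip):
        """Return (start, end, number of addresses between them)."""
        start = ipaddress.ip_address(start_ip)
        end = ipaddress.ip_address(end_ip)
        if start.version == 4:
            return start, end, int(end) - int(start)
        # IPv6: only the lower 64 bits are used, as in _count_ip above.
        if int(start) > int(end):
            raise ValueError('IPv6 start address is greater than end address')
        _, lo1 = struct.unpack('!QQ', start.packed)
        _, lo2 = struct.unpack('!QQ', end.packed)
        return lo1, lo2, lo2 - lo1

    # count_ip('10.0.0.1', '10.0.0.10')[2] -> 9
    # count_ip('2001::1', '2001::ff')[2]   -> 254
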
diff --git a/yardstick/network_services/traffic_profile/vpp_rfc2544.py b/yardstick/network_services/traffic_profile/vpp_rfc2544.py
new file mode 100644
index 000000000..412e4e69a
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/vpp_rfc2544.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import ipaddress
+import logging
+import random
+import string
+
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
+
+from yardstick.common import constants
+from yardstick.network_services.helpers.vpp_helpers.multiple_loss_ratio_search import \
+ MultipleLossRatioSearch
+from yardstick.network_services.traffic_profile.rfc2544 import RFC2544Profile, \
+ PortPgIDMap
+from yardstick.network_services.traffic_profile.trex_traffic_profile import IP, \
+ DST
+
+LOGGING = logging.getLogger(__name__)
+
+
+class VppRFC2544Profile(RFC2544Profile):
+
+ def __init__(self, traffic_generator):
+ super(VppRFC2544Profile, self).__init__(traffic_generator)
+
+ tp_cfg = traffic_generator["traffic_profile"]
+ self.number_of_intermediate_phases = tp_cfg.get("intermediate_phases",
+ 2)
+
+ self.duration = self.config.duration
+ self.precision = self.config.test_precision
+ self.lower_bound = self.config.lower_bound
+ self.upper_bound = self.config.upper_bound
+ self.step_interval = self.config.step_interval
+ self.enable_latency = self.config.enable_latency
+
+ self.pkt_size = None
+ self.flow = None
+
+ self.tolerance_low = 0
+ self.tolerance_high = 0
+
+ self.queue = None
+ self.port_pg_id = None
+
+ self.current_lower = self.lower_bound
+ self.current_upper = self.upper_bound
+
+ self.ports = []
+ self.profiles = {}
+
+ @property
+ def delta(self):
+ return self.current_upper - self.current_lower
+
+ @property
+ def mid_point(self):
+ return (self.current_lower + self.current_upper) / 2
+
+ @staticmethod
+ def calculate_frame_size(imix):
+ if not imix:
+ return 64, 100
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ return 64, 100
+ packets_total = sum([int(size) * weight
+ for size, weight in imix_count.items()])
+ return packets_total / imix_sum, imix_sum
+
+ @staticmethod
+ def _gen_payload(length):
+ payload = ""
+ for _ in range(length):
+ payload += random.choice(string.ascii_letters)
+
+ return payload
+
+ def bounds_iterator(self, logger=None):
+ self.current_lower = self.lower_bound
+ self.current_upper = self.upper_bound
+
+ test_value = self.current_upper
+ while abs(self.delta) >= self.precision:
+ if logger:
+ logger.debug("New interval [%s, %s), precision: %d",
+ self.current_lower,
+ self.current_upper, self.step_interval)
+ logger.info("Testing with value %s", test_value)
+
+ yield test_value
+ test_value = self.mid_point
+
+ def register_generator(self, generator):
+ super(VppRFC2544Profile, self).register_generator(generator)
+ self.init_traffic_params(generator)
+
+ def init_queue(self, queue):
+ self.queue = queue
+ self.queue.cancel_join_thread()
+
+ def init_traffic_params(self, generator):
+ if generator.rfc2544_helper.latency:
+ self.enable_latency = True
+ self.tolerance_low = generator.rfc2544_helper.tolerance_low
+ self.tolerance_high = generator.rfc2544_helper.tolerance_high
+ self.max_rate = generator.scenario_helper.all_options.get('vpp_config',
+ {}).get(
+ 'max_rate', self.rate)
+
+ def create_profile(self, profile_data, current_port):
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ self.pkt_size, imix_sum = self.calculate_frame_size(imix)
+ self._create_vm(profile_data[packet_name])
+ if self.max_rate > 100:
+ imix_data = self._create_imix_data(imix,
+ constants.DISTRIBUTION_IN_PACKETS)
+ else:
+ imix_data = self._create_imix_data(imix)
+ _streams = self._create_single_stream(current_port, imix_data,
+ imix_sum)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _set_outer_l3v4_fields(self, outer_l3v4):
+ """ setup outer l3v4 fields from traffic profile """
+ ip_params = {}
+ if 'proto' in outer_l3v4:
+ ip_params['proto'] = outer_l3v4['proto']
+ self._set_proto_fields(IP, **ip_params)
+
+ self.flow = int(outer_l3v4['count'])
+ src_start_ip, _ = outer_l3v4['srcip4'].split('-')
+ dst_start_ip, _ = outer_l3v4['dstip4'].split('-')
+
+ self.ip_packet = Pkt.IP(src=src_start_ip,
+ dst=dst_start_ip,
+ proto=outer_l3v4['proto'])
+ if self.flow > 1:
+ dst_start_int = int(ipaddress.ip_address(str(dst_start_ip)))
+ dst_end_ip_new = ipaddress.ip_address(
+ dst_start_int + self.flow - 1)
+ # self._set_proto_addr(IP, SRC, outer_l3v4['srcip4'], outer_l3v4['count'])
+ self._set_proto_addr(IP, DST,
+ "{start_ip}-{end_ip}".format(
+ start_ip=dst_start_ip,
+ end_ip=str(dst_end_ip_new)),
+ self.flow)
+
+ def _create_single_packet(self, size=64):
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ base_pkt = ether_packet / ip_packet
+ payload_len = max(0, size - len(base_pkt) - 4)
+ packet = trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / self._gen_payload(payload_len),
+ vm=self.trex_vm)
+ packet_lat = trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / self._gen_payload(payload_len))
+
+ return packet, packet_lat
+
+ def _create_single_stream(self, current_port, imix_data, imix_sum,
+ isg=0.0):
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ if current_port == 1:
+ isg += 10.0
+ if self.max_rate > 100:
+ mode = trex_stl_streams.STLTXCont(
+ pps=int(weight * imix_sum / 100))
+ mode_lat = mode
+ else:
+ mode = trex_stl_streams.STLTXCont(
+ percentage=weight * self.max_rate / 100)
+ mode_lat = trex_stl_streams.STLTXCont(pps=9000)
+
+ packet, packet_lat = self._create_single_packet(size)
+ streams.append(
+ trex_stl_client.STLStream(isg=isg, packet=packet, mode=mode))
+ if self.enable_latency:
+ pg_id = self.port_pg_id.increase_pg_id(current_port)
+ stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ stream_lat = trex_stl_client.STLStream(isg=isg,
+ packet=packet_lat,
+ mode=mode_lat,
+ flow_stats=stl_flow)
+ streams.append(stream_lat)
+ return streams
+
+ def execute_traffic(self, traffic_generator=None):
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
+
+ self.ports = []
+ self.profiles = {}
+ self.port_pg_id = PortPgIDMap()
+ for vld_id, intfs in sorted(self.generator.networks.items()):
+ profile_data = self.params.get(vld_id)
+ if not profile_data:
+ continue
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
+ continue
+ for intf in intfs:
+ current_port = int(self.generator.port_num(intf))
+ self.port_pg_id.add_port(current_port)
+ profile = self.create_profile(profile_data, current_port)
+ self.generator.client.add_streams(profile,
+ ports=[current_port])
+
+ self.ports.append(current_port)
+ self.profiles[current_port] = profile
+
+ timeout = self.generator.scenario_helper.scenario_cfg["runner"][
+ "duration"]
+ test_data = {
+ "test_duration": timeout,
+ "test_precision": self.precision,
+ "tolerated_loss": self.tolerance_high,
+ "duration": self.duration,
+ "packet_size": self.pkt_size,
+ "flow": self.flow
+ }
+
+ if self.max_rate > 100:
+ self.binary_search_with_optimized(self.generator, self.duration,
+ timeout, test_data)
+ else:
+ self.binary_search(self.generator, self.duration,
+ self.tolerance_high, test_data)
+
+ def binary_search_with_optimized(self, traffic_generator, duration,
+ timeout, test_data):
+ self.queue.cancel_join_thread()
+ algorithm = MultipleLossRatioSearch(
+ measurer=traffic_generator, latency=self.enable_latency,
+ pkt_size=self.pkt_size,
+ final_trial_duration=duration,
+ final_relative_width=self.step_interval / 100,
+ number_of_intermediate_phases=self.number_of_intermediate_phases,
+ initial_trial_duration=1,
+ timeout=timeout)
+ algorithm.init_generator(self.ports, self.port_pg_id, self.profiles,
+ test_data, self.queue)
+ return algorithm.narrow_down_ndr_and_pdr(10000, self.max_rate,
+ self.tolerance_high)
+
+ def binary_search(self, traffic_generator, duration, tolerance_value,
+ test_data):
+ theor_max_thruput = 0
+ result_samples = {}
+
+ for test_value in self.bounds_iterator(LOGGING):
+ stats = traffic_generator.send_traffic_on_tg(self.ports,
+ self.port_pg_id,
+ duration,
+ str(
+ test_value / self.max_rate / 2),
+ latency=self.enable_latency)
+ traffic_generator.client.reset(ports=self.ports)
+ traffic_generator.client.clear_stats(ports=self.ports)
+ traffic_generator.client.remove_all_streams(ports=self.ports)
+ for port, profile in self.profiles.items():
+ traffic_generator.client.add_streams(profile, ports=[port])
+
+ loss_ratio = (float(traffic_generator.loss) / float(
+ traffic_generator.sent)) * 100
+
+ samples = traffic_generator.generate_samples(stats, self.ports,
+ self.port_pg_id,
+ self.enable_latency)
+ samples.update(test_data)
+ LOGGING.info("Collect TG KPIs %s %s %s", datetime.datetime.now(),
+ test_value, samples)
+ self.queue.put(samples)
+
+ if float(loss_ratio) > float(tolerance_value):
+ LOGGING.debug("Failure... Decreasing upper bound")
+ self.current_upper = test_value
+ else:
+ LOGGING.debug("Success! Increasing lower bound")
+ self.current_lower = test_value
+
+ rate_total = float(traffic_generator.sent) / float(duration)
+ bandwidth_total = float(rate_total) * (
+ float(self.pkt_size) + 20) * 8 / (10 ** 9)
+
+ success_samples = {'Result_' + key: value for key, value in
+ samples.items()}
+ success_samples["Result_{}".format('PDR')] = {
+ "rate_total_pps": float(rate_total),
+ "bandwidth_total_Gbps": float(bandwidth_total),
+ "packet_loss_ratio": float(loss_ratio),
+ "packets_lost": int(traffic_generator.loss),
+ }
+ self.queue.put(success_samples)
+
+ # Store Actual throughput for result samples
+ for intf in traffic_generator.vnfd_helper.interfaces:
+ name = intf["name"]
+ result_samples[name] = {
+ "Result_Actual_throughput": float(
+ success_samples["Result_{}".format(name)][
+ "rx_throughput_bps"]),
+ }
+
+ for intf in traffic_generator.vnfd_helper.interfaces:
+ name = intf["name"]
+ if theor_max_thruput < samples[name]["tx_throughput_bps"]:
+ theor_max_thruput = samples[name]['tx_throughput_bps']
+ self.queue.put({'theor_max_throughput': theor_max_thruput})
+
+ result_samples["Result_theor_max_throughput"] = theor_max_thruput
+ self.queue.put(result_samples)
+ return result_samples
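
The binary_search loop above halves the [current_lower, current_upper) interval via bounds_iterator until the width drops below the configured precision, and converts the resulting packet rate to Gbps by adding 20 bytes of per-frame overhead (Ethernet preamble and inter-frame gap). A minimal standalone sketch of that control flow follows; trial_passes is a hypothetical callback standing in for one send_traffic_on_tg run checked against the tolerated loss.

    def search(lower, upper, precision, trial_passes):
        """Halve the [lower, upper) interval until it is narrower than precision."""
        test_value = upper
        while abs(upper - lower) >= precision:
            if trial_passes(test_value):
                lower = test_value      # success: raise the lower bound
            else:
                upper = test_value      # failure: lower the upper bound
            test_value = (lower + upper) / 2.0
        return lower

    def bandwidth_gbps(rate_pps, pkt_size):
        # 20 bytes per frame account for the Ethernet preamble and inter-frame gap.
        return float(rate_pps) * (pkt_size + 20) * 8 / (10 ** 9)

    # Example: 64-byte frames at ~1.488 Mpps saturate a 1 GbE link:
    # bandwidth_gbps(1488095, 64) -> ~1.0
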