author    Jing Zhang <jing.c.zhang@nokia.com>    2017-04-06 09:38:20 -0400
committer Ross Brattain <ross.b.brattain@intel.com>    2017-05-31 16:18:39 +0000
commit    68fc8023c0018e8380d35540d31d410ae5c507f9 (patch)
tree      85d6655b55f4aace05b8e4dc294d70c9901cf1d2
parent    37921fcd232cd2fbba9f45ef9fa5d8c912f54af6 (diff)
Add a new runner to do binary search for max PPS
A run consists of multiple (configurable) iterations. The first iteration
starts from a configured packet rate; each subsequent iteration starts from
the rate observed in the previous iteration. An iteration is a binary search
for the maximum pps that does not exceed a 10^-6 packet loss rate. The upper
bound is capped to the last pps at which the packet loss target was missed,
and the lower bound is capped to the last pps at which the packet loss target
was met. An iteration stops when the upper and lower bounds are close enough
(configurable) or when the received rate is well below the sending rate. The
reported rate is the lower bound.

Update-1: Local run of run_tests.sh is good, but two lines are reported by
Jenkins as too long.
Update-2: Minor fix to cope with "pps" not being defined in the test case
yaml file.
Update-3: Add pragma to skip unit test for this patch.

JIRA: YARDSTICK-613

Change-Id: I2411b173d18d928cc1cf08f883b08bc13a125ea2
Signed-off-by: Jing Zhang <jing.c.zhang@nokia.com>
-rwxr-xr-x  yardstick/benchmark/runners/dynamictp.py  169
1 file changed, 169 insertions(+), 0 deletions(-)
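
The search the new runner performs can be summarised in a few lines of Python.
The sketch below is illustrative only and is not part of the patch:
find_max_pps and measure are hypothetical names, measure stands in for one
traffic run at a given rate and is assumed to return the achieved pps and
whether the 10^-6 loss target was met, and the upward probing the runner does
before an upper bound is found is omitted for brevity.

def find_max_pps(measure, initial_rate, delta=1000, max_steps=10):
    # Binary search for the highest rate that still meets the loss target.
    search_min = 0              # highest rate that met the loss target so far
    search_max = initial_rate   # lowest rate that missed it (or the start cap)
    rate = search_max
    best = 0

    for _ in range(max_steps):
        actual_pps, loss_ok = measure(rate)
        if loss_ok:
            search_min = actual_pps     # raise the lower bound
            best = actual_pps
        else:
            search_max = actual_pps     # lower the upper bound

        # Stop when the bounds converge or the sender cannot keep up.
        if search_max - search_min < delta or rate > actual_pps * 1.5:
            break
        rate = (search_max - search_min) // 2 + search_min

    return best
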
diff --git a/yardstick/benchmark/runners/dynamictp.py b/yardstick/benchmark/runners/dynamictp.py
new file mode 100755
index 000000000..106595dbd
--- /dev/null
+++ b/yardstick/benchmark/runners/dynamictp.py
@@ -0,0 +1,169 @@
+# Copyright 2016: Nokia
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick comment: this is a modified copy of
+# rally/rally/benchmark/runners/constant.py
+
+"""A runner that searches for the max throughput with binary search
+"""
+
+import logging
+import multiprocessing
+import os
+import time
+import traceback
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted): # pragma: no cover
+
+ runner_cfg = scenario_cfg['runner']
+    iterations = runner_cfg.get("iterations", 1)  # number of searches
+    interval = runner_cfg.get("interval", 1)  # seconds between runs
+    run_step = runner_cfg.get("run_step", "setup,run,teardown")
+    # Stop the search when the bounds are within delta pps of each other.
+    delta = runner_cfg.get("delta", 1000)
+    options_cfg = scenario_cfg['options']
+    initial_rate = options_cfg.get("pps", 1000000)  # starting packet rate
+ LOG.info("worker START, class %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ if "setup" in run_step:
+ benchmark.setup()
+
+ method = getattr(benchmark, method_name)
+
+ queue.put({'runner_id': runner_cfg['runner_id'],
+ 'scenario_cfg': scenario_cfg,
+ 'context_cfg': context_cfg})
+
+ if "run" in run_step:
+ iterator = 0
+ search_max = initial_rate
+ search_min = 0
+        while iterator < iterations:
+            # Back off the lower bound and restart the search from the
+            # last known upper bound.
+            search_min = int(search_min / 2)
+            scenario_cfg['options']['pps'] = search_max
+            search_max_found = False
+            max_throughput_found = False
+            sequence = 0
+
+            # Result of the highest rate that still met the loss target.
+            last_min_data = {'packets_per_second': 0}
+
+ while True:
+ sequence += 1
+
+ data = {}
+ errors = ""
+ too_high = False
+
+ LOG.debug("sequence: %s search_min: %s search_max: %s",
+ sequence, search_min, search_max)
+
+ try:
+ method(data)
+ except AssertionError as assertion:
+                    LOG.warning("SLA validation failed: %s", assertion.args)
+ too_high = True
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+                # The scenario may not have reported a rate if it failed.
+                actual_pps = data.get('packets_per_second', 0)
+
+                if too_high:
+                    # Loss target missed: lower the upper bound.
+                    search_max = actual_pps
+                    search_max_found = True
+                else:
+                    # Loss target met: raise the lower bound.
+                    last_min_data = data
+                    search_min = actual_pps
+
+                # If the achieved rate is well below the requested rate,
+                # the sender is the bottleneck; cap the search there.
+                if scenario_cfg['options']['pps'] > actual_pps * 1.5:
+                    search_max = actual_pps
+                    LOG.debug("Sender reached max throughput: %s", search_max)
+                elif not search_max_found:
+                    # No upper bound yet; probe above the achieved rate.
+                    search_max = int(actual_pps * 1.5)
+
+ if ((search_max - search_min) < delta) or \
+ (search_max <= search_min) or (10 <= sequence):
+                    if last_min_data.get('packets_per_second', 0) > 0:
+ data = last_min_data
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ record = {
+ 'runner_id': runner_cfg['runner_id'],
+ 'benchmark': benchmark_output
+ }
+
+ queue.put(record)
+                    max_throughput_found = True
+
+                if errors or aborted.is_set() or max_throughput_found:
+ LOG.info("worker END")
+ break
+
+                if not search_max_found:
+                    # Still probing upward: try the new upper bound directly.
+                    scenario_cfg['options']['pps'] = search_max
+                else:
+                    # Bisect between the current bounds.
+                    scenario_cfg['options']['pps'] = \
+                        (search_max - search_min) / 2 + search_min
+
+ time.sleep(interval)
+
+ iterator += 1
+ LOG.debug("iterator: %s iterations: %s", iterator, iterations)
+
+ if "teardown" in run_step:
+ benchmark.teardown()
+
+
+class IterationRunner(base.Runner):
+    '''Run a scenario to find the max throughput with a binary search.
+
+    Each iteration reruns the scenario at a new packet rate until the
+    search converges or the run is aborted.
+
+    Parameters
+        iterations - number of binary searches to perform
+            type:    int
+            default: 1
+        interval - time to wait between each scenario invocation
+            type:    int
+            unit:    seconds
+            default: 1 sec
+        delta - stop condition for the search
+            type:    int
+            unit:    pps
+            default: 1000 pps
+    '''
+ __execution_type__ = 'Dynamictp'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ self.process = multiprocessing.Process(
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted))
+ self.process.start()
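
For reference, the configuration the worker reads could be expressed as the
following scenario_cfg fragment. The values are examples only; in practice
they come from the runner and options sections of the test case yaml file.

# Illustrative only: the fields of scenario_cfg that _worker_process consumes.
scenario_cfg = {
    'runner': {
        'type': 'Dynamictp',              # selects this runner
        'iterations': 2,                  # number of binary searches
        'interval': 1,                    # seconds between invocations
        'delta': 1000,                    # convergence threshold in pps
        'run_step': 'setup,run,teardown',
    },
    'options': {
        'pps': 1000000,                   # starting packet rate
    },
}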