Diffstat (limited to 'yardstick/benchmark/runners/duration.py')
-rw-r--r--  yardstick/benchmark/runners/duration.py  38
1 file changed, 21 insertions, 17 deletions
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 363320a6d..af5aae899 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -21,38 +21,40 @@ from yardstick.benchmark.runners import base
 LOG = logging.getLogger(__name__)
 
-def _worker_process(queue, cls, method_name, context, scenario_args):
+def _worker_process(queue, cls, method_name, scenario_cfg):
     sequence = 1
 
-    interval = context.get("interval", 1)
-    duration = context.get("duration", 60)
+    runner_cfg = scenario_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    duration = runner_cfg.get("duration", 60)
     LOG.info("worker START, duration %d sec, class %s", duration, cls)
 
-    context['runner'] = os.getpid()
+    runner_cfg['runner_id'] = os.getpid()
 
-    benchmark = cls(context)
+    benchmark = cls(runner_cfg)
     benchmark.setup()
     method = getattr(benchmark, method_name)
 
-    record_context = {"runner": context["runner"],
-                      "host": context["host"]}
-
     sla_action = None
-    if "sla" in scenario_args:
-        sla_action = scenario_args["sla"].get("action", "assert")
+    if "sla" in scenario_cfg:
+        sla_action = scenario_cfg["sla"].get("action", "assert")
+
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'scenario_cfg': scenario_cfg})
 
     start = time.time()
     while True:
         LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
         errors = ""
 
         try:
-            data = method(scenario_args)
+            data = method(scenario_cfg)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -73,11 +75,13 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }
 
-        queue.put({'context': record_context, 'sargs': scenario_args,
-                   'benchmark': benchmark_output})
+        record = {'runner_id': runner_cfg['runner_id'],
+                  'benchmark': benchmark_output}
+
+        queue.put(record)
 
         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
 
@@ -105,8 +109,8 @@ If the scenario ends before the time has elapsed, it will be started again.
     '''
     __execution_type__ = 'Duration'
 
-    def _run_benchmark(self, cls, method, scenario_args):
+    def _run_benchmark(self, cls, method, scenario_cfg):
         self.process = multiprocessing.Process(
             target=_worker_process,
-            args=(self.result_queue, cls, method, self.config, scenario_args))
+            args=(self.result_queue, cls, method, scenario_cfg))
         self.process.start()
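
For readers skimming the change, the sketch below emulates the new record flow end to end: a single scenario_cfg argument whose 'runner' section carries the timing options, one up-front runner_id/scenario_cfg record, then per-iteration runner_id/benchmark records. It is a standalone illustration rather than yardstick code; _demo_worker, the dummy rtt measurement, and the 2-second duration are assumptions made up for the demo, while the scenario_cfg['runner'] layout, the runner_id field, and the two record shapes come from the diff above.

import os
import time
from multiprocessing import Process, Queue


def _demo_worker(queue, scenario_cfg):
    # Trimmed-down mirror of the refactored _worker_process: everything
    # arrives in one scenario_cfg dict, with runner options nested under
    # its 'runner' key instead of a separate 'context' argument.
    runner_cfg = scenario_cfg['runner']
    interval = runner_cfg.get("interval", 1)
    duration = runner_cfg.get("duration", 60)
    runner_cfg['runner_id'] = os.getpid()

    # One up-front record announcing runner_id and echoing scenario_cfg,
    # as the new worker does before entering the measurement loop.
    queue.put({'runner_id': runner_cfg['runner_id'],
               'scenario_cfg': scenario_cfg})

    sequence = 1
    start = time.time()
    while time.time() - start < duration:
        # Per-iteration records now carry only runner_id plus the
        # benchmark output; the old 'context'/'sargs' keys are gone.
        queue.put({'runner_id': runner_cfg['runner_id'],
                   'benchmark': {'sequence': sequence,
                                 'data': {'rtt': 0.42},  # dummy measurement
                                 'errors': ""}})
        sequence += 1
        time.sleep(interval)


if __name__ == '__main__':
    # Short 2-second duration so the demo finishes quickly.
    scenario_cfg = {'runner': {'interval': 1, 'duration': 2}}
    q = Queue()
    p = Process(target=_demo_worker, args=(q, scenario_cfg))
    p.start()
    p.join()
    while not q.empty():
        print(q.get())

One effect of the refactor visible here: the scenario definition crosses the queue once, up front, instead of being repeated in every per-iteration record the way the old 'context'/'sargs' payload was.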