author | Jörgen Karlsson <jorgen.w.karlsson@ericsson.com> | 2015-09-16 20:18:14 +0200 |
---|---|---|
committer | Jörgen Karlsson <jorgen.w.karlsson@ericsson.com> | 2015-09-22 07:37:07 +0000 |
commit | 52fbce20e29b6dc7c5637b57b71de102c198b05a (patch) | |
tree | 114aff9e33743804917252c0ce9de4b776cfe1f1 | |
parent | b3cbb26122ecf69bfcbe9dd98d39b11b0c558412 (diff) |
Structure output and make it less redundant
Note: this commit replaces:
https://gerrit.opnfv.org/gerrit/#/c/976/8
Adjusts the JSON output of the runners to follow a different structure,
laid out below. It is based on the patch above but does not use the
output manager.
The purpose is to keep a unified basic layout (as before), while making
long data series much less repetitive and more space-efficient.
OUTPUT FORMAT:
------------------------------------------------------------------------
RUNNER PREP - printed exactly once per runner per scenario.
The runner MUST print this before sending any RUNNER DATA output.
{
runner_id: <int>
scenario_cfg: {
<scenario and runner config>
}
}
where
runner_id: ID of the runner sending this block
scenario_cfg: scenario and runner configuration
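For illustration only, a minimal sketch of how a worker process could emit
this block, modelled on the queue.put() call introduced by this patch; the
scenario_cfg contents below are hypothetical:

    import os
    from multiprocessing import Queue

    queue = Queue()

    # Hypothetical scenario_cfg; the real dictionary is built by task.py
    # and carries the full scenario and runner configuration.
    scenario_cfg = {
        'runner': {'type': 'Duration', 'duration': 60, 'interval': 1},
        'options': {'packetsize': 100},
    }

    # RUNNER PREP: emitted exactly once, before any RUNNER DATA records.
    runner_cfg = scenario_cfg['runner']
    runner_cfg['runner_id'] = os.getpid()
    queue.put({'runner_id': runner_cfg['runner_id'],
               'scenario_cfg': scenario_cfg})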
------------------------------------------------------------------------
RUNNER DATA
A runner may print any number of these AFTER having printed a RUNNER PREP block.
{
runner_id: <int>
benchmark: {
<measurements>
}
}
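For illustration only, a minimal sketch of one such record; apart from
'errors', which appears in the diff below, the 'benchmark' keys shown here
are hypothetical:

    import os
    from multiprocessing import Queue

    queue = Queue()
    runner_id = os.getpid()

    # RUNNER DATA: one record per measurement iteration, after RUNNER PREP.
    benchmark_output = {
        'data': {'rtt': 0.42},  # hypothetical measurement
        'errors': '',
    }
    queue.put({'runner_id': runner_id,
               'benchmark': benchmark_output})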
------------------------------------------------------------------------
The runner_id is currently not guaranteed to be unique across runners, as
it is taken from the runner's process ID in the underlying operating system.
A possible improvement would be to assign runner_id a UUID value
according to RFC 4122 (e.g. uuid.uuid4() in Python).
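A minimal sketch of that suggested improvement (not implemented in this
patch):

    import uuid

    runner_cfg = {}  # the runner section of scenario_cfg, as in the diff below
    # An RFC 4122 random UUID stays unique across hosts and over time,
    # unlike a value derived from os.getpid().
    runner_cfg['runner_id'] = str(uuid.uuid4())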
------------------------------------------------------------------------
Other changes/cleanups in this patch:
- Removed the context argument from _worker_process as it was
redundant: it contained a dictionary with the runner configuration,
but the same dictionary was already included in the scenario_args argument.
- For clarity, renamed scenario_args to scenario_cfg. scenario_cfg was
the original name used in task.py, but the name changed across function
calls (see the sketch below).
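For reference, a sketch of the resulting worker signature, as it appears in
the diff below:

    def _worker_process(queue, cls, method_name, scenario_cfg):
        # The former 'context' argument is gone; the runner configuration
        # now travels inside scenario_cfg.
        runner_cfg = scenario_cfg['runner']
        interval = runner_cfg.get("interval", 1)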
Change-Id: I17d96f37c7d3e24b0747d23fcad7509fa949d662
JIRA: YARDSTICK-59
Signed-off-by: Jörgen Karlsson <jorgen.w.karlsson@ericsson.com>
-rwxr-xr-x | yardstick/benchmark/runners/arithmetic.py | 42 |
-rw-r--r-- | yardstick/benchmark/runners/base.py | 4 |
-rw-r--r-- | yardstick/benchmark/runners/duration.py | 38 |
-rwxr-xr-x | yardstick/benchmark/runners/iteration.py | 36 |
-rw-r--r-- | yardstick/benchmark/runners/sequence.py | 44 |
5 files changed, 90 insertions, 74 deletions
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index bae40eb75..3f5b640d9 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -22,32 +22,34 @@ from yardstick.benchmark.runners import base

 LOG = logging.getLogger(__name__)


-def _worker_process(queue, cls, method_name, context, scenario_args):
+def _worker_process(queue, cls, method_name, scenario_cfg):

     sequence = 1

-    interval = context.get("interval", 1)
-    arg_name = context.get('name')
-    stop = context.get('stop')
-    step = context.get('step')
-    options = scenario_args['options']
+    runner_cfg = scenario_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    arg_name = runner_cfg.get('name')
+    stop = runner_cfg.get('stop')
+    step = runner_cfg.get('step')
+    options = scenario_cfg['options']
     start = options.get(arg_name, 0)

-    context['runner'] = os.getpid()
+    runner_cfg['runner_id'] = os.getpid()

     LOG.info("worker START, step(%s, %d, %d, %d), class %s",
              arg_name, start, stop, step, cls)

-    benchmark = cls(context)
+    benchmark = cls(runner_cfg)
     benchmark.setup()
     method = getattr(benchmark, method_name)

-    record_context = {"runner": context["runner"],
-                      "host": context["host"]}
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'scenario_cfg': scenario_cfg})

     sla_action = None
-    if "sla" in scenario_args:
-        sla_action = scenario_args["sla"].get("action", "assert")
+    if "sla" in scenario_cfg:
+        sla_action = scenario_cfg["sla"].get("action", "assert")

     margin = 1 if step > 0 else -1

     for value in range(start, stop+margin, step):
@@ -55,13 +57,13 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
         options[arg_name] = value

         LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         data = {}
         errors = ""

         try:
-            data = method(scenario_args)
+            data = method(scenario_cfg)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -82,11 +84,13 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }

-        queue.put({'context': record_context, 'sargs': scenario_args,
-                   'benchmark': benchmark_output})
+        record = {'runner_id': runner_cfg['runner_id'],
+                  'benchmark': benchmark_output}
+
+        queue.put(record)

         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         sequence += 1
@@ -125,8 +129,8 @@ class ArithmeticRunner(base.Runner):

     __execution_type__ = 'Arithmetic'

-    def _run_benchmark(self, cls, method, scenario_args):
+    def _run_benchmark(self, cls, method, scenario_cfg):
         self.process = multiprocessing.Process(
             target=_worker_process,
-            args=(self.result_queue, cls, method, self.config, scenario_args))
+            args=(self.result_queue, cls, method, scenario_cfg))
         self.process.start()
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index 848322679..d8783f3c1 100644
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -148,7 +148,7 @@ class Runner(object):
             output = "{'post-stop-action-data': %s}" % data
             self.result_queue.put(output)

-    def run(self, scenario_type, scenario_args):
+    def run(self, scenario_type, scenario_cfg):
         class_name = base_scenario.Scenario.get(scenario_type)
         path_split = class_name.split(".")
         module_path = ".".join(path_split[:-1])
@@ -177,7 +177,7 @@ class Runner(object):
                 self.result_queue))
             self.periodic_action_process.start()

-        self._run_benchmark(cls, "run", scenario_args)
+        self._run_benchmark(cls, "run", scenario_cfg)

     def join(self):
         self.process.join()
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 363320a6d..af5aae899 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -21,38 +21,40 @@ from yardstick.benchmark.runners import base

 LOG = logging.getLogger(__name__)


-def _worker_process(queue, cls, method_name, context, scenario_args):
+def _worker_process(queue, cls, method_name, scenario_cfg):

     sequence = 1

-    interval = context.get("interval", 1)
-    duration = context.get("duration", 60)
+    runner_cfg = scenario_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    duration = runner_cfg.get("duration", 60)

     LOG.info("worker START, duration %d sec, class %s", duration, cls)

-    context['runner'] = os.getpid()
+    runner_cfg['runner_id'] = os.getpid()

-    benchmark = cls(context)
+    benchmark = cls(runner_cfg)
     benchmark.setup()
     method = getattr(benchmark, method_name)

-    record_context = {"runner": context["runner"],
-                      "host": context["host"]}
-
     sla_action = None
-    if "sla" in scenario_args:
-        sla_action = scenario_args["sla"].get("action", "assert")
+    if "sla" in scenario_cfg:
+        sla_action = scenario_cfg["sla"].get("action", "assert")
+
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'scenario_cfg': scenario_cfg})

     start = time.time()
     while True:

         LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         data = {}
         errors = ""

         try:
-            data = method(scenario_args)
+            data = method(scenario_cfg)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -73,11 +75,13 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }

-        queue.put({'context': record_context, 'sargs': scenario_args,
-                   'benchmark': benchmark_output})
+        record = {'runner_id': runner_cfg['runner_id'],
+                  'benchmark': benchmark_output}
+
+        queue.put(record)

         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         sequence += 1
@@ -105,8 +109,8 @@ If the scenario ends before the time has elapsed, it will be started again.
     '''
     __execution_type__ = 'Duration'

-    def _run_benchmark(self, cls, method, scenario_args):
+    def _run_benchmark(self, cls, method, scenario_cfg):
         self.process = multiprocessing.Process(
             target=_worker_process,
-            args=(self.result_queue, cls, method, self.config, scenario_args))
+            args=(self.result_queue, cls, method, scenario_cfg))
         self.process.start()
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 03dcfae03..3a6b2e1d6 100755
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -21,37 +21,39 @@ from yardstick.benchmark.runners import base

 LOG = logging.getLogger(__name__)


-def _worker_process(queue, cls, method_name, context, scenario_args):
+def _worker_process(queue, cls, method_name, scenario_cfg):

     sequence = 1

-    interval = context.get("interval", 1)
-    iterations = context.get("iterations", 1)
+    runner_cfg = scenario_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    iterations = runner_cfg.get("iterations", 1)

     LOG.info("worker START, iterations %d times, class %s", iterations, cls)

-    context['runner'] = os.getpid()
+    runner_cfg['runner_id'] = os.getpid()

-    benchmark = cls(context)
+    benchmark = cls(runner_cfg)
     benchmark.setup()
     method = getattr(benchmark, method_name)

-    record_context = {"runner": context["runner"],
-                      "host": context["host"]}
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'scenario_cfg': scenario_cfg})

     sla_action = None
-    if "sla" in scenario_args:
-        sla_action = scenario_args["sla"].get("action", "assert")
+    if "sla" in scenario_cfg:
+        sla_action = scenario_cfg["sla"].get("action", "assert")

     while True:

         LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         data = {}
         errors = ""

         try:
-            data = method(scenario_args)
+            data = method(scenario_cfg)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -72,11 +74,13 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }

-        queue.put({'context': record_context, 'sargs': scenario_args,
-                   'benchmark': benchmark_output})
+        record = {'runner_id': runner_cfg['runner_id'],
+                  'benchmark': benchmark_output}
+
+        queue.put(record)

         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         sequence += 1
@@ -104,8 +108,8 @@ If the scenario ends before the time has elapsed, it will be started again.
     '''
     __execution_type__ = 'Iteration'

-    def _run_benchmark(self, cls, method, scenario_args):
+    def _run_benchmark(self, cls, method, scenario_cfg):
         self.process = multiprocessing.Process(
             target=_worker_process,
-            args=(self.result_queue, cls, method, self.config, scenario_args))
+            args=(self.result_queue, cls, method, scenario_cfg))
         self.process.start()
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index 25b65b0b8..ac8fe1418 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -22,46 +22,48 @@ from yardstick.benchmark.runners import base

 LOG = logging.getLogger(__name__)


-def _worker_process(queue, cls, method_name, context, scenario_args):
+def _worker_process(queue, cls, method_name, scenario_cfg):

     sequence = 1

-    interval = context.get("interval", 1)
-    arg_name = context.get('scenario_option_name')
-    sequence_values = context.get('sequence')
+    runner_cfg = scenario_cfg['runner']

-    if 'options' not in scenario_args:
-        scenario_args['options'] = {}
+    interval = runner_cfg.get("interval", 1)
+    arg_name = runner_cfg.get('scenario_option_name')
+    sequence_values = runner_cfg.get('sequence')

-    options = scenario_args['options']
+    if 'options' not in scenario_cfg:
+        scenario_cfg['options'] = {}

-    context['runner'] = os.getpid()
+    options = scenario_cfg['options']
+
+    runner_cfg['runner_id'] = os.getpid()

     LOG.info("worker START, sequence_values(%s, %s), class %s",
              arg_name, sequence_values, cls)

-    benchmark = cls(context)
+    benchmark = cls(runner_cfg)
     benchmark.setup()
     method = getattr(benchmark, method_name)

-    record_context = {"runner": context["runner"],
-                      "host": context["host"]}
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'scenario_cfg': scenario_cfg})

     sla_action = None
-    if "sla" in scenario_args:
-        sla_action = scenario_args["sla"].get("action", "assert")
+    if "sla" in scenario_cfg:
+        sla_action = scenario_cfg["sla"].get("action", "assert")

     for value in sequence_values:
         options[arg_name] = value

         LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         data = {}
         errors = ""

         try:
-            data = method(scenario_args)
+            data = method(scenario_cfg)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -82,11 +84,13 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }

-        queue.put({'context': record_context, 'sargs': scenario_args,
-                   'benchmark': benchmark_output})
+        record = {'runner_id': runner_cfg['runner_id'],
+                  'benchmark': benchmark_output}
+
+        queue.put(record)

         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})

         sequence += 1
@@ -117,8 +121,8 @@ class SequenceRunner(base.Runner):

     __execution_type__ = 'Sequence'

-    def _run_benchmark(self, cls, method, scenario_args):
+    def _run_benchmark(self, cls, method, scenario_cfg):
         self.process = multiprocessing.Process(
             target=_worker_process,
-            args=(self.result_queue, cls, method, self.config, scenario_args))
+            args=(self.result_queue, cls, method, scenario_cfg))
         self.process.start()