From 44ee87706046d6caac49feff5a595e837974ff69 Mon Sep 17 00:00:00 2001
From: Hans Feldt
Date: Wed, 20 May 2015 14:28:24 +0200
Subject: add package runners and a few runners

The "duration" runner will run a test for an amount of time.
The "arithmetic" runner will increase an input value of the test for
every run until a limit is reached.

Change-Id: I31c9864604110ce3f5d19ec110144d1c886c7620
JIRA: -

Signed-off-by: Hans Feldt
---
 yardstick/benchmark/runners/__init__.py   |   0
 yardstick/benchmark/runners/arithmetic.py | 105 ++++++++++++++++++++++++++++++
 yardstick/benchmark/runners/base.py       |  94 ++++++++++++++++++++++++++
 yardstick/benchmark/runners/duration.py   |  95 +++++++++++++++++++++++++++
 4 files changed, 294 insertions(+)
 create mode 100644 yardstick/benchmark/runners/__init__.py
 create mode 100644 yardstick/benchmark/runners/arithmetic.py
 create mode 100644 yardstick/benchmark/runners/base.py
 create mode 100644 yardstick/benchmark/runners/duration.py

diff --git a/yardstick/benchmark/runners/__init__.py b/yardstick/benchmark/runners/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
new file mode 100644
index 000000000..bac7efb49
--- /dev/null
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -0,0 +1,105 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+'''A runner that every run arithmetically steps a specified input value to
+the scenario. This just means a step value is added to the previous value.
+
+'''
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, context, scenario_args):
+
+    sequence = 1
+
+    benchmark = cls(context)
+    method = getattr(benchmark, method_name)
+
+    interval = context.get("interval", 1)
+    arg_name = context.get('name')
+    stop = context.get('stop')
+    step = context.get('step')
+    options = scenario_args['options']
+    start = options.get(arg_name, 0)
+
+    context['runner'] = os.getpid()
+
+    LOG.info("worker START, step(%s, %d, %d, %d), class %s",
+             arg_name, start, stop, step, cls)
+
+    record_context = {"runner": context["runner"],
+                      "host": context["host"]}
+
+    sla_action = None
+    if "sla" in scenario_args:
+        sla_action = scenario_args["sla"].get("action", "assert")
+
+    for value in range(start, stop, step):
+
+        options[arg_name] = value
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        data = {}
+        errors = ""
+
+        try:
+            data = method(scenario_args)
+        except AssertionError as assertion:
+            # SLA validation failed in scenario, determine what to do now
+            if sla_action == "assert":
+                raise
+            elif sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s" % assertion.args)
+                errors = assertion.args
+        except Exception as e:
+            errors = traceback.format_exc()
+            LOG.exception(e)
+
+        time.sleep(interval)
+
+        benchmark_output = {
+            'timestamp': time.time(),
+            'sequence': sequence,
+            'data': data,
+            'errors': errors
+        }
+
+        queue.put({'context': record_context, 'sargs': scenario_args,
+                   'benchmark': benchmark_output})
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        sequence += 1
+
+        if errors:
+            break
+
+    LOG.info("worker END")
+
+
+class ArithmeticRunner(base.Runner):
+
+    __execution_type__ = 'Arithmetic'
+
+    def _run_benchmark(self, cls, method,
+                       scenario_args):
+        self.process = multiprocessing.Process(
+            target=_worker_process,
+            args=(self.result_queue, cls, method, self.config, scenario_args))
+        self.process.start()
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
new file mode 100644
index 000000000..59ec4734a
--- /dev/null
+++ b/yardstick/benchmark/runners/base.py
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import importlib
+import multiprocessing
+import json
+import logging
+
+log = logging.getLogger(__name__)
+
+import yardstick.common.utils as utils
+from yardstick.benchmark.scenarios import base as base_scenario
+
+
+def _output_serializer_main(filename, queue):
+    '''entrypoint for the singleton subprocess writing to outfile
+    Use of this process enables multiple instances of a scenario without
+    messing up the output file.
+    '''
+    with open(filename, 'w') as outfile:
+        while True:
+            # blocks until data becomes available
+            record = queue.get()
+            if record == '_TERMINATE_':
+                outfile.close()
+                break
+            else:
+                json.dump(record, outfile)
+                outfile.write('\n')
+
+
+class Runner(object):
+    queue = None
+    dump_process = None
+    runners = []
+
+    @staticmethod
+    def _get_cls(runner_type):
+        for runner in utils.itersubclasses(Runner):
+            if runner_type == runner.__execution_type__:
+                return runner
+        raise RuntimeError("No such runner_type %s" % runner_type)
+
+    @staticmethod
+    def get(config):
+        """Returns instance of a scenario runner for execution type.
+        """
+        # if there is no runner, start the output serializer subprocess
+        if len(Runner.runners) == 0:
+            log.debug("Starting dump process file '%s'" %
+                      config["output_filename"])
+            Runner.queue = multiprocessing.Queue()
+            Runner.dump_process = multiprocessing.Process(
+                target=_output_serializer_main,
+                name="Dumper",
+                args=(config["output_filename"], Runner.queue))
+            Runner.dump_process.start()
+
+        return Runner._get_cls(config["type"])(config, Runner.queue)
+
+    @staticmethod
+    def release(runner):
+        '''Release the runner'''
+        Runner.runners.remove(runner)
+        # if this was the last runner, stop the output serializer subprocess
+        if len(Runner.runners) == 0:
+            log.debug("Stopping dump process")
+            Runner.queue.put('_TERMINATE_')
+            Runner.dump_process.join()
+
+    def __init__(self, config, queue):
+        self.context = {}
+        self.config = config
+        self.result_queue = queue
+        Runner.runners.append(self)
+
+    def run(self, scenario_type, scenario_args):
+        class_name = base_scenario.Scenario.get(scenario_type)
+        path_split = class_name.split(".")
+        module_path = ".".join(path_split[:-1])
+        module = importlib.import_module(module_path)
+        cls = getattr(module, path_split[-1])
+
+        self.config['object'] = class_name
+        self._run_benchmark(cls, "run", scenario_args)
+
+    def join(self):
+        self.process.join()
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
new file mode 100644
index 000000000..991724826
--- /dev/null
+++ b/yardstick/benchmark/runners/duration.py
@@ -0,0 +1,95 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved.
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+'''A runner that runs a specific time before it returns
+'''
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, context, scenario_args):
+
+    sequence = 1
+
+    benchmark = cls(context)
+    method = getattr(benchmark, method_name)
+    interval = context.get("interval", 1)
+    duration = context.get("duration", 60)
+    context['runner'] = os.getpid()
+
+    LOG.info("worker START, duration %d sec, class %s", duration, cls)
+
+    record_context = {"runner": context["runner"],
+                      "host": context["host"]}
+
+    sla_action = None
+    if "sla" in scenario_args:
+        sla_action = scenario_args["sla"].get("action", "assert")
+
+    start = time.time()
+    while True:
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        data = {}
+        errors = ""
+
+        try:
+            data = method(scenario_args)
+        except AssertionError as assertion:
+            # SLA validation failed in scenario, determine what to do now
+            if sla_action == "assert":
+                raise
+            elif sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s" % assertion.args)
+                errors = assertion.args
+        except Exception as e:
+            errors = traceback.format_exc()
+            LOG.exception(e)
+
+        time.sleep(interval)
+
+        benchmark_output = {
+            'timestamp': time.time(),
+            'sequence': sequence,
+            'data': data,
+            'errors': errors
+        }
+
+        queue.put({'context': record_context, 'sargs': scenario_args,
+                   'benchmark': benchmark_output})
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        sequence += \
+            1
+
+        if (errors and sla_action is None) or (time.time() - start > duration):
+            LOG.info("worker END")
+            return
+
+
+class DurationRunner(base.Runner):
+
+    __execution_type__ = 'Duration'
+
+    def _run_benchmark(self, cls, method, scenario_args):
+        self.process = multiprocessing.Process(
+            target=_worker_process,
+            args=(self.result_queue, cls, method, self.config, scenario_args))
+        self.process.start()
-- 
cgit 1.2.3-korg