Diffstat (limited to 'yardstick/benchmark/runners')
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py    |  11
-rwxr-xr-x  yardstick/benchmark/runners/base.py           |  71
-rw-r--r--  yardstick/benchmark/runners/duration.py       |  17
-rwxr-xr-x  yardstick/benchmark/runners/dynamictp.py      |   7
-rw-r--r--  yardstick/benchmark/runners/iteration.py      |  47
-rw-r--r--  yardstick/benchmark/runners/proxduration.py   | 166
-rw-r--r--  yardstick/benchmark/runners/search.py         |   9
-rw-r--r--  yardstick/benchmark/runners/sequence.py       |  40
8 files changed, 288 insertions, 80 deletions
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 6aaaed888..ecb59f960 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -37,6 +37,7 @@ import six
from six.moves import range
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -86,7 +87,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
loop_iter = six.moves.zip(*param_iters)
else:
LOG.warning("iter_type unrecognized: %s", iter_type)
- raise TypeError("iter_type unrecognized: %s", iter_type)
+ raise TypeError("iter_type unrecognized: %s" % iter_type)
# Populate options and run the requested method for each value combination
for comb_values in loop_iter:
@@ -105,14 +106,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
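
Every runner in this change makes the same swap: scenarios signal SLA failures with yardstick.common.exceptions.SLAValidationError instead of a bare AssertionError, so the runners can tell SLA breaches apart from genuine bugs. As a minimal sketch of the scenario side (the scenario class, KPI and SLA key are hypothetical, and the case_name/error_msg keyword constructor is assumed from the exception's message template):

    from yardstick.benchmark.scenarios import base as scenario_base
    from yardstick.common import exceptions as y_exc

    class DummyLatency(scenario_base.Scenario):  # hypothetical scenario
        __scenario_type__ = "DummyLatency"

        def __init__(self, scenario_cfg, context_cfg):
            self.scenario_cfg = scenario_cfg
            self.context_cfg = context_cfg

        def run(self, result):
            result["latency"] = latency = 42.0  # measured KPI
            sla_max = self.scenario_cfg["sla"]["max_latency"]
            if latency > sla_max:
                # raised instead of a bare assert, so the sla_action
                # logic above can decide to abort or just monitor
                raise y_exc.SLAValidationError(
                    case_name=self.__scenario_type__,
                    error_msg="latency %s > max_latency %s"
                              % (latency, sla_max))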
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index a887fa5b3..94de45d1e 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -12,24 +12,22 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
+# This is a modified copy of ``rally/rally/benchmark/runners/base.py``
-# yardstick comment: this is a modified copy of
-# rally/rally/benchmark/runners/base.py
-
-from __future__ import absolute_import
-
+import importlib
import logging
import multiprocessing
import subprocess
import time
import traceback
-import importlib
-
-from six.moves.queue import Empty
+from six import moves
-import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.common import utils
+from yardstick.dispatcher.base import Base as DispatcherBase
+
log = logging.getLogger(__name__)
@@ -39,7 +37,7 @@ def _execute_shell_command(command):
exitcode = 0
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except subprocess.CalledProcessError:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n ", command)
@@ -79,6 +77,33 @@ def _periodic_action(interval, command, queue):
queue.put({'periodic-action-data': data})
+class ScenarioOutput(dict):
+
+ QUEUE_PUT_TIMEOUT = 10
+
+ def __init__(self, queue, **kwargs):
+ super(ScenarioOutput, self).__init__()
+ self._queue = queue
+ self.result_ext = dict()
+ for key, val in kwargs.items():
+ self.result_ext[key] = val
+ setattr(self, key, val)
+
+ def push(self, data=None, add_timestamp=True):
+ if data is None:
+ data = dict(self)
+
+ if add_timestamp:
+ result = {'timestamp': time.time(), 'data': data}
+ else:
+ result = data
+
+ for key in self.result_ext.keys():
+ result[key] = getattr(self, key)
+
+ self._queue.put(result, True, self.QUEUE_PUT_TIMEOUT)
+
+
class Runner(object):
runners = []
@@ -119,7 +144,7 @@ class Runner(object):
@staticmethod
def terminate_all():
"""Terminate all runners (subprocesses)"""
- log.debug("Terminating all runners", exc_info=True)
+ log.debug("Terminating all runners")
# release dumper process as some errors before any runner is created
if not Runner.runners:
@@ -137,6 +162,8 @@ class Runner(object):
Runner.release(runner)
def __init__(self, config):
+ self.task_id = None
+ self.case_name = None
self.config = config
self.periodic_action_process = None
self.output_queue = multiprocessing.Queue()
@@ -170,6 +197,8 @@ class Runner(object):
cls = getattr(module, path_split[-1])
self.config['object'] = class_name
+ self.case_name = scenario_cfg['tc']
+ self.task_id = scenario_cfg['task_id']
self.aborted.clear()
# run a potentially configured pre-start action
@@ -239,16 +268,30 @@ class Runner(object):
log.debug("output_queue size %s", self.output_queue.qsize())
try:
result.update(self.output_queue.get(True, 1))
- except Empty:
+ except moves.queue.Empty:
pass
return result
def get_result(self):
result = []
+
+ dispatcher = self.config['output_config']['DEFAULT']['dispatcher']
+ output_in_influxdb = 'influxdb' in dispatcher
+
while not self.result_queue.empty():
log.debug("result_queue size %s", self.result_queue.qsize())
try:
- result.append(self.result_queue.get(True, 1))
- except Empty:
+ one_record = self.result_queue.get(True, 1)
+ except moves.queue.Empty:
pass
+ else:
+ if output_in_influxdb:
+ self._output_to_influxdb(one_record)
+
+ result.append(one_record)
return result
+
+ def _output_to_influxdb(self, record):
+ dispatchers = DispatcherBase.get(self.config['output_config'])
+ dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
+ dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
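
To make the new ScenarioOutput helper concrete, here is a minimal usage sketch mirroring what the iteration.py and sequence.py workers below now do (the KPI name is made up):

    import multiprocessing

    from yardstick.benchmark.runners import base

    queue = multiprocessing.Queue()
    out = base.ScenarioOutput(queue, sequence=1, errors="")

    out["packets_per_second"] = 1000000  # KPIs collected like dict items
    out.push()          # puts {'timestamp': ..., 'data': {...},
                        #       'sequence': 1, 'errors': ''} on the queue
    out.sequence += 1   # attribute updates show up in the next push()

Note also that get_result() now uploads each record to InfluxDB as it drains the result queue, whenever an influxdb dispatcher is configured, rather than leaving everything to a single batch dispatch at the end of the task.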
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index fbf72a74c..55c3690fd 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,6 +27,7 @@ import traceback
import time
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -66,18 +67,21 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
data = {}
errors = ""
+ benchmark.pre_run_wait_time(interval)
+
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
+ benchmark.teardown()
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
# catch all exceptions because with multiprocessing we can have un-picklable exception
# problems https://bugs.python.org/issue9400
- except Exception:
+ except Exception: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception("")
else:
@@ -86,7 +90,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
# if we do timeout we don't care about dropping individual KPIs
output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
- time.sleep(interval)
+ benchmark.post_run_wait_time(interval)
benchmark_output = {
'timestamp': time.time(),
@@ -102,7 +106,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sequence += 1
- if (errors and sla_action is None) or time.time() > timeout or aborted.is_set():
+ if ((errors and sla_action is None) or time.time() > timeout
+ or aborted.is_set() or benchmark.is_ended()):
LOG.info("Worker END")
break
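
The fixed time.sleep(interval) is replaced by per-scenario hooks, so a scenario can shift or shorten the wait, and is_ended() lets it stop the loop early. A sketch of what the base-scenario defaults are assumed to look like, preserving the old behaviour for scenarios that override nothing:

    import time

    class Scenario(object):  # excerpt; assumed default implementations
        def pre_run_wait_time(self, time_seconds):
            """Time waited before executing the run method."""
            pass  # no delay before run() by default

        def post_run_wait_time(self, time_seconds):
            """Time waited after executing the run method."""
            time.sleep(time_seconds)  # same effect as the old sleep

        def is_ended(self):
            return False  # scenarios may override to signal early stop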
diff --git a/yardstick/benchmark/runners/dynamictp.py b/yardstick/benchmark/runners/dynamictp.py
index 63bfc823a..88d3c5704 100755
--- a/yardstick/benchmark/runners/dynamictp.py
+++ b/yardstick/benchmark/runners/dynamictp.py
@@ -27,6 +27,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -80,10 +81,10 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
method(data)
- except AssertionError as assertion:
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ except y_exc.SLAValidationError as error:
+ LOG.warning("SLA validation failed: %s", error.args)
too_high = True
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index cb0424377..15dad2cd5 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -23,12 +23,12 @@ from __future__ import absolute_import
import logging
import multiprocessing
-import time
import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -39,8 +39,6 @@ QUEUE_PUT_TIMEOUT = 10
def _worker_process(queue, cls, method_name, scenario_cfg,
context_cfg, aborted, output_queue):
- sequence = 1
-
runner_cfg = scenario_cfg['runner']
interval = runner_cfg.get("interval", 1)
@@ -52,6 +50,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
runner_cfg['runner_id'] = os.getpid()
+ scenario_output = base.ScenarioOutput(queue, sequence=1, errors="")
benchmark = cls(scenario_cfg, context_cfg)
if "setup" in run_step:
benchmark.setup()
@@ -66,20 +65,21 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
LOG.debug("runner=%(runner)s seq=%(sequence)s START",
{"runner": runner_cfg["runner_id"],
- "sequence": sequence})
+ "sequence": scenario_output.sequence})
- data = {}
- errors = ""
+ scenario_output.clear()
+ scenario_output.errors = ""
+ benchmark.pre_run_wait_time(interval)
try:
- result = method(data)
- except AssertionError as assertion:
+ result = method(scenario_output)
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ scenario_output.errors = error.args
elif sla_action == "rate-control":
try:
scenario_cfg['options']['rate']
@@ -88,36 +88,31 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
scenario_cfg['options']['rate'] = 100
scenario_cfg['options']['rate'] -= delta
- sequence = 1
+ scenario_output.sequence = 1
continue
- except Exception:
- errors = traceback.format_exc()
+ except Exception: # pylint: disable=broad-except
+ scenario_output.errors = traceback.format_exc()
LOG.exception("")
+ raise
else:
if result:
# add timeout for put so we don't block test
# if we do timeout we don't care about dropping individual KPIs
output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
- time.sleep(interval)
-
- benchmark_output = {
- 'timestamp': time.time(),
- 'sequence': sequence,
- 'data': data,
- 'errors': errors
- }
+ benchmark.post_run_wait_time(interval)
- queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
+ if scenario_output:
+ scenario_output.push()
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"],
- "sequence": sequence})
+ "sequence": scenario_output.sequence})
- sequence += 1
+ scenario_output.sequence += 1
- if (errors and sla_action is None) or \
- (sequence > iterations or aborted.is_set()):
+ if (scenario_output.errors and sla_action is None) or \
+ (scenario_output.sequence > iterations or aborted.is_set()):
LOG.info("worker END")
break
if "teardown" in run_step:
diff --git a/yardstick/benchmark/runners/proxduration.py b/yardstick/benchmark/runners/proxduration.py
new file mode 100644
index 000000000..e217904b9
--- /dev/null
+++ b/yardstick/benchmark/runners/proxduration.py
@@ -0,0 +1,166 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick comment: this is a modified copy of
+# rally/rally/benchmark/runners/constant.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
+from yardstick.common import constants
+
+LOG = logging.getLogger(__name__)
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = scenario_cfg['runner']
+
+ requested_interval = interval = runner_cfg.get("interval", 1)
+ duration = runner_cfg.get("duration", 60)
+ sampled = runner_cfg.get("sampled", False)
+
+ LOG.info("Worker START, duration is %ds", duration)
+ LOG.debug("class is %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ benchmark.setup()
+ method = getattr(benchmark, method_name)
+
+ sla_action = None
+ if "sla" in scenario_cfg:
+ sla_action = scenario_cfg["sla"].get("action", "assert")
+
+
+ start = time.time()
+ timeout = start + duration
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ benchmark.pre_run_wait_time(interval)
+
+ if sampled:
+ try:
+ pre_adjustment = time.time()
+ result = method(data)
+ post_adjustment = time.time()
+ if requested_interval > post_adjustment - pre_adjustment:
+ interval = requested_interval - (post_adjustment - pre_adjustment)
+ else:
+ interval = 0
+
+ except y_exc.SLAValidationError as error:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ # catch all exceptions because with multiprocessing we can have un-picklable exception
+ # problems https://bugs.python.org/issue9400
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception("")
+ else:
+ if result:
+ # add timeout for put so we don't block test
+ # if we do timeout we don't care about dropping individual KPIs
+ output_queue.put(result, True, constants.QUEUE_PUT_TIMEOUT)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)
+ else:
+ LOG.debug("No sample collected ...Sequence %s", sequence)
+
+
+ sequence += 1
+
+ if ((errors and sla_action is None) or time.time() > timeout
+ or aborted.is_set() or benchmark.is_ended()):
+ LOG.info("Worker END")
+ break
+
+ try:
+ benchmark.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+ LOG.info("Exiting ProxDuration Runner...")
+
+class ProxDurationRunner(base.Runner):
+ """Run a scenario for a certain amount of time
+
+If the scenario ends before the time has elapsed, it will be started again.
+
+ Parameters
+ duration - amount of time the scenario will be run for
+ type: int
+ unit: seconds
+ default: 60 sec
+ interval - time to wait between each scenario invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+    sampled - whether sample data should be collected
+        type: boolean
+        values: True/False
+        default: False
+ confirmation - Number of confirmation retries
+ type: int
+ unit: retry attempts
+ default: 0
+ """
+ __execution_type__ = 'ProxDuration'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
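
For reference, the runner keys read by _worker_process above map to a configuration along these lines (values are illustrative; the worker reads duration, interval and sampled, while type selects this runner):

    # Illustrative runner section of a scenario_cfg for this runner.
    runner_cfg = {
        "type": "ProxDuration",
        "duration": 600,  # stop after 10 minutes
        "interval": 1,    # target one sample per second
        "sampled": True,  # False drives traffic without collecting KPIs
    }

When sampled is set, the wait before the next iteration is shortened by however long method(data) took, so samples stay close to the requested one-per-interval cadence instead of drifting by the measurement time.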
diff --git a/yardstick/benchmark/runners/search.py b/yardstick/benchmark/runners/search.py
index 8037329b5..01a4292c7 100644
--- a/yardstick/benchmark/runners/search.py
+++ b/yardstick/benchmark/runners/search.py
@@ -33,6 +33,7 @@ from collections import Mapping
from six.moves import zip
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -119,14 +120,14 @@ If the scenario ends before the time has elapsed, it will be started again.
try:
self.worker_helper(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if self.sla_action == "assert":
raise
elif self.sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index d6e3f7109..58ffddd22 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -30,6 +30,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -37,8 +38,6 @@ LOG = logging.getLogger(__name__)
def _worker_process(queue, cls, method_name, scenario_cfg,
context_cfg, aborted, output_queue):
- sequence = 1
-
runner_cfg = scenario_cfg['runner']
interval = runner_cfg.get("interval", 1)
@@ -55,6 +54,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
LOG.info("worker START, sequence_values(%s, %s), class %s",
arg_name, sequence_values, cls)
+ scenario_output = base.ScenarioOutput(queue, sequence=1, errors="")
benchmark = cls(scenario_cfg, context_cfg)
benchmark.setup()
method = getattr(benchmark, method_name)
@@ -67,22 +67,23 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
options[arg_name] = value
LOG.debug("runner=%(runner)s seq=%(sequence)s START",
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
+ {"runner": runner_cfg["runner_id"],
+ "sequence": scenario_output.sequence})
- data = {}
- errors = ""
+ scenario_output.clear()
+ scenario_output.errors = ""
try:
- result = method(data)
- except AssertionError as assertion:
+ result = method(scenario_output)
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
- errors = traceback.format_exc()
+ LOG.warning("SLA validation failed: %s", error.args)
+ scenario_output.errors = error.args
+ except Exception as e: # pylint: disable=broad-except
+ scenario_output.errors = traceback.format_exc()
LOG.exception(e)
else:
if result:
@@ -90,21 +91,16 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
time.sleep(interval)
- benchmark_output = {
- 'timestamp': time.time(),
- 'sequence': sequence,
- 'data': data,
- 'errors': errors
- }
-
- queue.put(benchmark_output)
+ if scenario_output:
+ scenario_output.push()
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
+ {"runner": runner_cfg["runner_id"],
+ "sequence": scenario_output.sequence})
- sequence += 1
+ scenario_output.sequence += 1
- if (errors and sla_action is None) or aborted.is_set():
+ if (scenario_output.errors and sla_action is None) or aborted.is_set():
break
try: