path: root/yardstick/benchmark/runners/iteration.py
author     kubi <jean.gaoliang@huawei.com>                    2015-12-31 14:38:49 +0800
committer  Jörgen Karlsson <jorgen.w.karlsson@ericsson.com>   2016-01-08 14:09:18 +0000
commit     169aadbf7fbfea8ad86011fbac62ccbc0e1c3bf7 (patch)
tree       c965bb53d0ac66d83354a7b0b3edc0a3345a3945 /yardstick/benchmark/runners/iteration.py
parent     0bbef8513b2ed9166d57e3467d62c823a134d4e6 (diff)
support for ipv6
JIRA: YARDSTICK-187

Change-Id: I1cecd400b4449a09d22d43f4a42e889f00dd4fe7
Signed-off-by: kubi <jean.gaoliang@huawei.com>
(cherry picked from commit cd80b44f3fd9b8c9e2afc51bc67d7a5cf34fb1c6)
Diffstat (limited to 'yardstick/benchmark/runners/iteration.py')
-rw-r--r-- (was -rwxr-xr-x)   yardstick/benchmark/runners/iteration.py   101
1 file changed, 53 insertions(+), 48 deletions(-)
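
The diff below adds a run_step option to the iteration runner so that the setup, run, and teardown phases can be executed selectively. A minimal sketch of a runner configuration dict that exercises the new option (only interval, iterations, and run_step are keys the worker reads in this change; the rest of a real yardstick runner section is omitted, and the values here are illustrative assumptions):

# Illustrative runner configuration for the new option (an assumption of how a
# caller might populate runner_cfg; not taken from the yardstick source).
runner_cfg = {
    "interval": 1,            # seconds to sleep after each call to method(data)
    "iterations": 10,         # run loop exits once sequence > iterations
    "run_step": "setup,run",  # execute setup and the run loop, skip teardown
}

With the default value "setup,run,teardown" the behaviour is unchanged from the previous code path.
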
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index e38ed3749..c24957b1a 100755..100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -30,12 +30,15 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
interval = runner_cfg.get("interval", 1)
iterations = runner_cfg.get("iterations", 1)
+ run_step = runner_cfg.get("run_step", "setup,run,teardown")
LOG.info("worker START, iterations %d times, class %s", iterations, cls)
runner_cfg['runner_id'] = os.getpid()
benchmark = cls(scenario_cfg, context_cfg)
- benchmark.setup()
+ if "setup" in run_step:
+ benchmark.setup()
+
method = getattr(benchmark, method_name)
queue.put({'runner_id': runner_cfg['runner_id'],
@@ -45,53 +48,55 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
-
- while True:
-
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
-
- data = {}
- errors = ""
-
- try:
- method(data)
- except AssertionError as assertion:
- # SLA validation failed in scenario, determine what to do now
- if sla_action == "assert":
- raise
- elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
- errors = assertion.args
- except Exception as e:
- errors = traceback.format_exc()
- LOG.exception(e)
-
- time.sleep(interval)
-
- benchmark_output = {
- 'timestamp': time.time(),
- 'sequence': sequence,
- 'data': data,
- 'errors': errors
- }
-
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
-
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
-
- sequence += 1
-
- if (errors and sla_action is None) or \
- (sequence > iterations or aborted.is_set()):
- LOG.info("worker END")
- break
-
- benchmark.teardown()
+ if "run" in run_step:
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ method(data)
+ except AssertionError as assertion:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s" % assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ time.sleep(interval)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ record = {'runner_id': runner_cfg['runner_id'],
+ 'benchmark': benchmark_output}
+
+ queue.put(record)
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or \
+ (sequence > iterations or aborted.is_set()):
+ LOG.info("worker END")
+ break
+ if "teardown" in run_step:
+ benchmark.teardown()
class IterationRunner(base.Runner):
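
For reference, a standalone sketch of the phase gating this change introduces (the DummyBenchmark class and run_phases() helper are illustrative, not part of yardstick). The worker tests each phase name with substring membership on the comma-separated run_step string; splitting on "," as below is an equivalent, slightly stricter reading. Presumably this lets a scenario skip setup or teardown when its environment is prepared or cleaned up by another step.

# Standalone sketch (not part of the commit) of how run_step gates the
# benchmark lifecycle in _worker_process().

class DummyBenchmark:
    """Stand-in for a scenario class; only used to illustrate the gating."""
    def setup(self):
        print("setup")

    def run(self, data):
        data["ok"] = True
        print("run", data)

    def teardown(self):
        print("teardown")


def run_phases(benchmark, run_step="setup,run,teardown"):
    # Split the comma-separated phase list instead of substring matching.
    steps = {s.strip() for s in run_step.split(",")}
    if "setup" in steps:
        benchmark.setup()
    if "run" in steps:
        benchmark.run({})   # the real worker loops here until iterations/SLA/abort
    if "teardown" in steps:
        benchmark.teardown()


# run_step="setup,run" prepares and runs the scenario but leaves the
# environment in place; prints "setup" then "run {'ok': True}".
run_phases(DummyBenchmark(), run_step="setup,run")
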