| author | chenjiankun <chenjiankun1@huawei.com> | 2017-05-25 09:19:04 +0000 |
|---|---|---|
| committer | chenjiankun <chenjiankun1@huawei.com> | 2017-05-26 02:16:25 +0000 |
| commit | fc6eddef3d27cf51b3f6da3a523080e55c6bfb70 | |
| tree | f3fe93a193eec436bd834ce4583b084017ac6b5b | |
| parent | dd42ba3cafb908246da5b90c8bbbc2c7d0beb801 | |
Pass parameters between scenarios
JIRA: YARDSTICK-641
Allow parameters to be passed between scenarios so that one test case
can be a combination of several scenarios.
Change-Id: I55a00855e77d5b719a27a069a3ea195d6bbd0ef8
Signed-off-by: chenjiankun <chenjiankun1@huawei.com>
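
In this model, any string option whose value begins with `$` is treated as a reference to an output collected from an earlier scenario and is substituted before the scenario runs. Below is a minimal standalone sketch of that substitution rule; the `resolve` helper is illustrative and simply mirrors the `_parse_options` method added to task.py in this change, and the sample output values are taken from the new unit test:

```python
# Sketch of the '$name' substitution rule (mirrors Task._parse_options below).
# 'outputs' stands in for Task.outputs, filled from earlier scenarios' results.
outputs = {'network': 'ext-net', 'node': 'node2', 'host': 'server.yardstick'}


def resolve(op):
    """Recursively replace '$name' strings with previously collected outputs."""
    if isinstance(op, dict):
        return {k: resolve(v) for k, v in op.items()}
    if isinstance(op, list):
        return [resolve(v) for v in op]
    if isinstance(op, str):
        return outputs.get(op[1:]) if op.startswith('$') else op
    return op


print(resolve({'openstack': {'EXTERNAL_NETWORK': '$network'}, 'host': '$host'}))
# {'openstack': {'EXTERNAL_NETWORK': 'ext-net'}, 'host': 'server.yardstick'}
```

Anything that is not a `$`-prefixed string (numbers, plain strings, nested lists and dicts) passes through unchanged; a reference to a missing output resolves to None via `dict.get`.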
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | tests/unit/benchmark/core/test_task.py | 28 |
| -rw-r--r-- | tests/unit/benchmark/runner/__init__.py | 0 |
| -rw-r--r-- | tests/unit/benchmark/runner/test_base.py | 45 |
| -rw-r--r-- | yardstick/benchmark/core/task.py | 17 |
| -rwxr-xr-x | yardstick/benchmark/runners/arithmetic.py | 9 |
| -rwxr-xr-x | yardstick/benchmark/runners/base.py | 7 |
| -rw-r--r-- | yardstick/benchmark/runners/duration.py | 9 |
| -rw-r--r-- | yardstick/benchmark/runners/iteration.py | 9 |
| -rw-r--r-- | yardstick/benchmark/runners/sequence.py | 9 |
9 files changed, 121 insertions, 12 deletions
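
Before the diff itself, here is a simplified model of the output path the patch introduces. It is a sketch, not the patched code: `worker()` stands in for a runner's `_worker_process`, `get_output()` copies the drain loop added to `Runner`, and the `remote_ip` result dict is purely illustrative.

```python
import multiprocessing


def worker(output_queue):
    # Stand-in for _worker_process: whatever the scenario method returns
    # (if truthy) is pushed onto the runner's output queue.
    result = {'remote_ip': '10.0.0.5'}  # illustrative value only
    if result:
        output_queue.put(result)


def get_output(output_queue):
    # Non-blocking drain, mirroring the new Runner.get_output().
    out = {}
    while not output_queue.empty():
        out.update(output_queue.get())
    return out


if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    p.join()                       # Task joins the runner before collecting output
    outputs = {}
    outputs.update(get_output(q))  # becomes Task.outputs, consumed by _parse_options
    print(outputs)                 # {'remote_ip': '10.0.0.5'}
```

Note that the drain is non-blocking: `multiprocessing.Queue.empty()` can report empty while the queue's feeder thread is still flushing, which is presumably why the new `test_get_output` unit test sleeps for a second before calling `get_output()`.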
```diff
diff --git a/tests/unit/benchmark/core/test_task.py b/tests/unit/benchmark/core/test_task.py
index cd7ffdebb..8034392f4 100644
--- a/tests/unit/benchmark/core/test_task.py
+++ b/tests/unit/benchmark/core/test_task.py
@@ -64,6 +64,7 @@ class TaskTestCase(unittest.TestCase):
         t = task.Task()
         runner = mock.Mock()
         runner.join.return_value = 0
+        runner.get_output.return_value = {}
         mock_base_runner.Runner.get.return_value = runner
         t._run([scenario], False, "yardstick.out")
         self.assertTrue(runner.run.called)
@@ -155,6 +156,33 @@ class TaskTestCase(unittest.TestCase):
         self.assertEqual(task_args_fnames[0], None)
         self.assertEqual(task_args_fnames[1], None)
 
+    def test_parse_options(self):
+        options = {
+            'openstack': {
+                'EXTERNAL_NETWORK': '$network'
+            },
+            'ndoes': ['node1', '$node'],
+            'host': '$host'
+        }
+
+        t = task.Task()
+        t.outputs = {
+            'network': 'ext-net',
+            'node': 'node2',
+            'host': 'server.yardstick'
+        }
+
+        idle_result = {
+            'openstack': {
+                'EXTERNAL_NETWORK': 'ext-net'
+            },
+            'ndoes': ['node1', 'node2'],
+            'host': 'server.yardstick'
+        }
+
+        actual_result = t._parse_options(options)
+        self.assertEqual(idle_result, actual_result)
+
     def test_change_server_name_host_str(self):
         scenario = {'host': 'demo'}
         suffix = '-8'
diff --git a/tests/unit/benchmark/runner/__init__.py b/tests/unit/benchmark/runner/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/runner/__init__.py
diff --git a/tests/unit/benchmark/runner/test_base.py b/tests/unit/benchmark/runner/test_base.py
new file mode 100644
index 000000000..7880fe5a5
--- /dev/null
+++ b/tests/unit/benchmark/runner/test_base.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import unittest
+import multiprocessing
+import time
+
+from yardstick.benchmark.runners.iteration import IterationRunner
+
+
+class RunnerTestCase(unittest.TestCase):
+
+    def test_get_output(self):
+        queue = multiprocessing.Queue()
+        runner = IterationRunner({}, queue)
+        runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
+        runner.output_queue.put({'criteria': 'PASS'})
+
+        idle_result = {
+            'case': 'opnfv_yardstick_tc002',
+            'criteria': 'PASS'
+        }
+
+        time.sleep(1)
+        actual_result = runner.get_output()
+        self.assertEqual(idle_result, actual_result)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 3a151dbba..c44081b73 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -44,6 +44,7 @@ class Task(object):  # pragma: no cover
     def __init__(self):
         self.config = {}
         self.contexts = []
+        self.outputs = {}
 
     def start(self, args, **kwargs):
         """Start a benchmark scenario."""
@@ -136,6 +137,7 @@ class Task(object):  # pragma: no cover
             # Wait for runners to finish
             for runner in runners:
                 runner_join(runner)
+                self.outputs.update(runner.get_output())
                 print("Runner ended, output in", output_file)
         else:
             # run serially
@@ -143,6 +145,7 @@ class Task(object):  # pragma: no cover
                 if not _is_background_scenario(scenario):
                     runner = self.run_one_scenario(scenario, output_file)
                     runner_join(runner)
+                    self.outputs.update(runner.get_output())
                     print("Runner ended, output in", output_file)
 
         # Abort background runners
@@ -155,6 +158,7 @@ class Task(object):  # pragma: no cover
                 # Nuke if it did not stop nicely
                 base_runner.Runner.terminate(runner)
                 runner_join(runner)
+                self.outputs.update(runner.get_output())
             else:
                 base_runner.Runner.release(runner)
             print("Background task ended")
@@ -168,11 +172,24 @@ class Task(object):  # pragma: no cover
         for context in self.contexts[::-1]:
             context.undeploy()
 
+    def _parse_options(self, op):
+        if isinstance(op, dict):
+            return {k: self._parse_options(v) for k, v in op.items()}
+        elif isinstance(op, list):
+            return [self._parse_options(v) for v in op]
+        elif isinstance(op, str):
+            return self.outputs.get(op[1:]) if op.startswith('$') else op
+        else:
+            return op
+
     def run_one_scenario(self, scenario_cfg, output_file):
         """run one scenario using context"""
         runner_cfg = scenario_cfg["runner"]
         runner_cfg['output_filename'] = output_file
 
+        options = scenario_cfg.get('options', {})
+        scenario_cfg['options'] = self._parse_options(options)
+
         # TODO support get multi hosts/vms info
         context_cfg = {}
         if "host" in scenario_cfg:
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 65fdb9d66..7ec593396 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -42,7 +42,7 @@
 LOG = logging.getLogger(__name__)
 
 
 def _worker_process(queue, cls, method_name, scenario_cfg,
-                    context_cfg, aborted):
+                    context_cfg, aborted, output_queue):
 
     sequence = 1
@@ -108,7 +108,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         errors = ""
 
         try:
-            method(data)
+            result = method(data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -119,6 +119,9 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         except Exception as e:
             errors = traceback.format_exc()
             LOG.exception(e)
+        else:
+            if result:
+                output_queue.put(result)
 
         time.sleep(interval)
 
@@ -188,5 +191,5 @@ class ArithmeticRunner(base.Runner):
         self.process = multiprocessing.Process(
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
-                  context_cfg, self.aborted))
+                  context_cfg, self.aborted, self.output_queue))
         self.process.start()
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index b48ed973a..ebb9a91b5 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -197,6 +197,7 @@ class Runner(object):
         self.config = config
         self.periodic_action_process = None
         self.result_queue = queue
+        self.output_queue = multiprocessing.Queue()
         self.process = None
         self.aborted = multiprocessing.Event()
         Runner.runners.append(self)
@@ -269,3 +270,9 @@ class Runner(object):
         self.run_post_stop_action()
 
         return self.process.exitcode
+
+    def get_output(self):
+        result = {}
+        while not self.output_queue.empty():
+            result.update(self.output_queue.get())
+        return result
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 772983cfd..2bf2cd2fe 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -32,7 +32,7 @@
 LOG = logging.getLogger(__name__)
 
 
 def _worker_process(queue, cls, method_name, scenario_cfg,
-                    context_cfg, aborted):
+                    context_cfg, aborted, output_queue):
 
     sequence = 1
@@ -66,7 +66,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         errors = ""
 
         try:
-            method(data)
+            result = method(data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -77,6 +77,9 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         except Exception as e:
             errors = traceback.format_exc()
             LOG.exception(e)
+        else:
+            if result:
+                output_queue.put(result)
 
         time.sleep(interval)
 
@@ -126,5 +129,5 @@ If the scenario ends before the time has elapsed, it will be started again.
         self.process = multiprocessing.Process(
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
-                  context_cfg, self.aborted))
+                  context_cfg, self.aborted, self.output_queue))
         self.process.start()
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 29daa0d42..3cafb5116 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -32,7 +32,7 @@
 LOG = logging.getLogger(__name__)
 
 
 def _worker_process(queue, cls, method_name, scenario_cfg,
-                    context_cfg, aborted):
+                    context_cfg, aborted, output_queue):
 
     sequence = 1
@@ -73,7 +73,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         errors = ""
 
         try:
-            method(data)
+            result = method(data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -88,6 +88,9 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         except Exception as e:
             errors = traceback.format_exc()
             LOG.exception(e)
+        else:
+            if result:
+                output_queue.put(result)
 
         time.sleep(interval)
 
@@ -138,5 +141,5 @@ If the scenario ends before the time has elapsed, it will be started again.
         self.process = multiprocessing.Process(
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
-                  context_cfg, self.aborted))
+                  context_cfg, self.aborted, self.output_queue))
         self.process.start()
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index af87c006e..74ff82204 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -33,7 +33,7 @@
 LOG = logging.getLogger(__name__)
 
 
 def _worker_process(queue, cls, method_name, scenario_cfg,
-                    context_cfg, aborted):
+                    context_cfg, aborted, output_queue):
 
     sequence = 1
@@ -75,7 +75,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         errors = ""
 
         try:
-            method(data)
+            result = method(data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -86,6 +86,9 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         except Exception as e:
             errors = traceback.format_exc()
             LOG.exception(e)
+        else:
+            if result:
+                output_queue.put(result)
 
         time.sleep(interval)
 
@@ -137,5 +140,5 @@ class SequenceRunner(base.Runner):
         self.process = multiprocessing.Process(
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
-                  context_cfg, self.aborted))
+                  context_cfg, self.aborted, self.output_queue))
         self.process.start()
```