summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKristian Hunt <kristian.hunt@gmail.com>2015-07-02 15:32:44 +0200
committerKristian Hunt <kristian.hunt@gmail.com>2015-07-03 14:21:41 +0200
commitbca83ab90d5f6ffb0afe5f2e756e3fd37f85c2a7 (patch)
tree87e44571b88d0af44ed76277bc8832762f200a9a
parentda2d2731a79da35b07a4ef0d4b6f8a426f853b41 (diff)
Add new sequence runner
The "sequence" runner will, on every run, use one input value for the test, taken from a pre-defined list in the task file. Example runner section of the task file: type: Sequence interval: 1 scenario_option_name: packetsize sequence: - 100 - 150 - 200 JIRA: YARDSTICK-47 Change-Id: I5bde9b78cb356499c338ef3da26ac1783670887d Signed-off-by: Kristian Hunt <kristian.hunt@gmail.com>
-rw-r--r--samples/ping-option-list.yaml44
-rw-r--r--yardstick/benchmark/runners/sequence.py124
2 files changed, 168 insertions, 0 deletions
diff --git a/samples/ping-option-list.yaml b/samples/ping-option-list.yaml
new file mode 100644
index 000000000..1fa95e6c2
--- /dev/null
+++ b/samples/ping-option-list.yaml
@@ -0,0 +1,44 @@
+---
+# Sample benchmark task config file
+# A list of input values for a chosen parameter using Sequence runner
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Ping
+ host: athena.demo
+ target: hades.demo
+
+ runner:
+ type: Sequence
+ interval: 1
+ scenario_option_name: packetsize
+ sequence:
+ - 100
+ - 200
+ - 250
+
+context:
+ name: demo
+ image: cirros-0.3.3
+ flavor: m1.tiny
+ user: cirros
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ hades:
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+ external_network: "net04_ext"
+
+
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
new file mode 100644
index 000000000..52bb69abf
--- /dev/null
+++ b/yardstick/benchmark/runners/sequence.py
@@ -0,0 +1,124 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+'''A runner that changes a specified scenario input value on every run.
+The input values of the sequence are specified as a list in the task file.
+'''
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, context, scenario_args):
+
+ sequence = 1
+
+ interval = context.get("interval", 1)
+ arg_name = context.get('scenario_option_name')
+ sequence_values = context.get('sequence')
+
+ if 'options' not in scenario_args:
+ scenario_args['options'] = {}
+
+ options = scenario_args['options']
+
+ context['runner'] = os.getpid()
+
+ LOG.info("worker START, sequence_values(%s, %s), class %s",
+ arg_name, sequence_values, cls)
+
+ benchmark = cls(context)
+ benchmark.setup()
+ method = getattr(benchmark, method_name)
+
+ record_context = {"runner": context["runner"],
+ "host": context["host"]}
+
+ sla_action = None
+ if "sla" in scenario_args:
+ sla_action = scenario_args["sla"].get("action", "assert")
+
+ for value in sequence_values:
+ options[arg_name] = value
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ {"runner": context["runner"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ data = method(scenario_args)
+ except AssertionError as assertion:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s" % assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ time.sleep(interval)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put({'context': record_context, 'sargs:': scenario_args,
+ 'benchmark': benchmark_output})
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ {"runner": context["runner"], "sequence": sequence})
+
+ sequence += 1
+
+ if errors:
+ break
+
+ benchmark.teardown()
+ LOG.info("worker END")
+
+
class SequenceRunner(base.Runner):
    '''Run a scenario repeatedly, taking one input value per run from a list.

    Parameters
      interval - time to wait between each scenario invocation
        type: int
        unit: seconds
        default: 1 sec
      scenario_option_name - name of the option that is increased each invocation
        type: string
        unit: na
        default: none
      sequence - list of values which are executed in their respective scenarios
        type: [int]
        unit: na
        default: none
    '''

    __execution_type__ = 'Sequence'

    def _run_benchmark(self, cls, method, scenario_args):
        # Hand the work off to a dedicated process; result records flow
        # back through self.result_queue.
        worker_args = (self.result_queue, cls, method,
                       self.config, scenario_args)
        self.process = multiprocessing.Process(target=_worker_process,
                                               args=worker_args)
        self.process.start()