Diffstat (limited to 'yardstick/benchmark/runners')
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py      11
-rwxr-xr-x  yardstick/benchmark/runners/base.py            44
-rw-r--r--  yardstick/benchmark/runners/duration.py         8
-rwxr-xr-x  yardstick/benchmark/runners/dynamictp.py        7
-rw-r--r--  yardstick/benchmark/runners/iteration.py        7
-rw-r--r--  yardstick/benchmark/runners/iteration_ipc.py   205
-rw-r--r--  yardstick/benchmark/runners/proxduration.py    165
-rw-r--r--  yardstick/benchmark/runners/search.py           9
-rw-r--r--  yardstick/benchmark/runners/sequence.py         9
9 files changed, 430 insertions(+), 35 deletions(-)
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 6aaaed888..ecb59f960 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -37,6 +37,7 @@ import six
from six.moves import range
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -86,7 +87,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
loop_iter = six.moves.zip(*param_iters)
else:
LOG.warning("iter_type unrecognized: %s", iter_type)
- raise TypeError("iter_type unrecognized: %s", iter_type)
+ raise TypeError("iter_type unrecognized: %s" % iter_type)
# Populate options and run the requested method for each value combination
for comb_values in loop_iter:
@@ -105,14 +106,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
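
Note on the SLA change above (repeated in duration.py, dynamictp.py, iteration.py, search.py and sequence.py below): scenarios now signal SLA failures with the dedicated yardstick.common.exceptions.SLAValidationError instead of a bare AssertionError, so runners no longer misinterpret unrelated assertion failures as SLA violations. The configured "sla.action" then decides whether the worker re-raises ("assert") or only logs and records the failure ("monitor"). A minimal sketch of the scenario side, assuming the exception follows Yardstick's keyword-formatted message convention; the scenario class, measurement helper and SLA key are illustrative:

    from yardstick.common import exceptions as y_exc

    class ExampleScenario(object):
        """Illustrative scenario raising the new SLA exception."""

        def run(self, result):
            throughput = self._measure_throughput()  # hypothetical helper
            result['throughput'] = throughput
            if throughput < self.scenario_cfg['sla']['min_throughput']:
                # case_name/error_msg kwargs are an assumption based on
                # Yardstick's kwargs-formatted exception messages
                raise y_exc.SLAValidationError(
                    case_name='example', error_msg='throughput below SLA')
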
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index fbdf6c281..af2557441 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -12,27 +12,26 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
+# This is a modified copy of ``rally/rally/benchmark/runners/base.py``
-# yardstick comment: this is a modified copy of
-# rally/rally/benchmark/runners/base.py
-
-from __future__ import absolute_import
-
+import importlib
import logging
import multiprocessing
import subprocess
import time
import traceback
-from subprocess import CalledProcessError
-
-import importlib
-from six.moves.queue import Empty
+from six import moves
-import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
+from yardstick.common import utils
from yardstick.dispatcher.base import Base as DispatcherBase
+
log = logging.getLogger(__name__)
@@ -41,7 +40,7 @@ def _execute_shell_command(command):
exitcode = 0
try:
output = subprocess.check_output(command, shell=True)
- except CalledProcessError:
+ except subprocess.CalledProcessError:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n ", command)
@@ -245,7 +244,7 @@ class Runner(object):
log.debug("output_queue size %s", self.output_queue.qsize())
try:
result.update(self.output_queue.get(True, 1))
- except Empty:
+ except moves.queue.Empty:
pass
return result
@@ -259,7 +258,7 @@ class Runner(object):
log.debug("result_queue size %s", self.result_queue.qsize())
try:
one_record = self.result_queue.get(True, 1)
- except Empty:
+ except moves.queue.Empty:
pass
else:
if output_in_influxdb:
@@ -272,3 +271,22 @@ class Runner(object):
dispatchers = DispatcherBase.get(self.config['output_config'])
dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
+
+
+class RunnerProducer(producer.MessagingProducer):
+ """Class implementing the message producer for runners"""
+
+ def __init__(self, _id):
+ super(RunnerProducer, self).__init__(messaging.TOPIC_RUNNER, _id=_id)
+
+ def start_iteration(self, version=1, data=None):
+ data = {} if not data else data
+ self.send_message(
+ messaging.RUNNER_METHOD_START_ITERATION,
+ payloads.RunnerPayload(version=version, data=data))
+
+ def stop_iteration(self, version=1, data=None):
+ data = {} if not data else data
+ self.send_message(
+ messaging.RUNNER_METHOD_STOP_ITERATION,
+ payloads.RunnerPayload(version=version, data=data))
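
The new RunnerProducer is a thin wrapper around MessagingProducer that publishes on messaging.TOPIC_RUNNER; runners use it to mark iteration boundaries towards the traffic generators (see iteration_ipc.py below). A usage sketch, with the producer id and payload data purely illustrative:

    from yardstick.benchmark.runners import base

    producer = base.RunnerProducer(_id='task-uuid')   # id is illustrative
    producer.start_iteration(data={'iteration': 1})   # RUNNER_METHOD_START_ITERATION
    # ... scenario method runs; traffic generators report over the MQ ...
    producer.stop_iteration(data={'iteration': 1})    # RUNNER_METHOD_STOP_ITERATION
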
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 60b0348c3..14fd8bb47 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,6 +27,7 @@ import traceback
import time
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -70,13 +71,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
+ benchmark.teardown()
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
# catch all exceptions because with multiprocessing we can have un-picklable exception
# problems https://bugs.python.org/issue9400
except Exception: # pylint: disable=broad-except
diff --git a/yardstick/benchmark/runners/dynamictp.py b/yardstick/benchmark/runners/dynamictp.py
index 63bfc823a..88d3c5704 100755
--- a/yardstick/benchmark/runners/dynamictp.py
+++ b/yardstick/benchmark/runners/dynamictp.py
@@ -27,6 +27,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -80,10 +81,10 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
method(data)
- except AssertionError as assertion:
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ except y_exc.SLAValidationError as error:
+ LOG.warning("SLA validation failed: %s", error.args)
too_high = True
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 20d6da054..4c88f3671 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -29,6 +29,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -75,13 +76,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
elif sla_action == "rate-control":
try:
scenario_cfg['options']['rate']
diff --git a/yardstick/benchmark/runners/iteration_ipc.py b/yardstick/benchmark/runners/iteration_ipc.py
new file mode 100644
index 000000000..a0335fdc7
--- /dev/null
+++ b/yardstick/benchmark/runners/iteration_ipc.py
@@ -0,0 +1,205 @@
+# Copyright 2018: Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A runner that runs a configurable number of times before it returns. Each
+ iteration has a configurable timeout. The loop control depends on the
+   feedback received from the running VNFs. The context PIDs of the VNFs
+   to listen to for messages are given in the scenario "setup" method.
+"""
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from yardstick.benchmark.runners import base as base_runner
+from yardstick.common import exceptions
+from yardstick.common import messaging
+from yardstick.common import utils
+from yardstick.common.messaging import consumer
+from yardstick.common.messaging import payloads
+
+
+LOG = logging.getLogger(__name__)
+
+QUEUE_PUT_TIMEOUT = 10
+ITERATION_TIMEOUT = 180
+
+
+class RunnerIterationIPCEndpoint(consumer.NotificationHandler):
+ """Endpoint class for ``RunnerIterationIPCConsumer``"""
+
+ def tg_method_started(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_STARTED,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)},
+ QUEUE_PUT_TIMEOUT)
+
+ def tg_method_finished(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_FINISHED,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)})
+
+ def tg_method_iteration(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_ITERATION,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)})
+
+
+class RunnerIterationIPCConsumer(consumer.MessagingConsumer):
+ """MQ consumer for "IterationIPC" runner"""
+
+ def __init__(self, _id, ctx_ids):
+ self._id = _id
+ self._queue = multiprocessing.Queue()
+ endpoints = [RunnerIterationIPCEndpoint(_id, ctx_ids, self._queue)]
+ super(RunnerIterationIPCConsumer, self).__init__(
+ messaging.TOPIC_TG, ctx_ids, endpoints)
+ self._kpi_per_id = {ctx: [] for ctx in ctx_ids}
+ self.iteration_index = None
+
+ def is_all_kpis_received_in_iteration(self):
+ """Check if all producers registered have sent the ITERATION msg
+
+ During the present iteration, all producers (traffic generators) must
+ start and finish the traffic injection, and at the end of the traffic
+        injection a TG_METHOD_ITERATION message must be sent. This function
+        checks that all KPIs for the present iteration were received. E.g.:
+ self.iteration_index = 2
+
+ self._kpi_per_id = {
+ 'ctx1': [kpi0, kpi1, kpi2],
+ 'ctx2': [kpi0, kpi1]} --> return False
+
+ self._kpi_per_id = {
+ 'ctx1': [kpi0, kpi1, kpi2],
+ 'ctx2': [kpi0, kpi1, kpi2]} --> return True
+ """
+ while not self._queue.empty():
+ msg = self._queue.get(True, 1)
+ if msg['action'] == messaging.TG_METHOD_ITERATION:
+ id_iter_list = self._kpi_per_id[msg['id']]
+ id_iter_list.append(msg['payload'].kpi)
+
+ return all(len(id_iter_list) == self.iteration_index
+ for id_iter_list in self._kpi_per_id.values())
+
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue): # pragma: no cover
+ runner_cfg = scenario_cfg['runner']
+
+ timeout = runner_cfg.get('timeout', ITERATION_TIMEOUT)
+ iterations = runner_cfg.get('iterations', 1)
+ run_step = runner_cfg.get('run_step', 'setup,run,teardown')
+ LOG.info('Worker START. Iterations %d times, class %s', iterations, cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ method = getattr(benchmark, method_name)
+
+ if 'setup' not in run_step:
+ raise exceptions.RunnerIterationIPCSetupActionNeeded()
+ benchmark.setup()
+ producer_ctxs = benchmark.get_mq_ids()
+ if not producer_ctxs:
+ raise exceptions.RunnerIterationIPCNoCtxs()
+
+ mq_consumer = RunnerIterationIPCConsumer(os.getpid(), producer_ctxs)
+ mq_consumer.start_rpc_server()
+ mq_producer = base_runner.RunnerProducer(scenario_cfg['task_id'])
+
+ iteration_index = 1
+ while 'run' in run_step:
+ LOG.debug('runner=%(runner)s seq=%(sequence)s START',
+ {'runner': runner_cfg['runner_id'],
+ 'sequence': iteration_index})
+ data = {}
+ result = None
+ errors = ''
+ mq_consumer.iteration_index = iteration_index
+ mq_producer.start_iteration()
+
+ try:
+ utils.wait_until_true(
+ mq_consumer.is_all_kpis_received_in_iteration,
+ timeout=timeout, sleep=2)
+ result = method(data)
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception(errors)
+
+ mq_producer.stop_iteration()
+
+ if result:
+ output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+ benchmark_output = {'timestamp': time.time(),
+ 'sequence': iteration_index,
+ 'data': data,
+ 'errors': errors}
+ queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
+
+ LOG.debug('runner=%(runner)s seq=%(sequence)s END',
+ {'runner': runner_cfg['runner_id'],
+ 'sequence': iteration_index})
+
+ iteration_index += 1
+ if iteration_index > iterations or aborted.is_set():
+ LOG.info('"IterationIPC" worker END')
+ break
+
+ if 'teardown' in run_step:
+ try:
+ benchmark.teardown()
+ except Exception:
+ LOG.exception('Exception during teardown process')
+ mq_consumer.stop_rpc_server()
+ raise SystemExit(1)
+
+ LOG.debug('Data queue size = %s', queue.qsize())
+ LOG.debug('Output queue size = %s', output_queue.qsize())
+ mq_consumer.stop_rpc_server()
+
+
+class IterationIPCRunner(base_runner.Runner):
+ """Run a scenario for a configurable number of times.
+
+ Each iteration has a configurable timeout. The loop control depends on the
+    feedback received from the running VNFs. The context PIDs of the VNFs to
+    listen to for messages are given in the scenario "setup" method.
+ """
+ __execution_type__ = 'IterationIPC'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = '{}-{}-{}'.format(
+ self.__execution_type__, scenario_cfg.get('type'), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
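
Condensed, one iteration of the worker above performs the following handshake. This sketch only rearranges the code shown; it assumes, consistent with the try/except around it, that utils.wait_until_true raises when the timeout expires:

    # One "IterationIPC" iteration, condensed from _worker_process above.
    mq_consumer = RunnerIterationIPCConsumer(os.getpid(), benchmark.get_mq_ids())
    mq_consumer.start_rpc_server()
    mq_producer = base_runner.RunnerProducer(scenario_cfg['task_id'])

    mq_consumer.iteration_index = 1
    mq_producer.start_iteration()          # signal the TGs to start
    utils.wait_until_true(                 # block until every TG context has
        mq_consumer.is_all_kpis_received_in_iteration,  # queued its KPI
        timeout=timeout, sleep=2)
    result = method(data)                  # then collect the scenario result
    mq_producer.stop_iteration()           # signal the TGs to stop
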
diff --git a/yardstick/benchmark/runners/proxduration.py b/yardstick/benchmark/runners/proxduration.py
new file mode 100644
index 000000000..61a468fd3
--- /dev/null
+++ b/yardstick/benchmark/runners/proxduration.py
@@ -0,0 +1,165 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick comment: this is a modified copy of
+# rally/rally/benchmark/runners/constant.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
+from yardstick.common import constants
+
+LOG = logging.getLogger(__name__)
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = scenario_cfg['runner']
+
+ requested_interval = interval = runner_cfg.get("interval", 1)
+ duration = runner_cfg.get("duration", 60)
+ sampled = runner_cfg.get("sampled", False)
+
+ LOG.info("Worker START, duration is %ds", duration)
+ LOG.debug("class is %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ benchmark.setup()
+ method = getattr(benchmark, method_name)
+
+ sla_action = None
+ if "sla" in scenario_cfg:
+ sla_action = scenario_cfg["sla"].get("action", "assert")
+
+
+ start = time.time()
+ timeout = start + duration
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ benchmark.pre_run_wait_time(interval)
+
+ if sampled:
+ try:
+ pre_adjustment = time.time()
+ result = method(data)
+ post_adjustment = time.time()
+ if requested_interval > post_adjustment - pre_adjustment:
+ interval = requested_interval - (post_adjustment - pre_adjustment)
+ else:
+ interval = 0
+
+ except y_exc.SLAValidationError as error:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ # catch all exceptions because with multiprocessing we can have un-picklable exception
+ # problems https://bugs.python.org/issue9400
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception("")
+ else:
+ if result:
+ # add timeout for put so we don't block test
+ # if we do timeout we don't care about dropping individual KPIs
+ output_queue.put(result, True, constants.QUEUE_PUT_TIMEOUT)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)
+ else:
+            LOG.debug("No sample collected... sequence %s", sequence)
+
+
+ sequence += 1
+
+ if (errors and sla_action is None) or time.time() > timeout or aborted.is_set():
+ LOG.info("Worker END")
+ break
+
+ try:
+ benchmark.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+ LOG.info("Exiting ProxDuration Runner...")
+
+class ProxDurationRunner(base.Runner):
+ """Run a scenario for a certain amount of time
+
+If the scenario ends before the time has elapsed, it will be started again.
+
+ Parameters
+ duration - amount of time the scenario will be run for
+ type: int
+ unit: seconds
+ default: 60 sec
+ interval - time to wait between each scenario invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+    sampled - whether sample data should be collected (True/False)
+ type: boolean
+ unit: True/False
+ default: False
+ confirmation - Number of confirmation retries
+ type: int
+ unit: retry attempts
+ default: 0
+ """
+ __execution_type__ = 'ProxDuration'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
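
The "sampled" branch above compensates for the scenario's own run time: whatever method(data) consumed is deducted from the requested interval before the next pre_run_wait_time() call, so samples keep an approximately fixed cadence. A numeric illustration (values made up):

    requested_interval = 10.0   # runner "interval" option, in seconds
    run_time = 2.5              # measured around method(data)
    # next pre-run wait is 7.5 s; clamped to 0 when the run itself
    # took longer than the requested interval
    interval = max(requested_interval - run_time, 0)
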
diff --git a/yardstick/benchmark/runners/search.py b/yardstick/benchmark/runners/search.py
index 8037329b5..01a4292c7 100644
--- a/yardstick/benchmark/runners/search.py
+++ b/yardstick/benchmark/runners/search.py
@@ -33,6 +33,7 @@ from collections import Mapping
from six.moves import zip
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -119,14 +120,14 @@ If the scenario ends before the time has elapsed, it will be started again.
try:
self.worker_helper(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if self.sla_action == "assert":
raise
elif self.sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index d6e3f7109..0148a45b2 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -30,6 +30,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -74,14 +75,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else: