Diffstat (limited to 'yardstick/benchmark')
-rw-r--r--  yardstick/benchmark/contexts/base.py | 81
-rw-r--r--  yardstick/benchmark/contexts/dummy.py | 6
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 44
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py | 9
-rw-r--r--  yardstick/benchmark/contexts/node.py | 62
-rw-r--r--  yardstick/benchmark/contexts/standalone/model.py | 14
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py | 17
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py | 18
-rw-r--r--  yardstick/benchmark/core/task.py | 33
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py | 11
-rwxr-xr-x  yardstick/benchmark/runners/base.py | 44
-rw-r--r--  yardstick/benchmark/runners/duration.py | 8
-rwxr-xr-x  yardstick/benchmark/runners/dynamictp.py | 7
-rw-r--r--  yardstick/benchmark/runners/iteration.py | 7
-rw-r--r--  yardstick/benchmark/runners/iteration_ipc.py | 205
-rw-r--r--  yardstick/benchmark/runners/proxduration.py | 165
-rw-r--r--  yardstick/benchmark/runners/search.py | 9
-rw-r--r--  yardstick/benchmark/runners/sequence.py | 9
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py | 8
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 22
-rw-r--r--  yardstick/benchmark/scenarios/base.py | 10
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/compute/perf.py | 20
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/compute/ramspeed.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/compute/unixbench.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/moongen_testpmd.py | 7
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf.py | 6
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_node.py | 7
-rw-r--r--  yardstick/benchmark/scenarios/networking/nstat.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py | 20
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6.py | 13
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py | 124
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk.py | 7
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py | 8
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 57
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py | 14
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py | 16
-rw-r--r--  yardstick/benchmark/scenarios/storage/fio.py | 2
41 files changed, 816 insertions, 315 deletions
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index 692c16892..f3f5879eb 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -8,10 +8,14 @@
##############################################################################
import abc
+import errno
import six
+import os
from yardstick.common import constants
from yardstick.common import utils
+from yardstick.common import yaml_loader
+from yardstick.common.constants import YARDSTICK_ROOT_PATH
class Flags(object):
@@ -45,20 +49,13 @@ class Context(object):
list = []
SHORT_TASK_ID_LEN = 8
- @staticmethod
- def split_name(name, sep='.'):
- try:
- name_iter = iter(name.split(sep))
- except AttributeError:
- # name is not a string
- return None, None
- return next(name_iter), next(name_iter, None)
-
- def __init__(self):
+ def __init__(self, host_name_separator='.'):
Context.list.append(self)
self._flags = Flags()
self._name = None
self._task_id = None
+ self.file_path = None
+ self._host_name_separator = host_name_separator
def init(self, attrs):
"""Initiate context"""
@@ -68,6 +65,35 @@ class Context(object):
self._name_task_id = '{}-{}'.format(
self._name, self._task_id[:self.SHORT_TASK_ID_LEN])
+ def split_host_name(self, name):
+ if (isinstance(name, six.string_types)
+ and self._host_name_separator in name):
+ return tuple(name.split(self._host_name_separator, 1))
+ return None, None
+
+ def read_pod_file(self, attrs):
+ self.file_path = file_path = attrs.get("file", "pod.yaml")
+ try:
+ cfg = yaml_loader.read_yaml_file(self.file_path)
+ except IOError as io_error:
+ if io_error.errno != errno.ENOENT:
+ raise
+
+ self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
+ cfg = yaml_loader.read_yaml_file(self.file_path)
+
+ for node in cfg["nodes"]:
+ node["ctx_type"] = self.__context_type__
+
+ self.nodes.extend(cfg["nodes"])
+ self.controllers.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Controller"])
+ self.computes.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Compute"])
+ self.baremetals.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Baremetal"])
+ return cfg
+
@property
def name(self):
if self._flags.no_setup or self._flags.no_teardown:
@@ -79,6 +105,10 @@ class Context(object):
def assigned_name(self):
return self._name
+ @property
+ def host_name_separator(self):
+ return self._host_name_separator
+
@staticmethod
def get_cls(context_type):
"""Return class of specified type."""
@@ -129,6 +159,25 @@ class Context(object):
attr_name)
@staticmethod
+ def get_physical_nodes():
+ """return physical node names for all contexts"""
+ physical_nodes = {}
+ for context in Context.list:
+ nodes = context._get_physical_nodes()
+ physical_nodes.update({context._name: nodes})
+
+ return physical_nodes
+
+ @staticmethod
+ def get_physical_node_from_server(server_name):
+ """return physical nodes for all contexts"""
+ context = Context.get_context_from_server(server_name)
+ if context is None:
+ return None
+
+ return context._get_physical_node_for_server(server_name)
+
+ @staticmethod
def get_context_from_server(attr_name):
"""lookup context info by name from node config
attr_name: either a name of the node created by yardstick or a dict
@@ -157,3 +206,15 @@ class Context(object):
except StopIteration:
raise ValueError("context not found for server %r" %
attr_name)
+
+ @abc.abstractmethod
+ def _get_physical_nodes(self):
+ """return the list of physical nodes in context"""
+
+ @abc.abstractmethod
+ def _get_physical_node_for_server(self, server_name):
+ """ Find physical node for given server
+
+ :param server_name: (string) Server name in scenario
+ :return string: <node_name>.<context_name>
+ """
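Note: a minimal, self-contained sketch of the new split_host_name() behaviour (illustrative only, not part of the patch; plain str stands in for six.string_types):

    def split_host_name(name, separator='.'):
        # Mirrors Context.split_host_name(): only split when the separator is
        # actually present, otherwise return (None, None).
        if isinstance(name, str) and separator in name:
            return tuple(name.split(separator, 1))
        return None, None

    assert split_host_name('tg_0.yardstick') == ('tg_0', 'yardstick')
    assert split_host_name('host-k8s', separator='-') == ('host', 'k8s')  # Kubernetes
    assert split_host_name('no_separator') == (None, None)
    assert split_host_name(None) == (None, None)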
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index a9e4564fe..36e8854e8 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -32,3 +32,9 @@ class DummyContext(Context):
def _get_network(self, attr_name):
return None
+
+ def _get_physical_nodes(self):
+ return None
+
+ def _get_physical_node_for_server(self, server_name):
+ return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 82861829e..ac85b6ffe 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -29,6 +29,7 @@ from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
+from yardstick.common import openstack_utils
LOG = logging.getLogger(__name__)
@@ -68,6 +69,12 @@ class HeatContext(Context):
self.shade_client = None
self.heat_timeout = None
self.key_filename = None
+ self.shade_client = None
+ self.operator_client = None
+ self.nodes = []
+ self.controllers = []
+ self.computes = []
+ self.baremetals = []
super(HeatContext, self).__init__()
@staticmethod
@@ -96,6 +103,14 @@ class HeatContext(Context):
self.template_file = attrs.get("heat_template")
+ self.shade_client = openstack_utils.get_shade_client()
+ self.operator_client = openstack_utils.get_shade_operator_client()
+
+ try:
+ self.read_pod_file(attrs)
+ except IOError:
+ LOG.warning("No pod file specified. NFVi metrics will be disabled")
+
self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
if self.template_file:
self.heat_parameters = attrs.get("heat_parameters")
@@ -465,7 +480,7 @@ class HeatContext(Context):
with attribute name mapping when using external heat templates
"""
if isinstance(attr_name, collections.Mapping):
- node_name, cname = self.split_name(attr_name['name'])
+ node_name, cname = self.split_host_name(attr_name['name'])
if cname is None or cname != self.name:
return None
@@ -528,3 +543,30 @@ class HeatContext(Context):
"physical_network": network.physical_network,
}
return result
+
+ def _get_physical_nodes(self):
+ return self.nodes
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s.name == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ server = openstack_utils.get_server(self.shade_client,
+ name_or_id=server_name)
+
+ if server:
+ server = server.toDict()
+ list_hypervisors = self.operator_client.list_hypervisors()
+
+ for hypervisor in list_hypervisors:
+ if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
+ for node in self.nodes:
+ if node['ip'] == hypervisor.host_ip:
+ return "{}.{}".format(node['name'], self._name)
+
+ return None
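Note: the hypervisor lookup added in HeatContext._get_physical_node_for_server() boils down to the following (illustrative sketch, not part of the patch; plain dicts stand in for the shade server/hypervisor objects and the pod-file nodes):

    def find_physical_node(server, hypervisors, pod_nodes, context_name):
        # Match the server's hypervisor hostname against the operator's
        # hypervisor list, then match that hypervisor's host IP against the
        # nodes read from the pod file.
        wanted = server['OS-EXT-SRV-ATTR:hypervisor_hostname']
        for hypervisor in hypervisors:
            if hypervisor['hypervisor_hostname'] == wanted:
                for node in pod_nodes:
                    if node['ip'] == hypervisor['host_ip']:
                        return '{}.{}'.format(node['name'], context_name)
        return None

    server = {'OS-EXT-SRV-ATTR:hypervisor_hostname': 'compute-0'}
    hypervisors = [{'hypervisor_hostname': 'compute-0', 'host_ip': '10.0.0.5'}]
    pod_nodes = [{'name': 'node2', 'ip': '10.0.0.5'}]
    assert find_physical_node(server, hypervisors, pod_nodes, 'heat-ctx') == 'node2.heat-ctx'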
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index ddfc530cb..4ba9eee36 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -33,8 +33,7 @@ class KubernetesContext(Context):
self.key_path = ''
self.public_key_path = ''
self.template = None
-
- super(KubernetesContext, self).__init__()
+ super(KubernetesContext, self).__init__(host_name_separator='-')
def init(self, attrs):
super(KubernetesContext, self).init(attrs)
@@ -175,3 +174,9 @@ class KubernetesContext(Context):
def _get_network(self, attr_name):
return None
+
+ def _get_physical_nodes(self):
+ return None
+
+ def _get_physical_node_for_server(self, server_name):
+ return None
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index fa619a9aa..d3af98920 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -8,7 +8,6 @@
##############################################################################
from __future__ import absolute_import
-import errno
import subprocess
import os
import collections
@@ -22,7 +21,7 @@ from yardstick import ssh
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
from yardstick.common.ansible_common import AnsibleCommon
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common.exceptions import ContextUpdateCollectdForNodeError
LOG = logging.getLogger(__name__)
@@ -49,40 +48,11 @@ class NodeContext(Context):
}
super(NodeContext, self).__init__()
- def read_config_file(self):
- """Read from config file"""
-
- with open(self.file_path) as stream:
- LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml_load(stream)
- return cfg
-
def init(self, attrs):
"""initializes itself from the supplied arguments"""
super(NodeContext, self).init(attrs)
- self.file_path = file_path = attrs.get("file", "pod.yaml")
-
- try:
- cfg = self.read_config_file()
- except IOError as io_error:
- if io_error.errno != errno.ENOENT:
- raise
-
- self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
- cfg = self.read_config_file()
-
- self.nodes.extend(cfg["nodes"])
- self.controllers.extend([node for node in cfg["nodes"]
- if node.get("role") == "Controller"])
- self.computes.extend([node for node in cfg["nodes"]
- if node.get("role") == "Compute"])
- self.baremetals.extend([node for node in cfg["nodes"]
- if node.get("role") == "Baremetal"])
- LOG.debug("Nodes: %r", self.nodes)
- LOG.debug("Controllers: %r", self.controllers)
- LOG.debug("Computes: %r", self.computes)
- LOG.debug("BareMetals: %r", self.baremetals)
+ cfg = self.read_pod_file(attrs)
self.env = attrs.get('env', {})
self.attrs = attrs
@@ -135,11 +105,37 @@ class NodeContext(Context):
playbook = os.path.join(ANSIBLE_DIR, playbook)
return playbook
+ def _get_physical_nodes(self):
+ return self.nodes
+
+ def _get_physical_node_for_server(self, server_name):
+
+ node_name, context_name = self.split_host_name(server_name)
+
+ if context_name is None or self.name != context_name:
+ return None
+
+ for n in (n for n in self.nodes if n["name"] == node_name):
+ return "{}.{}".format(n["name"], self._name)
+
+ return None
+
+ def update_collectd_options_for_node(self, options, attr_name):
+ node_name, _ = self.split_host_name(attr_name)
+
+ matching_nodes = (n for n in self.nodes if n["name"] == node_name)
+ try:
+ node = next(matching_nodes)
+ except StopIteration:
+ raise ContextUpdateCollectdForNodeError(attr_name=attr_name)
+
+ node["collectd"] = options
+
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: a name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
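Note: what NodeContext.update_collectd_options_for_node() does, shown on plain dicts (illustrative sketch, not part of the patch; the helper and node names are made up):

    def update_collectd_options(nodes, options, attr_name, separator='.'):
        # attr_name is "<node>.<context>"; attach the collectd options to the
        # matching pod-file node, or fail loudly as the new method does.
        node_name = attr_name.split(separator, 1)[0]
        for node in nodes:
            if node['name'] == node_name:
                node['collectd'] = options
                return
        raise ValueError('no node found for %r' % attr_name)

    nodes = [{'name': 'node1'}, {'name': 'node2'}]
    update_collectd_options(nodes, {'interval': 5}, 'node1.pod1')
    assert nodes[0]['collectd'] == {'interval': 5}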
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index 320c61c92..ecddcbbe0 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -26,7 +26,7 @@ import xml.etree.ElementTree as ET
from yardstick import ssh
from yardstick.common import constants
from yardstick.common import exceptions
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common import yaml_loader
from yardstick.network_services.utils import PciAddress
from yardstick.network_services.helpers.cpu import CpuSysCores
@@ -394,26 +394,18 @@ class StandaloneContextHelper(object):
return pf_vfs
- def read_config_file(self):
- """Read from config file"""
-
- with open(self.file_path) as stream:
- LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml_load(stream)
- return cfg
-
def parse_pod_file(self, file_path, nfvi_role='Sriov'):
self.file_path = file_path
nodes = []
nfvi_host = []
try:
- cfg = self.read_config_file()
+ cfg = yaml_loader.read_yaml_file(self.file_path)
except IOError as io_error:
if io_error.errno != errno.ENOENT:
raise
self.file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
file_path)
- cfg = self.read_config_file()
+ cfg = yaml_loader.read_yaml_file(self.file_path)
nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
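Note: read_pod_file() in contexts/base.py and parse_pod_file() here share the same fallback logic, sketched below (illustrative only, not part of the patch; the YAML loader is passed in rather than imported):

    import errno
    import os

    def load_pod_cfg(read_yaml_file, file_path, root_path):
        # Try the path as given; only if the file is missing (ENOENT) retry
        # relative to the yardstick root. Any other IOError is re-raised.
        try:
            return read_yaml_file(file_path), file_path
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            fallback = os.path.join(root_path, file_path)
            return read_yaml_file(fallback), fallback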
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index b9e66a481..e6a6f99e4 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -298,13 +298,28 @@ class OvsDpdkContext(Context):
for vm in self.vm_names:
model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ # self.nfvi_host always contains only one host
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 2f93e530b..05fac0218 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -106,13 +106,29 @@ class SriovContext(Context):
build_vfs = "echo 0 > /sys/bus/pci/devices/{0}/sriov_numvfs"
self.connection.execute(build_vfs.format(ports.get('phy_port')))
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+
+ # self.nfvi_host always contains only one host.
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 697cc007f..f050e8d0f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -619,27 +619,22 @@ class TaskParser(object): # pragma: no cover
nodes:
tg__0: tg_0.yardstick
vnf__0: vnf_0.yardstick
+
+ NOTE: in Kubernetes context, the separator character between the server
+ name and the context name is "-":
+ scenario:
+ host: host-k8s
+ target: target-k8s
"""
def qualified_name(name):
- try:
- # for openstack
- node_name, context_name = name.split('.')
- sep = '.'
- except ValueError:
- # for kubernetes, some kubernetes resources don't support
- # name format like 'xxx.xxx', so we use '-' instead
- # need unified later
- node_name, context_name = name.split('-')
- sep = '-'
-
- try:
- ctx = next((context for context in contexts
- if context.assigned_name == context_name))
- except StopIteration:
- raise y_exc.ScenarioConfigContextNameNotFound(
- context_name=context_name)
-
- return '{}{}{}'.format(node_name, sep, ctx.name)
+ for context in contexts:
+ host_name, ctx_name = context.split_host_name(name)
+ if context.assigned_name == ctx_name:
+ return '{}{}{}'.format(host_name,
+ context.host_name_separator,
+ context.name)
+
+ raise y_exc.ScenarioConfigContextNameNotFound(host_name=name)
if 'host' in scenario:
scenario['host'] = qualified_name(scenario['host'])
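Note: an illustrative, self-contained sketch of the new qualified_name() resolution (not part of the patch; StubContext and the context names are made up):

    class StubContext(object):
        def __init__(self, assigned_name, name, separator):
            self.assigned_name = assigned_name   # name given in the task file
            self.name = name                     # name with the short task-id suffix
            self.host_name_separator = separator

        def split_host_name(self, name):
            if self.host_name_separator in name:
                return tuple(name.split(self.host_name_separator, 1))
            return None, None

    def qualified_name(name, contexts):
        for context in contexts:
            host_name, ctx_name = context.split_host_name(name)
            if context.assigned_name == ctx_name:
                return '{}{}{}'.format(host_name, context.host_name_separator,
                                       context.name)
        raise ValueError('context not found for server %r' % name)

    contexts = [StubContext('yardstick', 'yardstick-1234abcd', '.'),
                StubContext('k8s', 'k8s-5678efgh', '-')]
    assert qualified_name('tg__0.yardstick', contexts) == 'tg__0.yardstick-1234abcd'
    assert qualified_name('host-k8s', contexts) == 'host-k8s-5678efgh'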
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 6aaaed888..ecb59f960 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -37,6 +37,7 @@ import six
from six.moves import range
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -86,7 +87,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
loop_iter = six.moves.zip(*param_iters)
else:
LOG.warning("iter_type unrecognized: %s", iter_type)
- raise TypeError("iter_type unrecognized: %s", iter_type)
+ raise TypeError("iter_type unrecognized: %s" % iter_type)
# Populate options and run the requested method for each value combination
for comb_values in loop_iter:
@@ -105,14 +106,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index fbdf6c281..af2557441 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -12,27 +12,26 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
+# This is a modified copy of ``rally/rally/benchmark/runners/base.py``
-# yardstick comment: this is a modified copy of
-# rally/rally/benchmark/runners/base.py
-
-from __future__ import absolute_import
-
+import importlib
import logging
import multiprocessing
import subprocess
import time
import traceback
-from subprocess import CalledProcessError
-
-import importlib
-from six.moves.queue import Empty
+from six import moves
-import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
+from yardstick.common import utils
from yardstick.dispatcher.base import Base as DispatcherBase
+
log = logging.getLogger(__name__)
@@ -41,7 +40,7 @@ def _execute_shell_command(command):
exitcode = 0
try:
output = subprocess.check_output(command, shell=True)
- except CalledProcessError:
+ except subprocess.CalledProcessError:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n ", command)
@@ -245,7 +244,7 @@ class Runner(object):
log.debug("output_queue size %s", self.output_queue.qsize())
try:
result.update(self.output_queue.get(True, 1))
- except Empty:
+ except moves.queue.Empty:
pass
return result
@@ -259,7 +258,7 @@ class Runner(object):
log.debug("result_queue size %s", self.result_queue.qsize())
try:
one_record = self.result_queue.get(True, 1)
- except Empty:
+ except moves.queue.Empty:
pass
else:
if output_in_influxdb:
@@ -272,3 +271,22 @@ class Runner(object):
dispatchers = DispatcherBase.get(self.config['output_config'])
dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
+
+
+class RunnerProducer(producer.MessagingProducer):
+ """Class implementing the message producer for runners"""
+
+ def __init__(self, _id):
+ super(RunnerProducer, self).__init__(messaging.TOPIC_RUNNER, _id=_id)
+
+ def start_iteration(self, version=1, data=None):
+ data = {} if not data else data
+ self.send_message(
+ messaging.RUNNER_METHOD_START_ITERATION,
+ payloads.RunnerPayload(version=version, data=data))
+
+ def stop_iteration(self, version=1, data=None):
+ data = {} if not data else data
+ self.send_message(
+ messaging.RUNNER_METHOD_STOP_ITERATION,
+ payloads.RunnerPayload(version=version, data=data))
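Note: RunnerProducer is used by the new IterationIPC runner (further down) to signal the start and end of each iteration to the VNFs; an illustrative sketch with the messaging layer stubbed out (not part of the patch):

    class StubRunnerProducer(object):
        """Stand-in for RunnerProducer; send_message() just records the call."""

        def __init__(self, _id):
            self.id = _id
            self.sent = []

        def send_message(self, method, payload):
            self.sent.append(method)

        def start_iteration(self, version=1, data=None):
            self.send_message('start_iteration', {'version': version,
                                                  'data': data or {}})

        def stop_iteration(self, version=1, data=None):
            self.send_message('stop_iteration', {'version': version,
                                                 'data': data or {}})

    producer = StubRunnerProducer('task-id')
    producer.start_iteration()   # traffic generators may start injecting
    producer.stop_iteration()    # iteration is over
    assert producer.sent == ['start_iteration', 'stop_iteration']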
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 60b0348c3..14fd8bb47 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,6 +27,7 @@ import traceback
import time
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -70,13 +71,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
+ benchmark.teardown()
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
# catch all exceptions because with multiprocessing we can have un-picklable exception
# problems https://bugs.python.org/issue9400
except Exception: # pylint: disable=broad-except
diff --git a/yardstick/benchmark/runners/dynamictp.py b/yardstick/benchmark/runners/dynamictp.py
index 63bfc823a..88d3c5704 100755
--- a/yardstick/benchmark/runners/dynamictp.py
+++ b/yardstick/benchmark/runners/dynamictp.py
@@ -27,6 +27,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -80,10 +81,10 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
method(data)
- except AssertionError as assertion:
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ except y_exc.SLAValidationError as error:
+ LOG.warning("SLA validation failed: %s", error.args)
too_high = True
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 20d6da054..4c88f3671 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -29,6 +29,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -75,13 +76,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
elif sla_action == "rate-control":
try:
scenario_cfg['options']['rate']
diff --git a/yardstick/benchmark/runners/iteration_ipc.py b/yardstick/benchmark/runners/iteration_ipc.py
new file mode 100644
index 000000000..a0335fdc7
--- /dev/null
+++ b/yardstick/benchmark/runners/iteration_ipc.py
@@ -0,0 +1,205 @@
+# Copyright 2018: Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A runner that runs a configurable number of times before it returns. Each
+ iteration has a configurable timeout. The loop control depends on the
+ feedback received from the running VNFs. The context PIDs from the VNFs
+ to listen the messages from are given in the scenario "setup" method.
+"""
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from yardstick.benchmark.runners import base as base_runner
+from yardstick.common import exceptions
+from yardstick.common import messaging
+from yardstick.common import utils
+from yardstick.common.messaging import consumer
+from yardstick.common.messaging import payloads
+
+
+LOG = logging.getLogger(__name__)
+
+QUEUE_PUT_TIMEOUT = 10
+ITERATION_TIMEOUT = 180
+
+
+class RunnerIterationIPCEndpoint(consumer.NotificationHandler):
+ """Endpoint class for ``RunnerIterationIPCConsumer``"""
+
+ def tg_method_started(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_STARTED,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)},
+ QUEUE_PUT_TIMEOUT)
+
+ def tg_method_finished(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_FINISHED,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)})
+
+ def tg_method_iteration(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_ITERATION,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)})
+
+
+class RunnerIterationIPCConsumer(consumer.MessagingConsumer):
+ """MQ consumer for "IterationIPC" runner"""
+
+ def __init__(self, _id, ctx_ids):
+ self._id = _id
+ self._queue = multiprocessing.Queue()
+ endpoints = [RunnerIterationIPCEndpoint(_id, ctx_ids, self._queue)]
+ super(RunnerIterationIPCConsumer, self).__init__(
+ messaging.TOPIC_TG, ctx_ids, endpoints)
+ self._kpi_per_id = {ctx: [] for ctx in ctx_ids}
+ self.iteration_index = None
+
+ def is_all_kpis_received_in_iteration(self):
+ """Check if all producers registered have sent the ITERATION msg
+
+ During the present iteration, all producers (traffic generators) must
+ start and finish the traffic injection, and at the end of the traffic
+ injection a TG_METHOD_ITERATION must be sent. This function will check
+ all KPIs in the present iteration are received. E.g.:
+ self.iteration_index = 3
+
+ self._kpi_per_id = {
+ 'ctx1': [kpi0, kpi1, kpi2],
+ 'ctx2': [kpi0, kpi1]} --> return False
+
+ self._kpi_per_id = {
+ 'ctx1': [kpi0, kpi1, kpi2],
+ 'ctx2': [kpi0, kpi1, kpi2]} --> return True
+ """
+ while not self._queue.empty():
+ msg = self._queue.get(True, 1)
+ if msg['action'] == messaging.TG_METHOD_ITERATION:
+ id_iter_list = self._kpi_per_id[msg['id']]
+ id_iter_list.append(msg['payload'].kpi)
+
+ return all(len(id_iter_list) == self.iteration_index
+ for id_iter_list in self._kpi_per_id.values())
+
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue): # pragma: no cover
+ runner_cfg = scenario_cfg['runner']
+
+ timeout = runner_cfg.get('timeout', ITERATION_TIMEOUT)
+ iterations = runner_cfg.get('iterations', 1)
+ run_step = runner_cfg.get('run_step', 'setup,run,teardown')
+ LOG.info('Worker START. Iterations %d times, class %s', iterations, cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ method = getattr(benchmark, method_name)
+
+ if 'setup' not in run_step:
+ raise exceptions.RunnerIterationIPCSetupActionNeeded()
+ benchmark.setup()
+ producer_ctxs = benchmark.get_mq_ids()
+ if not producer_ctxs:
+ raise exceptions.RunnerIterationIPCNoCtxs()
+
+ mq_consumer = RunnerIterationIPCConsumer(os.getpid(), producer_ctxs)
+ mq_consumer.start_rpc_server()
+ mq_producer = base_runner.RunnerProducer(scenario_cfg['task_id'])
+
+ iteration_index = 1
+ while 'run' in run_step:
+ LOG.debug('runner=%(runner)s seq=%(sequence)s START',
+ {'runner': runner_cfg['runner_id'],
+ 'sequence': iteration_index})
+ data = {}
+ result = None
+ errors = ''
+ mq_consumer.iteration_index = iteration_index
+ mq_producer.start_iteration()
+
+ try:
+ utils.wait_until_true(
+ mq_consumer.is_all_kpis_received_in_iteration,
+ timeout=timeout, sleep=2)
+ result = method(data)
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception(errors)
+
+ mq_producer.stop_iteration()
+
+ if result:
+ output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+ benchmark_output = {'timestamp': time.time(),
+ 'sequence': iteration_index,
+ 'data': data,
+ 'errors': errors}
+ queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
+
+ LOG.debug('runner=%(runner)s seq=%(sequence)s END',
+ {'runner': runner_cfg['runner_id'],
+ 'sequence': iteration_index})
+
+ iteration_index += 1
+ if iteration_index > iterations or aborted.is_set():
+ LOG.info('"IterationIPC" worker END')
+ break
+
+ if 'teardown' in run_step:
+ try:
+ benchmark.teardown()
+ except Exception:
+ LOG.exception('Exception during teardown process')
+ mq_consumer.stop_rpc_server()
+ raise SystemExit(1)
+
+ LOG.debug('Data queue size = %s', queue.qsize())
+ LOG.debug('Output queue size = %s', output_queue.qsize())
+ mq_consumer.stop_rpc_server()
+
+
+class IterationIPCRunner(base_runner.Runner):
+ """Run a scenario for a configurable number of times.
+
+ Each iteration has a configurable timeout. The loop control depends on the
+ feedback received from the running VNFs. The context PIDs of the VNFs to
+ listen to for messages are given in the scenario "setup" method.
+ """
+ __execution_type__ = 'IterationIPC'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = '{}-{}-{}'.format(
+ self.__execution_type__, scenario_cfg.get('type'), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
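Note: the per-iteration completeness check in is_all_kpis_received_in_iteration() reduces to the comparison below (illustrative sketch on plain lists, not part of the patch):

    def all_kpis_received(kpi_per_id, iteration_index):
        # Every registered producer must have reported exactly one KPI per
        # completed iteration.
        return all(len(kpis) == iteration_index
                   for kpis in kpi_per_id.values())

    assert not all_kpis_received({'ctx1': ['kpi0', 'kpi1', 'kpi2'],
                                  'ctx2': ['kpi0', 'kpi1']}, 3)
    assert all_kpis_received({'ctx1': ['kpi0', 'kpi1', 'kpi2'],
                              'ctx2': ['kpi0', 'kpi1', 'kpi2']}, 3)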
diff --git a/yardstick/benchmark/runners/proxduration.py b/yardstick/benchmark/runners/proxduration.py
new file mode 100644
index 000000000..61a468fd3
--- /dev/null
+++ b/yardstick/benchmark/runners/proxduration.py
@@ -0,0 +1,165 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick comment: this is a modified copy of
+# rally/rally/benchmark/runners/constant.py
+
+"""A runner that runs for a specific amount of time before it returns
+"""
+
+from __future__ import absolute_import
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
+from yardstick.common import constants
+
+LOG = logging.getLogger(__name__)
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = scenario_cfg['runner']
+
+ requested_interval = interval = runner_cfg.get("interval", 1)
+ duration = runner_cfg.get("duration", 60)
+ sampled = runner_cfg.get("sampled", False)
+
+ LOG.info("Worker START, duration is %ds", duration)
+ LOG.debug("class is %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ benchmark.setup()
+ method = getattr(benchmark, method_name)
+
+ sla_action = None
+ if "sla" in scenario_cfg:
+ sla_action = scenario_cfg["sla"].get("action", "assert")
+
+
+ start = time.time()
+ timeout = start + duration
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ benchmark.pre_run_wait_time(interval)
+
+ if sampled:
+ try:
+ pre_adjustment = time.time()
+ result = method(data)
+ post_adjustment = time.time()
+ if requested_interval > post_adjustment - pre_adjustment:
+ interval = requested_interval - (post_adjustment - pre_adjustment)
+ else:
+ interval = 0
+
+ except y_exc.SLAValidationError as error:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ # catch all exceptions because with multiprocessing we can have un-picklable exception
+ # problems https://bugs.python.org/issue9400
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception("")
+ else:
+ if result:
+ # add timeout for put so we don't block test
+ # if we do timeout we don't care about dropping individual KPIs
+ output_queue.put(result, True, constants.QUEUE_PUT_TIMEOUT)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)
+ else:
+ LOG.debug("No sample collected ...Sequence %s", sequence)
+
+
+ sequence += 1
+
+ if (errors and sla_action is None) or time.time() > timeout or aborted.is_set():
+ LOG.info("Worker END")
+ break
+
+ try:
+ benchmark.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+ LOG.info("Exiting ProxDuration Runner...")
+
+class ProxDurationRunner(base.Runner):
+ """Run a scenario for a certain amount of time
+
+If the scenario ends before the time has elapsed, it will be started again.
+
+ Parameters
+ duration - amount of time the scenario will be run for
+ type: int
+ unit: seconds
+ default: 60 sec
+ interval - time to wait between each scenario invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ sampled - Sample data is required yes/no
+ type: boolean
+ unit: True/False
+ default: False
+ confirmation - Number of confirmation retries
+ type: int
+ unit: retry attempts
+ default: 0
+ """
+ __execution_type__ = 'ProxDuration'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
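Note: in the "sampled" branch above, the time spent inside the scenario method is deducted from the requested interval so that samples stay roughly evenly spaced; sketched below (not part of the patch):

    def next_interval(requested_interval, method_duration):
        remaining = requested_interval - method_duration
        return remaining if remaining > 0 else 0

    assert next_interval(1.0, 0.25) == 0.75   # method was quick: wait the rest
    assert next_interval(1.0, 1.50) == 0      # method overran: do not wait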
diff --git a/yardstick/benchmark/runners/search.py b/yardstick/benchmark/runners/search.py
index 8037329b5..01a4292c7 100644
--- a/yardstick/benchmark/runners/search.py
+++ b/yardstick/benchmark/runners/search.py
@@ -33,6 +33,7 @@ from collections import Mapping
from six.moves import zip
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -119,14 +120,14 @@ If the scenario ends before the time has elapsed, it will be started again.
try:
self.worker_helper(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if self.sla_action == "assert":
raise
elif self.sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index d6e3f7109..0148a45b2 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -30,6 +30,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -74,14 +75,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 1fadd2532..e2db03a70 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -58,16 +58,20 @@ class ScenarioGeneral(base.Scenario):
self.director.stopMonitors()
verify_result = self.director.verify()
+ service_not_found = False
for k, v in self.director.data.items():
if v == 0:
- result['sla_pass'] = 0
verify_result = False
+ service_not_found = True
LOG.info("\033[92m The service process (%s) not found in the host environment", k)
result['sla_pass'] = 1 if verify_result else 0
self.director.store_result(result)
- assert verify_result is True, "The HA test case NOT passed"
+ self.verify_SLA(
+ verify_result, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "Director.verify() failed"))
def teardown(self):
self.director.knockoff()
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 42941c6e7..7f976fdbc 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.data = {}
+ self.sla_pass = False
def setup(self):
"""scenario setup"""
@@ -69,23 +70,28 @@ class ServiceHA(base.Scenario):
self.monitorMgr.wait_monitors()
LOG.info("Monitor '%s' stop!", self.__scenario_type__)
- sla_pass = self.monitorMgr.verify_SLA()
+ self.sla_pass = self.monitorMgr.verify_SLA()
+ service_not_found = False
for k, v in self.data.items():
if v == 0:
- sla_pass = False
+ self.sla_pass = False
+ service_not_found = True
LOG.info("The service process (%s) not found in the host envrioment", k)
- result['sla_pass'] = 1 if sla_pass else 0
+ result['sla_pass'] = 1 if self.sla_pass else 0
self.monitorMgr.store_result(result)
- assert sla_pass is True, "The HA test case NOT pass the SLA"
-
- return
+ self.verify_SLA(
+ self.sla_pass, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "MonitorMgr.verify_SLA() failed"))
def teardown(self):
"""scenario teardown"""
- for attacker in self.attackers:
- attacker.recover()
+ # only recover if the SLA was not passed
+ if not self.sla_pass:
+ for attacker in self.attackers:
+ attacker.recover()
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 58a02805c..90a87ac29 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -20,6 +20,7 @@ import six
from stevedore import extension
import yardstick.common.utils as utils
+from yardstick.common import exceptions as y_exc
def _iter_scenario_classes(scenario_type=None):
@@ -61,6 +62,11 @@ class Scenario(object):
"""Time waited after executing the run method"""
time.sleep(time_seconds)
+ def verify_SLA(self, condition, error_msg):
+ if not condition:
+ raise y_exc.SLAValidationError(
+ case_name=self.__scenario_type__, error_msg=error_msg)
+
@staticmethod
def get_types():
"""return a list of known runner type (class) names"""
@@ -113,3 +119,7 @@ class Scenario(object):
except TypeError:
dic[k] = v
return dic
+
+ def get_mq_ids(self): # pragma: no cover
+ """Return stored MQ producer IDs, if defined"""
+ pass
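Note: the scenario changes below all follow one pattern: bare assertions become verify_SLA() calls, which raise SLAValidationError so the runners can catch it explicitly and apply the configured "sla: action". An illustrative sketch (not part of the patch; the exception is reduced to a plain class):

    class SLAValidationError(Exception):
        pass

    def verify_SLA(condition, error_msg):
        # Equivalent of "assert condition, error_msg", but with a typed
        # exception instead of AssertionError.
        if not condition:
            raise SLAValidationError(error_msg)

    mean_latency, sla_max_mean_latency = 120.0, 150.0
    verify_SLA(mean_latency <= sla_max_mean_latency,
               "mean_latency %f > sla_max_mean_latency(%f); "
               % (mean_latency, sla_max_mean_latency))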
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index 998463ef6..413709f3b 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -100,7 +100,7 @@ class Cyclictest(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -195,7 +195,7 @@ class Cyclictest(base.Scenario):
if latency > sla_latency:
sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
(t, latency, t, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 801f7fa80..2237e49e0 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -119,8 +119,8 @@ class Lmbench(base.Scenario):
cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
(repetition, warmup)
else:
- raise RuntimeError("No such test_type: %s for Lmbench scenario",
- test_type)
+ raise RuntimeError("No such test_type: %s for Lmbench scenario"
+ % test_type)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -157,7 +157,7 @@ class Lmbench(base.Scenario):
if sla_latency < cache_latency:
sla_error += "latency %f > sla:max_latency(%f); " \
% (cache_latency, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index 0b8ed9b28..b973211f1 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -93,7 +93,7 @@ class Perf(base.Scenario):
% (load, duration, events_string)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, stdout, _ = self.client.execute(cmd)
if status:
raise RuntimeError(stdout)
@@ -105,16 +105,14 @@ class Perf(base.Scenario):
exp_val = self.scenario_cfg['sla']['expected_value']
smaller_than_exp = 'smaller_than_expected' \
in self.scenario_cfg['sla']
-
- if metric not in result:
- assert False, "Metric (%s) not found." % metric
- else:
- if smaller_than_exp:
- assert result[metric] < exp_val, "%s %d >= %d (sla); " \
- % (metric, result[metric], exp_val)
- else:
- assert result[metric] >= exp_val, "%s %d < %d (sla); " \
- % (metric, result[metric], exp_val)
+ self.verify_SLA(metric in result,
+ "Metric (%s) not found." % metric)
+ if smaller_than_exp:
+ self.verify_SLA(
+ result[metric] < exp_val,
+ "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+ else:
+ self.verify_SLA(
+ result[metric] >= exp_val,
+ "%s %d < %d (sla); " % (metric, result[metric], exp_val))
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index 2de1270ef..975c90b22 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -56,7 +56,7 @@ class QemuMigrate(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -127,7 +127,7 @@ class QemuMigrate(base.Scenario):
if timevalue > sla_time:
sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
(t, timevalue, t, sla_time)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index ca64935dd..4daf776ff 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -121,8 +121,8 @@ class Ramspeed(base.Scenario):
(test_id, load, block_size)
# only the test_id 1-6 will be used in this scenario
else:
- raise RuntimeError("No such type_id: %s for Ramspeed scenario",
- test_id)
+ raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+ % test_id)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -140,4 +140,4 @@ class Ramspeed(base.Scenario):
if bw < sla_min_bw:
sla_error += "Bandwidth %f < " \
"sla:min_bandwidth(%f)" % (bw, sla_min_bw)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
index cdb345717..3cea31694 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench.py
+++ b/yardstick/benchmark/scenarios/compute/unixbench.py
@@ -125,7 +125,7 @@ class Unixbench(base.Scenario):
if score < sla_score:
sla_error += "%s score %f < sla:%s_score(%f); " % \
(t, score, t, sla_score)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 98c45990e..51e044e7b 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -92,7 +92,7 @@ For more info see http://software.es.net/iperf
def teardown(self):
LOG.debug("teardown")
self.host.close()
- status, stdout, stderr = self.target.execute("pkill iperf3")
+ status, _, stderr = self.target.execute("pkill iperf3")
if status:
LOG.warning(stderr)
self.target.close()
@@ -145,7 +145,7 @@ For more info see http://software.es.net/iperf
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.host.execute(cmd)
+ status, stdout, _ = self.host.execute(cmd)
if status:
# error cause in json dict on stdout
raise RuntimeError(stdout)
@@ -165,16 +165,17 @@ For more info see http://software.es.net/iperf
bit_per_second = \
int(iperf_result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
- assert bytes_per_second >= sla_bytes_per_second, \
- "bytes_per_second %d < sla:bytes_per_second (%d); " % \
- (bytes_per_second, sla_bytes_per_second)
+ self.verify_SLA(
+ bytes_per_second >= sla_bytes_per_second,
+ "bytes_per_second %d < sla:bytes_per_second (%d); "
+ % (bytes_per_second, sla_bytes_per_second))
else:
sla_jitter = float(sla_iperf["jitter"])
jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
- assert jitter_ms <= sla_jitter, \
- "jitter_ms %f > sla:jitter %f; " % \
- (jitter_ms, sla_jitter)
+ self.verify_SLA(jitter_ms <= sla_jitter,
+ "jitter_ms %f > sla:jitter %f; "
+ % (jitter_ms, sla_jitter))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
index 86173c9da..e3bd7af46 100644
--- a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
+++ b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
@@ -367,9 +367,10 @@ ports = {0,1},
throughput_rx_mpps = int(
self.scenario_cfg["sla"]["throughput_rx_mpps"])
- assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
- "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
- (throughput_rx_mpps, moongen_result["tx_mpps"])
+ self.verify_SLA(
+ throughput_rx_mpps <= moongen_result["tx_mpps"],
+ "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+ % (throughput_rx_mpps, moongen_result["tx_mpps"]))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
index 33c02d409..9f1a81413 100755
--- a/yardstick/benchmark/scenarios/networking/netperf.py
+++ b/yardstick/benchmark/scenarios/networking/netperf.py
@@ -138,9 +138,9 @@ class Netperf(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
index d52e6b9e1..0ad2ecff5 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_node.py
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -156,9 +156,10 @@ class NetperfNode(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(
+ mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def teardown(self):
"""remove netperf from nodes after test"""
diff --git a/yardstick/benchmark/scenarios/networking/nstat.py b/yardstick/benchmark/scenarios/networking/nstat.py
index 10c560769..ea067f8ab 100644
--- a/yardstick/benchmark/scenarios/networking/nstat.py
+++ b/yardstick/benchmark/scenarios/networking/nstat.py
@@ -121,4 +121,4 @@ class Nstat(base.Scenario):
if rate > sla_rate:
sla_error += "%s rate %f > sla:%s_rate(%f); " % \
(i, rate, i, sla_rate)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index e7d9beea8..6caeab5ef 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -91,9 +91,10 @@ class Ping(base.Scenario):
result.update(utils.flatten_dict_key(ping_result))
if sla_max_rtt is not None:
sla_max_rtt = float(sla_max_rtt)
- assert rtt_result[target_vm_name] <= sla_max_rtt,\
- "rtt %f > sla: max_rtt(%f); " % \
- (rtt_result[target_vm_name], sla_max_rtt)
+ self.verify_SLA(
+ rtt_result[target_vm_name] <= sla_max_rtt,
+ "rtt %f > sla: max_rtt(%f); "
+ % (rtt_result[target_vm_name], sla_max_rtt))
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
# we need to specify a result to satisfy influxdb schema
@@ -102,13 +103,12 @@ class Ping(base.Scenario):
rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
# store result before potential AssertionError
result.update(utils.flatten_dict_key(ping_result))
- if sla_max_rtt is not None:
- raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
- rtt_result[target_vm_name], sla_max_rtt))
-
- else:
- raise AssertionError(
- "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+ if sla_max_rtt is not None:
+ self.verify_SLA(False,
+ "packet dropped rtt %f > sla: max_rtt(%f)"
+ % (rtt_result[target_vm_name], sla_max_rtt))
+ self.verify_SLA(False,
+ "packet dropped rtt %f"
+ % rtt_result[target_vm_name])
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
index 74855a10f..377278004 100644
--- a/yardstick/benchmark/scenarios/networking/ping6.py
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -59,8 +59,7 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.pre_setup_script, '~/pre_setup.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash pre_setup.sh")
+ self.client.execute("sudo bash pre_setup.sh")
def _get_controller_node(self, host_list):
for host_name in host_list:
@@ -122,7 +121,7 @@ class Ping6(base.Scenario): # pragma: no cover
cmd = "sudo bash %s %s %s" % \
(setup_bash_file, self.openrc, self.external_network)
LOG.debug("Executing setup command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ self.client.execute(cmd)
self.setup_done = True
@@ -171,8 +170,9 @@ class Ping6(base.Scenario): # pragma: no cover
result["rtt"] = float(stdout)
if "sla" in self.scenario_cfg:
sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
- assert result["rtt"] <= sla_max_rtt, \
- "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+ self.verify_SLA(result["rtt"] <= sla_max_rtt,
+ "rtt %f > sla:max_rtt(%f); "
+ % (result["rtt"], sla_max_rtt))
else:
LOG.error("ping6 timeout!!!")
self.run_done = True
@@ -216,5 +216,4 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.post_teardown_script, '~/post_teardown.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash post_teardown.sh")
+ self.client.execute("sudo bash post_teardown.sh")
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index b79b91539..c78108adb 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -87,7 +87,7 @@ class Pktgen(base.Scenario):
self.server.send_command(cmd)
self.client.send_command(cmd)
- """multiqueue setup"""
+ # multiqueue setup
if not self._is_irqbalance_disabled():
self._disable_irqbalance()
@@ -112,18 +112,14 @@ class Pktgen(base.Scenario):
def _get_vnic_driver_name(self):
cmd = "readlink /sys/class/net/%s/device/driver" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return os.path.basename(stdout.strip())
def _is_irqbalance_disabled(self):
"""Did we disable irqbalance already in the guest?"""
is_disabled = False
cmd = "grep ENABLED /etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
if "0" in stdout:
is_disabled = True
@@ -132,49 +128,35 @@ class Pktgen(base.Scenario):
def _disable_irqbalance(self):
cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
"/etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "sudo service irqbalance stop"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "sudo service irqbalance disable"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _setup_irqmapping_ovs(self, queue_number):
cmd = "grep 'virtio0-input.0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "grep 'virtio0-output.0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
if queue_number == 1:
return
@@ -186,44 +168,32 @@ class Pktgen(base.Scenario):
cmd = "grep 'virtio0-input.%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "grep 'virtio0-output.%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _setup_irqmapping_sriov(self, queue_number):
cmd = "grep '%s-TxRx-0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % self.vnic_name
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
if queue_number == 1:
return
@@ -234,24 +204,18 @@ class Pktgen(base.Scenario):
cmd = "grep '%s-TxRx-%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (self.vnic_name, i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _get_sriov_queue_number(self):
"""Get queue number from server as both VMs are the same"""
cmd = "grep %s-TxRx- /proc/interrupts | wc -l" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _get_available_queue_number(self):
@@ -259,9 +223,7 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -l %s | grep Combined | head -1 |" \
"awk '{printf $2}'" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _get_usable_queue_number(self):
@@ -269,9 +231,7 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -l %s | grep Combined | tail -1 |" \
"awk '{printf $2}'" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _enable_ovs_multiqueue(self):
@@ -282,10 +242,8 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -L %s combined %s" % \
(self.vnic_name, available_queue_number)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
return available_queue_number
def _iptables_setup(self):
@@ -294,9 +252,7 @@ class Pktgen(base.Scenario):
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
- status, _, stderr = self.server.execute(cmd, timeout=SSH_TIMEOUT)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd, timeout=SSH_TIMEOUT)
def _iptables_get_result(self):
"""Get packet statistics from server"""
@@ -304,9 +260,7 @@ class Pktgen(base.Scenario):
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def run(self, result):
@@ -356,10 +310,8 @@ class Pktgen(base.Scenario):
duration, queue_number, pps)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd, timeout=SSH_TIMEOUT)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True,
+ timeout=SSH_TIMEOUT)
result.update(jsonutils.loads(stdout))
@@ -374,8 +326,8 @@ class Pktgen(base.Scenario):
if "sla" in self.scenario_cfg:
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm,
+ "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
def _test(): # pragma: no cover
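
The pktgen hunks above repeatedly collapse the "unpack three values, check status, raise RuntimeError(stderr)" pattern into execute(..., raise_on_error=True) when stdout is needed, or run() when only success matters. The stand-in below is a self-contained sketch of those two call styles; execute() and run() are real method names from the diff, but their bodies and default arguments here are assumed.

    # Self-contained stand-in for the SSH wrapper used above (assumed bodies).
    class SSHClient(object):
        def execute(self, cmd, raise_on_error=False, timeout=3600):
            # Pretend the remote command succeeded and printed "4".
            status, stdout, stderr = 0, "4", ""
            if status and raise_on_error:
                raise RuntimeError(stderr)
            return status, stdout, stderr

        def run(self, cmd, timeout=3600):
            # Used when only success/failure matters; always raises on failure.
            return self.execute(cmd, raise_on_error=True, timeout=timeout)[0]

    server = SSHClient()
    # New style: no manual "if status: raise RuntimeError(stderr)" after each call.
    _, stdout, _ = server.execute("sudo ethtool -l eth0 | grep Combined | head -1 |"
                                  "awk '{printf $2}'", raise_on_error=True)
    queue_number = int(stdout)
    server.run("sudo service irqbalance stop")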
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
index 9a7b975a2..efb7d8b5d 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
@@ -113,10 +113,7 @@ cat ~/result.log -vT \
{print substr($0,RSTART,RLENGTH)}' \
|grep -v ^$ |awk '{if ($2 != 0) print $2}'\
"""
- client_status, client_stdout, client_stderr = self.client.execute(cmd)
-
- if client_status:
- raise RuntimeError(client_stderr)
+ _, client_stdout, _ = self.client.execute(cmd, raise_on_error=True)
avg_latency = 0
if client_stdout:
@@ -135,4 +132,4 @@ cat ~/result.log -vT \
LOG.info("sla_max_latency: %d", sla_max_latency)
debug_info = "avg_latency %d > sla_max_latency %d" \
% (avg_latency, sla_max_latency)
- assert avg_latency <= sla_max_latency, debug_info
+ self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
index 497e59ee8..97b9cf73f 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
@@ -143,11 +143,11 @@ class PktgenDPDK(base.Scenario):
cmd = "ip a | grep eth1 2>/dev/null"
LOG.debug("Executing command: %s in %s", cmd, host)
if "server" in host:
- status, stdout, stderr = self.server.execute(cmd)
+ _, stdout, _ = self.server.execute(cmd)
if stdout:
is_run = False
else:
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
@@ -222,5 +222,5 @@ class PktgenDPDK(base.Scenario):
ppm += (sent - received) % sent > 0
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; "
+ % (ppm, sla_max_ppm))
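
For readers unfamiliar with the metric checked against sla_max_ppm: it is packet loss expressed in parts per million of packets sent, and the "+= ... > 0" context line above rounds any fractional loss up. A small worked example, with made-up packet counts and the per-million scaling assumed from the surrounding code:

    # Worked example of the loss check (ppm = lost packets per million sent).
    sent = 1000000
    received = 999988
    lost = sent - received

    ppm = 1000000 * lost // sent   # 12
    ppm += lost % sent > 0         # any non-zero remainder rounds up -> 13
    sla_max_ppm = 1000
    print(ppm, ppm <= sla_max_ppm)  # 13 True: within the example SLA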
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 1c3ea1f8d..3bb168c70 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -23,6 +23,7 @@ import time
import six
import yaml
+from yardstick.benchmark.contexts import base as context_base
from yardstick.benchmark.scenarios import base as scenario_base
from yardstick.common.constants import LOG_DIR
from yardstick.common import exceptions
@@ -36,6 +37,7 @@ from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh
+
traffic_profile.register_modules()
@@ -48,7 +50,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
__scenario_type__ = "NSPerf"
- def __init__(self, scenario_cfg, context_cfg): # Yardstick API
+ def __init__(self, scenario_cfg, context_cfg): # pragma: no cover
super(NetworkServiceTestCase, self).__init__()
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
@@ -59,6 +61,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self.traffic_profile = None
self.node_netdevs = {}
self.bin_path = get_nsb_option('bin_path', '')
+ self._mq_ids = []
def _get_ip_flow_range(self, ip_start_range):
@@ -133,11 +136,10 @@ class NetworkServiceTestCase(scenario_base.Scenario):
with utils.open_relative_file(profile, path) as infile:
return infile.read()
- def _get_topology(self):
- topology = self.scenario_cfg["topology"]
- path = self.scenario_cfg["task_path"]
- with utils.open_relative_file(topology, path) as infile:
- return infile.read()
+ def _get_duration(self):
+ options = self.scenario_cfg.get('options', {})
+ return options.get('duration',
+ tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
def _fill_traffic_profile(self):
tprofile = self._get_traffic_profile()
@@ -147,12 +149,17 @@ class NetworkServiceTestCase(scenario_base.Scenario):
'imix': self._get_traffic_imix(),
tprofile_base.TrafficProfile.UPLINK: {},
tprofile_base.TrafficProfile.DOWNLINK: {},
- 'extra_args': extra_args
- }
-
+ 'extra_args': extra_args,
+ 'duration': self._get_duration()}
traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
+ def _get_topology(self):
+ topology = self.scenario_cfg["topology"]
+ path = self.scenario_cfg["task_path"]
+ with utils.open_relative_file(topology, path) as infile:
+ return infile.read()
+
def _render_topology(self):
topology = self._get_topology()
topology_args = self.scenario_cfg.get('extra_args', {})
@@ -162,18 +169,18 @@ class NetworkServiceTestCase(scenario_base.Scenario):
topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data)
self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
- def _find_vnf_name_from_id(self, vnf_id):
+ def _find_vnf_name_from_id(self, vnf_id): # pragma: no cover
return next((vnfd["vnfd-id-ref"]
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
- def _find_vnfd_from_vnf_idx(self, vnf_id):
+ def _find_vnfd_from_vnf_idx(self, vnf_id): # pragma: no cover
return next((vnfd
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
@staticmethod
- def find_node_if(nodes, name, if_name, vld_id):
+ def find_node_if(nodes, name, if_name, vld_id): # pragma: no cover
try:
# check for xe0, xe1
intf = nodes[name]["interfaces"][if_name]
@@ -266,14 +273,14 @@ class NetworkServiceTestCase(scenario_base.Scenario):
node0_if["peer_intf"] = node1_copy
node1_if["peer_intf"] = node0_copy
- def _update_context_with_topology(self):
+ def _update_context_with_topology(self): # pragma: no cover
for vnfd in self.topology["constituent-vnfd"]:
vnf_idx = vnfd["member-vnf-index"]
vnf_name = self._find_vnf_name_from_id(vnf_idx)
vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
self.context_cfg["nodes"][vnf_name].update(vnfd)
- def _generate_pod_yaml(self):
+ def _generate_pod_yaml(self): # pragma: no cover
context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
# convert OrderedDict to a list
# pod.yaml nodes is a list
@@ -287,7 +294,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
explicit_start=True)
@staticmethod
- def _serialize_node(node):
+ def _serialize_node(node): # pragma: no cover
new_node = copy.deepcopy(node)
# name field is required
# remove context suffix
@@ -309,7 +316,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self._update_context_with_topology()
@classmethod
- def get_vnf_impl(cls, vnf_model_id):
+ def get_vnf_impl(cls, vnf_model_id): # pragma: no cover
""" Find the implementing class from vnf_model["vnf"]["name"] field
:param vnf_model_id: parsed vnfd model ID field
@@ -337,7 +344,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
raise exceptions.IncorrectConfig(error_msg=message)
@staticmethod
- def create_interfaces_from_node(vnfd, node):
+ def create_interfaces_from_node(vnfd, node): # pragma: no cover
ext_intfs = vnfd["vdu"][0]["external-interface"] = []
# have to sort so xe0 goes first
for intf_name, intf in sorted(node['interfaces'].items()):
@@ -406,10 +413,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
return vnfs
def setup(self):
- """ Setup infrastructure, provission VNFs & start traffic
-
- :return:
- """
+ """Setup infrastructure, provission VNFs & start traffic"""
# 1. Verify if infrastructure mapping can meet topology
self.map_topology_to_infrastructure()
# 1a. Load VNF models
@@ -444,13 +448,18 @@ class NetworkServiceTestCase(scenario_base.Scenario):
traffic_gen.listen_traffic(self.traffic_profile)
# register collector with yardstick for KPI collection.
- self.collector = Collector(self.vnfs)
+ self.collector = Collector(self.vnfs, context_base.Context.get_physical_nodes())
self.collector.start()
# Start the actual traffic
for traffic_gen in traffic_runners:
LOG.info("Starting traffic on %s", traffic_gen.name)
traffic_gen.run_traffic(self.traffic_profile)
+ self._mq_ids.append(traffic_gen.get_mq_producer_id())
+
+ def get_mq_ids(self): # pragma: no cover
+ """Return stored MQ producer IDs"""
+ return self._mq_ids
def run(self, result): # yardstick API
""" Yardstick calls run() at intervals defined in the yaml and
@@ -489,10 +498,10 @@ class NetworkServiceTestCase(scenario_base.Scenario):
LOG.exception("")
raise RuntimeError("Error in teardown")
- def pre_run_wait_time(self, time_seconds):
+ def pre_run_wait_time(self, time_seconds): # pragma: no cover
"""Time waited before executing the run method"""
time.sleep(time_seconds)
- def post_run_wait_time(self, time_seconds):
+ def post_run_wait_time(self, time_seconds): # pragma: no cover
"""Time waited after executing the run method"""
pass
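
The new _get_duration() helper above lets a scenario's options override the traffic-profile duration, falling back to TrafficProfileConfig.DEFAULT_DURATION before the value is fed into the profile data alongside flow and imix settings. A minimal sketch of that lookup; the 30-second default is an assumption, not taken from this diff.

    # Sketch of the duration lookup added above. The real default lives in
    # tprofile_base.TrafficProfileConfig.DEFAULT_DURATION; 30 s is assumed here.
    DEFAULT_DURATION = 30

    def get_duration(scenario_cfg):
        options = scenario_cfg.get('options', {})
        return options.get('duration', DEFAULT_DURATION)

    print(get_duration({'options': {'duration': 120}}))  # 120, from the options
    print(get_duration({}))                              # 30, the assumed default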
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 705544c41..2b3474070 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -215,15 +215,15 @@ class Vsperf(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 454587829..27bf40dcb 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -231,7 +231,7 @@ class VsperfDPDK(base.Scenario):
is_run = True
cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
return is_run
@@ -325,15 +325,15 @@ class VsperfDPDK(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index d3ed840d8..c57c6edf2 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -223,7 +223,7 @@ class Fio(base.Scenario):
sla_error += "%s %d < " \
"sla:%s(%d); " % (k, v, k, min_v)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
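
The fio change keeps the existing "accumulate every violation, then check once" style, only swapping the final assert for verify_SLA. A toy illustration of why that reads well: a single failure message can list every breached threshold from one run. The metric names and limits below are made up.

    # Toy illustration of the accumulate-then-check pattern kept by the fio change.
    measured = {'read_iops': 900, 'write_iops': 400}
    minimums = {'read_iops': 1000, 'write_iops': 500}

    sla_error = ""
    for k, min_v in minimums.items():
        v = measured[k]
        if v < min_v:
            sla_error += "%s %d < sla:%s(%d); " % (k, v, k, min_v)

    print(sla_error)  # both violations reported in one string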