Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/core/task.py                                   | 20
-rwxr-xr-x  yardstick/benchmark/runners/base.py                                |  4
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash  | 25
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py                   | 35
-rwxr-xr-x  yardstick/cmd/NSBperf.py                                           | 20
-rw-r--r--  yardstick/dispatcher/influxdb.py                                   |  4
-rw-r--r--  yardstick/network_services/nfvi/collectd.conf                      | 18
-rw-r--r--  yardstick/network_services/nfvi/resource.py                        | 35
8 files changed, 128 insertions, 33 deletions
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 53298d8d3..1512ca718 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -39,7 +39,6 @@ output_file_default = "/tmp/yardstick.out"
config_file = '/etc/yardstick/yardstick.conf'
test_cases_dir_default = "tests/opnfv/test_cases/"
LOG = logging.getLogger(__name__)
-JOIN_TIMEOUT = 60
class Task(object): # pragma: no cover
@@ -260,7 +259,7 @@ class Task(object): # pragma: no cover
# Wait for runners to finish
for runner in runners:
- status = runner_join(runner, self.outputs, result)
+ status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
raise RuntimeError(
"{0} runner status {1}".format(runner.__execution_type__, status))
@@ -270,7 +269,7 @@ class Task(object): # pragma: no cover
for scenario in scenarios:
if not _is_background_scenario(scenario):
runner = self.run_one_scenario(scenario, output_file)
- status = runner_join(runner, self.outputs, result)
+ status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
LOG.error('Scenario NO.%s: "%s" ERROR!',
scenarios.index(scenario) + 1,
@@ -285,11 +284,11 @@ class Task(object): # pragma: no cover
# Wait for background runners to finish
for runner in background_runners:
- status = runner.join(self.outputs, result, JOIN_TIMEOUT)
+ status = runner.join(self.outputs, result)
if status is None:
# Nuke if it did not stop nicely
base_runner.Runner.terminate(runner)
- runner.join(self.outputs, result, JOIN_TIMEOUT)
+ runner.join(self.outputs, result)
base_runner.Runner.release(runner)
print("Background task ended")
@@ -641,13 +640,22 @@ def get_networks_from_nodes(nodes):
return networks
-def runner_join(runner, outputs, result):
+def runner_join(runner, background_runners, outputs, result):
"""join (wait for) a runner, exit process at runner failure
+ :param background_runners: background runners whose queues are drained while waiting
+ :type background_runners: list
:param outputs:
:type outputs: dict
:param result:
:type result: list
"""
+ while runner.poll() is None:
+ outputs.update(runner.get_output())
+ result.extend(runner.get_result())
+ # drain all the background runner queues
+ for background in background_runners:
+ outputs.update(background.get_output())
+ result.extend(background.get_result())
status = runner.join(outputs, result)
base_runner.Runner.release(runner)
return status
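
Why runner_join now drains queues while it waits: as the comment in base.py below puts it, a child process that is still writing to a multiprocessing queue cannot terminate until someone empties the queue, so a bare blocking join() can deadlock. A minimal, self-contained sketch of the poll-and-drain pattern, with illustrative names rather than the yardstick classes:

import multiprocessing

def worker(queue):
    # push enough data that the queue's feeder thread fills its pipe
    for i in range(100000):
        queue.put({"sample": i})

def drain(queue, sink):
    while not queue.empty():
        sink.append(queue.get())

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    results = []
    proc = multiprocessing.Process(target=worker, args=(queue,))
    proc.start()
    while proc.exitcode is None:   # what poll() checks in the diff below
        proc.join(5)               # wait up to QUEUE_JOIN_INTERVAL seconds
        drain(queue, results)      # empty the queue so the child can exit
    drain(queue, results)          # collect anything left after exit
    print(len(results), "records collected")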
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index 13718d793..a887fa5b3 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -210,6 +210,10 @@ class Runner(object):
QUEUE_JOIN_INTERVAL = 5
+ def poll(self, timeout=QUEUE_JOIN_INTERVAL):
+ self.process.join(timeout)
+ return self.process.exitcode
+
def join(self, outputs, result, interval=QUEUE_JOIN_INTERVAL):
while self.process.exitcode is None:
# drain the queue while we are running otherwise we won't terminate
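
A note on poll(): multiprocessing.Process.join(timeout) always returns None, so the only way to learn whether the child is still alive is to read exitcode afterwards, which is exactly what poll() returns. A small sketch of those semantics (the sleeping child is illustrative):

import multiprocessing
import time

def child():
    time.sleep(3)

if __name__ == "__main__":
    proc = multiprocessing.Process(target=child)
    proc.start()
    proc.join(1)          # returns None whether or not the child exited
    print(proc.exitcode)  # None: still running after the 1s timeout
    proc.join()           # block until it really finishes
    print(proc.exitcode)  # 0: clean exit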
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash b/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash
index d9a440c89..757553e8b 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate_benchmark.bash
@@ -21,15 +21,24 @@ max_down_time=$6
OUTPUT_FILE=/tmp/output-qemu.log
+echo "To check the parameters:"
+echo "SRC: $src"
+echo "DST: $dst"
+echo "DST_IP: $dst_ip"
+echo "MIGRATE_PORT: $migrate_to_port"
+echo "DOWN_TIME: $max_down_time"
+
do_migrate()
{
+ echo "Execution of Live Migration"
+
echo "info status" | nc -U $src
# with no speed limit
- echo "migrate_set_speed 0" |nc -U $src
+ echo "migrate_set_speed 0" | nc -U $src
# set the expected max downtime
- echo "migrate_set_downtime ${max_down_time}" |nc -U $src
+ echo "migrate_set_downtime ${max_down_time}" | nc -U $src
# start live migration
- echo "migrate -d tcp:${dst_ip}:$migrate_to_port" |nc -U $src
+ echo "migrate -d tcp:${dst_ip}:${migrate_to_port}" | nc -U $src
# wait until live migration completed
status=""
while [ "${status}" == "" ]
@@ -38,14 +47,17 @@ do_migrate()
echo ${status}
sleep 1;
done
-} >/dev/null
+
+ echo "End of Live Migration"
+}
output_qemu()
{
+ echo "Checking status of Migration"
# print detail information
echo "info migrate" | nc -U $src
echo "quit" | nc -U $src
- echo "quit" | nc -u $dst
+ echo "quit" | nc -U $dst
sleep 5
echo "Migration executed successfully"
@@ -65,8 +77,11 @@ echo -e "{ \
# main entry
main()
{
+ echo "Perform LiveMigration"
do_migrate
+ echo "LiveMigration Status"
output_qemu
+ echo "LiveMigration JSON output "
output_json
}
main
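
For context on the nc -U pipelines in this script: each one sends a single human-monitor command over QEMU's Unix control socket and reads the reply. A hedged Python equivalent of that interaction (the socket path in the usage comment and the 2-second timeout are assumptions, not values from the script):

import socket

def monitor_command(sock_path, command):
    # send one command to a QEMU human monitor socket, return the raw reply
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(sock_path)
        sock.sendall((command + "\n").encode())
        sock.settimeout(2.0)
        chunks = []
        try:
            while True:
                data = sock.recv(4096)
                if not data:
                    break
                chunks.append(data)
        except socket.timeout:
            pass
        return b"".join(chunks).decode(errors="replace")

# e.g. monitor_command("/tmp/qemu-monitor-src.sock", "info migrate")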
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index 3bade73e2..e7d9beea8 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -24,6 +24,8 @@ LOG = logging.getLogger(__name__)
class Ping(base.Scenario):
"""Execute ping between two hosts
+ If a ping fails, the RTT is reported as 999999 (PING_ERROR_RTT).
+
Parameters
packetsize - number of data bytes to send
type: int
@@ -33,6 +35,8 @@ class Ping(base.Scenario):
__scenario_type__ = "Ping"
+ PING_ERROR_RTT = 999999
+
TARGET_SCRIPT = 'ping_benchmark.bash'
def __init__(self, scenario_cfg, context_cfg):
@@ -60,6 +64,7 @@ class Ping(base.Scenario):
rtt_result = {}
ping_result = {"rtt": rtt_result}
+ sla_max_rtt = self.scenario_cfg.get("sla", {}).get("max_rtt")
for pos, dest in enumerate(dest_list):
if 'targets' in self.scenario_cfg:
@@ -76,20 +81,34 @@ class Ping(base.Scenario):
if exit_status != 0:
raise RuntimeError(stderr)
+ if isinstance(target_vm, dict):
+ target_vm_name = target_vm.get("name")
+ else:
+ target_vm_name = target_vm.split('.')[0]
if stdout:
- if isinstance(target_vm, dict):
- target_vm_name = target_vm.get("name")
- else:
- target_vm_name = target_vm.split('.')[0]
- rtt_result[target_vm_name] = float(stdout)
- if "sla" in self.scenario_cfg:
- sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
+ rtt_result[target_vm_name] = float(stdout.strip())
+ # store result before potential AssertionError
+ result.update(utils.flatten_dict_key(ping_result))
+ if sla_max_rtt is not None:
+ sla_max_rtt = float(sla_max_rtt)
assert rtt_result[target_vm_name] <= sla_max_rtt,\
"rtt %f > sla: max_rtt(%f); " % \
(rtt_result[target_vm_name], sla_max_rtt)
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
- result.update(utils.flatten_dict_key(ping_result))
+ # we need to specify a result to satisfy influxdb schema
+ # choose a very large number to indicate timeout
+ # in this case choose an order of magnitude greater than the SLA
+ rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
+ # store result before potential AssertionError
+ result.update(utils.flatten_dict_key(ping_result))
+ if sla_max_rtt is not None:
+ raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
+ rtt_result[target_vm_name], sla_max_rtt))
+
+ else:
+ raise AssertionError(
+ "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
def _test(): # pragma: no cover
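
Distilled from the branching above, a self-contained sketch of the sentinel-plus-SLA pattern (record_rtt is an illustrative helper, not yardstick API): the measurement is stored before any assertion so the InfluxDB series stays complete, and a dropped packet both records the sentinel RTT and raises.

PING_ERROR_RTT = 999999

def record_rtt(rtt_result, name, stdout, sla_max_rtt=None):
    dropped = not stdout
    rtt = float(PING_ERROR_RTT) if dropped else float(stdout.strip())
    rtt_result[name] = rtt  # store first so the record survives a raise
    if sla_max_rtt is not None and rtt > float(sla_max_rtt):
        raise AssertionError(
            "rtt {:f} > sla: max_rtt({:f})".format(rtt, float(sla_max_rtt)))
    if dropped:
        raise AssertionError("packet dropped rtt {:f}".format(rtt))
    return rtt

# e.g. record_rtt({}, "ares.demo", "0.25", sla_max_rtt=10) -> 0.25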
diff --git a/yardstick/cmd/NSBperf.py b/yardstick/cmd/NSBperf.py
index 2dc0f65e7..5d0aa746d 100755
--- a/yardstick/cmd/NSBperf.py
+++ b/yardstick/cmd/NSBperf.py
@@ -150,8 +150,24 @@ class YardstickNSCli(object):
testcases = os.listdir(test_path + vnf)
print(("VNF :(%s)" % vnf))
print("================")
- for testcase in [tc for tc in testcases if "tc_" in tc]:
- print('%s' % testcase)
+ test_cases = [tc for tc in testcases if "tc_" in tc and "template" not in tc]
+
+ print("\tBareMetal Testcase:")
+ print("\t===================")
+ for testcase in [tc for tc in test_cases if "baremetal" in tc]:
+ print("\t%s" % testcase)
+
+ print(os.linesep)
+ print("\tStandalone Virtualization Testcase:")
+ print("\t===================================")
+ for testcase in [tc for tc in test_cases if "ovs" in tc or "sriov" in tc]:
+ print("\t%s" % testcase)
+
+ print(os.linesep)
+ print("\tOpenstack Testcase:")
+ print("\t===================")
+ for testcase in [tc for tc in test_cases if "heat" in tc]:
+ print("\t%s" % testcase)
print(os.linesep)
raise SystemExit(0)
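
The listing above is plain keyword bucketing; the same logic as a testable sketch (bucket_testcases is an invented name, the labels mirror the printed headings):

def bucket_testcases(testcases):
    tcs = [tc for tc in testcases if "tc_" in tc and "template" not in tc]
    return {
        "BareMetal": [tc for tc in tcs if "baremetal" in tc],
        "Standalone Virtualization": [tc for tc in tcs
                                      if "ovs" in tc or "sriov" in tc],
        "OpenStack": [tc for tc in tcs if "heat" in tc],
    }

# e.g. bucket_testcases(["tc_baremetal_a", "tc_heat_b", "tc_template_c"])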
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index f157e91f9..632b433b5 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -55,7 +55,9 @@ class InfluxdbDispatcher(DispatchBase):
for case, data in testcases.items():
tc_criteria = data['criteria']
for record in data['tc_data']:
- self._upload_one_record(record, case, tc_criteria)
+ # skip results with no data because influxdb cannot encode empty dicts
+ if record.get("data"):
+ self._upload_one_record(record, case, tc_criteria)
return 0
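
Why empty records are skipped: the InfluxDB line protocol requires at least one field per point, so a record whose "data" dict is empty cannot be encoded. An illustrative serializer (not the dispatcher's actual code) makes the failure mode concrete:

def to_line_protocol(measurement, tags, fields):
    if not fields:
        raise ValueError("influxdb line protocol needs at least one field")
    tag_str = ",".join("{}={}".format(k, v) for k, v in sorted(tags.items()))
    field_str = ",".join("{}={}".format(k, v)
                         for k, v in sorted(fields.items()))
    return "{},{} {}".format(measurement, tag_str, field_str)

records = [{"data": {"rtt": 0.25}}, {"data": {}}]
lines = [to_line_protocol("ping", {"tc": "tc002"}, r["data"])
         for r in records if r.get("data")]  # skip empties, as above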
diff --git a/yardstick/network_services/nfvi/collectd.conf b/yardstick/network_services/nfvi/collectd.conf
index e6a1f0d8c..b166fe7fd 100644
--- a/yardstick/network_services/nfvi/collectd.conf
+++ b/yardstick/network_services/nfvi/collectd.conf
@@ -24,10 +24,17 @@ Interval {{ interval }}
##############################################################################
#LoadPlugin syslog
-{% for plugin in loadplugins %}
+{% for plugin in loadplugins if plugin != "ovs_stats" %}
LoadPlugin {{ plugin }}
{% endfor %}
+{% if "ovs_stats" in plugins %}
+<LoadPlugin ovs_stats>
+ Interval 1
+</LoadPlugin>
+{% endif %}
+
+
##############################################################################
# Plugin configuration #
#----------------------------------------------------------------------------#
@@ -82,12 +89,11 @@ LoadPlugin {{ plugin }}
{% endif %}
{% if "intel_pmu" in plugins %}
-<Plugin intel_pmu>
+<Plugin "intel_pmu">
ReportHardwareCacheEvents true
ReportKernelPMUEvents true
ReportSoftwareEvents true
- EventList "/opt/nsb_bin/pmu_event.json"
- HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD"
+ EventList "{{ plugins['intel_pmu']['pmu_event_path'] }}"
</Plugin>
{% endif %}
@@ -110,6 +116,8 @@ LoadPlugin {{ plugin }}
{% if "virt" in plugins %}
<Plugin "virt">
# monitor all domains
+ RefreshInterval 60
+ ExtraStats "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin"
</Plugin>
{% endif %}
@@ -117,7 +125,7 @@ LoadPlugin {{ plugin }}
<Plugin "ovs_stats">
Port "6640"
Address "127.0.0.1"
- Socket "/usr/local/var/run/openvswitch/db.sock"
+ Socket "{{ plugins['ovs_stats']['ovs_socket_path'] }}"
# don't specify bridges, monitor all bridges
</Plugin>
{% endif %}
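
The new template keys are filled from the plugins dict that resource.py (below) builds before rendering. A minimal jinja2 rendering of just the ovs_stats stanza, assuming jinja2 as yardstick's templating already does; the socket path shown is the default the code falls back to:

from jinja2 import Template

snippet = Template("""\
{% if "ovs_stats" in plugins %}
<Plugin "ovs_stats">
  Socket "{{ plugins['ovs_stats']['ovs_socket_path'] }}"
</Plugin>
{% endif %}
""")
print(snippet.render(plugins={
    "ovs_stats": {"ovs_socket_path": "/usr/local/var/run/openvswitch/db.sock"},
}))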
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index fef44e207..dea754d8b 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -34,7 +34,7 @@ from yardstick import ssh
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
-from yardstick.network_services.utils import get_nsb_option
+
LOG = logging.getLogger(__name__)
@@ -53,6 +53,7 @@ class ResourceProfile(object):
AMPQ_PORT = 5672
DEFAULT_INTERVAL = 25
DEFAULT_TIMEOUT = 3600
+ OVS_SOCKET_PATH = "/usr/local/var/run/openvswitch/db.sock"
def __init__(self, mgmt, port_names=None, cores=None, plugins=None,
interval=None, timeout=None):
@@ -154,7 +155,6 @@ class ResourceProfile(object):
"dpdkstat": {},
"virt": {},
"ovs_stats": {},
- "intel_pmu": {},
}
testcase = ""
@@ -233,10 +233,32 @@ class ResourceProfile(object):
}
self._provide_config_file(config_file_path, self.COLLECTD_CONF, kwargs)
+ def _setup_intel_pmu(self, connection, bin_path):
+ pmu_event_path = os.path.join(bin_path, "pmu_event.json")
+ try:
+ self.plugins["intel_pmu"]["pmu_event_path"] = pmu_event_path
+ except (KeyError, TypeError):
+ # intel_pmu is missing or not a dict; force it into a dict
+ self.plugins["intel_pmu"] = {"pmu_event_path": pmu_event_path}
+ LOG.debug("Downloading event list for pmu_stats plugin")
+ cmd = 'cd {0}; PMU_EVENTS_PATH={1} python event_download_local.py'.format(
+ bin_path, pmu_event_path)
+ cmd = "sudo bash -c '{}'".format(cmd)
+ connection.execute(cmd)
+
+ def _setup_ovs_stats(self, connection):
+ try:
+ socket_path = self.plugins["ovs_stats"].get("ovs_socket_path", self.OVS_SOCKET_PATH)
+ except (KeyError, AttributeError):
+ # ovs_stats is missing or not a dict; fall back to the default socket
+ socket_path = self.OVS_SOCKET_PATH
+ status = connection.execute("test -S {}".format(socket_path))[0]
+ if status != 0:
+ LOG.error("cannot find OVS socket %s", socket_path)
+
def _start_collectd(self, connection, bin_path):
LOG.debug("Starting collectd to collect NFVi stats")
connection.execute('sudo pkill -x -9 collectd')
- bin_path = get_nsb_option("bin_path")
collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
config_file_path = os.path.join(bin_path, "collectd", "etc")
exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
@@ -251,9 +273,10 @@ class ResourceProfile(object):
# collectd_installer, http_proxy, https_proxy))
return
if "intel_pmu" in self.plugins:
- LOG.debug("Downloading event list for pmu_stats plugin")
- cmd = 'sudo bash -c \'cd /opt/tempT/pmu-tools/; python event_download_local.py\''
- connection.execute(cmd)
+ self._setup_intel_pmu(connection, bin_path)
+ if "ovs_stats" in self.plugins:
+ self._setup_ovs_stats(connection)
+
LOG.debug("Starting collectd to collect NFVi stats")
# ensure collectd.conf.d exists to avoid error/warning
connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")