diff options
author | rexlee8776 <limingjiang@huawei.com> | 2017-07-05 02:50:06 +0000 |
---|---|---|
committer | rexlee8776 <limingjiang@huawei.com> | 2017-07-06 07:49:10 +0000 |
commit | 25a37b2048281c64719bd6ad67860f65f6c31546 (patch) | |
tree | 726e1ff64ea3470ab6b91db8fdbc386383f08903 /yardstick/benchmark/scenarios | |
parent | 3369461d0e51b22aa96e5bfd0d80b3f0f7f82c67 (diff) |
Move flatten_dict_key to common utils
So it can easily be used by other test cases to unify results.
JIRA: YARDSTICK-702
Change-Id: Id4fde38a9a0c2a87a6c870bdb7b0c8f3a3b371ac
Signed-off-by: rexlee8776 <limingjiang@huawei.com>
Diffstat (limited to 'yardstick/benchmark/scenarios')
4 files changed, 21 insertions, 13 deletions
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py index c99fc988d..801f7fa80 100644 --- a/yardstick/benchmark/scenarios/compute/lmbench.py +++ b/yardstick/benchmark/scenarios/compute/lmbench.py @@ -15,6 +15,7 @@ import pkg_resources from oslo_serialization import jsonutils import yardstick.ssh as ssh +from yardstick.common import utils from yardstick.benchmark.scenarios import base LOG = logging.getLogger(__name__) @@ -127,30 +128,32 @@ class Lmbench(base.Scenario): if status: raise RuntimeError(stderr) + lmbench_result = {} if test_type == 'latency': - result.update( + lmbench_result.update( {"latencies": jsonutils.loads(stdout)}) else: - result.update(jsonutils.loads(stdout)) + lmbench_result.update(jsonutils.loads(stdout)) + result.update(utils.flatten_dict_key(lmbench_result)) if "sla" in self.scenario_cfg: sla_error = "" if test_type == 'latency': sla_max_latency = int(self.scenario_cfg['sla']['max_latency']) - for t_latency in result["latencies"]: + for t_latency in lmbench_result["latencies"]: latency = t_latency['latency'] if latency > sla_max_latency: sla_error += "latency %f > sla:max_latency(%f); " \ % (latency, sla_max_latency) elif test_type == 'bandwidth': sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth']) - bw = result["bandwidth(MBps)"] + bw = lmbench_result["bandwidth(MBps)"] if bw < sla_min_bw: sla_error += "bandwidth %f < " \ "sla:min_bandwidth(%f)" % (bw, sla_min_bw) elif test_type == 'latency_for_cache': sla_latency = float(self.scenario_cfg['sla']['max_latency']) - cache_latency = float(result['L1cache']) + cache_latency = float(lmbench_result['L1cache']) if sla_latency < cache_latency: sla_error += "latency %f > sla:max_latency(%f); " \ % (cache_latency, sla_latency) diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py index 850ee5934..ca64935dd 100644 --- a/yardstick/benchmark/scenarios/compute/ramspeed.py +++ 
b/yardstick/benchmark/scenarios/compute/ramspeed.py @@ -14,6 +14,7 @@ import pkg_resources from oslo_serialization import jsonutils import yardstick.ssh as ssh +from yardstick.common import utils from yardstick.benchmark.scenarios import base LOG = logging.getLogger(__name__) @@ -128,12 +129,13 @@ class Ramspeed(base.Scenario): if status: raise RuntimeError(stderr) - result.update(jsonutils.loads(stdout)) + ramspeed_result = jsonutils.loads(stdout) + result.update(utils.flatten_dict_key(ramspeed_result)) if "sla" in self.scenario_cfg: sla_error = "" sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth']) - for i in result["Result"]: + for i in ramspeed_result["Result"]: bw = i["Bandwidth(MBps)"] if bw < sla_min_bw: sla_error += "Bandwidth %f < " \ diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py index 334f3a920..3135af9bd 100644 --- a/yardstick/benchmark/scenarios/networking/iperf3.py +++ b/yardstick/benchmark/scenarios/networking/iperf3.py @@ -19,6 +19,7 @@ import pkg_resources from oslo_serialization import jsonutils import yardstick.ssh as ssh +from yardstick.common import utils from yardstick.benchmark.scenarios import base LOG = logging.getLogger(__name__) @@ -131,8 +132,8 @@ For more info see http://software.es.net/iperf # Note: convert all ints to floats in order to avoid # schema conflicts in influxdb. We probably should add # a format func in the future. 
- result.update( - jsonutils.loads(stdout, parse_int=float)) + iperf_result = jsonutils.loads(stdout, parse_int=float) + result.update(utils.flatten_dict_key(iperf_result)) if "sla" in self.scenario_cfg: sla_iperf = self.scenario_cfg["sla"] @@ -141,7 +142,7 @@ For more info see http://software.es.net/iperf # convert bits per second to bytes per second bit_per_second = \ - int(result["end"]["sum_received"]["bits_per_second"]) + int(iperf_result["end"]["sum_received"]["bits_per_second"]) bytes_per_second = bit_per_second / 8 assert bytes_per_second >= sla_bytes_per_second, \ "bytes_per_second %d < sla:bytes_per_second (%d); " % \ @@ -149,7 +150,7 @@ For more info see http://software.es.net/iperf else: sla_jitter = float(sla_iperf["jitter"]) - jitter_ms = float(result["end"]["sum"]["jitter_ms"]) + jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"]) assert jitter_ms <= sla_jitter, \ "jitter_ms %f > sla:jitter %f; " % \ (jitter_ms, sla_jitter) diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py index a929e5337..6a7927de4 100644 --- a/yardstick/benchmark/scenarios/networking/ping.py +++ b/yardstick/benchmark/scenarios/networking/ping.py @@ -15,6 +15,7 @@ import pkg_resources import logging import yardstick.ssh as ssh +from yardstick.common import utils from yardstick.benchmark.scenarios import base LOG = logging.getLogger(__name__) @@ -57,8 +58,8 @@ class Ping(base.Scenario): destination = self.context_cfg['target'].get('ipaddr', '127.0.0.1') dest_list = [s.strip() for s in destination.split(',')] - result["rtt"] = {} - rtt_result = result["rtt"] + rtt_result = {} + ping_result = {"rtt": rtt_result} for pos, dest in enumerate(dest_list): if 'targets' in self.scenario_cfg: @@ -88,6 +89,7 @@ class Ping(base.Scenario): (rtt_result[target_vm_name], sla_max_rtt) else: LOG.error("ping '%s' '%s' timeout", options, target_vm) + result.update(utils.flatten_dict_key(ping_result)) def _test(): # pragma: no cover |