-rw-r--r-- | api/resources/env_action.py                              | 13
-rw-r--r-- | api/resources/write_hosts.py                             | 23
-rw-r--r-- | tests/unit/benchmark/scenarios/compute/test_lmbench.py   | 13
-rw-r--r-- | tests/unit/benchmark/scenarios/compute/test_ramspeed.py  |  9
-rw-r--r-- | tests/unit/benchmark/scenarios/networking/test_iperf3.py |  7
-rw-r--r-- | tests/unit/benchmark/scenarios/networking/test_ping.py   |  4
-rw-r--r-- | tests/unit/common/test_utils.py                          | 31
-rw-r--r-- | tests/unit/dispatcher/test_influxdb.py                   | 29
-rw-r--r-- | yardstick/benchmark/core/task.py                         |  4
-rw-r--r-- | yardstick/benchmark/scenarios/compute/lmbench.py         | 13
-rw-r--r-- | yardstick/benchmark/scenarios/compute/ramspeed.py        |  6
-rw-r--r-- | yardstick/benchmark/scenarios/networking/iperf3.py       |  9
-rw-r--r-- | yardstick/benchmark/scenarios/networking/ping.py         |  6
-rw-r--r-- | yardstick/common/utils.py                                | 23
-rw-r--r-- | yardstick/dispatcher/influxdb.py                         | 26
15 files changed, 128 insertions(+), 88 deletions(-)
diff --git a/api/resources/env_action.py b/api/resources/env_action.py
index 2ea64ef1a..fed987063 100644
--- a/api/resources/env_action.py
+++ b/api/resources/env_action.py
@@ -18,6 +18,7 @@ import uuid
 import glob
 import yaml
 import collections
+from subprocess import PIPE
 
 from six.moves import configparser
 from oslo_serialization import jsonutils
@@ -415,10 +416,12 @@ def update_hosts(hosts_ip):
     if not isinstance(hosts_ip, dict):
         return result_handler(consts.API_ERROR, 'Error, args should be a dict')
     LOG.info('Writing hosts: Writing')
-    hosts_list = ['\n{} {}'.format(ip, host_name)
-                  for host_name, ip in hosts_ip.items()]
-    LOG.debug('Writing: %s', hosts_list)
-    with open(consts.ETC_HOSTS, 'a') as f:
-        f.writelines(hosts_list)
+    LOG.debug('Writing: %s', hosts_ip)
+    cmd = ["sudo", "python", "write_hosts.py"]
+    p = subprocess.Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
+                         cwd=os.path.join(consts.REPOS_DIR, "api/resources"))
+    _, err = p.communicate(jsonutils.dumps(hosts_ip))
+    if p.returncode != 0:
+        return result_handler(consts.API_ERROR, err)
     LOG.info('Writing hosts: Done')
     return result_handler(consts.API_SUCCESS, 'success')
diff --git a/api/resources/write_hosts.py b/api/resources/write_hosts.py
new file mode 100644
index 000000000..e4b69846b
--- /dev/null
+++ b/api/resources/write_hosts.py
@@ -0,0 +1,23 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import absolute_import
+
+import sys
+import json
+
+
+def write_hosts(hosts_ip):
+    hosts_list = ('\n{} {}'.format(ip, host_name)
+                  for host_name, ip in hosts_ip.items())
+    with open("/etc/hosts", 'a') as f:
+        f.writelines(hosts_list)
+        f.write("\n")
+
+if __name__ == "__main__":
+    write_hosts(json.load(sys.stdin))
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 08f5da332..65939c6ba 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -68,8 +68,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = jsonutils.loads(
-            '{"latencies": ' + sample_output + "}")
+        expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
         self.assertEqual(self.result, expected_result)
 
     def test_successful_bandwidth_run_no_sla(self, mock_ssh):
@@ -105,8 +104,7 @@ class LmbenchTestCase(unittest.TestCase):
         sample_output = '[{"latency": 4.944, "size": 0.00049}]'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         l.run(self.result)
-        expected_result = jsonutils.loads(
-            '{"latencies": ' + sample_output + "}")
+        expected_result = {"latencies0.latency": 4.944, "latencies0.size": 0.00049}
         self.assertEqual(self.result, expected_result)
 
     def test_successful_bandwidth_run_sla(self, mock_ssh):
@@ -191,3 +189,10 @@ class LmbenchTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
         self.assertRaises(RuntimeError, l.run, self.result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
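Note: the update_hosts() change above replaces a direct write to /etc/hosts with a privileged helper. The API process serializes the host map to JSON and pipes it over stdin to write_hosts.py, which runs under sudo, so only that one small command needs elevated rights rather than the whole API process. A minimal sketch of the handoff, assuming write_hosts.py is in the current working directory:

    import json
    import subprocess
    from subprocess import PIPE

    hosts_ip = {"host1": "192.168.10.1", "host2": "192.168.10.2"}  # example input

    # Launch the privileged helper and hand it the mapping on stdin.
    p = subprocess.Popen(["sudo", "python", "write_hosts.py"],
                         stdin=PIPE, stdout=PIPE, stderr=PIPE)
    _, err = p.communicate(json.dumps(hosts_ip))  # on Python 3, encode to bytes
    if p.returncode != 0:
        raise RuntimeError(err)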
diff --git a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index 85d49641e..4f71fbb36 100644
--- a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -18,6 +18,7 @@ import unittest
 import mock
 from oslo_serialization import jsonutils
 
+from yardstick.common import utils
 from yardstick.benchmark.scenarios.compute import ramspeed
 
@@ -77,7 +78,7 @@ class RamspeedTestCase(unittest.TestCase):
 "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_successful_run_sla(self, mock_ssh):
@@ -113,7 +114,7 @@ class RamspeedTestCase(unittest.TestCase):
 "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_unsuccessful_run_sla(self, mock_ssh):
@@ -179,7 +180,7 @@ class RamspeedTestCase(unittest.TestCase):
 "Bandwidth(MBps)": 9401.58}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_mem_successful_run_sla(self, mock_ssh):
@@ -200,7 +201,7 @@ class RamspeedTestCase(unittest.TestCase):
 "Bandwidth(MBps)": 9401.58}]}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
         r.run(self.result)
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         self.assertEqual(self.result, expected_result)
 
     def test_ramspeed_mem_unsuccessful_run_sla(self, mock_ssh):
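The new expected_result values follow from utils.flatten_dict_key (added below in yardstick/common/utils.py): nested mappings are joined with dots and list items get an index suffix on the parent key. A quick illustration with the shape of the ramspeed fixture, using only the two keys visible in the sample output (key order in the printed dict may vary):

    from yardstick.common import utils

    sample = {"Result": [{"Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}
    print(utils.flatten_dict_key(sample))
    # {'Result0.Block_size(kb)': 32768, 'Result0.Bandwidth(MBps)': 8340.85}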
diff --git a/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 45ff1b779..331245357 100644
--- a/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -19,6 +19,7 @@ import unittest
 import mock
 from oslo_serialization import jsonutils
 
+from yardstick.common import utils
 from yardstick.benchmark.scenarios.networking import iperf3
 
@@ -81,7 +82,7 @@ class IperfTestCase(unittest.TestCase):
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         p.run(result)
         self.assertEqual(result, expected_result)
 
@@ -100,7 +101,7 @@ class IperfTestCase(unittest.TestCase):
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         p.run(result)
         self.assertEqual(result, expected_result)
 
@@ -135,7 +136,7 @@ class IperfTestCase(unittest.TestCase):
         sample_output = self._read_sample_output(self.output_name_udp)
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-        expected_result = jsonutils.loads(sample_output)
+        expected_result = utils.flatten_dict_key(jsonutils.loads(sample_output))
         p.run(result)
         self.assertEqual(result, expected_result)
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping.py b/tests/unit/benchmark/scenarios/networking/test_ping.py
index 5269309c2..06353249a 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -45,7 +45,7 @@ class PingTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
         p.run(result)
-        self.assertEqual(result, {'rtt': {'ares': 100.0}})
+        self.assertEqual(result, {'rtt.ares': 100.0})
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_successful_sla(self, mock_ssh):
@@ -61,7 +61,7 @@ class PingTestCase(unittest.TestCase):
         mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
         p.run(result)
-        self.assertEqual(result, {'rtt': {'ares': 100.0}})
+        self.assertEqual(result, {'rtt.ares': 100.0})
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_unsuccessful_sla(self, mock_ssh):
diff --git a/tests/unit/common/test_utils.py b/tests/unit/common/test_utils.py
index 8f52b53b0..c4c61ceeb 100644
--- a/tests/unit/common/test_utils.py
+++ b/tests/unit/common/test_utils.py
@@ -109,6 +109,37 @@ class GetParaFromYaml(unittest.TestCase):
         return file_path
 
 
+class CommonUtilTestCase(unittest.TestCase):
+    def setUp(self):
+        self.data = {
+            "benchmark": {
+                "data": {
+                    "mpstat": {
+                        "cpu0": {
+                            "%sys": "0.00",
+                            "%idle": "99.00"
+                        },
+                        "loadavg": [
+                            "1.09",
+                            "0.29"
+                        ]
+                    },
+                    "rtt": "1.03"
+                }
+            }
+        }
+    def test__dict_key_flatten(self):
+        line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
+               'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
+        # need to sort for assert to work
+        line = ",".join(sorted(line.split(',')))
+        flattened_data = utils.flatten_dict_key(
+            self.data['benchmark']['data'])
+        result = ",".join(
+            ("=".join(item) for item in sorted(flattened_data.items())))
+        self.assertEqual(result, line)
+
+
 def main():
     unittest.main()
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
index a5d9b0754..7ebe8c90b 100644
--- a/tests/unit/dispatcher/test_influxdb.py
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -76,23 +76,6 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
             },
             "runner_id": 8921
         }
-        self.data3 = {
-            "benchmark": {
-                "data": {
-                    "mpstat": {
-                        "cpu0": {
-                            "%sys": "0.00",
-                            "%idle": "99.00"
-                        },
-                        "loadavg": [
-                            "1.09",
-                            "0.29"
-                        ]
-                    },
-                    "rtt": "1.03"
-                }
-            }
-        }
 
         self.yardstick_conf = {'dispatcher_influxdb': {}}
 
@@ -113,18 +96,6 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
         }
         self.assertEqual(influxdb.flush_result_data(data), 0)
 
-    def test__dict_key_flatten(self):
-        line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
-               'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
-        # need to sort for assert to work
-        line = ",".join(sorted(line.split(',')))
-        influxdb = InfluxdbDispatcher(self.yardstick_conf)
-        flattened_data = influxdb._dict_key_flatten(
-            self.data3['benchmark']['data'])
-        result = ",".join(
-            [k + "=" + v for k, v in sorted(flattened_data.items())])
-        self.assertEqual(result, line)
-
     def test__get_nano_timestamp(self):
         influxdb = InfluxdbDispatcher(self.yardstick_conf)
         results = {'timestamp': '1451461248.925574'}
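test__dict_key_flatten moves, unchanged in substance, from the dispatcher suite to the common-utils suite, mirroring the move of the helper itself. The relocated test boils down to this behavior, runnable on its own:

    from yardstick.common import utils

    data = {"mpstat": {"cpu0": {"%sys": "0.00", "%idle": "99.00"},
                       "loadavg": ["1.09", "0.29"]},
            "rtt": "1.03"}
    assert utils.flatten_dict_key(data) == {
        "mpstat.cpu0.%sys": "0.00", "mpstat.cpu0.%idle": "99.00",
        "mpstat.loadavg0": "1.09", "mpstat.loadavg1": "0.29",
        "rtt": "1.03"}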
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 30ad98f4b..0e85e6316 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -255,11 +255,7 @@ class Task(object):  # pragma: no cover
             self.outputs.update(runner.get_output())
             result.extend(runner.get_result())
-
-            if status != 0:
-                raise RuntimeError
             print("Background task ended")
-
         return result
 
     def atexit_handler(self):
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index c99fc988d..801f7fa80 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -15,6 +15,7 @@ import pkg_resources
 from oslo_serialization import jsonutils
 
 import yardstick.ssh as ssh
+from yardstick.common import utils
 from yardstick.benchmark.scenarios import base
 
 LOG = logging.getLogger(__name__)
@@ -127,30 +128,32 @@ class Lmbench(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
+        lmbench_result = {}
         if test_type == 'latency':
-            result.update(
+            lmbench_result.update(
                 {"latencies": jsonutils.loads(stdout)})
         else:
-            result.update(jsonutils.loads(stdout))
+            lmbench_result.update(jsonutils.loads(stdout))
+        result.update(utils.flatten_dict_key(lmbench_result))
 
         if "sla" in self.scenario_cfg:
             sla_error = ""
             if test_type == 'latency':
                 sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
-                for t_latency in result["latencies"]:
+                for t_latency in lmbench_result["latencies"]:
                     latency = t_latency['latency']
                     if latency > sla_max_latency:
                         sla_error += "latency %f > sla:max_latency(%f); " \
                             % (latency, sla_max_latency)
             elif test_type == 'bandwidth':
                 sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
-                bw = result["bandwidth(MBps)"]
+                bw = lmbench_result["bandwidth(MBps)"]
                 if bw < sla_min_bw:
                     sla_error += "bandwidth %f < " \
                                  "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
             elif test_type == 'latency_for_cache':
                 sla_latency = float(self.scenario_cfg['sla']['max_latency'])
-                cache_latency = float(result['L1cache'])
+                cache_latency = float(lmbench_result['L1cache'])
                 if sla_latency < cache_latency:
                     sla_error += "latency %f > sla:max_latency(%f); " \
                         % (cache_latency, sla_latency)
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index 850ee5934..ca64935dd 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -14,6 +14,7 @@ import pkg_resources
 from oslo_serialization import jsonutils
 
 import yardstick.ssh as ssh
+from yardstick.common import utils
 from yardstick.benchmark.scenarios import base
 
 LOG = logging.getLogger(__name__)
@@ -128,12 +129,13 @@ class Ramspeed(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
-        result.update(jsonutils.loads(stdout))
+        ramspeed_result = jsonutils.loads(stdout)
+        result.update(utils.flatten_dict_key(ramspeed_result))
 
         if "sla" in self.scenario_cfg:
             sla_error = ""
             sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
-            for i in result["Result"]:
+            for i in ramspeed_result["Result"]:
                 bw = i["Bandwidth(MBps)"]
                 if bw < sla_min_bw:
                     sla_error += "Bandwidth %f < " \
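The lmbench and ramspeed scenarios follow the same pattern: keep the parsed, nested output in a local (lmbench_result / ramspeed_result) so the SLA checks can still index into it, and push only the flattened keys into the shared result dict. Reduced to its essentials, with an illustrative SLA threshold:

    from oslo_serialization import jsonutils
    from yardstick.common import utils

    stdout = '{"Result": [{"Bandwidth(MBps)": 8340.85}]}'  # parsed benchmark output
    result = {}

    ramspeed_result = jsonutils.loads(stdout)
    result.update(utils.flatten_dict_key(ramspeed_result))  # flat keys for reporting

    for row in ramspeed_result["Result"]:  # SLA check still walks the nested form
        assert row["Bandwidth(MBps)"] >= 8000.0  # illustrative sla:min_bandwidth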
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 334f3a920..3135af9bd 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -19,6 +19,7 @@ import pkg_resources
 from oslo_serialization import jsonutils
 
 import yardstick.ssh as ssh
+from yardstick.common import utils
 from yardstick.benchmark.scenarios import base
 
 LOG = logging.getLogger(__name__)
@@ -131,8 +132,8 @@ For more info see http://software.es.net/iperf
             # Note: convert all ints to floats in order to avoid
             # schema conflicts in influxdb. We probably should add
             # a format func in the future.
-            result.update(
-                jsonutils.loads(stdout, parse_int=float))
+            iperf_result = jsonutils.loads(stdout, parse_int=float)
+            result.update(utils.flatten_dict_key(iperf_result))
 
         if "sla" in self.scenario_cfg:
             sla_iperf = self.scenario_cfg["sla"]
@@ -141,7 +142,7 @@ For more info see http://software.es.net/iperf
 
                 # convert bits per second to bytes per second
                 bit_per_second = \
-                    int(result["end"]["sum_received"]["bits_per_second"])
+                    int(iperf_result["end"]["sum_received"]["bits_per_second"])
                 bytes_per_second = bit_per_second / 8
                 assert bytes_per_second >= sla_bytes_per_second, \
                     "bytes_per_second %d < sla:bytes_per_second (%d); " % \
@@ -149,7 +150,7 @@ For more info see http://software.es.net/iperf
             else:
                 sla_jitter = float(sla_iperf["jitter"])
 
-                jitter_ms = float(result["end"]["sum"]["jitter_ms"])
+                jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
                 assert jitter_ms <= sla_jitter, \
                     "jitter_ms %f > sla:jitter %f; " % \
                     (jitter_ms, sla_jitter)
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index a929e5337..6a7927de4 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -15,6 +15,7 @@ import pkg_resources
 import logging
 
 import yardstick.ssh as ssh
+from yardstick.common import utils
 from yardstick.benchmark.scenarios import base
 
 LOG = logging.getLogger(__name__)
@@ -57,8 +58,8 @@ class Ping(base.Scenario):
         destination = self.context_cfg['target'].get('ipaddr', '127.0.0.1')
         dest_list = [s.strip() for s in destination.split(',')]
 
-        result["rtt"] = {}
-        rtt_result = result["rtt"]
+        rtt_result = {}
+        ping_result = {"rtt": rtt_result}
 
         for pos, dest in enumerate(dest_list):
             if 'targets' in self.scenario_cfg:
@@ -88,6 +89,7 @@ class Ping(base.Scenario):
                         (rtt_result[target_vm_name], sla_max_rtt)
             else:
                 LOG.error("ping '%s' '%s' timeout", options, target_vm)
+        result.update(utils.flatten_dict_key(ping_result))
 
 
 def _test():    # pragma: no cover
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 7aab46942..7633777ae 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -23,6 +23,8 @@ import logging
 import os
 import subprocess
 import sys
+import collections
+import six
 from functools import reduce
 
 import yaml
@@ -189,3 +191,24 @@ def get_port_ip(sshclient, port):
     if status:
         raise RuntimeError(stderr)
     return stdout.rstrip()
+
+
+def flatten_dict_key(data):
+    next_data = {}
+
+    # use list, because iterable is too generic
+    if not any(isinstance(v, (collections.Mapping, list)) for v in data.values()):
+        return data
+
+    for k, v in six.iteritems(data):
+        if isinstance(v, collections.Mapping):
+            for n_k, n_v in six.iteritems(v):
+                next_data["%s.%s" % (k, n_k)] = n_v
+        # use list because iterable is too generic
+        elif isinstance(v, list):
+            for index, item in enumerate(v):
+                next_data["%s%d" % (k, index)] = item
+        else:
+            next_data[k] = v
+
+    return flatten_dict_key(next_data)
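One portability note on the new helper (an observation, not part of this change): isinstance checks against collections.Mapping work on Python 2 and early Python 3, but the ABC moved to collections.abc in Python 3.3 and the old alias was removed in Python 3.10. A guarded import keeps the check portable:

    try:
        from collections.abc import Mapping  # Python 3.3+
    except ImportError:
        from collections import Mapping      # Python 2

    def needs_flattening(data):
        # Same termination test flatten_dict_key uses: recurse only while
        # some value is still a mapping or a list.
        return any(isinstance(v, (Mapping, list)) for v in data.values())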
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index 373aae13a..f157e91f9 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -12,10 +12,9 @@ from __future__ import absolute_import
 
 import logging
 import time
-import collections
 
 import requests
-import six
 
+from yardstick.common import utils
 from third_party.influxdb.influxdb_line_protocol import make_lines
 from yardstick.dispatcher.base import Base as DispatchBase
 
@@ -80,7 +79,7 @@ class InfluxdbDispatcher(DispatchBase):
         msg = {}
         point = {
             "measurement": case,
-            "fields": self._dict_key_flatten(data["data"]),
+            "fields": utils.flatten_dict_key(data["data"]),
             "time": self._get_nano_timestamp(data),
             "tags": self._get_extended_tags(criteria),
         }
@@ -89,27 +88,6 @@ class InfluxdbDispatcher(DispatchBase):
 
         return make_lines(msg).encode('utf-8')
 
-    def _dict_key_flatten(self, data):
-        next_data = {}
-
-        # use list, because iterable is too generic
-        if not [v for v in data.values() if
-                isinstance(v, (collections.Mapping, list))]:
-            return data
-
-        for k, v in six.iteritems(data):
-            if isinstance(v, collections.Mapping):
-                for n_k, n_v in six.iteritems(v):
-                    next_data["%s.%s" % (k, n_k)] = n_v
-            # use list because iterable is too generic
-            elif isinstance(v, list):
-                for index, item in enumerate(v):
-                    next_data["%s%d" % (k, index)] = item
-            else:
-                next_data[k] = v
-
-        return self._dict_key_flatten(next_data)
-
     def _get_nano_timestamp(self, results):
         try:
             timestamp = results["timestamp"]
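The dispatcher needs flat field keys because the InfluxDB line protocol encodes each point's fields as a single comma-separated list of key=value pairs. A hand-rolled illustration of the shape (the measurement name and tag here are illustrative; real encoding, escaping, and tag handling are left to third_party.influxdb.influxdb_line_protocol.make_lines):

    fields = {"mpstat.cpu0.%idle": 99.0, "rtt": 1.03}  # flattened benchmark data
    tags = {"runner_id": "8921"}                       # illustrative tag

    line = "{measurement},{tags} {fields} {ts}".format(
        measurement="ping",
        tags=",".join("%s=%s" % kv for kv in sorted(tags.items())),
        fields=",".join("%s=%s" % kv for kv in sorted(fields.items())),
        ts=1451461248925574000)  # nanosecond timestamp, cf. _get_nano_timestamp
    # ping,runner_id=8921 mpstat.cpu0.%idle=99.0,rtt=1.03 1451461248925574000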