author    chenjiankun <chenjiankun1@huawei.com>    2017-02-25 00:48:07 +0000
committer chenjiankun <chenjiankun1@huawei.com>    2017-03-08 08:42:32 +0000
commit    af3478e95314b5147c7837831dc8113d552ba067 (patch)
tree      e9411bebe78eca2f0d204d8a49464e7f620a0ff1
parent    501175fbb095a771f5f1b9fb80dcf729192214d2 (diff)
Bugfix: yardstick will create stacks with the same name when run using API in parallel
JIRA: YARDSTICK-575

Currently yardstick creates stacks with the same name when run through the
API in parallel. The cause is a global variable in the context base: the
core always deploys the first context in Context.list, and parallel API
calls all run in a single process, so yardstick ends up deploying stacks
with the same name. The fix is to stop using Context.list in the yardstick
core and keep the contexts in a local (per-task) variable instead.

Besides, when the yardstick core is called through the API there is no way
to configure the output destination, so yardstick.conf is now parsed when
the task starts. Later, scenario_cfg, context_cfg and yardstick_cfg could
be combined into one config object so that all configuration is available
in a single place.

Change-Id: I1ada4ef486bd252e78c3a2e49c6a39b3f8f16a7c
Signed-off-by: chenjiankun <chenjiankun1@huawei.com>
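For illustration, a minimal hypothetical reduction of the race described
above (the Context name mirrors yardstick's class; the rest is invented):

    # Hypothetical reduction of the bug, not code from this patch.
    class Context(object):
        list = []                          # class attribute: shared process-wide

        def __init__(self, stack_name):
            self.stack_name = stack_name
            Context.list.append(self)

        def deploy(self):
            print("creating stack %s" % self.stack_name)

    def run_task(stack_name):
        Context(stack_name)
        for context in Context.list:       # iterates *every* task's contexts
            context.deploy()

    run_task("task-a-stack")               # creating stack task-a-stack
    run_task("task-b-stack")               # re-creates task-a-stack, then its own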
-rw-r--r--  tests/unit/dispatcher/test_influxdb.py      |  14
-rw-r--r--  yardstick/benchmark/contexts/base.py        |   4
-rw-r--r--  yardstick/benchmark/contexts/dummy.py       |   2
-rw-r--r--  yardstick/benchmark/contexts/heat.py        |   2
-rw-r--r--  yardstick/benchmark/contexts/node.py        |   2
-rw-r--r--  yardstick/benchmark/contexts/standalone.py  |   2
-rw-r--r--  yardstick/benchmark/core/task.py            | 206
-rwxr-xr-x  yardstick/benchmark/runners/base.py         |  21
-rw-r--r--  yardstick/common/utils.py                   |  17
-rw-r--r--  yardstick/dispatcher/base.py                |   4
-rw-r--r--  yardstick/dispatcher/file.py                |   2
-rw-r--r--  yardstick/dispatcher/http.py                |   2
-rw-r--r--  yardstick/dispatcher/influxdb.py            |  38

13 files changed, 166 insertions(+), 150 deletions(-)
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
index b84389e7e..0c7b58135 100644
--- a/tests/unit/dispatcher/test_influxdb.py
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -90,19 +90,21 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
}
}
+ self.yardstick_conf = {'yardstick': {}}
+
def test_record_result_data_no_target(self):
- influxdb = InfluxdbDispatcher(None)
+ influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
influxdb.target = ''
self.assertEqual(influxdb.record_result_data(self.data1), -1)
def test_record_result_data_no_case_name(self):
- influxdb = InfluxdbDispatcher(None)
+ influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
self.assertEqual(influxdb.record_result_data(self.data2), -1)
@mock.patch('yardstick.dispatcher.influxdb.requests')
def test_record_result_data(self, mock_requests):
type(mock_requests.post.return_value).status_code = 204
- influxdb = InfluxdbDispatcher(None)
+ influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
self.assertEqual(influxdb.record_result_data(self.data1), 0)
self.assertEqual(influxdb.record_result_data(self.data2), 0)
self.assertEqual(influxdb.flush_result_data(), 0)
@@ -112,7 +114,7 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
# need to sort for assert to work
line = ",".join(sorted(line.split(',')))
- influxdb = InfluxdbDispatcher(None)
+ influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
flattened_data = influxdb._dict_key_flatten(
self.data3['benchmark']['data'])
result = ",".join(
@@ -120,7 +122,7 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
self.assertEqual(result, line)
def test__get_nano_timestamp(self):
- influxdb = InfluxdbDispatcher(None)
+ influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
results = {'benchmark': {'timestamp': '1451461248.925574'}}
self.assertEqual(influxdb._get_nano_timestamp(results),
'1451461248925574144')
@@ -128,7 +130,7 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
@mock.patch('yardstick.dispatcher.influxdb.time')
def test__get_nano_timestamp_except(self, mock_time):
results = {}
- influxdb = InfluxdbDispatcher(None)
+ influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
mock_time.time.return_value = 1451461248.925574
self.assertEqual(influxdb._get_nano_timestamp(results),
'1451461248925574144')
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index 9f2b21537..0be2eee77 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -46,6 +46,10 @@ class Context(object):
@abc.abstractmethod
def undeploy(self):
"""Undeploy context."""
+ self._delete_context()
+
+ def _delete_context(self):
+ Context.list.remove(self)
@abc.abstractmethod
def _get_server(self, attr_name):
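The base-class hunk above, together with the subclass hunks that follow,
implements a cooperative undeploy: each subclass does its own cleanup and
then calls super() so the base class can deregister the context. A
simplified, self-contained sketch of that pattern; the six/abc scaffolding
is an assumption about surrounding code not shown in the diff:

    import abc

    import six

    @six.add_metaclass(abc.ABCMeta)
    class Context(object):
        """Abridged stand-in for yardstick.benchmark.contexts.base.Context."""
        list = []

        @abc.abstractmethod
        def undeploy(self):
            """Undeploy context."""
            self._delete_context()

        def _delete_context(self):
            Context.list.remove(self)

    class DummyContext(Context):
        def undeploy(self):
            # subclass-specific cleanup would run here, then deregister
            super(DummyContext, self).undeploy()

    ctx = DummyContext()
    Context.list.append(ctx)    # registration normally happens elsewhere
    ctx.undeploy()
    assert ctx not in Context.list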
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index 0edc250f8..c658d3257 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -33,7 +33,7 @@ class DummyContext(Context):
def undeploy(self):
"""don't need to undeploy"""
- pass
+ super(DummyContext, self).undeploy()
def _get_server(self, attr_name):
return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 479548b34..64d913a47 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -257,6 +257,8 @@ class HeatContext(Context):
except OSError:
LOG.exception("Key filename %s", self.key_filename)
+ super(HeatContext, self).undeploy()
+
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: either a name for a server created by yardstick or a dict
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index 6fa9aa99a..f8c38cb1c 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -89,6 +89,8 @@ class NodeContext(Context):
for host, info in teardown.items():
self._execute_script(host, info)
+ super(NodeContext, self).undeploy()
+
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: a name for a server listed in nodes config file
diff --git a/yardstick/benchmark/contexts/standalone.py b/yardstick/benchmark/contexts/standalone.py
index eff700974..674e57f54 100644
--- a/yardstick/benchmark/contexts/standalone.py
+++ b/yardstick/benchmark/contexts/standalone.py
@@ -79,7 +79,7 @@ class StandaloneContext(Context):
"""don't need to undeploy"""
# Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
- pass
+ super(StandaloneContext, self).undeploy()
def _get_server(self, attr_name):
"""lookup server info by name from context
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index aecf5bf4a..6bc10050f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -26,9 +26,11 @@ from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.runners import base as base_runner
from yardstick.common.task_template import TaskTemplate
from yardstick.common.utils import source_env
+from yardstick.common import utils
from yardstick.common import constants
output_file_default = "/tmp/yardstick.out"
+config_file = '/etc/yardstick/yardstick.conf'
test_cases_dir_default = "tests/opnfv/test_cases/"
LOG = logging.getLogger(__name__)
@@ -39,15 +41,21 @@ class Task(object): # pragma: no cover
Set of commands to manage benchmark tasks.
"""
+ def __init__(self):
+ self.config = {}
+ self.contexts = []
+
def start(self, args, **kwargs):
"""Start a benchmark scenario."""
- atexit.register(atexit_handler)
+ atexit.register(self.atexit_handler)
self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
check_environment()
+ self.config['yardstick'] = utils.parse_ini_file(config_file)
+
total_start_time = time.time()
parser = TaskParser(args.inputfile[0])
@@ -70,8 +78,11 @@ class Task(object): # pragma: no cover
for i in range(0, len(task_files)):
one_task_start_time = time.time()
parser.path = task_files[i]
- scenarios, run_in_parallel, meet_precondition = parser.parse_task(
- self.task_id, task_args[i], task_args_fnames[i])
+ scenarios, run_in_parallel, meet_precondition, contexts = \
+ parser.parse_task(self.task_id, task_args[i],
+ task_args_fnames[i])
+
+ self.contexts.extend(contexts)
if not meet_precondition:
LOG.info("meet_precondition is %s, please check envrionment",
@@ -83,11 +94,11 @@ class Task(object): # pragma: no cover
if args.keep_deploy:
# keep deployment, forget about stack
# (hide it for exit handler)
- Context.list = []
+ self.contexts = []
else:
- for context in Context.list[::-1]:
+ for context in self.contexts[::-1]:
context.undeploy()
- Context.list = []
+ self.contexts = []
one_task_end_time = time.time()
LOG.info("task %s finished in %d secs", task_files[i],
one_task_end_time - one_task_start_time)
@@ -100,7 +111,7 @@ class Task(object): # pragma: no cover
def _run(self, scenarios, run_in_parallel, output_file):
"""Deploys context and calls runners"""
- for context in Context.list:
+ for context in self.contexts:
context.deploy()
background_runners = []
@@ -108,14 +119,14 @@ class Task(object): # pragma: no cover
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
scenario["runner"] = dict(type="Duration", duration=1000000000)
- runner = run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_file)
background_runners.append(runner)
runners = []
if run_in_parallel:
for scenario in scenarios:
if not _is_background_scenario(scenario):
- runner = run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_file)
runners.append(runner)
# Wait for runners to finish
@@ -126,7 +137,7 @@ class Task(object): # pragma: no cover
# run serially
for scenario in scenarios:
if not _is_background_scenario(scenario):
- runner = run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_file)
runner_join(runner)
print("Runner ended, output in", output_file)
@@ -144,8 +155,91 @@ class Task(object): # pragma: no cover
base_runner.Runner.release(runner)
print("Background task ended")
+ def atexit_handler(self):
+ """handler for process termination"""
+ base_runner.Runner.terminate_all()
+
+ if self.contexts:
+ print("Undeploying all contexts")
+ for context in self.contexts[::-1]:
+ context.undeploy()
+
+ def run_one_scenario(self, scenario_cfg, output_file):
+ """run one scenario using context"""
+ runner_cfg = scenario_cfg["runner"]
+ runner_cfg['output_filename'] = output_file
+
+ # TODO support get multi hosts/vms info
+ context_cfg = {}
+ if "host" in scenario_cfg:
+ context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+
+ if "target" in scenario_cfg:
+ if is_ip_addr(scenario_cfg["target"]):
+ context_cfg['target'] = {}
+ context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+ else:
+ context_cfg['target'] = Context.get_server(
+ scenario_cfg["target"])
+ if self._is_same_heat_context(scenario_cfg["host"],
+ scenario_cfg["target"]):
+ context_cfg["target"]["ipaddr"] = \
+ context_cfg["target"]["private_ip"]
+ else:
+ context_cfg["target"]["ipaddr"] = \
+ context_cfg["target"]["ip"]
+
+ if "targets" in scenario_cfg:
+ ip_list = []
+ for target in scenario_cfg["targets"]:
+ if is_ip_addr(target):
+ ip_list.append(target)
+ context_cfg['target'] = {}
+ else:
+ context_cfg['target'] = Context.get_server(target)
+ if self._is_same_heat_context(scenario_cfg["host"],
+ target):
+ ip_list.append(context_cfg["target"]["private_ip"])
+ else:
+ ip_list.append(context_cfg["target"]["ip"])
+ context_cfg['target']['ipaddr'] = ','.join(ip_list)
+
+ if "nodes" in scenario_cfg:
+ context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+ runner = base_runner.Runner.get(runner_cfg, self.config)
+
+ print("Starting runner of type '%s'" % runner_cfg["type"])
+ runner.run(scenario_cfg, context_cfg)
+
+ return runner
+
+ def _is_same_heat_context(self, host_attr, target_attr):
+ """check if two servers are in the same heat context
+ host_attr: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ target_attr: either a name for a server created by yardstick or a dict
+ with attribute name mapping when using external heat templates
+ """
+ host = None
+ target = None
+ for context in self.contexts:
+ if context.__context_type__ != "Heat":
+ continue
+
+ host = context._get_server(host_attr)
+ if host is None:
+ continue
+
+ target = context._get_server(target_attr)
+ if target is None:
+ return False
+ # Both host and target is not None, then they are in the
+ # same heat context.
+ return True
+
+ return False

-# TODO: Move stuff below into TaskCommands class !?
class TaskParser(object): # pragma: no cover
@@ -265,6 +359,7 @@ class TaskParser(object): # pragma: no cover
else:
context_cfgs = [{"type": "Dummy"}]
+ contexts = []
name_suffix = '-{}'.format(task_id[:8])
for cfg_attrs in context_cfgs:
try:
@@ -286,6 +381,7 @@ class TaskParser(object): # pragma: no cover
context = Context.get(context_type)
context.init(cfg_attrs)
+ contexts.append(context)
run_in_parallel = cfg.get("run_in_parallel", False)
@@ -304,7 +400,7 @@ class TaskParser(object): # pragma: no cover
pass
# TODO we need something better here, a class that represent the file
- return cfg["scenarios"], run_in_parallel, meet_precondition
+ return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
def _check_schema(self, cfg_schema, schema_type):
"""Check if config file is using the correct schema type"""
@@ -346,16 +442,6 @@ class TaskParser(object): # pragma: no cover
return True
-def atexit_handler():
- """handler for process termination"""
- base_runner.Runner.terminate_all()
-
- if len(Context.list) > 0:
- print("Undeploying all contexts")
- for context in Context.list[::-1]:
- context.undeploy()
-
-
def is_ip_addr(addr):
"""check if string addr is an IP address"""
try:
@@ -371,34 +457,6 @@ def is_ip_addr(addr):
return True
-def _is_same_heat_context(host_attr, target_attr):
- """check if two servers are in the same heat context
- host_attr: either a name for a server created by yardstick or a dict
- with attribute name mapping when using external heat templates
- target_attr: either a name for a server created by yardstick or a dict
- with attribute name mapping when using external heat templates
- """
- host = None
- target = None
- for context in Context.list:
- if context.__context_type__ != "Heat":
- continue
-
- host = context._get_server(host_attr)
- if host is None:
- continue
-
- target = context._get_server(target_attr)
- if target is None:
- return False
-
- # Both host and target is not None, then they are in the
- # same heat context.
- return True
-
- return False
-
-
def _is_background_scenario(scenario):
if "run_in_background" in scenario:
return scenario["run_in_background"]
@@ -406,54 +464,6 @@ def _is_background_scenario(scenario):
return False
-def run_one_scenario(scenario_cfg, output_file):
- """run one scenario using context"""
- runner_cfg = scenario_cfg["runner"]
- runner_cfg['output_filename'] = output_file
-
- # TODO support get multi hosts/vms info
- context_cfg = {}
- if "host" in scenario_cfg:
- context_cfg['host'] = Context.get_server(scenario_cfg["host"])
-
- if "target" in scenario_cfg:
- if is_ip_addr(scenario_cfg["target"]):
- context_cfg['target'] = {}
- context_cfg['target']["ipaddr"] = scenario_cfg["target"]
- else:
- context_cfg['target'] = Context.get_server(scenario_cfg["target"])
- if _is_same_heat_context(scenario_cfg["host"],
- scenario_cfg["target"]):
- context_cfg["target"]["ipaddr"] = \
- context_cfg["target"]["private_ip"]
- else:
- context_cfg["target"]["ipaddr"] = \
- context_cfg["target"]["ip"]
-
- if "targets" in scenario_cfg:
- ip_list = []
- for target in scenario_cfg["targets"]:
- if is_ip_addr(target):
- ip_list.append(target)
- context_cfg['target'] = {}
- else:
- context_cfg['target'] = Context.get_server(target)
- if _is_same_heat_context(scenario_cfg["host"], target):
- ip_list.append(context_cfg["target"]["private_ip"])
- else:
- ip_list.append(context_cfg["target"]["ip"])
- context_cfg['target']['ipaddr'] = ','.join(ip_list)
-
- if "nodes" in scenario_cfg:
- context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
- runner = base_runner.Runner.get(runner_cfg)
-
- print("Starting runner of type '%s'" % runner_cfg["type"])
- runner.run(scenario_cfg, context_cfg)
-
- return runner
-
-
def parse_nodes_with_context(scenario_cfg):
"""paras the 'nodes' fields in scenario """
nodes = scenario_cfg["nodes"]
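The net effect of the task.py changes: run_one_scenario,
_is_same_heat_context and the atexit handler become methods, and the
contexts they consult live on the Task instance. A tiny sketch of why that
isolates parallel API calls (class body abridged to the new attributes):

    # Sketch: task state is per-instance, so parallel API calls cannot
    # see each other's contexts.
    class Task(object):
        def __init__(self):
            self.config = {}
            self.contexts = []

    task_a = Task()
    task_b = Task()
    task_a.contexts.append('heat-context-a')
    assert task_b.contexts == []    # no longer shared via a global list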
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index 5b9081523..7c76e42df 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -35,15 +35,18 @@ log = logging.getLogger(__name__)
CONF = cfg.CONF
-def _output_serializer_main(filename, queue):
+def _output_serializer_main(filename, queue, config):
"""entrypoint for the singleton subprocess writing to outfile
Use of this process enables multiple instances of a scenario without
messing up the output file.
"""
- config = {}
- config["type"] = CONF.dispatcher.capitalize()
- config["file_path"] = filename
- dispatcher = DispatcherBase.get(config)
+ out_type = config['yardstick'].get('DEFAULT', {}).get('dispatcher', 'file')
+ conf = {
+ 'type': out_type.capitalize(),
+ 'file_path': filename
+ }
+
+ dispatcher = DispatcherBase.get(conf, config)
while True:
# blocks until data becomes available
@@ -123,21 +126,21 @@ class Runner(object):
return types
@staticmethod
- def get(config):
+ def get(runner_cfg, config):
"""Returns instance of a scenario runner for execution type.
"""
# if there is no runner, start the output serializer subprocess
if not Runner.runners:
log.debug("Starting dump process file '%s'",
- config["output_filename"])
+ runner_cfg["output_filename"])
Runner.queue = multiprocessing.Queue()
Runner.dump_process = multiprocessing.Process(
target=_output_serializer_main,
name="Dumper",
- args=(config["output_filename"], Runner.queue))
+ args=(runner_cfg["output_filename"], Runner.queue, config))
Runner.dump_process.start()
- return Runner.get_cls(config["type"])(config, Runner.queue)
+ return Runner.get_cls(runner_cfg["type"])(runner_cfg, Runner.queue)
@staticmethod
def release_dump_process():
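The dispatcher type now comes from the parsed yardstick.conf dict handed
through Runner.get, rather than from oslo.config. A standalone sketch of
the lookup in _output_serializer_main, with a hypothetical parsed config:

    # The config value and output path below are examples.
    config = {'yardstick': {'DEFAULT': {'dispatcher': 'influxdb'}}}

    out_type = config['yardstick'].get('DEFAULT', {}).get('dispatcher', 'file')
    conf = {
        'type': out_type.capitalize(),      # 'Influxdb'
        'file_path': '/tmp/yardstick.out',
    }
    assert conf['type'] == 'Influxdb'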
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 3c5895f1e..e53f4b416 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -26,6 +26,7 @@ import sys
from functools import reduce
import yaml
+from six.moves import configparser
from oslo_utils import importutils
from oslo_serialization import jsonutils
@@ -144,3 +145,19 @@ def write_json_to_file(path, data, mode='w'):
def write_file(path, data, mode='w'):
with open(path, mode) as f:
f.write(data)
+
+
+def parse_ini_file(path):
+ parser = configparser.ConfigParser()
+ parser.read(path)
+
+ try:
+ default = {k: v for k, v in parser.items('DEFAULT')}
+ except configparser.NoSectionError:
+ default = {}
+
+ config = dict(DEFAULT=default,
+ **{s: {k: v for k, v in parser.items(
+ s)} for s in parser.sections()})
+
+ return config
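A hypothetical usage sketch of parse_ini_file, assuming the function above
is importable; the path and file contents are examples:

    config = parse_ini_file('/etc/yardstick/yardstick.conf')

    # For a file containing "dispatcher = influxdb" under [DEFAULT] and a
    # [dispatcher_influxdb] section, the result is a plain nested dict:
    #   {'DEFAULT': {'dispatcher': 'influxdb'},
    #    'dispatcher_influxdb': {'target': 'http://127.0.0.1:8086'}}
    dispatcher = config.get('DEFAULT', {}).get('dispatcher', 'file')
    db_conf = config.get('dispatcher_influxdb', {})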
diff --git a/yardstick/dispatcher/base.py b/yardstick/dispatcher/base.py
index 09ce8d1e8..a1c858297 100644
--- a/yardstick/dispatcher/base.py
+++ b/yardstick/dispatcher/base.py
@@ -38,10 +38,10 @@ class Base(object):
raise RuntimeError("No such dispatcher_type %s" % dispatcher_type)
@staticmethod
- def get(config):
+ def get(conf, config):
"""Returns instance of a dispatcher for dispatcher type.
"""
- return Base.get_cls(config["type"])(config)
+ return Base.get_cls(conf["type"])(conf, config)
@abc.abstractmethod
def record_result_data(self, data):
diff --git a/yardstick/dispatcher/file.py b/yardstick/dispatcher/file.py
index 6fc81d419..8acd5dfbb 100644
--- a/yardstick/dispatcher/file.py
+++ b/yardstick/dispatcher/file.py
@@ -29,7 +29,7 @@ class FileDispatcher(DispatchBase):
__dispatcher_type__ = "File"
- def __init__(self, conf):
+ def __init__(self, conf, config):
super(FileDispatcher, self).__init__(conf)
self.result = []
diff --git a/yardstick/dispatcher/http.py b/yardstick/dispatcher/http.py
index 790086155..e3bcbc89b 100644
--- a/yardstick/dispatcher/http.py
+++ b/yardstick/dispatcher/http.py
@@ -51,7 +51,7 @@ class HttpDispatcher(DispatchBase):
__dispatcher_type__ = "Http"
- def __init__(self, conf):
+ def __init__(self, conf, config):
super(HttpDispatcher, self).__init__(conf)
self.headers = {'Content-type': 'application/json'}
self.timeout = CONF.dispatcher_http.timeout
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index d388d28a1..10fff4d1f 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -15,7 +15,6 @@ import time
import requests
import six
-from oslo_config import cfg
from oslo_serialization import jsonutils
from third_party.influxdb.influxdb_line_protocol import make_lines
@@ -23,30 +22,6 @@ from yardstick.dispatcher.base import Base as DispatchBase
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-influx_dispatcher_opts = [
- cfg.StrOpt('target',
- default='http://127.0.0.1:8086',
- help='The target where the http request will be sent. '
- 'If this is not set, no data will be posted. For '
- 'example: target = http://hostname:1234/path'),
- cfg.StrOpt('db_name',
- default='yardstick',
- help='The database name to store test results.'),
- cfg.StrOpt('username',
- default='root',
- help='The user name to access database.'),
- cfg.StrOpt('password',
- default='root',
- help='The user password to access database.'),
- cfg.IntOpt('timeout',
- default=5,
- help='The max time in seconds to wait for a request to '
- 'timeout.'),
-]
-
-CONF.register_opts(influx_dispatcher_opts, group="dispatcher_influxdb")
-
class InfluxdbDispatcher(DispatchBase):
"""Dispatcher class for posting data into an influxdb target.
@@ -54,13 +29,14 @@ class InfluxdbDispatcher(DispatchBase):
__dispatcher_type__ = "Influxdb"
- def __init__(self, conf):
+ def __init__(self, conf, config):
super(InfluxdbDispatcher, self).__init__(conf)
- self.timeout = CONF.dispatcher_influxdb.timeout
- self.target = CONF.dispatcher_influxdb.target
- self.db_name = CONF.dispatcher_influxdb.db_name
- self.username = CONF.dispatcher_influxdb.username
- self.password = CONF.dispatcher_influxdb.password
+ db_conf = config['yardstick'].get('dispatcher_influxdb', {})
+ self.timeout = int(db_conf.get('timeout', 5))
+ self.target = db_conf.get('target', 'http://127.0.0.1:8086')
+ self.db_name = db_conf.get('db_name', 'yardstick')
+ self.username = db_conf.get('username', 'root')
+ self.password = db_conf.get('password', 'root')
self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name)
self.raw_result = []
self.case_name = ""
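For reference, an example yardstick.conf exercising the settings the new
dispatcher code reads. The section and key names come from the diff above;
the values shown are the defaults baked into InfluxdbDispatcher, so this
particular file is illustrative rather than taken from the patch:

    [DEFAULT]
    # selects the dispatcher; 'file' is assumed when the key is absent
    dispatcher = influxdb

    [dispatcher_influxdb]
    target = http://127.0.0.1:8086
    db_name = yardstick
    username = root
    password = root
    timeout = 5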