Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/__init__.py | 38
-rw-r--r--  yardstick/benchmark/contexts/node.py | 6
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py | 6
-rwxr-xr-x  yardstick/benchmark/runners/base.py | 18
-rw-r--r--  yardstick/benchmark/runners/duration.py | 6
-rw-r--r--  yardstick/benchmark/runners/iteration.py | 6
-rw-r--r--  yardstick/benchmark/runners/sequence.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/actionrollbackers.py | 8
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py | 16
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_general.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py | 10
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_general.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/baseoperation.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/operation_general.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py | 8
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py | 2
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/compute/cachestat.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/compute/cpuload.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py | 14
-rw-r--r--  yardstick/benchmark/scenarios/compute/memload.py | 2
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_benchmark.bash | 45
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_install.bash | 8
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_node.py | 27
-rw-r--r--  yardstick/benchmark/scenarios/networking/netutilization.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py | 67
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 14
-rw-r--r--  yardstick/cmd/cli.py | 49
-rw-r--r--  yardstick/cmd/commands/env.py | 39
-rw-r--r--  yardstick/cmd/commands/task.py | 16
-rw-r--r--  yardstick/common/constants.py | 39
-rw-r--r--  yardstick/common/httpClient.py | 30
-rw-r--r--  yardstick/common/utils.py | 50
-rw-r--r--  yardstick/dispatcher/file.py | 1
-rw-r--r--  yardstick/dispatcher/http.py | 4
-rw-r--r--  yardstick/dispatcher/influxdb.py | 8
-rw-r--r--  yardstick/ssh.py | 110
43 files changed, 516 insertions(+), 193 deletions(-)
diff --git a/yardstick/__init__.py b/yardstick/__init__.py
index c31948d43..5c279c800 100644
--- a/yardstick/__init__.py
+++ b/yardstick/__init__.py
@@ -8,18 +8,40 @@
##############################################################################
import logging
-import logging.config
-import sys
import os
+import sys
+
import yardstick.vTC.apexlake as apexlake
# Hack to be able to run apexlake unit tests
# without having to install apexlake.
sys.path.append(os.path.dirname(apexlake.__file__))
-logging.basicConfig(
- level=logging.WARNING,
- format='[%(asctime)s] %(name)-20s %(filename)s:%(lineno)d '
- '%(levelname)s %(message)s', # noqa
- datefmt='%m/%d/%y %H:%M:%S')
-logging.getLogger(__name__).setLevel(logging.INFO)
+LOG_FILE = '/tmp/yardstick.log'
+LOG_FORMATTER = ('%(asctime)s '
+ '%(name)s %(filename)s:%(lineno)d '
+ '%(levelname)s %(message)s')
+
+_LOG_FORMATTER = logging.Formatter(LOG_FORMATTER)
+_LOG_STREAM_HDLR = logging.StreamHandler()
+_LOG_FILE_HDLR = logging.FileHandler(LOG_FILE)
+
+LOG = logging.getLogger(__name__)
+
+
+def _init_logging():
+
+ _LOG_STREAM_HDLR.setFormatter(_LOG_FORMATTER)
+
+ # don't append to log file, clobber
+ _LOG_FILE_HDLR.setFormatter(_LOG_FORMATTER)
+
+ del logging.root.handlers[:]
+ logging.root.addHandler(_LOG_STREAM_HDLR)
+ logging.root.addHandler(_LOG_FILE_HDLR)
+ logging.debug("logging.root.handlers = %s", logging.root.handlers)
+
+ if os.environ.get('CI_DEBUG', '').lower() in {'1', 1, 'y', "yes", "true"}:
+ LOG.setLevel(logging.DEBUG)
+ else:
+ LOG.setLevel(logging.INFO)
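
The new bootstrap above replaces the old basicConfig() call: _init_logging() wires one StreamHandler and one FileHandler (both using LOG_FORMATTER) onto the root logger, and the CI_DEBUG environment variable toggles the package logger between INFO and DEBUG. Callers such as cli.py later in this patch invoke _init_logging() once and then rely on per-module logging.getLogger(__name__) loggers. Below is a minimal, self-contained sketch of that usage pattern; it mirrors the patched module rather than importing it, and the demo logger name is purely illustrative.

    import logging
    import os

    LOG_FILE = '/tmp/yardstick.log'   # same default path as the patch
    LOG_FORMAT = ('%(asctime)s %(name)s %(filename)s:%(lineno)d '
                  '%(levelname)s %(message)s')

    def init_logging():
        """Standalone mirror of yardstick._init_logging()."""
        formatter = logging.Formatter(LOG_FORMAT)
        stream_hdlr = logging.StreamHandler()
        file_hdlr = logging.FileHandler(LOG_FILE)
        stream_hdlr.setFormatter(formatter)
        file_hdlr.setFormatter(formatter)

        # drop handlers installed by any earlier basicConfig() call
        del logging.root.handlers[:]
        logging.root.addHandler(stream_hdlr)
        logging.root.addHandler(file_hdlr)

        pkg_log = logging.getLogger('yardstick')
        debug = os.environ.get('CI_DEBUG', '').lower() in ('1', 'y', 'yes', 'true')
        pkg_log.setLevel(logging.DEBUG if debug else logging.INFO)

    if __name__ == '__main__':
        init_logging()
        logging.getLogger('yardstick.demo').info('written to console and %s', LOG_FILE)
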
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index c4e603a46..78bce8259 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -35,9 +35,9 @@ class NodeContext(Context):
def init(self, attrs):
'''initializes itself from the supplied arguments'''
self.name = attrs["name"]
- self.file_path = attrs.get("file", "")
+ self.file_path = attrs.get("file", "pod.yaml")
if not os.path.exists(self.file_path):
- self.file_path = YARDSTICK_ROOT_PATH + self.file_path
+ self.file_path = os.path.join(YARDSTICK_ROOT_PATH, self.file_path)
LOG.info("Parsing pod file: %s", self.file_path)
@@ -83,7 +83,7 @@ class NodeContext(Context):
return None
elif len(nodes) > 1:
LOG.error("Duplicate nodes!!!")
- LOG.error("Nodes: %r" % nodes)
+ LOG.error("Nodes: %r", nodes)
sys.exit(-1)
# A clone is created in order to avoid affecting the
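
Behaviour change worth noting in node.py: the pod file now defaults to pod.yaml, and a path that does not exist on its own is resolved with os.path.join() against YARDSTICK_ROOT_PATH instead of plain string concatenation. A small sketch of the lookup follows; the root path value and the sample file name are stand-ins for illustration only.

    import os

    YARDSTICK_ROOT_PATH = '/home/opnfv/repos/yardstick/'   # illustrative stand-in

    def resolve_pod_file(attrs):
        """Mirror of the new NodeContext.init() path handling."""
        file_path = attrs.get("file", "pod.yaml")
        if not os.path.exists(file_path):
            # os.path.join copes with a missing or doubled trailing slash
            file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
        return file_path

    print(resolve_pod_file({}))                       # <root>/pod.yaml
    print(resolve_pod_file({"file": "nodes/pod.yaml"}))
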
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 74a236f44..69ea915a1 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -93,7 +93,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if aborted.is_set():
break
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
for i, value in enumerate(comb_values):
@@ -109,7 +109,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ LOG.warning("SLA validation failed: %s", assertion.args)
errors = assertion.args
except Exception as e:
errors = traceback.format_exc()
@@ -129,7 +129,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
queue.put(record)
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
sequence += 1
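
The same mechanical change repeats throughout the runners, scenarios and dispatchers below: log calls stop pre-formatting the message with the % operator and pass the arguments to the logging call instead, so interpolation only happens when the record is actually emitted. A tiny illustration of the difference; the Expensive class is made up purely for the demo.

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger('lazy-format-demo')

    class Expensive(object):
        def __str__(self):
            print('formatting happened')        # visible side effect for the demo
            return 'expensive repr'

    value = Expensive()

    # eager: the message is built even though DEBUG records are filtered out
    LOG.debug("eager: %s" % value)

    # lazy: never formatted, because the record is dropped before rendering
    LOG.debug("lazy: %s", value)
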
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index 23749924f..8f3f75fa1 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -63,7 +63,7 @@ def _execute_shell_command(command):
except Exception:
exitcode = -1
output = traceback.format_exc()
- log.error("exec command '%s' error:\n " % command)
+ log.error("exec command '%s' error:\n ", command)
log.error(traceback.format_exc())
return exitcode, output
@@ -76,10 +76,10 @@ def _single_action(seconds, command, queue):
log.debug("single action: executing command: '%s'", command)
ret_code, data = _execute_shell_command(command)
if ret_code < 0:
- log.error("single action error! command:%s" % command)
+ log.error("single action error! command:%s", command)
queue.put({'single-action-data': data})
return
- log.debug("single action data: \n%s" % data)
+ log.debug("single action data: \n%s", data)
queue.put({'single-action-data': data})
@@ -96,7 +96,7 @@ def _periodic_action(interval, command, queue):
log.error("periodic action error! command:%s", command)
queue.put({'periodic-action-data': data})
break
- log.debug("periodic action data: \n%s" % data)
+ log.debug("periodic action data: \n%s", data)
queue.put({'periodic-action-data': data})
@@ -127,7 +127,7 @@ class Runner(object):
"""
# if there is no runner, start the output serializer subprocess
if len(Runner.runners) == 0:
- log.debug("Starting dump process file '%s'" %
+ log.debug("Starting dump process file '%s'",
config["output_filename"])
Runner.queue = multiprocessing.Queue()
Runner.dump_process = multiprocessing.Process(
@@ -196,13 +196,13 @@ class Runner(object):
'''run a potentially configured post-stop action'''
if "post-stop-action" in self.config:
command = self.config["post-stop-action"]["command"]
- log.debug("post stop action: command: '%s'" % command)
+ log.debug("post stop action: command: '%s'", command)
ret_code, data = _execute_shell_command(command)
if ret_code < 0:
log.error("post action error! command:%s", command)
self.result_queue.put({'post-stop-action-data': data})
return
- log.debug("post-stop data: \n%s" % data)
+ log.debug("post-stop data: \n%s", data)
self.result_queue.put({'post-stop-action-data': data})
def run(self, scenario_cfg, context_cfg):
@@ -219,13 +219,13 @@ class Runner(object):
# run a potentially configured pre-start action
if "pre-start-action" in self.config:
command = self.config["pre-start-action"]["command"]
- log.debug("pre start action: command: '%s'" % command)
+ log.debug("pre start action: command: '%s'", command)
ret_code, data = _execute_shell_command(command)
if ret_code < 0:
log.error("pre-start action error! command:%s", command)
self.result_queue.put({'pre-start-action-data': data})
return
- log.debug("pre-start data: \n%s" % data)
+ log.debug("pre-start data: \n%s", data)
self.result_queue.put({'pre-start-action-data': data})
if "single-shot-action" in self.config:
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 1f51f513f..1412c0caa 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -58,7 +58,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
start = time.time()
while True:
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
data = {}
@@ -71,7 +71,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ LOG.warning("SLA validation failed: %s", assertion.args)
errors = assertion.args
except Exception as e:
errors = traceback.format_exc()
@@ -91,7 +91,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
queue.put(record)
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
sequence += 1
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index b23b32b08..3a839b65f 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -60,7 +60,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if "run" in run_step:
while True:
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
{"runner": runner_cfg["runner_id"],
"sequence": sequence})
@@ -74,7 +74,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ LOG.warning("SLA validation failed: %s", assertion.args)
errors = assertion.args
except Exception as e:
errors = traceback.format_exc()
@@ -94,7 +94,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
queue.put(record)
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"],
"sequence": sequence})
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index fe53412ca..3b06e2a36 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -67,7 +67,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
for value in sequence_values:
options[arg_name] = value
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
data = {}
@@ -80,7 +80,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ LOG.warning("SLA validation failed: %s", assertion.args)
errors = assertion.args
except Exception as e:
errors = traceback.format_exc()
@@ -100,7 +100,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
queue.put(record)
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
sequence += 1
diff --git a/yardstick/benchmark/scenarios/availability/actionrollbackers.py b/yardstick/benchmark/scenarios/availability/actionrollbackers.py
index 4b732a10c..38f57d476 100644
--- a/yardstick/benchmark/scenarios/availability/actionrollbackers.py
+++ b/yardstick/benchmark/scenarios/availability/actionrollbackers.py
@@ -28,8 +28,8 @@ class AttackerRollbacker(ActionRollbacker):
def rollback(self):
LOG.debug(
- "\033[93m recovering attacker %s \033[0m"
- % (self.underlyingAttacker.key))
+ "\033[93m recovering attacker %s \033[0m",
+ self.underlyingAttacker.key)
self.underlyingAttacker.recover()
@@ -40,6 +40,6 @@ class OperationRollbacker(ActionRollbacker):
def rollback(self):
LOG.debug(
- "\033[93m rollback operation %s \033[0m"
- % (self.underlyingOperation.key))
+ "\033[93m rollback operation %s \033[0m",
+ self.underlyingOperation.key)
self.underlyingOperation.rollback()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 6561f6b65..3b1f8ef76 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -24,7 +24,7 @@ def _execute_shell_command(command, stdin=None):
except Exception:
exitcode = -1
output = traceback.format_exc()
- LOG.error("exec command '%s' error:\n " % command)
+ LOG.error("exec command '%s' error:\n ", command)
LOG.error(traceback.format_exc())
return exitcode, output
@@ -34,7 +34,7 @@ class BaremetalAttacker(BaseAttacker):
__attacker_type__ = 'bare-metal-down'
def setup(self):
- LOG.debug("config:%s context:%s" % (self._config, self._context))
+ LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
ip = host.get("ip", None)
user = host.get("user", "root")
@@ -65,10 +65,10 @@ class BaremetalAttacker(BaseAttacker):
"/bin/sh -s {0} -W 10".format(self.host_ip),
stdin=open(self.check_script, "r"))
- LOG.debug("check ret: %s out:%s err:%s" %
- (exit_status, stdout, stderr))
+ LOG.debug("check ret: %s out:%s err:%s",
+ exit_status, stdout, stderr)
if not stdout or "running" not in stdout:
- LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+ LOG.info("the host (ipmi_ip:%s) is not running!", self.ipmi_ip)
return False
return True
@@ -76,8 +76,8 @@ class BaremetalAttacker(BaseAttacker):
def inject_fault(self):
exit_status, stdout, stderr = self.connection.execute(
"shutdown -h now")
- LOG.debug("inject fault ret: %s out:%s err:%s" %
- (exit_status, stdout, stderr))
+ LOG.debug("inject fault ret: %s out:%s err:%s",
+ exit_status, stdout, stderr)
if not exit_status:
LOG.info("inject fault success")
@@ -91,7 +91,7 @@ class BaremetalAttacker(BaseAttacker):
ssh_port = host.get("ssh_port", ssh.DEFAULT_PORT)
pwd = host.get("pwd", None)
- LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+ LOG.debug("jump_host ip:%s user:%s", ip, user)
self.jump_connection = ssh.SSH(user, ip, password=pwd,
port=ssh_port)
self.jump_connection.wait(timeout=600)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
index 5e7716e49..a452c37ac 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
@@ -20,7 +20,7 @@ class GeneralAttacker(BaseAttacker):
__attacker_type__ = 'general-attacker'
def setup(self):
- LOG.debug("config:%s context:%s" % (self._config, self._context))
+ LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
ip = host.get("ip", None)
user = host.get("user", "root")
@@ -79,8 +79,8 @@ class GeneralAttacker(BaseAttacker):
.format(stdout))
else:
LOG.error(
- "the inject_fault's error, stdout:%s, stderr:%s" %
- (stdout, stderr))
+ "the inject_fault's error, stdout:%s, stderr:%s",
+ stdout, stderr)
def recover(self):
if "rollback_parameter" in self._config:
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index 0a844f56c..2ccc231da 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -19,7 +19,7 @@ class ProcessAttacker(BaseAttacker):
__attacker_type__ = 'kill-process'
def setup(self):
- LOG.debug("config:%s context:%s" % (self._config, self._context))
+ LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
ip = host.get("ip", None)
user = host.get("user", "root")
@@ -54,8 +54,8 @@ class ProcessAttacker(BaseAttacker):
return True
else:
LOG.error(
- "the host envrioment is error, stdout:%s, stderr:%s" %
- (stdout, stderr))
+ "the host envrioment is error, stdout:%s, stderr:%s",
+ stdout, stderr)
return False
def inject_fault(self):
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index 78276efa2..f96e57728 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -26,7 +26,7 @@ class AttackerMgr(object):
self._attacker_list = []
def init_attackers(self, attacker_cfgs, context):
- LOG.debug("attackerMgr confg: %s" % attacker_cfgs)
+ LOG.debug("attackerMgr confg: %s", attacker_cfgs)
for cfg in attacker_cfgs:
attacker_cls = BaseAttacker.get_attacker_cls(cfg)
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 267933dd0..104c68380 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -63,7 +63,7 @@ class Director(object):
def createActionPlayer(self, type, key):
LOG.debug(
- "the type of current action is %s, the key is %s" % (type, key))
+ "the type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionplayers.AttackerPlayer(self.attackerMgr[key])
if type == ActionType.MONITOR:
@@ -77,13 +77,13 @@ class Director(object):
def createActionRollbacker(self, type, key):
LOG.debug(
- "the type of current action is %s, the key is %s" % (type, key))
+ "the type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
if type == ActionType.OPERATION:
return actionrollbackers.OperationRollbacker(
self.operationMgr[key])
- LOG.debug("no rollbacker created for %s" % (key))
+ LOG.debug("no rollbacker created for %s", key)
def verify(self):
result = True
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index d26c99c75..38d1c4e5c 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -27,7 +27,7 @@ class MonitorMgr(object):
self._monitor_list = []
def init_monitors(self, monitor_cfgs, context):
- LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+ LOG.debug("monitorMgr config: %s", monitor_cfgs)
for monitor_cfg in monitor_cfgs:
monitor_type = monitor_cfg["monitor_type"]
@@ -87,7 +87,7 @@ class BaseMonitor(multiprocessing.Process):
return os.path.join(base_path, path)
def run(self):
- LOG.debug("config:%s context:%s" % (self._config, self._context))
+ LOG.debug("config:%s context:%s", self._config, self._context)
self.setup()
monitor_time = self._config.get("monitor_time", 0)
@@ -140,7 +140,7 @@ class BaseMonitor(multiprocessing.Process):
def wait_monitor(self):
self.join()
self._result = self._queue.get()
- LOG.debug("the monitor result:%s" % self._result)
+ LOG.debug("the monitor result:%s", self._result)
def setup(self): # pragma: no cover
pass
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index b55cc3134..366d16e73 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
except Exception:
exitcode = -1
output = traceback.format_exc()
- LOG.error("exec command '%s' error:\n " % command)
+ LOG.error("exec command '%s' error:\n ", command)
LOG.error(traceback.format_exc())
return exitcode, output
@@ -62,8 +62,8 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
"/bin/bash -s '{0}'".format(self.cmd),
stdin=open(self.check_script, "r"))
- LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
- (exit_status, stdout, stderr))
+ LOG.debug("the ret stats: %s stdout: %s stderr: %s",
+ exit_status, stdout, stderr)
else:
exit_status, stdout = _execute_shell_command(self.cmd)
if exit_status:
@@ -72,10 +72,10 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
def verify_SLA(self):
outage_time = self._result.get('outage_time', None)
- LOG.debug("the _result:%s" % self._result)
+ LOG.debug("the _result:%s", self._result)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time > max_outage_time:
- LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
LOG.info("the sla is passed")
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
index f9ddb2505..359cde671 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
@@ -61,14 +61,14 @@ class GeneralMonitor(basemonitor.BaseMonitor):
return True
def verify_SLA(self):
- LOG.debug("the _result:%s" % self._result)
+ LOG.debug("the _result:%s", self._result)
outage_time = self._result.get('outage_time', None)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time is None:
LOG.error("There is no outage_time in monitor result.")
return False
if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
return True
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index 403ec4d37..a88b8d42e 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -39,17 +39,17 @@ class MonitorProcess(basemonitor.BaseMonitor):
"/bin/sh -s {0}".format(self.process_name),
stdin=open(self.check_script, "r"))
if not stdout or int(stdout) <= 0:
- LOG.info("the process (%s) is not running!" % self.process_name)
+ LOG.info("the process (%s) is not running!", self.process_name)
return False
return True
def verify_SLA(self):
- LOG.debug("the _result:%s" % self._result)
+ LOG.debug("the _result:%s", self._result)
outage_time = self._result.get('outage_time', None)
max_outage_time = self._config["sla"]["max_recover_time"]
if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
return True
diff --git a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
index e776e87ae..80efd1b02 100644
--- a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
+++ b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
@@ -26,7 +26,7 @@ class OperationMgr(object):
self._operation_list = []
def init_operations(self, operation_cfgs, context):
- LOG.debug("operationMgr confg: %s" % operation_cfgs)
+ LOG.debug("operationMgr confg: %s", operation_cfgs)
for cfg in operation_cfgs:
operation_type = cfg['operation_type']
operation_cls = BaseOperation.get_operation_cls(operation_type)
diff --git a/yardstick/benchmark/scenarios/availability/operation/operation_general.py b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
index aa28472f7..b3a20c344 100644
--- a/yardstick/benchmark/scenarios/availability/operation/operation_general.py
+++ b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
@@ -19,7 +19,7 @@ class GeneralOperaion(BaseOperation):
__operation__type__ = "general-operation"
def setup(self):
- LOG.debug("config:%s context:%s" % (self._config, self._context))
+ LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
ip = host.get("ip", None)
user = host.get("user", "root")
@@ -67,8 +67,8 @@ class GeneralOperaion(BaseOperation):
LOG.debug("success,the operation's output is: {0}".format(stdout))
else:
LOG.error(
- "the operation's error, stdout:%s, stderr:%s" %
- (stdout, stderr))
+ "the operation's error, stdout:%s, stderr:%s",
+ stdout, stderr)
def rollback(self):
if "rollback_parameter" in self._config:
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
index 1bdb9f2c2..a24f26e81 100644
--- a/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
+++ b/yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
@@ -26,7 +26,7 @@ class ResultCheckerMgr(object):
self._result_checker_list = []
def init_ResultChecker(self, resultchecker_cfgs, context):
- LOG.debug("resultcheckerMgr confg: %s" % resultchecker_cfgs)
+ LOG.debug("resultcheckerMgr confg: %s", resultchecker_cfgs)
for cfg in resultchecker_cfgs:
resultchecker_type = cfg['checker_type']
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
index ae896c2b2..8c9d16026 100644
--- a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
+++ b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
@@ -20,7 +20,7 @@ class GeneralResultChecker(BaseResultChecker):
__result_checker__type__ = "general-result-checker"
def setup(self):
- LOG.debug("config:%s context:%s" % (self._config, self._context))
+ LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
ip = host.get("ip", None)
user = host.get("user", "root")
@@ -67,7 +67,7 @@ class GeneralResultChecker(BaseResultChecker):
LOG.debug("action script of the operation is: {0}"
.format(self.verify_script))
- LOG.debug("exit_status ,stdout : {0} ,{1}".format(exit_status, stdout))
+ LOG.debug("exit_status ,stdout : %s ,%s", exit_status, stdout)
if exit_status == 0 and stdout:
self.actualResult = stdout
LOG.debug("verifying resultchecker: {0}".format(self.key))
@@ -104,6 +104,6 @@ class GeneralResultChecker(BaseResultChecker):
LOG.error(stderr)
LOG.debug(
- "verifying resultchecker: {0},the result is : {1}"
- .format(self.key, self.success))
+ "verifying resultchecker: %s,the result is : %s", self.key,
+ self.success)
return self.success
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 0a128aa09..b064c6724 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -22,7 +22,7 @@ class ScenarioGeneral(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
LOG.debug(
- "scenario_cfg:%s context_cfg:%s" % (scenario_cfg, context_cfg))
+ "scenario_cfg:%s context_cfg:%s", scenario_cfg, context_cfg)
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 10f2c4f45..46a197c3b 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -21,8 +21,8 @@ class ServiceHA(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
LOG.debug(
- "scenario_cfg:%s context_cfg:%s" %
- (scenario_cfg, context_cfg))
+ "scenario_cfg:%s context_cfg:%s",
+ scenario_cfg, context_cfg)
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.setup_done = False
diff --git a/yardstick/benchmark/scenarios/compute/cachestat.py b/yardstick/benchmark/scenarios/compute/cachestat.py
index 117702098..25300dd46 100644
--- a/yardstick/benchmark/scenarios/compute/cachestat.py
+++ b/yardstick/benchmark/scenarios/compute/cachestat.py
@@ -92,7 +92,7 @@ class CACHEstat(base.Scenario):
def _execute_command(self, cmd):
"""Execute a command on server."""
- LOG.info("Executing: %s" % cmd)
+ LOG.info("Executing: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
if status:
raise RuntimeError("Failed executing command: ",
diff --git a/yardstick/benchmark/scenarios/compute/cpuload.py b/yardstick/benchmark/scenarios/compute/cpuload.py
index a7fae44ec..9d71038ef 100644
--- a/yardstick/benchmark/scenarios/compute/cpuload.py
+++ b/yardstick/benchmark/scenarios/compute/cpuload.py
@@ -96,7 +96,7 @@ class CPULoad(base.Scenario):
def _execute_command(self, cmd):
"""Execute a command on server."""
- LOG.info("Executing: %s" % cmd)
+ LOG.info("Executing: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
if status != 0:
raise RuntimeError("Failed executing command: ",
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index 6a1afe223..a6c4d95cf 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -69,14 +69,14 @@ class Cyclictest(base.Scenario):
rpm_dir = setup_options["rpm_dir"]
script_dir = setup_options["script_dir"]
image_dir = setup_options["image_dir"]
- LOG.debug("Send RPMs from %s to workspace %s" %
- (rpm_dir, self.WORKSPACE))
+ LOG.debug("Send RPMs from %s to workspace %s",
+ rpm_dir, self.WORKSPACE)
client.put(rpm_dir, self.WORKSPACE, recursive=True)
- LOG.debug("Send scripts from %s to workspace %s" %
- (script_dir, self.WORKSPACE))
+ LOG.debug("Send scripts from %s to workspace %s",
+ script_dir, self.WORKSPACE)
client.put(script_dir, self.WORKSPACE, recursive=True)
- LOG.debug("Send guest image from %s to workspace %s" %
- (image_dir, self.WORKSPACE))
+ LOG.debug("Send guest image from %s to workspace %s",
+ image_dir, self.WORKSPACE)
client.put(image_dir, self.WORKSPACE, recursive=True)
def _connect_host(self):
@@ -102,7 +102,7 @@ class Cyclictest(base.Scenario):
self.guest.wait(timeout=600)
def _run_setup_cmd(self, client, cmd):
- LOG.debug("Run cmd: %s" % cmd)
+ LOG.debug("Run cmd: %s", cmd)
status, stdout, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
diff --git a/yardstick/benchmark/scenarios/compute/memload.py b/yardstick/benchmark/scenarios/compute/memload.py
index 48088f87c..e1ba93d02 100644
--- a/yardstick/benchmark/scenarios/compute/memload.py
+++ b/yardstick/benchmark/scenarios/compute/memload.py
@@ -61,7 +61,7 @@ class MEMLoad(base.Scenario):
def _execute_command(self, cmd):
"""Execute a command on server."""
- LOG.info("Executing: %s" % cmd)
+ LOG.info("Executing: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
if status:
raise RuntimeError("Failed executing command: ",
diff --git a/yardstick/benchmark/scenarios/networking/netperf_benchmark.bash b/yardstick/benchmark/scenarios/networking/netperf_benchmark.bash
index a425c5df0..f6245c9cd 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_benchmark.bash
+++ b/yardstick/benchmark/scenarios/networking/netperf_benchmark.bash
@@ -12,6 +12,7 @@
set -e
# Commandline arguments
+OPTIONS_SIZE="$#"
OPTIONS="$@"
OUTPUT_FILE=/tmp/netperf-out.log
@@ -24,14 +25,40 @@ run_netperf()
# write the result to stdout in json format
output_json()
{
- mean=$(awk '/\/s/{print $3}' $OUTPUT_FILE)
- troughput=$(awk '/\/s/{print $1}' $OUTPUT_FILE)
- unit=$(awk '/\/s/{print $2}' $OUTPUT_FILE)
- echo -e "{ \
- \"mean_latency\":\"$mean\", \
- \"troughput\":\"$troughput\", \
- \"troughput_unit\":\"$unit\" \
- }"
+ #ARR=($OPTIONS)
+ #declare -p ARR
+ read -r -a ARR <<< "$OPTIONS"
+ opt_size=0
+ while [ $opt_size -lt "$OPTIONS_SIZE" ]
+ do
+ if [ "${ARR[$opt_size]}" = "-O" ]
+ then
+ break
+ fi
+ opt_size=$((opt_size+1))
+ done
+ opt_size=$((opt_size+1))
+ out_opt="${ARR[$opt_size]}"
+ IFS=, read -r -a PARTS <<< "$out_opt"
+ #declare -p PARTS
+ part_num=${#PARTS[*]}
+ tran_num=0
+ for f in "${PARTS[@]}"
+ do
+ array_name[$tran_num]=$(echo "$f" | tr '[A-Z]' '[a-z]')
+ tran_num=$((tran_num+1))
+ done
+ read -r -a DATA_PARTS <<< "$(sed -n '$p' $OUTPUT_FILE)"
+ out_str="{"
+ for((i=0;i<part_num-1;i++))
+ do
+ modify_str=\"${array_name[i]}\":\"${DATA_PARTS[i]}\",
+ out_str=$out_str$modify_str
+ done
+ modify_str=\"${array_name[part_num-1]}\":\"${DATA_PARTS[part_num-1]}\"
+ out_str=$out_str$modify_str"}"
+
+ echo -e "$out_str"
}
# main entry
@@ -44,4 +71,4 @@ main()
output_json
}
-main
\ No newline at end of file
+main
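
The rewritten output_json() no longer hard-codes three fields; it takes the comma-separated selector passed to netperf via -O, lower-cases each name, and pairs it with the matching column of the last line of /tmp/netperf-out.log. A rough Python equivalent of that mapping, with made-up sample values:

    import json

    # made-up sample inputs; the real script reads them from "$@" and
    # from the last line of /tmp/netperf-out.log
    selector = "THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY,LOCAL_CPU_UTIL"
    last_line = "932.21 10^6bits/s 123.45 1.07"

    keys = [name.lower() for name in selector.split(",")]
    values = last_line.split()
    print(json.dumps(dict(zip(keys, values))))
    # e.g. {"throughput": "932.21", "throughput_units": "10^6bits/s", ...}
    # (key order may vary)
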
diff --git a/yardstick/benchmark/scenarios/networking/netperf_install.bash b/yardstick/benchmark/scenarios/networking/netperf_install.bash
index eaa9f530a..0e3808f9c 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_install.bash
+++ b/yardstick/benchmark/scenarios/networking/netperf_install.bash
@@ -11,6 +11,13 @@
set -e
+svc="netserver"
+if pgrep $svc >/dev/null
+then
+ echo "$svc have existed, exit!"
+ exit 0
+fi
+
echo "===Install netperf before test begin!!!==="
cp /etc/apt/sources.list /etc/apt/sources.list_bkp
cp /etc/resolv.conf /etc/resolv.conf_bkp
@@ -30,3 +37,4 @@ sudo apt-get install -y netperf
service netperf start
echo "===Install netperf before test end!!!==="
+
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
index 1578da7d8..a76982b6f 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_node.py
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -86,9 +86,8 @@ class NetperfNode(base.Scenario):
self.client.wait(timeout=600)
# copy script to host
- self.client.run("cat > ~/netperf.sh",
- stdin=open(self.target_script, "rb"))
-
+ with open(self.target_script, "rb") as file_run:
+ self.client.run("cat > ~/netperf.sh", stdin=file_run)
# copy script to host and client
self.install_script = pkg_resources.resource_filename(
'yardstick.benchmark.scenarios.networking',
@@ -97,14 +96,14 @@ class NetperfNode(base.Scenario):
'yardstick.benchmark.scenarios.networking',
NetperfNode.REMOVE_SCRIPT)
- self.server.run("cat > ~/netperf_install.sh",
- stdin=open(self.install_script, "rb"))
- self.client.run("cat > ~/netperf_install.sh",
- stdin=open(self.install_script, "rb"))
- self.server.run("cat > ~/netperf_remove.sh",
- stdin=open(self.remove_script, "rb"))
- self.client.run("cat > ~/netperf_remove.sh",
- stdin=open(self.remove_script, "rb"))
+ with open(self.install_script, "rb") as file_install:
+ self.server.run("cat > ~/netperf_install.sh", stdin=file_install)
+ with open(self.install_script, "rb") as file_install:
+ self.client.run("cat > ~/netperf_install.sh", stdin=file_install)
+ with open(self.remove_script, "rb") as file_remove:
+ self.server.run("cat > ~/netperf_remove.sh", stdin=file_remove)
+ with open(self.remove_script, "rb") as file_remove:
+ self.client.run("cat > ~/netperf_remove.sh", stdin=file_remove)
self.server.execute("sudo bash netperf_install.sh")
self.client.execute("sudo bash netperf_install.sh")
@@ -131,10 +130,12 @@ class NetperfNode(base.Scenario):
else:
testlen = 20
- cmd_args = "-H %s -l %s -t %s" % (ipaddr, testlen, testname)
+ cmd_args = "-H %s -l %s -t %s -c -C" % (ipaddr, testlen, testname)
# get test specific options
- default_args = "-O 'THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY'"
+ output_opt = options.get(
+ "output_opt", "THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY")
+ default_args = "-O %s" % output_opt
cmd_args += " -- %s" % default_args
option_pair_list = [("send_msg_size", "-m"),
("recv_msg_size", "-M"),
diff --git a/yardstick/benchmark/scenarios/networking/netutilization.py b/yardstick/benchmark/scenarios/networking/netutilization.py
index ecde7568e..1ea92cca3 100644
--- a/yardstick/benchmark/scenarios/networking/netutilization.py
+++ b/yardstick/benchmark/scenarios/networking/netutilization.py
@@ -83,7 +83,7 @@ class NetUtilization(base.Scenario):
def _execute_command(self, cmd):
"""Execute a command on target."""
- LOG.info("Executing: %s" % cmd)
+ LOG.info("Executing: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
if status:
raise RuntimeError("Failed executing command: ",
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 82db1e254..39912a95a 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -32,14 +32,11 @@ class Vsperf(base.Scenario):
the valid values are "rfc2544", "continuous", "back2back"
type: string
default: "rfc2544"
- pkt_sizes - a packet size for which test should be executed;
- Multiple packet sizes can be tested by modification of Sequence runner
+ frame_size - a frame size for which test should be executed;
+ Multiple frame sizes can be tested by modification of sequence runner
section inside TC YAML definition.
type: string
default: "64"
- duration - sets duration for which traffic will be generated
- type: int
- default: 30
bidirectional - speficies if traffic will be uni (False) or bi-directional
(True)
type: string
@@ -47,9 +44,6 @@ class Vsperf(base.Scenario):
iload - specifies frame rate
type: string
default: 100
- rfc2544_trials - the number of trials performed for each packet size
- type: string
- default: NA
multistream - the number of simulated streams
type: string
default: 0 (disabled)
@@ -57,11 +51,24 @@ class Vsperf(base.Scenario):
the valid values are "L4", "L3" and "L2"
type: string
default: "L4"
- conf-file - path to the vsperf configuration file, which will be uploaded
- to the VM
+ test_params - specifies a string with a list of vsperf configuration
+ parameters, which will be passed to the '--test-params' CLI argument;
+ Parameters should be stated in the form of 'param=value' and separated
+ by a semicolon. Please check VSPERF documentation for details about
+ available configuration parameters and their data types.
+ In case that both 'test_params' and 'conf_file' are specified,
+ then values from 'test_params' will override values defined
+ in the configuration file.
+ type: string
+ default: NA
+ conf_file - path to the vsperf configuration file, which will be uploaded
+ to the VM;
+ In case that both 'test_params' and 'conf_file' are specified,
+ then values from 'test_params' will override values defined
+ in configuration file.
type: string
default: NA
- setup-script - path to the setup script, which will be executed during
+ setup_script - path to the setup script, which will be executed during
setup and teardown phases
type: string
default: NA
@@ -80,8 +87,6 @@ class Vsperf(base.Scenario):
"""
__scenario_type__ = "Vsperf"
- VSPERF_CONF = '~/vsperf-yardstick.conf'
-
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
@@ -93,13 +98,18 @@ class Vsperf(base.Scenario):
None)
self.br_ex = self.scenario_cfg['options'].get('external_bridge',
'br-ex')
- self.vsperf_conf = os.path.expanduser(
- self.scenario_cfg['options'].get('conf-file', Vsperf.VSPERF_CONF))
- self.setup_script = self.scenario_cfg['options'].get('setup-script',
+ self.vsperf_conf = self.scenario_cfg['options'].get('conf_file', None)
+ if self.vsperf_conf:
+ self.vsperf_conf = os.path.expanduser(self.vsperf_conf)
+
+ self.setup_script = self.scenario_cfg['options'].get('setup_script',
None)
if self.setup_script:
self.setup_script = os.path.expanduser(self.setup_script)
+ self.test_params = self.scenario_cfg['options'].get('test-params',
+ None)
+
def setup(self):
'''scenario setup'''
vsperf = self.context_cfg['host']
@@ -123,9 +133,10 @@ class Vsperf(base.Scenario):
# traffic generation could last long
self.client.wait(timeout=1800)
- # copy script to host
- self.client.run("cat > ~/vsperf.conf",
- stdin=open(self.vsperf_conf, "rb"))
+ # copy script to host if needed
+ if self.vsperf_conf:
+ self.client.run("cat > ~/vsperf.conf",
+ stdin=open(self.vsperf_conf, "rb"))
# execute external setup script
if self.setup_script:
@@ -166,18 +177,26 @@ class Vsperf(base.Scenario):
options = self.scenario_cfg['options']
test_params = []
test_params.append(add_test_params(options, "traffic_type", "rfc2544"))
- test_params.append(add_test_params(options, "pkt_sizes", "64"))
- test_params.append(add_test_params(options, "duration", None))
test_params.append(add_test_params(options, "bidirectional", "False"))
test_params.append(add_test_params(options, "iload", 100))
- test_params.append(add_test_params(options, "rfc2544_trials", None))
test_params.append(add_test_params(options, "multistream", None))
test_params.append(add_test_params(options, "stream_type", None))
+ if 'frame_size' in options:
+ test_params.append("%s=(%s,)" % ('TRAFFICGEN_PKT_SIZES',
+ options['frame_size']))
+ if 'test_params' in options:
+ test_params.append(options['test_params'])
+
+ # filter empty parameters and escape quotes and double quotes
+ test_params = [tp.replace('"', '\\"').replace("'", "\\'")
+ for tp in test_params if tp]
# execute vsperf
cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
- cmd += "./vsperf --mode trafficgen --conf-file ~/vsperf.conf "
- cmd += "--test-params=\"%s\"" % (';'.join(filter(None, test_params)))
+ cmd += "./vsperf --mode trafficgen "
+ if self.vsperf_conf:
+ cmd += "--conf-file ~/vsperf.conf "
+ cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
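
With this change the configuration file becomes optional and the vsperf command line is assembled from whatever options are present: frame_size maps onto TRAFFICGEN_PKT_SIZES, a free-form test_params string is appended verbatim, empty entries are dropped and quotes escaped before everything is joined into --test-params. A condensed sketch of that assembly; the option values and the first two list entries are placeholders, since add_test_params() itself is not shown in this hunk.

    options = {'frame_size': '512',
               'test_params': 'TRAFFICGEN_DURATION=30'}      # placeholder values

    # earlier entries normally come from add_test_params(); faked here
    test_params = ['traffic_type=rfc2544', 'iload=100']

    if 'frame_size' in options:
        test_params.append("%s=(%s,)" % ('TRAFFICGEN_PKT_SIZES',
                                         options['frame_size']))
    if 'test_params' in options:
        test_params.append(options['test_params'])

    # drop empty entries and escape quotes before shell interpolation
    test_params = [tp.replace('"', '\\"').replace("'", "\\'")
                   for tp in test_params if tp]

    cmd = "./vsperf --mode trafficgen "
    cmd += "--test-params=\"%s\"" % ';'.join(test_params)
    print(cmd)
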
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index 77df6db28..72ceff7ce 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -76,8 +76,8 @@ class StorPerf(base.Scenario):
setup_query_content = json.loads(setup_query.content)
if setup_query_content["stack_created"]:
self.setup_done = True
- LOG.debug("stack_created: %s"
- % setup_query_content["stack_created"])
+ LOG.debug("stack_created: %s",
+ setup_query_content["stack_created"])
def setup(self):
"""Set the configuration."""
@@ -91,8 +91,8 @@ class StorPerf(base.Scenario):
except KeyError:
pass
- LOG.info("Creating a stack on node %s with parameters %s" %
- (self.target, env_args))
+ LOG.info("Creating a stack on node %s with parameters %s",
+ self.target, env_args)
setup_res = requests.post('http://%s:5000/api/v1.0/configurations'
% self.target, json=env_args)
@@ -102,7 +102,7 @@ class StorPerf(base.Scenario):
raise RuntimeError("Failed to create a stack, error message:",
setup_res_content["message"])
elif setup_res.status_code == 200:
- LOG.info("stack_id: %s" % setup_res_content["stack_id"])
+ LOG.info("stack_id: %s", setup_res_content["stack_id"])
while not self.setup_done:
self._query_setup_state()
@@ -150,7 +150,7 @@ class StorPerf(base.Scenario):
except KeyError:
pass
- LOG.info("Starting a job with parameters %s" % job_args)
+ LOG.info("Starting a job with parameters %s", job_args)
job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
json=job_args)
@@ -161,7 +161,7 @@ class StorPerf(base.Scenario):
job_res_content["message"])
elif job_res.status_code == 200:
job_id = job_res_content["job_id"]
- LOG.info("Started job id: %s..." % job_id)
+ LOG.info("Started job id: %s...", job_id)
while not self.job_completed:
self._query_job_state(job_id)
diff --git a/yardstick/cmd/cli.py b/yardstick/cmd/cli.py
index 3896ce47c..beaa187aa 100644
--- a/yardstick/cmd/cli.py
+++ b/yardstick/cmd/cli.py
@@ -19,11 +19,13 @@ from pkg_resources import get_distribution
from argparse import RawDescriptionHelpFormatter
from oslo_config import cfg
+from yardstick import _init_logging, LOG
from yardstick.cmd.commands import task
from yardstick.cmd.commands import runner
from yardstick.cmd.commands import scenario
from yardstick.cmd.commands import testcase
from yardstick.cmd.commands import plugin
+from yardstick.cmd.commands import env
CONF = cfg.CONF
cli_opts = [
@@ -62,10 +64,12 @@ class YardstickCLI():
'runner': runner.RunnerCommands,
'scenario': scenario.ScenarioCommands,
'testcase': testcase.TestcaseCommands,
- 'plugin': plugin.PluginCommands
+ 'plugin': plugin.PluginCommands,
+ 'env': env.EnvCommand
}
def __init__(self):
+ self.opts = []
self._version = 'yardstick version %s ' % \
get_distribution('yardstick').version
@@ -111,7 +115,12 @@ class YardstickCLI():
title="Command categories",
help="Available categories",
handler=parser)
- CONF.register_cli_opt(category_opt)
+ self._register_opt(category_opt)
+
+ def _register_opt(self, opt):
+
+ CONF.register_cli_opt(opt)
+ self.opts.append(opt)
def _load_cli_config(self, argv):
@@ -121,15 +130,12 @@ class YardstickCLI():
def _handle_global_opts(self):
- # handle global opts
- logger = logging.getLogger('yardstick')
- logger.setLevel(logging.WARNING)
-
+ _init_logging()
if CONF.verbose:
- logger.setLevel(logging.INFO)
+ LOG.setLevel(logging.INFO)
if CONF.debug:
- logger.setLevel(logging.DEBUG)
+ LOG.setLevel(logging.DEBUG)
def _dispath_func_notask(self):
@@ -143,22 +149,33 @@ class YardstickCLI():
func = CONF.category.func
func(CONF.category, task_id=task_id)
+ def _clear_config_opts(self):
+
+ CONF.clear()
+ CONF.unregister_opts(self.opts)
+
def main(self, argv): # pragma: no cover
'''run the command line interface'''
- self._register_cli_opt()
+ try:
+ self._register_cli_opt()
- self._load_cli_config(argv)
+ self._load_cli_config(argv)
- self._handle_global_opts()
+ self._handle_global_opts()
- self._dispath_func_notask()
+ self._dispath_func_notask()
+ finally:
+ self._clear_config_opts()
def api(self, argv, task_id): # pragma: no cover
'''run the api interface'''
- self._register_cli_opt()
+ try:
+ self._register_cli_opt()
- self._load_cli_config(argv)
+ self._load_cli_config(argv)
- self._handle_global_opts()
+ self._handle_global_opts()
- self._dispath_func_task(task_id)
+ self._dispath_func_task(task_id)
+ finally:
+ self._clear_config_opts()
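
Keeping every registered opt in self.opts and tearing them down in a finally block is what makes main() and the new api() entry point callable more than once in the same process: without CONF.clear() followed by unregister_opts(), oslo.config would refuse to re-register the CLI options on a second call. A minimal sketch of the pattern under those assumptions; the opt name and project are made up.

    from oslo_config import cfg

    CONF = cfg.CONF

    class Cli(object):
        def __init__(self):
            self.opts = []

        def _register_opt(self, opt):
            CONF.register_cli_opt(opt)
            self.opts.append(opt)

        def _clear_config_opts(self):
            CONF.clear()                      # forget parsed args and overrides
            CONF.unregister_opts(self.opts)   # allow re-registration later

        def main(self, argv):
            try:
                self._register_opt(cfg.BoolOpt('chatty', default=False))
                CONF(argv, project='demo')
                print('chatty=%s' % CONF.chatty)
            finally:
                self._clear_config_opts()

    Cli().main(['--chatty'])
    Cli().main([])    # a fresh instance can register the same opts again
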
diff --git a/yardstick/cmd/commands/env.py b/yardstick/cmd/commands/env.py
new file mode 100644
index 000000000..098379ae1
--- /dev/null
+++ b/yardstick/cmd/commands/env.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from yardstick.common.httpClient import HttpClient
+from yardstick.common import constants
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class EnvCommand(object):
+ '''
+
+ Set of commands to prepare environment
+ '''
+ def do_influxdb(self, args):
+ url = constants.YARDSTICK_ENV_ACTION_API
+ data = {'action': 'createInfluxDBContainer'}
+ HttpClient().post(url, data)
+ logger.debug('Now creating and configing influxdb')
+
+ def do_grafana(self, args):
+ url = constants.YARDSTICK_ENV_ACTION_API
+ data = {'action': 'createGrafanaContainer'}
+ HttpClient().post(url, data)
+ logger.debug('Now creating and configing grafana')
+
+ def do_prepare(self, args):
+ url = constants.YARDSTICK_ENV_ACTION_API
+ data = {'action': 'prepareYardstickEnv'}
+ HttpClient().post(url, data)
+ logger.debug('Now preparing environment')
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index 47fb2ee60..9524778ba 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -17,12 +17,15 @@ import ipaddress
import time
import logging
import uuid
+import errno
from itertools import ifilter
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.runners import base as base_runner
from yardstick.common.task_template import TaskTemplate
from yardstick.common.utils import cliargs
+from yardstick.common.utils import source_env
+from yardstick.common import constants
output_file_default = "/tmp/yardstick.out"
test_cases_dir_default = "tests/opnfv/test_cases/"
@@ -58,6 +61,8 @@ class TaskCommands(object):
self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
+ check_environment()
+
total_start_time = time.time()
parser = TaskParser(args.inputfile[0])
@@ -483,3 +488,14 @@ def parse_task_args(src_name, args):
% {"src": src_name, "src_type": type(kw)})
raise TypeError()
return kw
+
+
+def check_environment():
+ auth_url = os.environ.get('OS_AUTH_URL', None)
+ if not auth_url:
+ try:
+ source_env(constants.OPENSTACK_RC_FILE)
+ except IOError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ LOG.debug('OPENRC file not found')
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 40b29a717..443b3e810 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -1,3 +1,38 @@
-CONFIG_SAMPLE = '/etc/yardstick/config.yaml'
+import os
-RELENG_DIR = 'releng.dir'
+DOCKER_URL = 'unix://var/run/docker.sock'
+
+# database config
+USER = 'root'
+PASSWORD = 'root'
+DATABASE = 'yardstick'
+
+INFLUXDB_IMAGE = 'tutum/influxdb'
+INFLUXDB_TAG = '0.13'
+
+GRAFANA_IMAGE = 'grafana/grafana'
+GRAFANA_TAGS = '3.1.1'
+
+dirname = os.path.dirname
+abspath = os.path.abspath
+sep = os.path.sep
+
+INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
+
+YARDSTICK_ROOT_PATH = dirname(dirname(dirname(abspath(__file__)))) + sep
+
+YARDSTICK_REPOS_DIR = '/home/opnfv/repos/yardstick'
+
+YARDSTICK_CONFIG_DIR = '/etc/yardstick/'
+
+YARDSTICK_CONFIG_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'yardstick.conf')
+
+RELENG_DIR = '/home/opnfv/repos/releng'
+
+OS_FETCH_SCRIPT = 'utils/fetch_os_creds.sh'
+
+LOAD_IMAGES_SCRIPT = 'tests/ci/load_images.sh'
+
+OPENSTACK_RC_FILE = os.path.join(YARDSTICK_CONFIG_DIR, 'openstack.creds')
+
+YARDSTICK_ENV_ACTION_API = 'http://localhost:5000/yardstick/env/action'
diff --git a/yardstick/common/httpClient.py b/yardstick/common/httpClient.py
new file mode 100644
index 000000000..ab2e9a379
--- /dev/null
+++ b/yardstick/common/httpClient.py
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import json
+import logging
+
+import requests
+
+logger = logging.getLogger(__name__)
+
+
+class HttpClient(object):
+
+ def post(self, url, data):
+ data = json.dumps(data)
+ headers = {'Content-Type': 'application/json'}
+ try:
+ response = requests.post(url, data=data, headers=headers)
+ result = response.json()
+ logger.debug('The result is: %s', result)
+
+ return result
+ except Exception as e:
+ logger.debug('Failed: %s', e)
+ raise
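
HttpClient is the piece the new env commands (and the API mode) use to talk to the local yardstick service: it serialises the payload, sets the JSON content type and returns the decoded response. A hedged usage sketch, assuming the yardstick API is listening on localhost:5000 as YARDSTICK_ENV_ACTION_API expects; this is roughly what the new `yardstick env influxdb` command ends up doing.

    from yardstick.common import constants
    from yardstick.common.httpClient import HttpClient

    data = {'action': 'createInfluxDBContainer'}
    # raises on connection errors, otherwise returns the decoded JSON body
    result = HttpClient().post(constants.YARDSTICK_ENV_ACTION_API, data)
    print(result)
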
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index d639fb66a..3ecb0ae20 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -18,10 +18,20 @@
import os
import sys
import yaml
+import errno
+import subprocess
+import logging
+
from oslo_utils import importutils
+from keystoneauth1 import identity
+from keystoneauth1 import session
+from neutronclient.v2_0 import client
import yardstick
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
# Decorator for cli-args
def cliargs(*args, **kwargs):
@@ -91,3 +101,43 @@ def get_para_from_yaml(file_path, args):
else:
print 'file not exist'
return None
+
+
+def makedirs(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def execute_command(cmd):
+ exec_msg = "Executing command: '%s'" % cmd
+ logger.debug(exec_msg)
+
+ output = subprocess.check_output(cmd.split()).split(os.linesep)
+
+ return output
+
+
+def source_env(env_file):
+ p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
+ shell=True)
+ output = p.communicate()[0]
+ env = dict((line.split('=', 1) for line in output.splitlines()))
+ os.environ.update(env)
+ return env
+
+
+def get_openstack_session():
+ auth = identity.Password(auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+ return session.Session(auth=auth)
+
+
+def get_neutron_client():
+ sess = get_openstack_session()
+ neutron_client = client.Client(session=sess)
+ return neutron_client
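
source_env() is what check_environment() in task.py above leans on: it runs `. <file>; env` in a shell, copies the resulting variables into os.environ and returns them, so helpers such as get_openstack_session() and get_neutron_client() can pick the credentials up from the environment. A short usage sketch, assuming an openrc-style file exists at constants.OPENSTACK_RC_FILE and that the credentials are valid:

    import os

    from yardstick.common import constants
    from yardstick.common.utils import source_env, get_neutron_client

    # mirror of check_environment(): only source the creds file when the
    # shell has not exported OS_AUTH_URL already
    if not os.environ.get('OS_AUTH_URL'):
        source_env(constants.OPENSTACK_RC_FILE)

    neutron = get_neutron_client()
    print(len(neutron.list_networks()['networks']), 'networks visible')
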
diff --git a/yardstick/dispatcher/file.py b/yardstick/dispatcher/file.py
index ab67796e9..c2cc265ba 100644
--- a/yardstick/dispatcher/file.py
+++ b/yardstick/dispatcher/file.py
@@ -17,6 +17,7 @@
# ceilometer/ceilometer/dispatcher/file.py
import logging
+import logging.handlers
import json
from oslo_config import cfg
diff --git a/yardstick/dispatcher/http.py b/yardstick/dispatcher/http.py
index 2298d00cc..98e772dd8 100644
--- a/yardstick/dispatcher/http.py
+++ b/yardstick/dispatcher/http.py
@@ -81,14 +81,14 @@ class HttpDispatcher(DispatchBase):
case_name = v["scenario_cfg"]["tc"]
break
if case_name == "":
- LOG.error('Test result : %s' % json.dumps(self.result))
+ LOG.error('Test result : %s', json.dumps(self.result))
LOG.error('The case_name cannot be found, no data will be posted.')
return
self.result["case_name"] = case_name
try:
- LOG.debug('Test result : %s' % json.dumps(self.result))
+ LOG.debug('Test result : %s', json.dumps(self.result))
res = requests.post(self.target,
data=json.dumps(self.result),
headers=self.headers,
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index 8673253b4..fc9f3e932 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -127,7 +127,7 @@ class InfluxdbDispatcher(DispatchBase):
return make_lines(msg).encode('utf-8')
def record_result_data(self, data):
- LOG.debug('Test result : %s' % json.dumps(data))
+ LOG.debug('Test result : %s', json.dumps(data))
self.raw_result.append(data)
if self.target == '':
# if the target was not set, do not do anything
@@ -148,13 +148,13 @@ class InfluxdbDispatcher(DispatchBase):
return 0
if self.tc == "":
- LOG.error('Test result : %s' % json.dumps(data))
+ LOG.error('Test result : %s', json.dumps(data))
LOG.error('The case_name cannot be found, no data will be posted.')
return -1
try:
line = self._data_to_line_protocol(data)
- LOG.debug('Test result line format : %s' % line)
+ LOG.debug('Test result line format : %s', line)
res = requests.post(self.influxdb_url,
data=line,
auth=(self.username, self.password),
@@ -171,5 +171,5 @@ class InfluxdbDispatcher(DispatchBase):
return 0
def flush_result_data(self):
- LOG.debug('Test result all : %s' % json.dumps(self.raw_result))
+ LOG.debug('Test result all : %s', json.dumps(self.raw_result))
return 0
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 8b71fe606..46d53b7d2 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -29,24 +29,29 @@ Execute command and get output:
Execute command with huge output:
- class PseudoFile(object):
+ class PseudoFile(io.RawIOBase):
def write(chunk):
if "error" in chunk:
email_admin(chunk)
- ssh = sshclient.SSH("root", "example.com")
- ssh.run("tail -f /var/log/syslog", stdout=PseudoFile(), timeout=False)
+ ssh = SSH("root", "example.com")
+ with PseudoFile() as p:
+ ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
Execute local script on remote side:
ssh = sshclient.SSH("user", "example.com")
- status, out, err = ssh.execute("/bin/sh -s arg1 arg2",
- stdin=open("~/myscript.sh", "r"))
+
+ with open("~/myscript.sh", "r") as stdin_file:
+ status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+ stdin=stdin_file)
Upload file:
- ssh = sshclient.SSH("user", "example.com")
- ssh.run("cat > ~/upload/file.gz", stdin=open("/store/file.gz", "rb"))
+ ssh = SSH("user", "example.com")
+ # use rb for binary files
+ with open("/store/file.gz", "rb") as stdin_file:
+ ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
Eventlet:
@@ -54,20 +59,19 @@ Eventlet:
or
eventlet.monkey_patch()
or
- sshclient = eventlet.import_patched("opentstack.common.sshclient")
+ sshclient = eventlet.import_patched("yardstick.ssh")
"""
-
+import os
import select
import socket
import time
+import logging
import paramiko
from scp import SCPClient
import six
-import logging
-LOG = logging.getLogger(__name__)
DEFAULT_PORT = 22
@@ -84,7 +88,7 @@ class SSH(object):
"""Represent ssh connection."""
def __init__(self, user, host, port=DEFAULT_PORT, pkey=None,
- key_filename=None, password=None):
+ key_filename=None, password=None, name=None):
"""Initialize SSH client.
:param user: ssh username
@@ -94,6 +98,11 @@ class SSH(object):
:param key_filename: private key filename
:param password: password
"""
+ self.name = name
+ if name:
+ self.log = logging.getLogger(__name__ + '.' + self.name)
+ else:
+ self.log = logging.getLogger(__name__)
self.user = user
self.host = host
@@ -103,6 +112,13 @@ class SSH(object):
self.password = password
self.key_filename = key_filename
self._client = False
+ # paramiko loglevel debug will output ssh protocl debug
+ # we don't ever really want that unless we are debugging paramiko
+ # ssh issues
+ if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+ logging.getLogger("paramiko").setLevel(logging.DEBUG)
+ else:
+ logging.getLogger("paramiko").setLevel(logging.WARN)
def _get_pkey(self, key):
if isinstance(key, six.string_types):
@@ -140,10 +156,12 @@ class SSH(object):
self._client = False
def run(self, cmd, stdin=None, stdout=None, stderr=None,
- raise_on_error=True, timeout=3600):
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False):
"""Execute specified command on the server.
:param cmd: Command to be executed.
+ :type cmd: str
:param stdin: Open file or string to pass to stdin.
:param stdout: Open file to connect to stdout.
:param stderr: Open file to connect to stderr.
@@ -151,6 +169,8 @@ class SSH(object):
then exception will be raized if non-zero code.
:param timeout: Timeout in seconds for command execution.
Default 1 hour. No timeout if set to 0.
+ :param keep_stdin_open: don't close stdin on empty reads
+ :type keep_stdin_open: bool
"""
client = self._get_client()
@@ -160,10 +180,12 @@ class SSH(object):
return self._run(client, cmd, stdin=stdin, stdout=stdout,
stderr=stderr, raise_on_error=raise_on_error,
- timeout=timeout)
+ timeout=timeout,
+ keep_stdin_open=keep_stdin_open)
def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
- raise_on_error=True, timeout=3600):
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False):
transport = client.get_transport()
session = transport.open_session()
@@ -186,14 +208,14 @@ class SSH(object):
if session.recv_ready():
data = session.recv(4096)
- LOG.debug("stdout: %r" % data)
+ self.log.debug("stdout: %r", data)
if stdout is not None:
stdout.write(data)
continue
if session.recv_stderr_ready():
stderr_data = session.recv_stderr(4096)
- LOG.debug("stderr: %r" % stderr_data)
+ self.log.debug("stderr: %r", stderr_data)
if stderr is not None:
stderr.write(stderr_data)
continue
@@ -203,13 +225,15 @@ class SSH(object):
if not data_to_send:
data_to_send = stdin.read(4096)
if not data_to_send:
- stdin.close()
- session.shutdown_write()
- writes = []
- continue
- sent_bytes = session.send(data_to_send)
- # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
- data_to_send = data_to_send[sent_bytes:]
+ # we may need to keep stdin open
+ if not keep_stdin_open:
+ stdin.close()
+ session.shutdown_write()
+ writes = []
+ if data_to_send:
+ sent_bytes = session.send(data_to_send)
+ # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+ data_to_send = data_to_send[sent_bytes:]
if session.exit_status_ready():
break
@@ -256,10 +280,10 @@ class SSH(object):
try:
return self.execute("uname")
except (socket.error, SSHError) as e:
- LOG.debug("Ssh is still unavailable: %r" % e)
+ self.log.debug("Ssh is still unavailable: %r", e)
time.sleep(interval)
if time.time() > (start_time + timeout):
- raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+ raise SSHTimeout("Timeout waiting for '%s'", self.host)
def put(self, files, remote_path=b'.', recursive=False):
client = self._get_client()
@@ -271,3 +295,37 @@ class SSH(object):
def send_command(self, command):
client = self._get_client()
client.exec_command(command, get_pty=True)
+
+ def _put_file_sftp(self, localpath, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.put(localpath, remotepath)
+ if mode is None:
+ mode = 0o777 & os.stat(localpath).st_mode
+ sftp.chmod(remotepath, mode)
+
+ def _put_file_shell(self, localpath, remotepath, mode=None):
+ # quote to stop wordpslit
+ cmd = ['cat > "%s"' % remotepath]
+ if mode is not None:
+ # use -- so no options
+ cmd.append('chmod -- 0%o "%s"' % (mode, remotepath))
+
+ with open(localpath, "rb") as localfile:
+ # only chmod on successful cat
+ cmd = "&& ".join(cmd)
+ self.run(cmd, stdin=localfile)
+
+ def put_file(self, localpath, remotepath, mode=None):
+ """Copy specified local file to the server.
+
+ :param localpath: Local filename.
+ :param remotepath: Remote filename.
+ :param mode: Permissions to set after upload
+ """
+ import socket
+ try:
+ self._put_file_sftp(localpath, remotepath, mode=mode)
+ except (paramiko.SSHException, socket.error):
+ self._put_file_shell(localpath, remotepath, mode=mode)
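
put_file() gives callers a single entry point for uploads: it first tries a real SFTP transfer (keeping the local permissions unless a mode is given) and, when the server rejects SFTP, falls back to the `cat > remote && chmod` trick over the normal exec channel. A hedged usage sketch; the host, credentials, script name and remote path below are illustrative only.

    from yardstick import ssh

    conn = ssh.SSH('root', '10.0.0.5', password='root', name='demo-node')
    conn.wait(timeout=600)

    # SFTP first, shell fallback if the SFTP subsystem is unavailable
    conn.put_file('/tmp/check_service.sh', '/root/check_service.sh', mode=0o755)
    status, stdout, stderr = conn.execute('/root/check_service.sh nova-api')
    print(status, stdout)
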