Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/node.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_general.py | 3
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker_conf.yaml | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_general.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor_conf.yaml | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/operation_general.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation_conf.yaml | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py | 3
-rw-r--r--  yardstick/benchmark/scenarios/compute/computecapacity.py | 2
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_install.bash | 32
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_node.py | 203
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_remove.bash | 21
-rw-r--r--  yardstick/benchmark/scenarios/networking/sfc_openstack.py | 2
-rw-r--r--  yardstick/cmd/commands/plugin.py | 6
-rw-r--r--  yardstick/cmd/commands/task.py | 108
-rw-r--r--  yardstick/resources/scripts/install/sample.bash (renamed from yardstick/resources/script/install/sample.bash) | 0
-rw-r--r--  yardstick/resources/scripts/remove/sample.bash (renamed from yardstick/resources/script/remove/sample.bash) | 0
17 files changed, 356 insertions, 52 deletions
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index 54ee076f4..c3d652119 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -82,6 +82,8 @@ class NodeContext(Context):
LOG.error("Nodes: %r" % nodes)
sys.exit(-1)
- node = nodes[0]
+ # A clone is created in order to avoid affecting the
+ # original one.
+ node = dict(nodes[0])
node["name"] = attr_name
return node
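The node.py change above copies the matched node dict before renaming it, so the rename no longer leaks back into the shared nodes list. A minimal sketch of the difference, using a hypothetical nodes list (not from this patch):

# Mutating nodes[0] directly would rename the entry in the shared list.
nodes = [{"name": "node1", "ip": "10.0.0.1"}]
aliased = nodes[0]
aliased["name"] = "attacker-host"
assert nodes[0]["name"] == "attacker-host"   # original entry changed

# With the clone introduced here, the shared list keeps the original name.
nodes = [{"name": "node1", "ip": "10.0.0.1"}]
cloned = dict(nodes[0])
cloned["name"] = "attacker-host"
assert nodes[0]["name"] == "node1"           # original entry preserved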
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
index 018362a15..816e7e37d 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
@@ -31,6 +31,7 @@ class GeneralAttacker(BaseAttacker):
LOG.debug("ssh host success!")
self.key = self._config['key']
+ self.attack_key = self._config['attack_key']
if "action_parameter" in self._config:
actionParameter = self._config['action_parameter']
@@ -50,7 +51,7 @@ class GeneralAttacker(BaseAttacker):
l = list(item for item in rollbackParameter.values())
self.rollback_param = str.format(*l)
- self.fault_cfg = BaseAttacker.attacker_cfgs.get(self.key)
+ self.fault_cfg = BaseAttacker.attacker_cfgs.get(self.attack_key)
self.inject_script = self.get_script_fullpath(
self.fault_cfg['inject_script'])
self.recovery_script = self.get_script_fullpath(
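With this change the attacker reads two separate config fields: 'key' remains the per-test-case name, while 'attack_key' selects the fault entry in attacker_conf.yaml. A minimal sketch of the lookup; the 'kill-nova-api' instance name is hypothetical:

# Sketch of the key/attack_key split (instance name is hypothetical).
attacker_cfgs = {
    "stop-service": {
        "inject_script": "ha_tools/stop_service.bash",
        "recovery_script": "ha_tools/start_service.bash",
    },
}
_config = {
    "key": "kill-nova-api",        # instance name used by the test case
    "attack_key": "stop-service",  # entry name in attacker_conf.yaml
}
fault_cfg = attacker_cfgs.get(_config["attack_key"])
assert fault_cfg["inject_script"] == "ha_tools/stop_service.bash"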
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
index 16b3d735c..e5d1b9f0b 100644
--- a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -12,6 +12,6 @@ bare-metal-down:
check_script: ha_tools/check_host_ping.bash
recovery_script: ha_tools/ipmi_power.bash
-stop_service:
+stop-service:
inject_script: ha_tools/stop_service.bash
recovery_script: ha_tools/start_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
index 515514c29..61efc0520 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
@@ -27,6 +27,7 @@ class GeneralMonitor(basemonitor.BaseMonitor):
user = host.get("user", "root")
key_filename = host.get("key_filename", "~/.ssh/id_rsa")
self.key = self._config["key"]
+ self.monitor_key = self._config["monitor_key"]
self.monitor_type = self._config["monitor_type"]
if "parameter" in self._config:
@@ -35,7 +36,8 @@ class GeneralMonitor(basemonitor.BaseMonitor):
l = list(item for item in parameter.values())
self.cmd_param = str.format(*l)
- self.monitor_cfg = basemonitor.BaseMonitor.monitor_cfgs.get(self.key)
+ self.monitor_cfg = basemonitor.BaseMonitor.monitor_cfgs.get(
+ self.monitor_key)
self.monitor_script = self.get_script_fullpath(
self.monitor_cfg['monitor_script'])
self.connection = ssh.SSH(user, ip, key_filename=key_filename)
@@ -59,7 +61,7 @@ class GeneralMonitor(basemonitor.BaseMonitor):
def verify_SLA(self):
LOG.debug("the _result:%s" % self._result)
outage_time = self._result.get('outage_time', None)
- max_outage_time = self._config["sla"]["max_recover_time"]
+ max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time is None:
LOG.error("There is no outage_time in monitor result.")
return False
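With the rename, verify_SLA() compares the measured outage against sla.max_outage_time rather than max_recover_time. A minimal sketch of the check, with hypothetical numbers:

# Sketch of the SLA comparison after the rename (values are hypothetical).
_result = {"outage_time": 3.2}
_config = {"sla": {"max_outage_time": 5}}

outage_time = _result.get("outage_time", None)
max_outage_time = _config["sla"]["max_outage_time"]
sla_pass = outage_time is not None and outage_time <= max_outage_time
assert sla_pass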
diff --git a/yardstick/benchmark/scenarios/availability/monitor_conf.yaml b/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
index 9efceed88..d7bbdfe60 100644
--- a/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
@@ -3,9 +3,9 @@
#
schema: "yardstick:task:0.1"
-process_status:
+process-status:
monitor_script: ha_tools/check_process_python.bash
-nova_image_list:
+nova-image-list:
monitor_script: ha_tools/nova_image_list.bash
-service_status:
+service-status:
monitor_script: ha_tools/check_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/operation/operation_general.py b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
index d41371629..e43f6e1d5 100644
--- a/yardstick/benchmark/scenarios/availability/operation/operation_general.py
+++ b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
@@ -30,6 +30,7 @@ class GeneralOperaion(BaseOperation):
LOG.debug("ssh host success!")
self.key = self._config['key']
+ self.operation_key = self._config['operation_key']
if "action_parameter" in self._config:
actionParameter = self._config['action_parameter']
@@ -43,7 +44,8 @@ class GeneralOperaion(BaseOperation):
l = list(item for item in rollbackParameter.values())
self.rollback_param = str.format(*l)
- self.operation_cfgs = BaseOperation.operation_cfgs.get(self.key)
+ self.operation_cfgs = BaseOperation.operation_cfgs.get(
+ self.operation_key)
self.action_script = self.get_script_fullpath(
self.operation_cfgs['action_script'])
self.rollback_script = self.get_script_fullpath(
diff --git a/yardstick/benchmark/scenarios/availability/operation_conf.yaml b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
index 78c996d05..1e6746302 100644
--- a/yardstick/benchmark/scenarios/availability/operation_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
@@ -7,10 +7,10 @@ nova-create-instance:
action_script: ha_tools/nova/create_instance_from_image.bash
rollback_script: ha_tools/nova/delete_instance.bash
-swift_upload_file:
+swift-upload-file:
action_script: ha_tools/swift/upload.bash
rollback_script: ha_tools/swift/delete.bash
-swift_download_file:
+swift-download-file:
action_script: ha_tools/swift/download.bash
- rollback_script: ha_tools/file/remove_file.bash
\ No newline at end of file
+ rollback_script: ha_tools/file/remove_file.bash
diff --git a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
index 70bf9aea6..681fbf63f 100644
--- a/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
+++ b/yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
@@ -32,6 +32,7 @@ class GeneralResultChecker(BaseResultChecker):
LOG.debug("ssh host success!")
self.key = self._config['key']
+ self.resultchecker_key = self._config['checker_key']
self.type = self._config['checker_type']
self.condition = self._config['condition']
self.expectedResult = self._config['expectedValue']
@@ -45,7 +46,7 @@ class GeneralResultChecker(BaseResultChecker):
self.shell_cmd = str.format(*l)
self.resultchecker_cfgs = BaseResultChecker.resultchecker_cfgs.get(
- self.key)
+ self.resultchecker_key)
self.verify_script = self.get_script_fullpath(
self.resultchecker_cfgs['verify_script'])
diff --git a/yardstick/benchmark/scenarios/compute/computecapacity.py b/yardstick/benchmark/scenarios/compute/computecapacity.py
index 366b470e8..0d7d76143 100644
--- a/yardstick/benchmark/scenarios/compute/computecapacity.py
+++ b/yardstick/benchmark/scenarios/compute/computecapacity.py
@@ -38,7 +38,7 @@ class ComputeCapacity(base.Scenario):
ComputeCapacity.TARGET_SCRIPT)
nodes = self.context_cfg['nodes']
- node = nodes.get('host1', None)
+ node = nodes.get('host', None)
host_user = node.get('user', 'ubuntu')
host_ip = node.get('ip', None)
host_pwd = node.get('password', 'root')
diff --git a/yardstick/benchmark/scenarios/networking/netperf_install.bash b/yardstick/benchmark/scenarios/networking/netperf_install.bash
new file mode 100755
index 000000000..eaa9f530a
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/netperf_install.bash
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+echo "===Install netperf before test begin!!!==="
+cp /etc/apt/sources.list /etc/apt/sources.list_bkp
+cp /etc/resolv.conf /etc/resolv.conf_bkp
+echo "nameserver 8.8.4.4" >> /etc/resolv.conf
+
+cat <<EOF >/etc/apt/sources.list
+deb http://archive.ubuntu.com/ubuntu/ trusty main restricted universe multiverse
+deb http://archive.ubuntu.com/ubuntu/ trusty-security main restricted universe multiverse
+deb http://archive.ubuntu.com/ubuntu/ trusty-updates main restricted universe multiverse
+deb http://archive.ubuntu.com/ubuntu/ trusty-proposed main restricted universe multiverse
+deb http://archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse
+EOF
+
+sudo apt-get update
+sudo apt-get install -y netperf
+
+service netperf start
+
+echo "===Install netperf before test end!!!==="
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
new file mode 100755
index 000000000..87aa8d78d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -0,0 +1,203 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# bulk data test and req/rsp test are supported
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class NetperfNode(base.Scenario):
+ """Execute netperf between two nodes
+
+ Parameters
+ testname - to specify the test you wish to perform.
+ the valid testnames are TCP_STREAM, TCP_RR, UDP_STREAM, UDP_RR
+ type: string
+ unit: na
+ default: TCP_STREAM
+ send_msg_size - value set the local send size to value bytes.
+ type: int
+ unit: bytes
+ default: na
+ recv_msg_size - setting the receive size for the remote system.
+ type: int
+ unit: bytes
+ default: na
+ req_rsp_size - set the request and/or response sizes based on sizespec.
+ type: string
+ unit: na
+ default: na
+ duration - duration of the test
+ type: int
+ unit: seconds
+ default: 20
+
+ read link below for more netperf args description:
+ http://www.netperf.org/netperf/training/Netperf.html
+ """
+ __scenario_type__ = "NetperfNode"
+ TARGET_SCRIPT = 'netperf_benchmark.bash'
+ INSTALL_SCRIPT = 'netperf_install.bash'
+ REMOVE_SCRIPT = 'netperf_remove.bash'
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+ self.target_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ NetperfNode.TARGET_SCRIPT)
+ host = self.context_cfg['host']
+ host_user = host.get('user', 'ubuntu')
+ host_ip = host.get('ip', None)
+ target = self.context_cfg['target']
+ target_user = target.get('user', 'ubuntu')
+ target_ip = target.get('ip', None)
+ self.target_ip = target.get('ip', None)
+ host_password = host.get('password', None)
+ target_password = target.get('password', None)
+
+ LOG.info("host_pw:%s, target_pw:%s", host_password, target_password)
+ # netserver start automatically during the vm boot
+ LOG.info("user:%s, target:%s", target_user, target_ip)
+ self.server = ssh.SSH(target_user, target_ip,
+ password=target_password)
+ self.server.wait(timeout=600)
+
+ LOG.info("user:%s, host:%s", host_user, host_ip)
+ self.client = ssh.SSH(host_user, host_ip,
+ password=host_password)
+ self.client.wait(timeout=600)
+
+ # copy script to host
+ self.client.run("cat > ~/netperf.sh",
+ stdin=open(self.target_script, "rb"))
+
+ # copy script to host and client
+ self.install_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ NetperfNode.INSTALL_SCRIPT)
+ self.remove_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ NetperfNode.REMOVE_SCRIPT)
+
+ self.server.run("cat > ~/netperf_install.sh",
+ stdin=open(self.install_script, "rb"))
+ self.client.run("cat > ~/netperf_install.sh",
+ stdin=open(self.install_script, "rb"))
+ self.server.run("cat > ~/netperf_remove.sh",
+ stdin=open(self.remove_script, "rb"))
+ self.client.run("cat > ~/netperf_remove.sh",
+ stdin=open(self.remove_script, "rb"))
+ self.server.execute("sudo bash netperf_install.sh")
+ self.client.execute("sudo bash netperf_install.sh")
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+
+ if not self.setup_done:
+ self.setup()
+
+ # get global options
+ ipaddr = self.context_cfg['target'].get("ipaddr", '127.0.0.1')
+ ipaddr = self.target_ip
+ options = self.scenario_cfg['options']
+ testname = options.get("testname", 'TCP_STREAM')
+ duration_time = self.scenario_cfg["runner"].get("duration", None) \
+ if "runner" in self.scenario_cfg else None
+ arithmetic_time = options.get("duration", None)
+ if duration_time:
+ testlen = duration_time
+ elif arithmetic_time:
+ testlen = arithmetic_time
+ else:
+ testlen = 20
+
+ cmd_args = "-H %s -l %s -t %s" % (ipaddr, testlen, testname)
+
+ # get test specific options
+ default_args = "-O 'THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY'"
+ cmd_args += " -- %s" % default_args
+ option_pair_list = [("send_msg_size", "-m"),
+ ("recv_msg_size", "-M"),
+ ("req_rsp_size", "-r")]
+ for option_pair in option_pair_list:
+ if option_pair[0] in options:
+ cmd_args += " %s %s" % (option_pair[1],
+ options[option_pair[0]])
+
+ cmd = "sudo bash netperf.sh %s" % (cmd_args)
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ result.update(json.loads(stdout))
+
+ if result['mean_latency'] == '':
+ raise RuntimeError(stdout)
+
+ # sla check
+ mean_latency = float(result['mean_latency'])
+ if "sla" in self.scenario_cfg:
+ sla_max_mean_latency = int(
+ self.scenario_cfg["sla"]["mean_latency"])
+
+ assert mean_latency <= sla_max_mean_latency, \
+ "mean_latency %f > sla_max_mean_latency(%f); " % \
+ (mean_latency, sla_max_mean_latency)
+
+ def teardown(self):
+ '''remove netperf from nodes after test'''
+ self.server.execute("sudo bash netperf_remove.sh")
+ self.client.execute("sudo bash netperf_remove.sh")
+
+
+def _test(): # pragma: no cover
+ '''internal test function'''
+ ctx = {
+ "host": {
+ "ip": "192.168.10.10",
+ "user": "root",
+ "password": "root"
+ },
+ "target": {
+ "ip": "192.168.10.11",
+ "user": "root",
+ "password": "root"
+ }
+ }
+
+ logger = logging.getLogger("yardstick")
+ logger.setLevel(logging.DEBUG)
+
+ options = {
+ "testname": 'TCP_STREAM'
+ }
+
+ args = {"options": options}
+ result = {}
+
+ netperf = NetperfNode(args, ctx)
+ netperf.run(result)
+ print result
+
+if __name__ == '__main__':
+ _test()
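NetperfNode.run() assembles the netperf command line from the scenario options: target IP, test length, and test name first, then the optional per-test flags. A minimal sketch of that assembly, with hypothetical option values:

# Sketch of the cmd_args assembly in NetperfNode.run() (option values are hypothetical).
options = {"testname": "TCP_RR", "req_rsp_size": "32,1024"}
target_ip = "192.168.10.11"
testlen = 20  # default when neither the runner duration nor options['duration'] is set

cmd_args = "-H %s -l %s -t %s" % (target_ip, testlen, options.get("testname", "TCP_STREAM"))
cmd_args += " -- -O 'THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY'"
for opt, flag in [("send_msg_size", "-m"), ("recv_msg_size", "-M"), ("req_rsp_size", "-r")]:
    if opt in options:
        cmd_args += " %s %s" % (flag, options[opt])

print("sudo bash netperf.sh %s" % cmd_args)
# -> sudo bash netperf.sh -H 192.168.10.11 -l 20 -t TCP_RR
#    -- -O 'THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY' -r 32,1024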
diff --git a/yardstick/benchmark/scenarios/networking/netperf_remove.bash b/yardstick/benchmark/scenarios/networking/netperf_remove.bash
new file mode 100755
index 000000000..bb2299a2b
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/netperf_remove.bash
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+echo "===Remove netperf after install begin!==="
+cp /etc/apt/sources.list_bkp /etc/apt/sources.list
+cp /etc/resolv.conf_bkp /etc/resolv.conf
+
+service netperf stop
+
+sudo apt-get purge -y netperf
+
+echo "===Remove netperf after install end!!!==="
diff --git a/yardstick/benchmark/scenarios/networking/sfc_openstack.py b/yardstick/benchmark/scenarios/networking/sfc_openstack.py
index 2a5fbde1c..d1d45d8e4 100644
--- a/yardstick/benchmark/scenarios/networking/sfc_openstack.py
+++ b/yardstick/benchmark/scenarios/networking/sfc_openstack.py
@@ -1,5 +1,5 @@
import os
-from novaclient.v2 import client as novaclient
+from novaclient import client as novaclient
from neutronclient.v2_0 import client as neutronclient
diff --git a/yardstick/cmd/commands/plugin.py b/yardstick/cmd/commands/plugin.py
index e65c818fa..8e3ddb5a5 100644
--- a/yardstick/cmd/commands/plugin.py
+++ b/yardstick/cmd/commands/plugin.py
@@ -50,6 +50,8 @@ class PluginCommands(object):
print("Done, exiting")
+ @cliargs("input_file", type=str, help="path to plugin configuration file",
+ nargs=1)
def do_remove(self, args):
'''Remove a plugin.'''
@@ -74,7 +76,7 @@ class PluginCommands(object):
'''Deployment environment setup'''
target_script = plugin_name + ".bash"
self.script = pkg_resources.resource_filename(
- 'yardstick.resources', 'script/install/' + target_script)
+ 'yardstick.resources', 'scripts/install/' + target_script)
deployment_user = deployment.get("user")
deployment_ip = deployment.get("ip")
@@ -93,7 +95,7 @@ class PluginCommands(object):
'''Deployment environment setup'''
target_script = plugin_name + ".bash"
self.script = pkg_resources.resource_filename(
- 'yardstick.resources', 'script/remove/' + target_script)
+ 'yardstick.resources', 'scripts/remove/' + target_script)
deployment_user = deployment.get("user")
deployment_ip = deployment.get("ip")
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index 2bc5abe29..18b72e726 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -59,33 +59,29 @@ class TaskCommands(object):
total_start_time = time.time()
parser = TaskParser(args.inputfile[0])
- suite_params = {}
if args.suite:
- suite_params = parser.parse_suite()
- test_cases_dir = suite_params["test_cases_dir"]
- if test_cases_dir[-1] != os.sep:
- test_cases_dir += os.sep
- task_files = [test_cases_dir + task
- for task in suite_params["task_fnames"]]
+ # 1.parse suite, return suite_params info
+ task_files, task_args, task_args_fnames = \
+ parser.parse_suite()
else:
task_files = [parser.path]
+ task_args = [args.task_args]
+ task_args_fnames = [args.task_args_file]
- task_args = suite_params.get("task_args", [args.task_args])
- task_args_fnames = suite_params.get("task_args_fnames",
- [args.task_args_file])
+ LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
+ task_files, task_args, task_args_fnames)
if args.parse_only:
sys.exit(0)
if os.path.isfile(args.output_file):
os.remove(args.output_file)
-
+ # parse task_files
for i in range(0, len(task_files)):
one_task_start_time = time.time()
parser.path = task_files[i]
- task_name = os.path.splitext(os.path.basename(task_files[i]))[0]
scenarios, run_in_parallel, meet_precondition = parser.parse_task(
- task_name, task_args[i], task_args_fnames[i])
+ task_args[i], task_args_fnames[i])
if not meet_precondition:
LOG.info("meet_precondition is %s, please check envrionment",
@@ -167,10 +163,34 @@ class TaskParser(object):
def __init__(self, path):
self.path = path
+ def _meet_constraint(self, task, cur_pod, cur_installer):
+ if "constraint" in task:
+ constraint = task.get('constraint', None)
+ if constraint is not None:
+ tc_fit_pod = constraint.get('pod', None)
+ tc_fit_installer = constraint.get('installer', None)
+ LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
+ cur_pod, cur_installer, constraint)
+ if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+ return False
+ if cur_installer and tc_fit_installer and \
+ cur_installer not in tc_fit_installer:
+ return False
+ return True
+
+ def _get_task_para(self, task, cur_pod):
+ task_args = task.get('task_args', None)
+ if task_args is not None:
+ task_args = task_args.get(cur_pod, None)
+ task_args_fnames = task.get('task_args_fnames', None)
+ if task_args_fnames is not None:
+ task_args_fnames = task_args_fnames.get(cur_pod, None)
+ return task_args, task_args_fnames
+
def parse_suite(self):
'''parse the suite file and return a list of task config file paths
and lists of optional parameters if present'''
- print "Parsing suite file:", self.path
+ LOG.info("\nParsing suite file:%s", self.path)
try:
with open(self.path) as stream:
@@ -179,35 +199,40 @@ class TaskParser(object):
sys.exit(ioerror)
self._check_schema(cfg["schema"], "suite")
- print "Starting suite:", cfg["name"]
+ LOG.info("\nStarting scenario:%s", cfg["name"])
test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
- task_fnames = []
- task_args = []
- task_args_fnames = []
+ if test_cases_dir[-1] != os.sep:
+ test_cases_dir += os.sep
+
+ cur_pod = os.environ.get('NODE_NAME', None)
+ cur_installer = os.environ.get('INSTALL_TYPE', None)
+
+ valid_task_files = []
+ valid_task_args = []
+ valid_task_args_fnames = []
for task in cfg["test_cases"]:
- task_fnames.append(task["file_name"])
- if "task_args" in task:
- task_args.append(task["task_args"])
+ # 1.check file_name
+ if "file_name" in task:
+ task_fname = task.get('file_name', None)
+ if task_fname is None:
+ continue
else:
- task_args.append(None)
-
- if "task_args_file" in task:
- task_args_fnames.append(task["task_args_file"])
+ continue
+ # 2.check constraint
+ if self._meet_constraint(task, cur_pod, cur_installer):
+ valid_task_files.append(test_cases_dir + task_fname)
else:
- task_args_fnames.append(None)
-
- suite_params = {
- "test_cases_dir": test_cases_dir,
- "task_fnames": task_fnames,
- "task_args": task_args,
- "task_args_fnames": task_args_fnames
- }
+ continue
+ # 3.fetch task parameters
+ task_args, task_args_fnames = self._get_task_para(task, cur_pod)
+ valid_task_args.append(task_args)
+ valid_task_args_fnames.append(task_args_fnames)
- return suite_params
+ return valid_task_files, valid_task_args, valid_task_args_fnames
- def parse_task(self, task_name, task_args=None, task_args_file=None):
+ def parse_task(self, task_args=None, task_args_file=None):
'''parses the task file and return an context and scenario instances'''
print "Parsing task config:", self.path
@@ -262,6 +287,7 @@ class TaskParser(object):
# add tc and task id for influxdb extended tags
task_id = str(uuid.uuid4())
for scenario in cfg["scenarios"]:
+ task_name = os.path.splitext(os.path.basename(self.path))[0]
scenario["tc"] = task_name
scenario["task_id"] = task_id
@@ -282,8 +308,17 @@ class TaskParser(object):
precondition = cfg["precondition"]
installer_type = precondition.get("installer_type", None)
deploy_scenarios = precondition.get("deploy_scenarios", None)
+ tc_fit_pods = precondition.get("pod_name", None)
installer_type_env = os.environ.get('INSTALL_TYPE', None)
deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
+ pod_name_env = os.environ.get('NODE_NAME', None)
+
+ LOG.info("installer_type: %s, installer_type_env: %s",
+ installer_type, installer_type_env)
+ LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
+ deploy_scenarios, deploy_scenario_env)
+ LOG.info("tc_fit_pods: %s, pod_name_env: %s",
+ tc_fit_pods, pod_name_env)
if installer_type and installer_type_env:
if installer_type_env not in installer_type:
return False
@@ -293,6 +328,9 @@ class TaskParser(object):
if deploy_scenario_env.startswith(deploy_scenario):
return True
return False
+ if tc_fit_pods and pod_name_env:
+ if pod_name_env not in tc_fit_pods:
+ return False
return True
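The reworked parse_suite() now filters suite entries by the pod and installer taken from the NODE_NAME and INSTALL_TYPE environment variables. A minimal sketch of the constraint check, with hypothetical suite entries and environment defaults:

# Sketch of the constraint filtering added to parse_suite()
# (task entries and environment defaults are hypothetical).
import os

def meet_constraint(task, cur_pod, cur_installer):
    constraint = task.get("constraint")
    if not constraint:
        return True
    tc_fit_pod = constraint.get("pod")
    tc_fit_installer = constraint.get("installer")
    if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
        return False
    if cur_installer and tc_fit_installer and cur_installer not in tc_fit_installer:
        return False
    return True

tasks = [
    {"file_name": "tc050.yaml", "constraint": {"installer": "compass", "pod": "huawei-pod1"}},
    {"file_name": "tc019.yaml"},
]
cur_pod = os.environ.get("NODE_NAME", "huawei-pod1")
cur_installer = os.environ.get("INSTALL_TYPE", "fuel")

valid = [t["file_name"] for t in tasks if meet_constraint(t, cur_pod, cur_installer)]
# "fuel" is not a substring of "compass", so tc050.yaml is dropped;
# tc019.yaml has no constraint and is kept.
assert valid == ["tc019.yaml"]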
diff --git a/yardstick/resources/script/install/sample.bash b/yardstick/resources/scripts/install/sample.bash
index 21eb14680..21eb14680 100644
--- a/yardstick/resources/script/install/sample.bash
+++ b/yardstick/resources/scripts/install/sample.bash
diff --git a/yardstick/resources/script/remove/sample.bash b/yardstick/resources/scripts/remove/sample.bash
index 15618df2d..15618df2d 100644
--- a/yardstick/resources/script/remove/sample.bash
+++ b/yardstick/resources/scripts/remove/sample.bash