aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--qtip/cli/commands/cmd_metric.py15
-rw-r--r--qtip/cli/commands/cmd_qpi.py15
-rw-r--r--qtip/cli/commands/cmd_report.py9
-rw-r--r--qtip/cli/entry.py20
-rw-r--r--qtip/runner/project.py25
-rw-r--r--tests/unit/cli/cmd_project_test.py18
-rw-r--r--tests/unit/cli/options_test.py4
7 files changed, 51 insertions, 55 deletions
diff --git a/qtip/cli/commands/cmd_metric.py b/qtip/cli/commands/cmd_metric.py
index 1741fb48..0a385898 100644
--- a/qtip/cli/commands/cmd_metric.py
+++ b/qtip/cli/commands/cmd_metric.py
@@ -14,22 +14,17 @@ import os
from qtip.base.error import InvalidContentError
from qtip.base.error import NotFoundError
from qtip.cli import utils
-from qtip.cli.entry import Context
from qtip.loader.metric import MetricSpec
-pass_context = click.make_pass_decorator(Context, ensure=False)
-
@click.group()
-@pass_context
-def cli(ctx):
+def cli():
''' Performance Metrics Group '''
pass
@cli.command('list', help='List all the Metric Groups')
-@pass_context
-def cmd_list(ctx):
+def cmd_list():
metrics = MetricSpec.list_all()
table = utils.table('Metrics', metrics)
click.echo(table)
@@ -37,8 +32,7 @@ def cmd_list(ctx):
@cli.command('show', help='View details of a Metric')
@click.argument('name')
-@pass_context
-def show(ctx, name):
+def show(name):
try:
metric = MetricSpec('{}.yaml'.format(name))
except NotFoundError as nf:
@@ -54,8 +48,7 @@ def show(ctx, name):
@cli.command('run', help='Run performance test')
@click.argument('name')
@click.option('-p', '--path', help='Path to store results')
-@pass_context
-def run(ctx, name, path):
+def run(name, path):
runner_path = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
'runner/runner.py')
os.system('python {0} -b {1} -d {2}'.format(runner_path, name, path))
diff --git a/qtip/cli/commands/cmd_qpi.py b/qtip/cli/commands/cmd_qpi.py
index a47442b7..d08842a4 100644
--- a/qtip/cli/commands/cmd_qpi.py
+++ b/qtip/cli/commands/cmd_qpi.py
@@ -15,22 +15,17 @@ import os
from qtip.base.error import InvalidContentError
from qtip.base.error import NotFoundError
from qtip.cli import utils
-from qtip.cli.entry import Context
from qtip.loader.qpi import QPISpec
-pass_context = click.make_pass_decorator(Context, ensure=False)
-
@click.group()
-@pass_context
-def cli(ctx):
+def cli():
''' Collection of performance tests '''
pass
@cli.command('list', help='List all the QPI specs')
-@pass_context
-def cmd_list(ctx):
+def cmd_list():
qpis = QPISpec.list_all()
table = utils.table('QPIs', qpis)
click.echo(table)
@@ -38,8 +33,7 @@ def cmd_list(ctx):
@cli.command('show', help='View details of a QPI')
@click.argument('name')
-@pass_context
-def show(ctx, name):
+def show(name):
try:
qpi = QPISpec('{}.yaml'.format(name))
except NotFoundError as nf:
@@ -55,8 +49,7 @@ def show(ctx, name):
@cli.command('run', help='Run performance tests for the specified QPI')
@click.argument('name')
@click.option('-p', '--path', help='Path to store results')
-@pass_context
-def run(ctx, name, path):
+def run(name, path):
    runner_path = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
'runner/runner.py')
os.system('python {0} -b all -d {1}'.format(runner_path, path))
diff --git a/qtip/cli/commands/cmd_report.py b/qtip/cli/commands/cmd_report.py
index ebc0ef77..4176fd90 100644
--- a/qtip/cli/commands/cmd_report.py
+++ b/qtip/cli/commands/cmd_report.py
@@ -9,15 +9,11 @@
import click
-from qtip.cli.entry import Context
from qtip.reporter.console import ConsoleReporter
-pass_context = click.make_pass_decorator(Context, ensure=False)
-
@click.group()
-@pass_context
-def cli(ctx):
+def cli():
""" View QTIP results"""
pass
@@ -25,8 +21,7 @@ def cli(ctx):
@cli.command('show')
@click.argument('metric')
@click.option('-p', '--path', help='Path to result directory')
-@pass_context
-def show(ctx, metric, path):
+def show(metric, path):
reporter = ConsoleReporter({})
report = reporter.render(metric, path)
click.echo(report)
diff --git a/qtip/cli/entry.py b/qtip/cli/entry.py
index b557047d..0825d5e1 100644
--- a/qtip/cli/entry.py
+++ b/qtip/cli/entry.py
@@ -9,22 +9,12 @@
import click
import os
-import pkg_resources as pkg
import sys
from qtip.cli.commands.cmd_project import cli as project_commands
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
-# TODO (taseer) define user friendly error messages
sys.tracebacklimit = 0
-
-
-class Context(object):
- """ Load configuration and pass to subcommands """
-
-
-pass_context = click.make_pass_decorator(Context, ensure=True)
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'commands'))
@@ -51,9 +41,9 @@ class SubCommand(click.MultiCommand):
return mod.cli
-@click.command(cls=SubCommand, context_settings=CONTEXT_SETTINGS,
+@click.command(cls=SubCommand,
invoke_without_command=True)
-def sub_commands(ctx, verbose, debug):
+def sub_commands(debug):
pass
@@ -61,10 +51,8 @@ def sub_commands(ctx, verbose, debug):
help="Platform performance benchmarking",
sources=[sub_commands, project_commands],
invoke_without_command=True)
-@click.option('-v', '--verbose', is_flag=True, help='Enable verbose mode.')
@click.option('-d', '--debug', is_flag=True, help='Enable debug mode.')
-@click.version_option(pkg.require("qtip")[0])
-@pass_context
-def cli(ctx, verbose, debug):
+@click.version_option()
+def cli(debug):
if debug:
sys.tracebacklimit = 8
diff --git a/qtip/runner/project.py b/qtip/runner/project.py
index 90d1e079..a0617228 100644
--- a/qtip/runner/project.py
+++ b/qtip/runner/project.py
@@ -15,13 +15,26 @@ def convert(vals):
return " ".join(vals)
-def setup(extra_val=None):
- os.system('ansible-playbook setup.yml {}'.format(convert(extra_val)))
+ARGS = 'ansible-playbook {}.yml {}'
+NO_ARGS = 'ansible-playbook {}.yml'
-def run(extra_val=None):
- os.system('ansible-playbook run.yml {}'.format(convert(extra_val)))
+def setup(extra_val):
+ if extra_val:
+ os.system(ARGS.format('setup', convert(extra_val)))
+ else:
+ os.system(NO_ARGS.format('setup'))
-def teardown(extra_val=None):
- os.system('ansible-playbook teardown.yml {}'.format(convert(extra_val)))
+def run(extra_val):
+ if extra_val:
+ os.system(ARGS.format('run', convert(extra_val)))
+ else:
+ os.system(NO_ARGS.format('run'))
+
+
+def teardown(extra_val):
+ if extra_val:
+ os.system(ARGS.format('teardown', convert(extra_val)))
+ else:
+ os.system(NO_ARGS.format('teardown'))
diff --git a/tests/unit/cli/cmd_project_test.py b/tests/unit/cli/cmd_project_test.py
index 8b9216fa..102e9ed7 100644
--- a/tests/unit/cli/cmd_project_test.py
+++ b/tests/unit/cli/cmd_project_test.py
@@ -21,17 +21,35 @@ def runner():
def test_run(mocker, runner):
mocker.patch('os.system')
+ runner.invoke(cli, ['run'])
+ os.system.assert_called_once_with('ansible-playbook run.yml')
+
+
+def test_run_verbose(mocker, runner):
+ mocker.patch('os.system')
runner.invoke(cli, ['run', '-vvv'])
os.system.assert_called_once_with('ansible-playbook run.yml -vvv')
def test_setup(mocker, runner):
mocker.patch('os.system')
+ runner.invoke(cli, ['setup'])
+ os.system.assert_called_once_with('ansible-playbook setup.yml')
+
+
+def test_setup_verbose(mocker, runner):
+ mocker.patch('os.system')
runner.invoke(cli, ['setup', '-vvv'])
os.system.assert_called_once_with('ansible-playbook setup.yml -vvv')
def test_teardown(mocker, runner):
mocker.patch('os.system')
+ runner.invoke(cli, ['teardown'])
+ os.system.assert_called_once_with('ansible-playbook teardown.yml')
+
+
+def test_teardown_verbose(mocker, runner):
+ mocker.patch('os.system')
runner.invoke(cli, ['teardown', '-vvv'])
os.system.assert_called_once_with('ansible-playbook teardown.yml -vvv')
diff --git a/tests/unit/cli/options_test.py b/tests/unit/cli/options_test.py
index d7c0f700..6aef139c 100644
--- a/tests/unit/cli/options_test.py
+++ b/tests/unit/cli/options_test.py
@@ -21,10 +21,6 @@ class TestClass(object):
def runner(self):
return CliRunner()
- def test_verbose(self, runner):
- result = runner.invoke(cli, ['-v'])
- assert result.output == ''
-
def test_version(self, runner):
result = runner.invoke(cli, ['--version'])
assert re.search(r'\d+\.\d+\.\d+', result.output)