-rw-r--r--  qtip/cli/commands/cmd_qpi.py    | 48
-rw-r--r--  resources/QPI/compute.yaml      |  7
-rw-r--r--  tests/unit/cli/cmd_qpi_test.py  | 24
3 files changed, 33 insertions(+), 46 deletions(-)
diff --git a/qtip/cli/commands/cmd_qpi.py b/qtip/cli/commands/cmd_qpi.py
index d08842a4..4865b7ae 100644
--- a/qtip/cli/commands/cmd_qpi.py
+++ b/qtip/cli/commands/cmd_qpi.py
@@ -9,47 +9,41 @@
import click
-from colorama import Fore
import os
+from os import path
+from prettytable import PrettyTable
+import yaml
-from qtip.base.error import InvalidContentError
-from qtip.base.error import NotFoundError
-from qtip.cli import utils
-from qtip.loader.qpi import QPISpec
+QPI_PATH = path.join(path.dirname(__file__), '..', '..', '..',
+ 'resources', 'QPI')
@click.group()
def cli():
- ''' Collection of performance tests '''
+ """ Collection of performance tests """
pass
@cli.command('list', help='List all the QPI specs')
def cmd_list():
- qpis = QPISpec.list_all()
- table = utils.table('QPIs', qpis)
+ table = PrettyTable(['QPI', 'Description'])
+ table.align = 'l'
+ for qpi in os.listdir(QPI_PATH):
+ if qpi.endswith('yaml'):
+ with open('{}/{}'.format(QPI_PATH, qpi)) as conf:
+ details = yaml.safe_load(conf)
+ table.add_row([details['name'], details['description']])
click.echo(table)
@cli.command('show', help='View details of a QPI')
@click.argument('name')
def show(name):
- try:
- qpi = QPISpec('{}.yaml'.format(name))
- except NotFoundError as nf:
- click.echo(Fore.RED + "ERROR: qpi spec: " + nf.message)
- except InvalidContentError as ice:
- click.echo(Fore.RED + "ERROR: qpi spec: " + ice.message)
- else:
- cnt = qpi.content
- output = utils.render('qpi', cnt)
- click.echo(output)
-
-
-@cli.command('run', help='Run performance tests for the specified QPI')
-@click.argument('name')
-@click.option('-p', '--path', help='Path to store results')
-def run(name, path):
- runner_path = path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir,
- 'runner/runner.py')
- os.system('python {0} -b all -d {1}'.format(runner_path, path))
+ table = PrettyTable(['QPI', 'Description', 'Formula'])
+ table.align = 'l'
+ with open('{}/{}.yaml'.format(QPI_PATH, name)) as conf:
+ qpi = yaml.safe_load(conf)
+ for section in qpi['sections']:
+ table.add_row([section['name'], section['description'],
+ section['formula']])
+ click.echo(table)
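For readers skimming the diff: the rewritten commands drop the QPISpec loader and simply read each resources/QPI/*.yaml file with PyYAML, then render selected keys through prettytable. A minimal standalone sketch of the new list logic, assumed to be run from the repository root (the patch itself computes QPI_PATH relative to the module's location instead):

    # Standalone sketch of the new cmd_list(): read each QPI spec and tabulate
    # its top-level 'name' and 'description' (path assumed relative to repo root).
    import os
    from os import path

    import yaml
    from prettytable import PrettyTable

    QPI_PATH = path.join('resources', 'QPI')

    table = PrettyTable(['QPI', 'Description'])
    table.align = 'l'
    for spec in os.listdir(QPI_PATH):
        if spec.endswith('yaml'):
            with open(path.join(QPI_PATH, spec)) as conf:
                details = yaml.safe_load(conf)
            table.add_row([details['name'], details['description']])
    print(table)
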
diff --git a/resources/QPI/compute.yaml b/resources/QPI/compute.yaml
index 8169da4f..50c1cfbf 100644
--- a/resources/QPI/compute.yaml
+++ b/resources/QPI/compute.yaml
@@ -29,7 +29,7 @@ sections: # split based on different application
metrics:
- name: ssl_rsa
description: performance of cryptographic using RSA cipher algorithm
- formual: geometric mean
+ formula: geometric mean
workloads:
- name: rsa_sign_512
description: rsa 512 bits sign per second
@@ -50,7 +50,7 @@ sections: # split based on different application
- name: ssl_aes
description: >
performance of advanced encryption standard (AES) cipher algorithm in cipher block chaining (CBC) mode
- formual: geometric mean
+ formula: geometric mean
workloads:
- name: aes_128_cbc_16_bytes
description: aes 128 bits key cbc on 16 bytes blocks
@@ -64,6 +64,7 @@ sections: # split based on different application
description: aes 128 bits key cbc on 8192 bytes blocks
- name: DPI
description: deep packet inspection
+ formula: geometric mean
metrics:
- name: dpi_throughput
description: deep packet inspection throughput
@@ -74,6 +75,7 @@ sections: # split based on different application
description: DPI bits per second
- name: memory
description: cache and memory performance
+ formula: geometric mean
metrics:
- name: floatmem
description: >
@@ -123,6 +125,7 @@ sections: # split based on different application
(A = m*B + C).
- name: arithmetic
description: arithmetic computing speed
+ formula: geometric mean
metrics:
- name: integer
description: >
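With the "formual" typo corrected and "formula" added to the remaining sections, every section in compute.yaml now carries the three keys that the new show command puts into its table. A small hedged check of that invariant (spec path again assumed relative to the repository root):

    # Verify each section exposes the keys cmd_qpi.show() reads.
    import yaml

    with open('resources/QPI/compute.yaml') as conf:  # assumed run from repo root
        qpi = yaml.safe_load(conf)

    for section in qpi['sections']:
        for key in ('name', 'description', 'formula'):
            assert key in section, 'section {!r} is missing {!r}'.format(
                section.get('name'), key)
    print('{} sections look complete'.format(len(qpi['sections'])))
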
diff --git a/tests/unit/cli/cmd_qpi_test.py b/tests/unit/cli/cmd_qpi_test.py
index e7823c9b..ba5f814e 100644
--- a/tests/unit/cli/cmd_qpi_test.py
+++ b/tests/unit/cli/cmd_qpi_test.py
@@ -20,24 +20,14 @@ def runner():
def test_list(runner):
result = runner.invoke(cli, ['qpi', 'list'])
- assert 'QPIs' and 'compute' in result.output
-
-
-def test_run(runner):
- result = runner.invoke(cli, ['qpi', 'run', 'fake-qpi'])
- assert result.output == ''
-
- result = runner.invoke(cli, ['qpi', 'run'])
- assert 'Missing argument "name".' in result.output
+ assert 'QPI' in result.output
+ assert 'compute' in result.output
+ assert 'Description' in result.output
+ assert 'QTIP Performance Index of compute' in result.output
def test_show(runner):
result = runner.invoke(cli, ['qpi', 'show', 'compute'])
- assert 'Name: compute' in result.output
- assert 'Description: sample performance index of computing' in result.output
-
- result = runner.invoke(cli, ['qpi', 'show'])
- assert 'Missing argument "name".' in result.output
-
- result = runner.invoke(cli, ['qpi', 'show', 'xyz'])
- assert "ERROR: qpi spec: xyz not found" in result.output
+ assert 'QPI' in result.output
+ assert 'Description' in result.output
+ assert 'Formula' in result.output
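
The runner fixture and imports sit above the hunk shown here; for completeness, a sketch of how these tests are typically wired with click's CliRunner. The import path of the top-level cli group is not visible in the diff, so qtip.cli.entry below is an assumption:

    # Sketch only: the real fixture and imports live outside the hunk above.
    import pytest
    from click.testing import CliRunner

    from qtip.cli.entry import cli  # assumed module; adjust to the actual entry point


    @pytest.fixture()
    def runner():
        return CliRunner()


    def test_show_compute(runner):
        result = runner.invoke(cli, ['qpi', 'show', 'compute'])
        assert result.exit_code == 0
        assert 'Formula' in result.output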