aboutsummaryrefslogtreecommitdiffstats
path: root/func
diff options
context:
space:
mode:
Diffstat (limited to 'func')
-rw-r--r--func/args_handler.py16
-rw-r--r--func/cli.py22
2 files changed, 28 insertions, 10 deletions
diff --git a/func/args_handler.py b/func/args_handler.py
index 50d803eb..59712800 100644
--- a/func/args_handler.py
+++ b/func/args_handler.py
@@ -14,14 +14,14 @@ from func.spawn_vm import SpawnVM
from func.driver import Driver
-def get_files_in_test_list(suit_name, case_type='all'):
- benchmark_list = json.load(file('test_list/{0}'.format(suit_name)))
+def get_files_in_test_list(suite_name, case_type='all'):
+ benchmark_list = json.load(file('test_list/{0}'.format(suite_name)))
return reduce(add, benchmark_list.values()) \
if case_type == 'all' else benchmark_list[case_type]
-def get_files_in_test_case(lab, suit_name, case_type='all'):
- test_case_all = os.listdir('./test_cases/{0}/{1}'.format(lab, suit_name))
+def get_files_in_test_case(lab, suite_name, case_type='all'):
+ test_case_all = os.listdir('./test_cases/{0}/{1}'.format(lab, suite_name))
return test_case_all if case_type == 'all' else \
filter(lambda x: case_type in x, test_case_all)
@@ -30,14 +30,18 @@ def get_benchmark_path(lab, suit, benchmark):
return './test_cases/{0}/{1}/{2}'.format(lab, suit, benchmark)
-def check_suit_in_test_list(suit_name):
- return True if os.path.isfile('test_list/' + suit_name) else False
+def check_suite_in_test_list(suite_name):
+ return True if os.path.isfile('test_list/' + suite_name) else False
def check_lab_name(lab_name):
return True if os.path.isdir('test_cases/' + lab_name) else False
+def check_benchmark_name(lab, file, benchmark):
+ return os.path.isfile('test_cases/' + lab + '/' + file + '/' + benchmark)
+
+
def _get_f_name(test_case_path):
return test_case_path.split('/')[-1]
diff --git a/func/cli.py b/func/cli.py
index f91615e2..9222da51 100644
--- a/func/cli.py
+++ b/func/cli.py
@@ -32,12 +32,15 @@ class cli:
'\n network '
'They contain all the tests that will be run. They are listed by suite.'
' Please ensure there are no empty lines')
+ parser.add_argument('-b', '--benchmark', help='Name of the benchmark.'
+ ' Can be found in test_lists/file_name')
+
return parser.parse_args(args)
def __init__(self, args=sys.argv[1:]):
args = self._parse_args(args)
- if not args_handler.check_suit_in_test_list(args.file):
+ if not args_handler.check_suite_in_test_list(args.file):
print('\n\n ERROR: Test File Does not exist in test_list/ please enter correct file \n\n')
sys.exit(1)
@@ -50,9 +53,20 @@ class cli:
test_cases = args_handler.get_files_in_test_case(args.lab, suite)
benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
- map(lambda x: args_handler.prepare_and_run_benchmark(
- os.environ['INSTALLER_TYPE'], os.environ['PWD'],
- args_handler.get_benchmark_path(args.lab.lower(), suite, x)), benchmarks_list)
+ if args.benchmark:
+ if not args_handler.check_benchmark_name(args.lab, args.file, args.benchmark):
+ print('\n\n You have specified an incorrect benchmark. Please'
+ ' enter the correct one.\n\n')
+ sys.exit(1)
+ else:
+ print("Starting with " + args.benchmark)
+ args_handler.prepare_and_run_benchmark(
+ os.environ['INSTALLER_TYPE'], os.environ['PWD'],
+ args_handler.get_benchmark_path(args.lab.lower(), args.file, args.benchmark))
+ else:
+ map(lambda x: args_handler.prepare_and_run_benchmark(
+ os.environ['INSTALLER_TYPE'], os.environ['PWD'],
+ args_handler.get_benchmark_path(args.lab.lower(), suite, x)), benchmarks_list)
print('{0} is not a Template in the Directory Enter a Valid file name.'
' or use qtip.py -h for list'.format(filter(lambda x: x not in test_cases, benchmarks)))