Diffstat (limited to 'func/args_handler.py')
-rw-r--r--  func/args_handler.py  18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/func/args_handler.py b/func/args_handler.py
index 59712800..624f90c4 100644
--- a/func/args_handler.py
+++ b/func/args_handler.py
@@ -14,8 +14,8 @@ from func.spawn_vm import SpawnVM
from func.driver import Driver
-def get_files_in_test_list(suite_name, case_type='all'):
-    benchmark_list = json.load(file('test_list/{0}'.format(suite_name)))
+def get_files_in_suite(suite_name, case_type='all'):
+    benchmark_list = json.load(file('benchmarks/suite/{0}'.format(suite_name)))
     return reduce(add, benchmark_list.values()) \
         if case_type == 'all' else benchmark_list[case_type]
@@ -30,8 +30,8 @@ def get_benchmark_path(lab, suit, benchmark):
     return './test_cases/{0}/{1}/{2}'.format(lab, suit, benchmark)
-def check_suite_in_test_list(suite_name):
-    return True if os.path.isfile('test_list/' + suite_name) else False
+def check_suite(suite_name):
+    return True if os.path.isfile('benchmarks/suite/' + suite_name) else False
def check_lab_name(lab_name):
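
For context, a minimal, self-contained sketch (not the repository code) of how the renamed helpers behave against the new benchmarks/suite/ layout. The SUITE_DIR constant, the 'compute' suite name, and the sample JSON contents are illustrative assumptions, and Python 3 open()/functools.reduce stand in for the Python 2 file() and built-in reduce used in the diff above.

import json
import os
from functools import reduce
from operator import add

SUITE_DIR = 'benchmarks/suite'  # new location introduced by this change


def check_suite(suite_name):
    # True only when a suite definition file exists under benchmarks/suite/
    return os.path.isfile(os.path.join(SUITE_DIR, suite_name))


def get_files_in_suite(suite_name, case_type='all'):
    # Load the suite JSON and return either every test case or one category
    with open(os.path.join(SUITE_DIR, suite_name)) as f:
        benchmark_list = json.load(f)
    # 'all' flattens every category's list into a single list of cases
    return reduce(add, benchmark_list.values()) \
        if case_type == 'all' else benchmark_list[case_type]


if __name__ == '__main__':
    # Hypothetical suite file, e.g. benchmarks/suite/compute containing:
    #   {"workload": ["dhrystone.yaml"], "memory": ["ramspeed.yaml"]}
    if check_suite('compute'):
        print(get_files_in_suite('compute', 'all'))
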
@@ -59,10 +59,12 @@ def prepare_ansible_env(benchmark_test_case):
def run_benchmark(installer_type, pwd, benchmark, benchmark_details,
                  proxy_info, env_setup, benchmark_test_case):
    driver = Driver()
-    return driver.drive_bench(installer_type, pwd, benchmark,
-                              env_setup.roles_dict.items(),
-                              _get_f_name(benchmark_test_case),
-                              benchmark_details, env_setup.ip_pw_dict.items(), proxy_info)
+    result = driver.drive_bench(installer_type, pwd, benchmark,
+                                env_setup.roles_dict.items(),
+                                _get_f_name(benchmark_test_case),
+                                benchmark_details, env_setup.ip_pw_dict.items(), proxy_info)
+    env_setup.cleanup_authorized_keys()
+    return result
def prepare_and_run_benchmark(installer_type, pwd, benchmark_test_case):
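
And a rough sketch of the reworked run_benchmark() flow: the driver result is captured first so that env_setup.cleanup_authorized_keys() always runs before the function returns, presumably removing the temporary SSH access set up for the run. The Driver and EnvSetup stubs, the _get_f_name behavior, and all sample argument values below are placeholders, not the real classes from this repository.

class Driver(object):
    # stand-in for func.driver.Driver
    def drive_bench(self, *args):
        print('driving benchmark with %d arguments' % len(args))
        return {'status': 'ok'}


class EnvSetup(object):
    # stand-in exposing only the attributes run_benchmark() touches
    roles_dict = {'host': ['10.20.0.4']}
    ip_pw_dict = {'10.20.0.4': 'root'}

    def cleanup_authorized_keys(self):
        print('cleaning up authorized_keys on the test nodes')


def _get_f_name(benchmark_test_case):
    # assumed here to strip the directory from the test case path
    return benchmark_test_case.split('/')[-1]


def run_benchmark(installer_type, pwd, benchmark, benchmark_details,
                  proxy_info, env_setup, benchmark_test_case):
    driver = Driver()
    result = driver.drive_bench(installer_type, pwd, benchmark,
                                env_setup.roles_dict.items(),
                                _get_f_name(benchmark_test_case),
                                benchmark_details,
                                env_setup.ip_pw_dict.items(), proxy_info)
    env_setup.cleanup_authorized_keys()  # cleanup now happens before returning
    return result


if __name__ == '__main__':
    print(run_benchmark('fuel', '/root', 'dhrystone', {}, None, EnvSetup(),
                        'test_cases/zte-pod1/compute/dhrystone_bm.yaml'))
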