summaryrefslogtreecommitdiffstats
path: root/restful_server/qtip_server.py
diff options
context:
space:
mode:
Diffstat (limited to 'restful_server/qtip_server.py')
-rw-r--r--  restful_server/qtip_server.py  31
1 files changed, 20 insertions, 11 deletions
diff --git a/restful_server/qtip_server.py b/restful_server/qtip_server.py
index 734a471c..a059ca3c 100644
--- a/restful_server/qtip_server.py
+++ b/restful_server/qtip_server.py
@@ -24,12 +24,13 @@ class JobModel:
resource_fields = {
'installer_type': fields.String,
'installer_ip': fields.String,
- 'max-minutes': fields.Integer,
+ 'max_minutes': fields.Integer,
'pod_name': fields.String,
'suite_name': fields.String,
- 'type': fields.String
+ 'type': fields.String,
+ 'benchmark_name': fields.String
}
- required = ['installer_type', 'install_ip']
+ required = ['installer_type', 'installer_ip']
@swagger.model
@@ -91,7 +92,7 @@ class JobList(Resource):
"installer_ip": The installer ip of the pod,
-"max-minutes": If specified, the maximum duration in minutes
+"max_minutes": If specified, the maximum duration in minutes
for any single test iteration, default is '60',
"pod_name": If specified, the Pod name, default is 'default',
@@ -99,6 +100,8 @@ for any single test iteration, default is '60',
"suite_name": If specified, Test suite name, for example 'compute', 'network', 'storage',
default is 'compute'
"type": BM or VM,default is 'BM'
+"benchmark_name": If specified, benchmark name in suite, for example 'dhrystone_bm.yaml',
+default is all benchmarks in suite with specified type
""",
"required": True,
"type": "JobModel",
@@ -123,15 +126,16 @@ default is 'compute'
)
def post(self):
parser = reqparse.RequestParser()
- parser.add_argument('installer_type', type=str, required=True, help='Installer_type is required')
- parser.add_argument('installer_ip', type=str, required=True, help='Installer_ip is required')
- parser.add_argument('max-minutes', type=int, required=False, default=60, help='max-minutes should be integer')
+ parser.add_argument('installer_type', type=str, required=True, help='installer_type is required')
+ parser.add_argument('installer_ip', type=str, required=True, help='installer_ip is required')
+ parser.add_argument('max_minutes', type=int, required=False, default=60, help='max_minutes should be integer')
parser.add_argument('pod_name', type=str, required=False, default='default', help='pod_name should be string')
parser.add_argument('suite_name', type=str, required=False, default='compute', help='suite_name should be string')
parser.add_argument('type', type=str, required=False, default='BM', help='type should be BM, VM and ALL')
+ parser.add_argument('benchmark_name', type=str, required=False, default='all', help='benchmark_name should be string')
args = parser.parse_args()
- if not args_handler.check_suit_in_test_list(args["suite_name"]):
- return abort(404, 'message:Test Suit {0} does not exist in test_list'.format(args["suite_name"]))
+ if not args_handler.check_suite_in_test_list(args["suite_name"]):
+ return abort(404, 'message:Test suite {0} does not exist in test_list'.format(args["suite_name"]))
if not args_handler.check_lab_name(args["pod_name"]):
return abort(404, 'message: You have specified a lab {0}\
that is not present in test_cases'.format(args['pod_name']))
@@ -146,6 +150,11 @@ default is 'compute'
args["suite_name"],
args["type"].lower())
benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
+        if args["benchmark_name"] in benchmarks_list:
+            benchmarks_list = [args["benchmark_name"]]
+        if (args["benchmark_name"] != 'all') and args["benchmark_name"] not in benchmarks_list:
+            return abort(404, 'message: Benchmark name {0} does not exist in suite {1}'.format(args["benchmark_name"],
+                                                                                              args["suite_name"]))
state_detail = map(lambda x: {'benchmark': x, 'state': 'idle'}, benchmarks_list)
db.update_job_state_detail(job_id, copy(state_detail))
thread_stop = threading.Event()
@@ -162,14 +171,14 @@ default is 'compute'
for benchmark in benchmarks_list:
if db.is_job_timeout(job_id) or stop_event.is_set():
break
- db.update_benmark_state_in_state_detail(job_id, benchmark, 'processing')
+ db.update_benchmark_state(job_id, benchmark, 'processing')
result = args_handler.prepare_and_run_benchmark(installer_type,
'/home',
args_handler.get_benchmark_path(pod_name,
suite_name,
benchmark))
db.update_job_result_detail(job_id, benchmark, copy(result))
- db.update_benmark_state_in_state_detail(job_id, benchmark, 'finished')
+ db.update_benchmark_state(job_id, benchmark, 'finished')
db.finish_job(job_id)