From 9edb0f9e48cc923ed443d3d66886713cde9b628d Mon Sep 17 00:00:00 2001
From: Yujun Zhang
Date: Thu, 10 Nov 2016 10:59:26 +0800
Subject: Adjust folder structure

Note: this patchset may break existing features, but it will help to
accelerate the refactoring work.

JIRA: QTIP-131

Change-Id: Ie0cd9d185e6b02316878daef905e26f4e533a66b
Signed-off-by: Yujun Zhang
---
 restful_server/__init__.py       |   0
 restful_server/db.py             |  98 -------------------
 restful_server/qtip_server.py    | 206 ---------------------------------------
 restful_server/result_handler.py |  22 -----
 4 files changed, 326 deletions(-)
 delete mode 100644 restful_server/__init__.py
 delete mode 100644 restful_server/db.py
 delete mode 100644 restful_server/qtip_server.py
 delete mode 100644 restful_server/result_handler.py

diff --git a/restful_server/__init__.py b/restful_server/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/restful_server/db.py b/restful_server/db.py
deleted file mode 100644
index 24fc27a5..00000000
--- a/restful_server/db.py
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from datetime import datetime
-from operator import add
-import uuid
-
-jobs = {}
-threads = {}
-
-
-def create_job(args):
-    if len(filter(lambda x: jobs[x]['state'] == 'processing', jobs.keys())) > 0:
-        return None
-    else:
-        job = {'job_id': str(uuid.uuid4()),
-               'installer_type': args["installer_type"],
-               'installer_ip': args["installer_ip"],
-               'pod_name': args["pod_name"],
-               'suite_name': args["suite_name"],
-               'max_minutes': args["max_minutes"],
-               'type': args["type"],
-               'testdb_url': args["testdb_url"],
-               'node_name': args["node_name"],
-               'start_time': str(datetime.now()),
-               'end_time': None,
-               'state': 'processing',
-               'state_detail': [],
-               'result': None,
-               'result_detail': []}
-        jobs[job['job_id']] = job
-        return job['job_id']
-
-
-def delete_job(job_id):
-    if job_id in threads:
-        stop_thread(job_id)
-    if job_id in jobs:
-        jobs[job_id]['end_time'] = str(datetime.now())
-        jobs[job_id]['state'] = 'terminated'
-        return True
-    else:
-        return False
-
-
-def get_job_info(job_id):
-    if job_id in jobs:
-        return jobs[job_id]
-    else:
-        return None
-
-
-def finish_job(job_id):
-    jobs[job_id]['end_time'] = str(datetime.now())
-    jobs[job_id]['state'] = 'finished'
-    jobs[job_id]['result'] = reduce(add, map(lambda x: x['result'],
-                                             jobs[job_id]['result_detail']))
-    del threads[job_id]
-
-
-def update_job_state_detail(job_id, state_detail):
-    jobs[job_id]['state_detail'] = state_detail
-
-
-def update_job_result_detail(job_id, benchmark, result):
-    result['benchmark'] = benchmark
-    jobs[job_id]['result_detail'].append(result)
-
-
-def is_job_timeout(job_id):
-    period = datetime.now() - datetime.strptime(jobs[job_id]['start_time'],
-                                                "%Y-%m-%d %H:%M:%S.%f")
-    return True if jobs[job_id]['max_minutes'] * 60 < period.total_seconds()\
-        else False
-
-
-def start_thread(job_id, thread, thread_stop):
-    threads[job_id] = {'thread': thread,
-                       'thread_stop': thread_stop}
-    thread.start()
-
-
-def stop_thread(job_id):
-    if threads[job_id]['thread'].isAlive():
-        threads[job_id]['thread_stop'].set()
-        threads[job_id]['thread'].join()
-    if job_id in threads:
-        del threads[job_id]
-
-
-def update_benchmark_state(job_id, benchmark, benchmark_state):
-    filter(lambda x: x["benchmark"] == benchmark,
-           get_job_info(job_id)["state_detail"])[0]['state'] = benchmark_state
diff --git a/restful_server/qtip_server.py b/restful_server/qtip_server.py
deleted file mode 100644
index 537b2c05..00000000
--- a/restful_server/qtip_server.py
+++ /dev/null
@@ -1,206 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from flask import Flask, abort
-from flask_restful import Api, Resource, fields, reqparse
-from flask_restful_swagger import swagger
-import threading
-from copy import copy
-import db
-import utils.args_handler as args_handler
-import restful_server.result_handler as result_handler
-
-
-app = Flask(__name__)
-api = swagger.docs(Api(app), apiVersion='0.1')
-
-
-@swagger.model
-class JobModel:
-    resource_fields = {
-        'installer_type': fields.String,
-        'installer_ip': fields.String,
-        'max_minutes': fields.Integer,
-        'pod_name': fields.String,
-        'suite_name': fields.String,
-        'type': fields.String,
-        'benchmark_name': fields.String,
-        'testdb_url': fields.String,
-        'node_name': fields.String
-    }
-    required = ['installer_type', 'installer_ip']
-
-
-@swagger.model
-class JobResponseModel:
-    resource_fields = {
-        'job_id': fields.String
-    }
-
-
-class Job(Resource):
-    @swagger.operation(
-        notes='get a job by ID',
-        nickname='get',
-        parameters=[],
-        responseMessages=[
-            {
-                "code": 200,
-                "message": "Job detail info."
-            },
-            {
-                "code": 404,
-                "message": "Can't not find the job id XXXXXXX"
-            }
-        ]
-    )
-    def get(self, id):
-        ret = db.get_job_info(id)
-        return ret if ret else abort(404, " Can't not find the job id %s" % id)
-
-    @swagger.operation(
-        notes='delete a job by ID',
-        nickname='delete',
-        parameters=[],
-        responseMessages=[
-            {
-                "code": 200,
-                "message": "Delete successfully"
-            },
-            {
-                "code": 404,
-                "message": "Can not find job_id XXXXXXXXX"
-            }
-        ]
-    )
-    def delete(self, id):
-        ret = db.delete_job(id)
-        return {'result': "Delete successfully"} if ret else abort(404, "Can not find job_id %s" % id)
-
-
-class JobList(Resource):
-    @swagger.operation(
-        note='create a job with parameters',
-        nickname='create',
-        parameters=[
-            {
-                "name": "body",
-                "description": """
-"installer_type": The installer type, for example fuel, compass..,
-
-"installer_ip": The installer ip of the pod,
-
-"max_minutes": If specified, the maximum duration in minutes
-for any single test iteration, default is '60',
-
-"pod_name": If specified, the Pod name, default is 'default',
-
-"suite_name": If specified, Test suite name, for example 'compute', 'network', 'storage',
-default is 'compute',
-
-"type": BM or VM,default is 'BM',
-
-"benchmark_name": If specified, benchmark name in suite, for example 'dhrystone_bm.yaml',
-default is all benchmarks in suite with specified type,
-
-"testdb_url": test db http url, for example 'http://testresults.opnfv.org/test/api/v1',
-
-"node_name": node name reported to test db
-                """,
-                "required": True,
-                "type": "JobModel",
-                "paramType": "body"
-            }
-        ],
-        type=JobResponseModel.__name__,
-        responseMessages=[
-            {
-                "code": 200,
-                "message": "Job submitted"
-            },
-            {
-                "code": 400,
-                "message": "Missing configuration data"
-            },
-            {
-                "code": 409,
-                "message": "It already has one job running now!"
-            }
-        ]
-    )
-    def post(self):
-        parser = reqparse.RequestParser()
-        parser.add_argument('installer_type', type=str, required=True, help='installer_type is required')
-        parser.add_argument('installer_ip', type=str, required=True, help='installer_ip is required')
-        parser.add_argument('max_minutes', type=int, required=False, default=60, help='max_minutes should be integer')
-        parser.add_argument('pod_name', type=str, required=False, default='default', help='pod_name should be string')
-        parser.add_argument('suite_name', type=str, required=False, default='compute', help='suite_name should be string')
-        parser.add_argument('type', type=str, required=False, default='BM', help='type should be BM, VM and ALL')
-        parser.add_argument('benchmark_name', type=str, required=False, default='all', help='benchmark_name should be string')
-        parser.add_argument('testdb_url', type=str, required=False, default=None,
-                            help='testdb_url should be test db http url,for example http://testresults.opnfv.org/test/api/v1')
-        parser.add_argument('node_name', type=str, required=False, default=None, help='node_name should be string')
-        args = parser.parse_args()
-        if not args_handler.check_suite(args["suite_name"]):
-            return abort(404, 'message:Test suite {0} does not exist under benchmarks/suite'.format(args["suite_name"]))
-        if not args_handler.check_lab_name(args["pod_name"]):
-            return abort(404, 'message: You have specified a lab {0}\
-                that is not present in test_cases'.format(args['pod_name']))
-
-        job_id = db.create_job(args)
-        if not job_id:
-            return abort(409, 'message:It already has one job running now!')
-
-        benchmarks = args_handler.get_files_in_suite(args["suite_name"],
-                                                     args["type"].lower())
-        test_cases = args_handler.get_files_in_test_case(args["pod_name"],
-                                                         args["suite_name"],
-                                                         args["type"].lower())
-        benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
-        if args["benchmark_name"] in benchmarks_list:
-            benchmarks_list = [args["benchmark_name"]]
-        if (args["benchmark_name"] is not 'all') and args["benchmark_name"] not in benchmarks_list:
-            return abort(404, 'message: Benchmark name {0} does not exist in suit {1}'.format(args["benchmark_name"],
-                                                                                              args["suite_name"]))
-        state_detail = map(lambda x: {'benchmark': x, 'state': 'idle'}, benchmarks_list)
-        db.update_job_state_detail(job_id, copy(state_detail))
-        thread_stop = threading.Event()
-        post_thread = threading.Thread(target=self.thread_post, args=(args["installer_type"],
-                                                                      benchmarks_list,
-                                                                      args["pod_name"],
-                                                                      args["suite_name"],
-                                                                      job_id,
-                                                                      args["testdb_url"],
-                                                                      args["node_name"],
-                                                                      thread_stop))
-        db.start_thread(job_id, post_thread, thread_stop)
-        return {'job_id': str(job_id)}
-
-    def thread_post(self, installer_type, benchmarks_list, pod_name, suite_name,
-                    job_id, testdb_url, node_name, stop_event):
-        for benchmark in benchmarks_list:
-            if db.is_job_timeout(job_id) or stop_event.is_set():
-                break
-            db.update_benchmark_state(job_id, benchmark, 'processing')
-            result = args_handler.prepare_and_run_benchmark(installer_type,
-                                                            '/home',
-                                                            args_handler.get_benchmark_path(pod_name,
                                                                                             suite_name,
-                                                                                            benchmark))
-            db.update_job_result_detail(job_id, benchmark, copy(result))
-            db.update_benchmark_state(job_id, benchmark, 'finished')
-        if (result_handler.dump_suite_result(suite_name) and testdb_url):
-            result_handler.push_suite_result_to_db(suite_name, testdb_url, installer_type, node_name)
-        db.finish_job(job_id)
-
-
-api.add_resource(JobList, '/api/v1.0/jobs')
-api.add_resource(Job, '/api/v1.0/jobs/<string:id>')
-
-if __name__ == "__main__":
-    app.run(host='0.0.0.0')
diff --git a/restful_server/result_handler.py b/restful_server/result_handler.py
deleted file mode 100644
index 200330cb..00000000
--- a/restful_server/result_handler.py
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-import scripts.ref_results.suite_result as suite_result
-import dashboard.pushtoDB as push_to_db
-
-
-def dump_suite_result(suite_name):
-    return suite_result.get_suite_result(suite_name)
-
-
-def push_suite_result_to_db(suite_name, test_db_url, installer_type, node_name):
-    with open('results/{0}_result.json'.format(suite_name), 'r') as result_file:
-        j = json.load(result_file)
-        push_to_db.push_results_to_db(test_db_url, '{0}_test_suite'.format(suite_name),
-                                      j, installer_type, node_name)
--
cgit 1.2.3-korg
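
For reference, the qtip_server.py removed above exposed the job-management REST API: POST /api/v1.0/jobs submits a suite run, and GET or DELETE on /api/v1.0/jobs/<id> polls or cancels it. The sketch below shows how a client might have driven that API; it is an illustration only, not part of this patch. The host name is hypothetical, the port assumes Flask's default 5000 (app.run() is called without a port argument), and the requests package is assumed to be available.

import time

import requests

# Hypothetical endpoint; the deleted server binds 0.0.0.0 on Flask's default port 5000.
BASE = 'http://qtip-server.example.com:5000/api/v1.0/jobs'

# Submit a job; only installer_type and installer_ip are required by JobList.post(),
# the remaining fields fall back to the defaults defined in the request parser.
resp = requests.post(BASE, json={'installer_type': 'fuel',
                                 'installer_ip': '10.20.0.2',   # example value
                                 'suite_name': 'compute',
                                 'type': 'BM'})
resp.raise_for_status()
job_id = resp.json()['job_id']

# Poll until the worker thread marks the job 'finished' (or it is 'terminated').
job = requests.get('{0}/{1}'.format(BASE, job_id)).json()
while job['state'] == 'processing':
    time.sleep(30)
    job = requests.get('{0}/{1}'.format(BASE, job_id)).json()
print(job['result'], job['result_detail'])

# A running job can also be cancelled, which stops the worker thread:
# requests.delete('{0}/{1}'.format(BASE, job_id))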