author | Morgan Richomme <morgan.richomme@orange.com> | 2015-10-17 18:19:12 +0200 |
---|---|---|
committer | Morgan Richomme <morgan.richomme@orange.com> | 2015-10-19 10:48:40 +0200 |
commit | 97ce096c0ed49a09301d762b812504f3038e9843 (patch) | |
tree | 36ea5a0c8cc463c8de6ac0c3e37a657a5f87ca8c /utils | |
parent | ed1f97c622b272f91fe3bcb6fb1b8b65a27bd624 (diff) |
add dashboard method to test result collection API
+ add new fields in pod section
JIRA: RELENG-45
Change-Id: I8e833207b7014d7fd07769af415a4892b9e9d924
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
Diffstat (limited to 'utils')
6 files changed, 345 insertions, 2 deletions
diff --git a/utils/test/result_collection_api/dashboard/__init__.py b/utils/test/result_collection_api/dashboard/__init__.py
new file mode 100644
index 000000000..05c0c9392
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
diff --git a/utils/test/result_collection_api/dashboard/dashboard_utils.py b/utils/test/result_collection_api/dashboard/dashboard_utils.py
new file mode 100644
index 000000000..06c90acf5
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/dashboard_utils.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to retrieve data from the test DB
+# and format them into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import os
+import re
+
+from functest2Dashboard import format_functest_for_dashboard, \
+    check_functest_case_exist
+
+# any test project wishing to provide dashboard ready values
+# must include at least 2 methods
+# - format_<Project>_for_dashboard
+# - check_<Project>_case_exist
+
+
+def check_dashboard_ready_project(test_project, path):
+    # Check that the first param corresponds to a project
+    # for which dashboard processing is available
+    subdirectories = os.listdir(path)
+    for testfile in subdirectories:
+        m = re.search('^(.*)(2Dashboard.py)$', testfile)
+        if m:
+            if (m.group(1) == test_project):
+                return True
+    return False
+
+
+def check_dashboard_ready_case(project, case):
+    cmd = "check_" + project + "_case_exist(case)"
+    return eval(cmd)
+
+
+def get_dashboard_cases(path):
+    # Retrieve all the test cases that could provide
+    # dashboard ready graphs: look in the releng repo and
+    # search all the <Project>2Dashboard.py files, assuming
+    # that dashboard processing of project <Project>
+    # is performed in the <Project>2Dashboard.py file
+    dashboard_test_cases = []
+    subdirectories = os.listdir(path)
+    for testfile in subdirectories:
+        m = re.search('^(.*)(2Dashboard.py)$', testfile)
+        if m:
+            dashboard_test_cases.append(m.group(1))
+
+    return dashboard_test_cases
+
+
+def get_dashboard_result(project, case, results):
+    # get the dashboard ready results
+    # parameters are:
+    # - project: project name
+    # - case: case name
+    # - results: array of raw results pre-filtered
+    #   according to the parameters of the request
+    cmd = "format_" + project + "_for_dashboard(case,results)"
+    res = eval(cmd)
+    return res
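
Both check_dashboard_ready_case and get_dashboard_result above dispatch by building a call string from the project name and passing it to eval(), relying on the <Project>2Dashboard.py naming convention. A minimal sketch of the same convention resolved with importlib and getattr instead of eval; this helper is illustrative only and not part of the patch:

import importlib


def get_dashboard_result_by_name(project, case, results):
    # Hypothetical alternative: resolve format_<project>_for_dashboard
    # from the <project>2Dashboard module by name instead of eval().
    module = importlib.import_module(project + "2Dashboard")
    formatter = getattr(module, "format_" + project + "_for_dashboard", None)
    if formatter is None:
        return []
    return formatter(case, results)
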
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
new file mode 100644
index 000000000..427de76a5
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test cases of the Functest project
+# each test case needs a dedicated method
+# format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with methods for odl, Tempest, Rally and vPing
+#
+
+
+def get_functest_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["vPing", "Tempest", "odl", "Rally"]
+
+
+def format_functest_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the test case is properly declared first
+    then build the call to the specific method
+    """
+    if check_functest_case_exist(case):
+        cmd = "format_" + case + "_for_dashboard(results)"
+        res = eval(cmd)
+    else:
+        res = []
+        print "Test case not declared"
+    return res
+
+
+def check_functest_case_exist(case):
+    """
+    check if the test case exists
+    if the test case is not defined or not declared in the list
+    return False
+    """
+    functest_cases = get_functest_cases()
+
+    if (case is None or case not in functest_cases):
+        return False
+    else:
+        return True
+
+
+def format_Tempest_for_dashboard(results):
+    """
+    Post processing for the Tempest test case
+    """
+    test_data = [{'description': 'Tempest results for Dashboard'}]
+    return test_data
+
+
+def format_odl_for_dashboard(results):
+    """
+    Post processing for the odl test case
+    """
+    test_data = [{'description': 'odl results for Dashboard'}]
+    return test_data
+
+
+def format_Rally_for_dashboard(results):
+    """
+    Post processing for the Rally test case
+    """
+    test_data = [{'description': 'Rally results for Dashboard'}]
+    return test_data
+
+
+def format_vPing_for_dashboard(results):
+    """
+    Post processing for the vPing test case
+    """
+    test_data = [{'description': 'vPing results for Dashboard'}]
+
+    # Graph 1: Test_Duration = f(time)
+    # ********************************
+    new_element = []
+    for data in results:
+        new_element.append({'x': data['creation_date'],
+                            'y': data['details']['duration']})
+
+    test_data.append({'name': "vPing duration",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'ylabel': 'duration (s)'},
+                      'data_set': new_element})
+
+    # Graph 2: bar
+    # ************
+    nbTest = 0
+    nbTestOk = 0
+
+    for data in results:
+        nbTest += 1
+        if data['details']['status'] == "OK":
+            nbTestOk += 1
+
+    test_data.append({'name': "vPing status",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Nb tests': nbTest,
+                                    'Nb Success': nbTestOk}]})
+
+    return test_data
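
As a worked example of the vPing formatter above, two invented result records yield one duration graph and one status bar; all sample values are illustrative only:

# run from the dashboard/ directory so the import resolves
from functest2Dashboard import format_vPing_for_dashboard

sample_results = [
    {'creation_date': '2015-10-17 18:19:12',
     'details': {'duration': 15.5, 'status': 'OK'}},
    {'creation_date': '2015-10-18 09:00:00',
     'details': {'duration': 17.2, 'status': 'NOK'}},
]
data = format_vPing_for_dashboard(sample_results)
# data[0] -> {'description': 'vPing results for Dashboard'}
# data[1]['data_set'] -> [{'x': '2015-10-17 18:19:12', 'y': 15.5},
#                         {'x': '2015-10-18 09:00:00', 'y': 17.2}]
# data[2]['data_set'] -> [{'Nb tests': 2, 'Nb Success': 1}]
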
diff --git a/utils/test/result_collection_api/resources/handlers.py b/utils/test/result_collection_api/resources/handlers.py
index 27204a5b3..85c6172a5 100644
--- a/utils/test/result_collection_api/resources/handlers.py
+++ b/utils/test/result_collection_api/resources/handlers.py
@@ -18,6 +18,10 @@ from common.constants import DEFAULT_REPRESENTATION, HTTP_BAD_REQUEST, \
     HTTP_NOT_FOUND, HTTP_FORBIDDEN
 from common.config import prepare_put_request
 
+from dashboard.dashboard_utils import get_dashboard_cases, \
+    check_dashboard_ready_project, check_dashboard_ready_case, \
+    get_dashboard_result
+
 
 class GenericApiHandler(RequestHandler):
     """
@@ -630,3 +634,133 @@ class TestResultsHandler(GenericApiHandler):
             test_result._id = result
 
         self.finish_request(test_result.format_http())
+
+
+class DashboardHandler(GenericApiHandler):
+    """
+    DashboardHandler Class
+    Handle the requests about the Test project's results
+    in a dashboard ready format
+    HTTP Methods:
+    - GET: get all test results and details about a specific one
+    """
+    def initialize(self):
+        """ Prepares the database for the entire class """
+        super(DashboardHandler, self).initialize()
+        self.name = "dashboard"
+
+    @asynchronous
+    @gen.coroutine
+    def get(self, result_id=None):
+        """
+        Retrieve dashboard ready result(s) for a test project
+        Available filters for this request are:
+        - project: project name
+        - case: case name
+        - pod: pod name
+        - version: platform version (Arno-R1, ...)
+        - installer: installer (fuel, ...)
+        - period: x (x last days)
+
+        :param result_id: Get a result by ID
+        :raise HTTPError
+
+        GET /dashboard?project=functest&case=vPing&version=Arno-R1 \
+            &pod=pod_name&period=15
+        => get results with optional filters
+        """
+
+        project_arg = self.get_query_argument("project", None)
+        case_arg = self.get_query_argument("case", None)
+        pod_arg = self.get_query_argument("pod", None)
+        version_arg = self.get_query_argument("version", None)
+        installer_arg = self.get_query_argument("installer", None)
+        period_arg = self.get_query_argument("period", None)
+
+        # prepare request
+        get_request = dict()
+
+        # /dashboard?project=<>&pod=<>...
+        if (result_id is None):
+            if project_arg is not None:
+                get_request["project_name"] = project_arg
+
+            if case_arg is not None:
+                get_request["case_name"] = case_arg
+
+            if pod_arg is not None:
+                get_request["pod_name"] = pod_arg
+
+            if version_arg is not None:
+                get_request["version"] = version_arg
+
+            if installer_arg is not None:
+                get_request["installer"] = installer_arg
+
+            if period_arg is not None:
+                try:
+                    period_arg = int(period_arg)
+                except:
+                    raise HTTPError(HTTP_BAD_REQUEST)
+                if period_arg > 0:
+                    period = datetime.now() - timedelta(days=period_arg)
+                    obj = {"$gte": period}
+                    get_request["creation_date"] = obj
+        else:
+            get_request["_id"] = result_id
+
+        dashboard = []
+
+        # on /dashboard retrieve the list of projects and testcases
+        # ready for dashboard
+        if project_arg is None:
+            raise HTTPError(HTTP_NOT_FOUND,
+                            "error:Project name missing")
+        elif check_dashboard_ready_project(project_arg, "./dashboard"):
+            res = []
+            # fetching results
+            cursor = self.db.test_results.find(get_request)
+            while (yield cursor.fetch_next):
+                test_result = TestResult.test_result_from_dict(
+                    cursor.next_object())
+                res.append(test_result.format_http())
+
+            if case_arg is None:
+                raise HTTPError(
+                    HTTP_NOT_FOUND,
+                    "error:Test case missing for project " + project_arg)
+            elif check_dashboard_ready_case(project_arg, case_arg):
+                dashboard = get_dashboard_result(project_arg, case_arg, res)
+            else:
+                raise HTTPError(
+                    HTTP_NOT_FOUND,
+                    "error:" + case_arg +
+                    " test case not dashboard ready on project " +
+                    project_arg)
+
+        else:
+            dashboard.append(
+                {"error": "Project not recognized or not dashboard ready"})
+            dashboard.append(
+                {"Dashboard-ready-projects":
+                    get_dashboard_cases("./dashboard")})
+            raise HTTPError(
+                HTTP_NOT_FOUND,
+                "error: no dashboard ready data for this project")
+
+        # fetching results
+        # cursor = self.db.test_results.find(get_request)
+        # while (yield cursor.fetch_next):
+        #     test_result = TestResult.test_result_from_dict(
+        #         cursor.next_object())
+        #     res.append(test_result.format_http())
+
+        # building meta object
+        meta = dict()
+
+        # final response object
+        answer = dict()
+        answer["dashboard"] = dashboard
+        answer["meta"] = meta
+
+        self.finish_request(answer)
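
For reference, a dashboard query against this new handler looks like the sketch below. The host and port are deployment-specific assumptions, not values taken from the patch:

import requests  # any HTTP client works; requests is used for brevity

resp = requests.get("http://localhost:8000/dashboard",
                    params={"project": "functest", "case": "vPing",
                            "pod": "opnfv-jump2", "period": 15})
body = resp.json()
# body["dashboard"] holds the formatted data sets for the case,
# body["meta"] is an empty dict for now (reserved by the handler)
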
diff --git a/utils/test/result_collection_api/resources/models.py b/utils/test/result_collection_api/resources/models.py
index 3b4d843f3..6829416bc 100644
--- a/utils/test/result_collection_api/resources/models.py
+++ b/utils/test/result_collection_api/resources/models.py
@@ -14,6 +14,8 @@ class Pod:
         self._id = ""
         self.name = ""
         self.creation_date = ""
+        self.mode = ""
+        self.details = ""
 
     @staticmethod
     def pod_from_dict(pod_dict):
@@ -24,11 +26,15 @@ class Pod:
         p._id = pod_dict.get('_id')
         p.creation_date = str(pod_dict.get('creation_date'))
         p.name = pod_dict.get('name')
+        p.mode = pod_dict.get('mode')
+        p.details = pod_dict.get('details')
 
         return p
 
     def format(self):
         return {
             "name": self.name,
+            "mode": self.mode,
+            "details": self.details,
             "creation_date": str(self.creation_date),
         }
@@ -36,6 +42,8 @@ class Pod:
     def format_http(self):
         return {
             "_id": str(self._id),
             "name": self.name,
+            "mode": self.mode,
+            "details": self.details,
             "creation_date": str(self.creation_date),
         }
diff --git a/utils/test/result_collection_api/result_collection_api.py b/utils/test/result_collection_api/result_collection_api.py
index 49695772d..69c03b899 100644
--- a/utils/test/result_collection_api/result_collection_api.py
+++ b/utils/test/result_collection_api/result_collection_api.py
@@ -34,7 +34,7 @@ import motor
 import argparse
 
 from resources.handlers import VersionHandler, PodHandler, \
-    TestProjectHandler, TestCasesHandler, TestResultsHandler
+    TestProjectHandler, TestCasesHandler, TestResultsHandler, DashboardHandler
 
 from common.config import APIConfig
@@ -80,10 +80,19 @@ def make_app():
         # => get results with optional filters
         # POST /results =>
         # Push results with mandatory request payload parameters
-        # (project, case, and pod_id)
+        # (project, case, and pod)
         (r"/results", TestResultsHandler),
         (r"/results([^/]*)", TestResultsHandler),
         (r"/results/([^/]*)", TestResultsHandler),
+
+        # Methods to manage dashboard ready results
+        # GET /dashboard?project=functest&case=vPing&pod=opnfv-jump2
+        # => get results in dashboard ready format
+        # GET /dashboard
+        # => get the list of projects with dashboard ready results
+        (r"/dashboard", DashboardHandler),
+        (r"/dashboard([^/]*)", DashboardHandler),
+        (r"/dashboard/([^/]*)", DashboardHandler),
     ],
     db=db,
     debug=CONF.api_debug_on,
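
With the two new Pod fields, a pod document round-trips through the model as sketched below; all field values are invented for illustration:

from resources.models import Pod

pod = Pod.pod_from_dict({"name": "opnfv-jump2",       # invented values
                         "mode": "metal",
                         "details": "example pod",
                         "creation_date": "2015-10-19"})
print(pod.format())
# -> {'name': 'opnfv-jump2', 'mode': 'metal',
#     'details': 'example pod', 'creation_date': '2015-10-19'}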