Diffstat (limited to 'utils/test/result_collection_api')
6 files changed, 373 insertions, 6 deletions
diff --git a/utils/test/result_collection_api/dashboard/dashboard_utils.py b/utils/test/result_collection_api/dashboard/dashboard_utils.py
index 8d83b006a..472bbc727 100644
--- a/utils/test/result_collection_api/dashboard/dashboard_utils.py
+++ b/utils/test/result_collection_api/dashboard/dashboard_utils.py
@@ -26,6 +26,10 @@ from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
     check_bottlenecks_case_exist
 from qtip2Dashboard import format_qtip_for_dashboard, \
     check_qtip_case_exist
+from promise2Dashboard import format_promise_for_dashboard, \
+    check_promise_case_exist
+from doctor2Dashboard import format_doctor_for_dashboard, \
+    check_doctor_case_exist
 
 # any project test project wishing to provide dashboard ready values
 # must include at least 2 methods
diff --git a/utils/test/result_collection_api/dashboard/doctor2Dashboard.py b/utils/test/result_collection_api/dashboard/doctor2Dashboard.py
new file mode 100644
index 000000000..eba35b57b
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/doctor2Dashboard.py
@@ -0,0 +1,105 @@
+ #!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test case of the Doctor project
+# a new method format_<Test_case>_for_dashboard(results)
+#
+import re
+import datetime
+
+
+def get_doctor_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["doctor-notification","doctor-mark-down"]
+
+
+def format_doctor_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+
+    if check_doctor_case_exist(case):
+        # note we add _case because testcase and project had the same name
+        # TODO refactoring...looks fine at the beginning wit only 1 project
+        # not very ugly now and clearly not optimized...
+ cmd = "format_" + case.replace('-','_') + "_case_for_dashboard(results)" + res = eval(cmd) + else: + res = [] + return res + + +def check_doctor_case_exist(case): + """ + check if the testcase exists + if the test case is not defined or not declared in the list + return False + """ + doctor_cases = get_doctor_cases() + + if (case is None or case not in doctor_cases): + return False + else: + return True + + +def format_doctor_mark_down_case_for_dashboard(results): + """ + Post processing for the doctor test case + """ + test_data = [{'description': 'doctor-mark-down results for Dashboard'}] + return test_data + + +def format_doctor_notification_case_for_dashboard(results): + """ + Post processing for the doctor-notification test case + """ + test_data = [{'description': 'doctor results for Dashboard'}] + # Graph 1: (duration)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['duration'] + new_element.append({'x': data['creation_date'], + 'y': t}) + + test_data.append({'name': "doctor-notification duration ", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: bar + # ************ + nbTest = 0 + nbTestOk = 0 + + for data in results: + nbTest += 1 + if data['details']['status'] == "OK": + nbTestOk += 1 + + test_data.append({'name': "doctor-notification status", + 'info': {"type": "bar"}, + 'data_set': [{'Nb tests': nbTest, + 'Nb Success': nbTestOk}]}) + + return test_data diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py index c1d116112..3f4e1a2db 100644 --- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py +++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py @@ -14,13 +14,17 @@ # a new method format_<Test_case>_for_dashboard(results) # v0.1: basic example with methods for odl, Tempest, Rally and vPing # +import datetime +import re + def get_functest_cases(): """ get the list of the supported test cases TODO: update the list when adding a new test case for the dashboard """ - return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "odl", "Rally"] + return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL", + "ONOS", "Rally"] def format_functest_for_dashboard(case, results): @@ -64,7 +68,9 @@ def format_status_for_dashboard(results): test_data.append({'nb test suite(s) run': len(testcases)-1}) test_data.append({'vPing': '100%'}) test_data.append({'VIM status': '82%'}) - test_data.append({'SDN Controllers': {'odl':'92%', 'onos':'95%', 'opencontrail':'93%'}}) + test_data.append({'SDN Controllers': {'odl': '92%', + 'onos': '95%', + 'ocl': '93%'}}) test_data.append({'VNF deployment': '95%'}) return test_data @@ -210,11 +216,125 @@ def format_Tempest_for_dashboard(results): return test_data -def format_odl_for_dashboard(results): +def format_ODL_for_dashboard(results): + """ + Post processing for the ODL test case + """ + test_data = [{'description': 'ODL results for Dashboard'}] + + # Graph 1: (Nb test, nb failure)=f(time) + # *************************************** + new_element = [] + + for data in results: + odl_results = data['details']['details'] + nbFailures = 0 + for odl in odl_results: + if (odl['test_status']['@status'] == "FAIL"): + nbFailures += 1 + new_element.append({'x': data['creation_date'], + 'y1': len(odl_results), + 'y2': 
nbFailures}) + + test_data.append({'name': "ODL nb tests/nb failures", + 'info': {'type': "graph", + 'xlabel': 'time', + 'y1label': 'Number of tests', + 'y2label': 'Number of failures'}, + 'data_set': new_element}) + return test_data + + +def format_ONOS_for_dashboard(results): """ Post processing for the odl test case """ - test_data = [{'description': 'odl results for Dashboard'}] + test_data = [{'description': 'ONOS results for Dashboard'}] + # Graph 1: (duration FUNCvirtNet)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['FUNCvirNet']['duration'] + h, m, s = re.split(':', t) + s = round(float(s)) + new_duration = int(datetime.timedelta(hours=int(h), + minutes=int(m), + seconds=int(s)).total_seconds()) + new_element.append({'x': data['creation_date'], + 'y': new_duration}) + + test_data.append({'name': "ONOS FUNCvirNet duration ", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: (Nb test, nb failure)FuncvirtNet=f(time) + # *************************************** + new_element = [] + + for data in results: + onos_results = data['details']['FUNCvirNet']['status'] + nbFailures = 0 + for onos in onos_results: + if (onos['Case result'] == "FAIL"): + nbFailures += 1 + new_element.append({'x': data['creation_date'], + 'y1': len(onos_results), + 'y2': nbFailures}) + + test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures", + 'info': {'type': "graph", + 'xlabel': 'time', + 'y1label': 'Number of tests', + 'y2label': 'Number of failures'}, + 'data_set': new_element}) + + # Graph 3: (duration FUNCvirtNetL3)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['FUNCvirNetL3']['duration'] + h, m, s = re.split(':', t) + s = round(float(s)) + new_duration = int(datetime.timedelta(hours=int(h), + minutes=int(m), + seconds=int(s)).total_seconds()) + new_element.append({'x': data['creation_date'], + 'y': new_duration}) + + test_data.append({'name': "ONOS FUNCvirNetL3 duration", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 4: (Nb test, nb failure)FuncvirtNetL3=f(time) + # *************************************** + new_element = [] + + for data in results: + onos_results = data['details']['FUNCvirNetL3']['status'] + nbFailures = 0 + for onos in onos_results: + if (onos['Case result'] == "FAIL"): + nbFailures += 1 + new_element.append({'x': data['creation_date'], + 'y1': len(onos_results), + 'y2': nbFailures}) + + test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures", + 'info': {'type': "graph", + 'xlabel': 'time', + 'y1label': 'Number of tests', + 'y2label': 'Number of failures'}, + 'data_set': new_element}) return test_data @@ -223,6 +343,33 @@ def format_Rally_for_dashboard(results): Post processing for the Rally test case """ test_data = [{'description': 'Rally results for Dashboard'}] + # Graph 1: Test_Duration = f(time) + # ******************************** + new_element = [] + for data in results: + summary_cursor = len(data) + new_element.append({'x': data['creation_date'], + 'y': int(data['details'][summary_cursor]['summary']['duration'])}) + + test_data.append({'name': "rally duration", + 'info': {'type': "graph", + 'xlabel': 'time', + 'ylabel': 'duration (s)'}, 
+                      'data_set': new_element})
+
+    # Graph 2: Success rate = f(time)
+    # ********************************
+    new_element = []
+    for data in results:
+        new_element.append({'x': data['creation_date'],
+                            'y': float(data['details'][summary_cursor]['summary']['nb success'])})
+
+    test_data.append({'name': "rally success rate",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'ylabel': 'success rate (%)'},
+                      'data_set': new_element})
+
     return test_data
 
 
@@ -262,6 +409,7 @@ def format_vPing_for_dashboard(results):
     return test_data
 
 
+
 def format_vPing_userdata_for_dashboard(results):
     """
     Post processing for the vPing_userdata test case
diff --git a/utils/test/result_collection_api/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/dashboard/promise2Dashboard.py
new file mode 100644
index 000000000..84f43a7d1
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/promise2Dashboard.py
@@ -0,0 +1,103 @@
+ #!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test case of the Promise project
+# a new method format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with methods for odl, Tempest, Rally and vPing
+#
+import re
+import datetime
+
+
+def get_promise_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["promise"]
+
+
+def format_promise_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+    if check_promise_case_exist(case):
+        # note we add _case because testcase and project had the same name
+        # TODO refactoring...looks fine at the beginning wit only 1 project
+        # not very ugly now and clearly not optimized...
+ cmd = "format_" + case + "_case_for_dashboard(results)" + res = eval(cmd) + else: + res = [] + print "Test cases not declared" + return res + + +def check_promise_case_exist(case): + """ + check if the testcase exists + if the test case is not defined or not declared in the list + return False + """ + promise_cases = get_promise_cases() + + if (case is None or case not in promise_cases): + return False + else: + return True + + + + + +def format_promise_case_for_dashboard(results): + """ + Post processing for the promise test case + """ + test_data = [{'description': 'Promise results for Dashboard'}] + # Graph 1: (duration)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['duration'] + new_element.append({'x': data['creation_date'], + 'y': t}) + + test_data.append({'name': "Promise duration ", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: (Nb test, nb failure)=f(time) + # *************************************** + new_element = [] + + for data in results: + promise_results = data['details'] + new_element.append({'x': data['creation_date'], + 'y1': promise_results['tests'], + 'y2': promise_results['failures']}) + + test_data.append({'name': "Promise nb tests/nb failures", + 'info': {'type': "graph", + 'xlabel': 'time', + 'y1label': 'Number of tests', + 'y2label': 'Number of failures'}, + 'data_set': new_element}) + + return test_data diff --git a/utils/test/result_collection_api/tools/backup-db.sh b/utils/test/result_collection_api/tools/backup-db.sh index 8d12a6fc7..aa36aa370 100644 --- a/utils/test/result_collection_api/tools/backup-db.sh +++ b/utils/test/result_collection_api/tools/backup-db.sh @@ -1,5 +1,12 @@ #!/bin/bash - +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2016 Orange and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## echo "Backup Test collection DB" now=$(date +"%m_%d_%Y_%H_%M_%S") echo $now diff --git a/utils/test/result_collection_api/tools/samples/sample.json.postman_collection b/utils/test/result_collection_api/tools/samples/sample.json.postman_collection index a9372624c..9ee35d15e 100644 --- a/utils/test/result_collection_api/tools/samples/sample.json.postman_collection +++ b/utils/test/result_collection_api/tools/samples/sample.json.postman_collection @@ -1156,4 +1156,4 @@ "responses": [] } ] -}
\ No newline at end of file
+}
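
Side note on the dispatch pattern in the two new modules: format_promise_for_dashboard() and format_doctor_for_dashboard() build the formatter call as a string ("format_" + case + "_case_for_dashboard(results)") and execute it with eval(). Below is a minimal eval-free sketch of the same convention-based dispatch, for illustration only; the helper name dispatch_case_for_dashboard is hypothetical and is not part of this patch.

    # Sketch, not part of the patch: look the formatter up by name
    # instead of eval()'ing a generated call string.
    def dispatch_case_for_dashboard(case, results, namespace):
        # assumes the format_<case>_case_for_dashboard naming convention,
        # with '-' in case names mapped to '_' as in doctor2Dashboard.py
        name = "format_" + case.replace('-', '_') + "_case_for_dashboard"
        formatter = namespace.get(name)
        if formatter is None:
            # mirrors the patch's behaviour for undeclared test cases
            return []
        return formatter(results)

    # usage, e.g. inside promise2Dashboard.py:
    #     res = dispatch_case_for_dashboard(case, results, globals())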