author    SerenaFeng <feng.xiaowei@zte.com.cn>  2016-06-01 15:36:17 +0800
committer SerenaFeng <feng.xiaowei@zte.com.cn>  2016-06-01 16:21:46 +0800
commit    a55651eb098da2e1aa90c93294a59857711b48c1 (patch)
tree      654e3adace1e4ae1690d9612c5d7b1704a607a5e /utils/test/result_collection_api/opnfv_testapi/dashboard
parent    e0d66ea067ea59724c1dc300abe8052d4dcf88d1 (diff)
project-ize testAPI

JIRA: FUNCTEST-284

Change-Id: I219e934bb11f50de84df2aa0345ecc7885223491
Signed-off-by: SerenaFeng <feng.xiaowei@zte.com.cn>
Diffstat (limited to 'utils/test/result_collection_api/opnfv_testapi/dashboard')
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py               |   8
-rwxr-xr-x  utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py  | 199
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py        |  78
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py       | 105
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py     | 472
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py      | 103
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py         | 121
-rwxr-xr-x  utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py       | 121
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py    | 210
9 files changed, 1417 insertions(+), 0 deletions(-)
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
new file mode 100644
index 000000000..05c0c9392
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
new file mode 100755
index 000000000..2e106bec8
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+#
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# This script is used to build dashboard-ready json results.
+# It may be used for all the test cases of the Bottlenecks project:
+# to support a new test case, add a method format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with methods for Rubbos.
+#
+import os
+import requests
+import json
+
+
+def get_bottlenecks_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["rubbos", "tu1", "tu3"]
+
+
+def check_bottlenecks_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ bottlenecks_cases = get_bottlenecks_cases()
+
+ if case is None or case not in bottlenecks_cases:
+ return False
+ else:
+ return True
+
+
+def format_bottlenecks_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_bottlenecks_case_exist(case):
+ cmd = "format_" + case + "_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
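+
+
+# Illustrative sketch (an assumption, not part of the original change):
+# the eval()-based dispatch above could also be written as a globals()
+# lookup, which avoids evaluating a constructed string; the function
+# name here is hypothetical.
+def _format_bottlenecks_via_lookup(case, results):
+    formatter = globals().get("format_" + case + "_for_dashboard")
+    if formatter is None:
+        print "Test case not declared"
+        return []
+    return formatter(results)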
+
+
+def format_rubbos_for_dashboard(results):
+ """
+ Post processing for the Rubbos test case
+ """
+ test_data = [{'description': 'Rubbos results'}]
+
+ # Graph 1: Rubbos maximal throughput
+ # ********************************
+ #new_element = []
+ #for each_result in results:
+ # throughput_data = [record['throughput'] for record in each_result['details']]
+ # new_element.append({'x': each_result['start_date'],
+ # 'y': max(throughput_data)})
+
+ #test_data.append({'name': "Rubbos max throughput",
+ # 'info': {'type': "graph",
+ # 'xlabel': 'time',
+ # 'ylabel': 'maximal throughput'},
+ # 'data_set': new_element})
+
+ # Graph 2: Rubbos last record
+ # ********************************
+ new_element = []
+ latest_result = results[-1]["details"]
+ for data in latest_result:
+ client_num = int(data["client"])
+ throughput = int(data["throughput"])
+ new_element.append({'x': client_num,
+ 'y': throughput})
+ test_data.append({'name': "Rubbos throughput vs client number",
+ 'info': {'type': "graph",
+ 'xlabel': 'client number',
+ 'ylabel': 'throughput'},
+ 'data_set': new_element})
+
+ return test_data
+
+
+def format_tu1_for_dashboard(results):
+ test_data = [{'description': 'Tu-1 performance result'}]
+ line_element = []
+ bar_element = {}
+ last_result = results[-1]["details"]
+ for key in sorted(last_result):
+ bandwidth = last_result[key]["Bandwidth"]
+ pktsize = int(key)
+ line_element.append({'x': pktsize,
+ 'y': bandwidth * 1000})
+ bar_element[key] = bandwidth * 1000
+ # graph1, line
+ test_data.append({'name': "VM2VM max single directional throughput",
+ 'info': {'type': "graph",
+ 'xlabel': 'pktsize',
+ 'ylabel': 'bandwidth (kpps)'},
+ 'data_set': line_element})
+ # graph2, bar
+ test_data.append({'name': "VM2VM max single directional throughput",
+ 'info': {"type": "bar"},
+ 'data_set': bar_element})
+ return test_data
+
+
+def format_tu3_for_dashboard(results):
+ test_data = [{'description': 'Tu-3 performance result'}]
+ new_element = []
+ bar_element = {}
+ last_result = results[-1]["details"]
+ for key in sorted(last_result):
+ bandwidth = last_result[key]["Bandwidth"]
+ pktsize = int(key)
+ new_element.append({'x': pktsize,
+ 'y': bandwidth * 1000})
+ bar_element[key] = bandwidth * 1000
+ # graph1, line
+ test_data.append({'name': "VM2VM max bidirectional throughput",
+ 'info': {'type': "graph",
+ 'xlabel': 'pktsize',
+ 'ylabel': 'bandwidth (kpps)'},
+ 'data_set': new_element})
+ # graph2, bar
+ test_data.append({'name': "VM2VM max single directional throughput",
+ 'info': {"type": "bar"},
+ 'data_set': bar_element})
+ return test_data
+
+
+############################ For local test ################################
+
+def _read_sample_output(filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ output = os.path.join(curr_path, filename)
+ with open(output) as f:
+ sample_output = f.read()
+
+ result = json.loads(sample_output)
+ return result
+
+
+# Copied from functest/testcases/Dashboard/dashboard_utils.py
+# with some minor modifications for local test.
+def _get_results(db_url, test_criteria):
+ test_project = test_criteria["project"]
+ testcase = test_criteria["testcase"]
+
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ # build the request
+ # if criteria is all => remove criteria
+ url = db_url + "/results?project=" + test_project + "&case=" + testcase
+
+ # Send Request to Test DB
+ myData = requests.get(url, headers=headers)
+
+ # Get result as a json object
+ myNewData = json.loads(myData.text)
+
+ # Get results
+ myDataResults = myNewData['test_results']
+ return myDataResults
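+
+
+# Illustrative sketch (an assumption, not part of the original change):
+# the same query can be issued with requests' params argument instead
+# of string concatenation; the function name is hypothetical.
+def _get_results_via_params(db_url, test_criteria):
+    reply = requests.get(db_url + "/results",
+                         params={'project': test_criteria["project"],
+                                 'case': test_criteria["testcase"]},
+                         headers={'Content-Type': 'application/json'})
+    return json.loads(reply.text)['test_results']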
+
+# only for local test
+def _test():
+ db_url = "http://testresults.opnfv.org/testapi"
+ results = _get_results(db_url, {"project": "bottlenecks", "testcase": "rubbos"})
+ test_result = format_rubbos_for_dashboard(results)
+ print json.dumps(test_result, indent=4)
+
+ results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu1"})
+ #results = _read_sample_output("sample")
+ #print json.dumps(results, indent=4)
+ test_result = format_tu1_for_dashboard(results)
+ print json.dumps(test_result, indent=4)
+ results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu3"})
+ test_result = format_tu3_for_dashboard(results)
+ print json.dumps(test_result, indent=4)
+
+
+if __name__ == '__main__':
+ _test()
+
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
new file mode 100644
index 000000000..090aaa5b4
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to retrieve data from the test DB
+# and format it into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import os
+import re
+import sys
+from functest2Dashboard import format_functest_for_dashboard, \
+ check_functest_case_exist
+from yardstick2Dashboard import format_yardstick_for_dashboard, \
+ check_yardstick_case_exist
+from vsperf2Dashboard import format_vsperf_for_dashboard, \
+ check_vsperf_case_exist
+from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
+ check_bottlenecks_case_exist
+from qtip2Dashboard import format_qtip_for_dashboard, \
+ check_qtip_case_exist
+from promise2Dashboard import format_promise_for_dashboard, \
+ check_promise_case_exist
+from doctor2Dashboard import format_doctor_for_dashboard, \
+ check_doctor_case_exist
+
+# any test project wishing to provide dashboard-ready values
+# must include at least 2 methods:
+# - format_<Project>_for_dashboard
+# - check_<Project>_case_exist
+
+
+def check_dashboard_ready_project(test_project):
+ # Check that the first param corresponds to a project
+ # for which dashboard processing is available
+ # print("test_project: %s" % test_project)
+ project_module = 'opnfv_testapi.dashboard.' + test_project + '2Dashboard'
+ return project_module in sys.modules
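+
+# Illustrative usage (an assumption, for documentation only):
+#   check_dashboard_ready_project('functest')  # -> True once the
+#   functest2Dashboard import above has succeeded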
+
+
+def check_dashboard_ready_case(project, case):
+ cmd = "check_" + project + "_case_exist(case)"
+ return eval(cmd)
+
+
+def get_dashboard_cases():
+ # Retrieve all the test cases that could provide
+ # Dashboard ready graphs
+ # look in the releng repo
+ # search all the project2Dashboard.py files
+ # we assume that dashboard processing of project <Project>
+ # is performed in the <Project>2Dashboard.py file
+ modules = []
+ cp = re.compile('dashboard.*2Dashboard')
+ for module in sys.modules:
+ if re.match(cp, module):
+ modules.append(module)
+
+ return modules
+
+
+def get_dashboard_result(project, case, results=None):
+ # get the dashboard-ready results
+ # parameters are:
+ # project: project name
+ # case: test case name
+ # results: array of raw results pre-filtered
+ # according to the parameters of the request
+ cmd = "format_" + project + "_for_dashboard(case, results)"
+ res = eval(cmd)
+ return res
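+
+
+# Illustrative sketch (an assumption, not part of the original change):
+# an explicit dispatch table avoids eval() and makes the set of
+# dashboard-ready projects greppable; the names are hypothetical.
+_EXAMPLE_FORMATTERS = {
+    'bottlenecks': format_bottlenecks_for_dashboard,
+    'doctor': format_doctor_for_dashboard,
+    'functest': format_functest_for_dashboard,
+    'promise': format_promise_for_dashboard,
+    'qtip': format_qtip_for_dashboard,
+    'vsperf': format_vsperf_for_dashboard,
+    'yardstick': format_yardstick_for_dashboard,
+}
+
+
+def _get_dashboard_result_via_table(project, case, results=None):
+    formatter = _EXAMPLE_FORMATTERS.get(project)
+    return formatter(case, results) if formatter else []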
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
new file mode 100644
index 000000000..38b23abb4
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard-ready json results.
+# It may be used for all the test cases of the Doctor project:
+# to support a new test case, add a method format_<Test_case>_for_dashboard(results)
+#
+import re
+import datetime
+
+
+def get_doctor_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["doctor-notification","doctor-mark-down"]
+
+
+def format_doctor_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+
+ if check_doctor_case_exist(case):
+ # note: we add _case because the testcase and the project have
+ # the same name
+ # TODO: refactor; this looked fine at the beginning with only
+ # 1 project, but it is rather ugly now and clearly not optimized
+ cmd = "format_" + case.replace('-', '_') + "_case_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ return res
+
+
+def check_doctor_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ doctor_cases = get_doctor_cases()
+
+ if (case is None or case not in doctor_cases):
+ return False
+ else:
+ return True
+
+
+def format_doctor_mark_down_case_for_dashboard(results):
+ """
+ Post processing for the doctor test case
+ """
+ test_data = [{'description': 'doctor-mark-down results for Dashboard'}]
+ return test_data
+
+
+def format_doctor_notification_case_for_dashboard(results):
+ """
+ Post processing for the doctor-notification test case
+ """
+ test_data = [{'description': 'doctor-notification results for Dashboard'}]
+ # Graph 1: (duration)=f(time)
+ # ***************************************
+ new_element = []
+
+ # note: duration is used as provided by the test DB
+ for data in results:
+ t = data['details']['duration']
+ new_element.append({'x': data['start_date'],
+ 'y': t})
+
+ test_data.append({'name': "doctor-notification duration ",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in results:
+ nbTest += 1
+ if data['details']['status'] == "OK":
+ nbTestOk += 1
+
+ test_data.append({'name': "doctor-notification status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
new file mode 100644
index 000000000..86521b984
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
@@ -0,0 +1,472 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard-ready json results.
+# It may be used for all the test cases of the Functest project:
+# to support a new test case, add a method format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with methods for ODL, Tempest, Rally and vPing
+#
+import datetime
+import re
+
+
+def get_functest_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL",
+ "ONOS", "Rally"]
+
+
+def format_functest_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_functest_case_exist(case):
+ cmd = "format_" + case + "_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
+
+
+def check_functest_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ functest_cases = get_functest_cases()
+
+ if (case is None or case not in functest_cases):
+ return False
+ else:
+ return True
+
+
+def format_status_for_dashboard(results):
+ test_data = [{'description': 'Functest status'}]
+
+ # define magic equation for the status....
+ # 5 suites: vPing, odl, Tempest, vIMS, Rally
+ # Which overall KPI makes sense...
+
+ # TODO to be done and discussed
+ testcases = get_functest_cases()
+ test_data.append({'nb test suite(s) run': len(testcases)-1})
+ test_data.append({'vPing': '100%'})
+ test_data.append({'VIM status': '82%'})
+ test_data.append({'SDN Controllers': {'odl': '92%',
+ 'onos': '95%',
+ 'ocl': '93%'}})
+ test_data.append({'VNF deployment': '95%'})
+
+ return test_data
+
+
+def format_vIMS_for_dashboard(results):
+ """
+ Post processing for the vIMS test case
+ """
+ test_data = [{'description': 'vIMS results for Dashboard'}]
+
+ # Graph 1: (duration_deployment_orchestrator,
+ # duration_deployment_vnf,
+ # duration_test) = f(time)
+ # ********************************
+ new_element = []
+
+ for data in results:
+ new_element.append({'x': data['start_date'],
+ 'y1': data['details']['orchestrator']['duration'],
+ 'y2': data['details']['vIMS']['duration'],
+ 'y3': data['details']['sig_test']['duration']})
+
+ test_data.append({'name': "vIMS orchestrator/VNF/test duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'orchestration deployment duration',
+ 'y2label': 'vIMS deployment duration',
+ 'y3label': 'vIMS test duration'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure, nb skipped)=f(time)
+ # **************************************************
+ new_element = []
+
+ for data in results:
+ # Retrieve all the tests
+ nbTests = 0
+ nbFailures = 0
+ nbSkipped = 0
+ vIMS_test = data['details']['sig_test']['result']
+
+ for data_test in vIMS_test:
+ # Calculate nb of tests run and nb of tests failed
+ # vIMS_results = get_vIMSresults(vIMS_test)
+ # print vIMS_results
+ try:
+ if data_test['result'] == "Passed":
+ nbTests += 1
+ elif data_test['result'] == "Failed":
+ nbFailures += 1
+ elif data_test['result'] == "Skipped":
+ nbSkipped += 1
+ except:
+ nbTests = 0
+
+ new_element.append({'x': data['start_date'],
+ 'y1': nbTests,
+ 'y2': nbFailures,
+ 'y3': nbSkipped})
+
+ test_data.append({'name': "vIMS nb tests passed/failed/skipped",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests passed',
+ 'y2label': 'Number of tests failed',
+ 'y3label': 'Number of tests skipped'},
+ 'data_set': new_element})
+
+ # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+ # ********************************************************
+ nbTests = 0
+ nbFailures = 0
+
+ for data in results:
+ vIMS_test = data['details']['sig_test']['result']
+
+ for data_test in vIMS_test:
+ nbTestsOK = 0
+ nbTestsKO = 0
+
+ try:
+ if data_test['result'] == "Passed":
+ nbTestsOK += 1
+ elif data_test['result'] == "Failed":
+ nbTestsKO += 1
+ except:
+ nbTestsOK = 0
+
+ nbTests += nbTestsOK + nbTestsKO
+ nbFailures += nbTestsKO
+
+ test_data.append({'name': "Total number of tests run/failure tests",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Run': nbTests,
+ 'Failed': nbFailures}]})
+
+ return test_data
+
+
+def format_Tempest_for_dashboard(results):
+ """
+ Post processing for the Tempest test case
+ """
+ test_data = [{'description': 'Tempest results for Dashboard'}]
+
+ # Graph 1: Test_Duration = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ new_element.append({'x': data['start_date'],
+ 'y': data['details']['duration']})
+
+ test_data.append({'name': "Tempest duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)=f(time)
+ # ***************************************
+ new_element = []
+ for data in results:
+ new_element.append({'x': data['start_date'],
+ 'y1': data['details']['tests'],
+ 'y2': data['details']['failures']})
+
+ test_data.append({'name': "Tempest nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+ # ********************************************************
+ nbTests = 0
+ nbFailures = 0
+
+ for data in results:
+ nbTests += data['details']['tests']
+ nbFailures += data['details']['failures']
+
+ test_data.append({'name': "Total number of tests run/failure tests",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Run': nbTests,
+ 'Failed': nbFailures}]})
+
+ # Graph 4: (Success rate)=f(time)
+ # ***************************************
+ new_element = []
+ for data in results:
+ try:
+ diff = (int(data['details']['tests']) - int(data['details']['failures']))
+ success_rate = 100*diff/int(data['details']['tests'])
+ except:
+ success_rate = 0
+
+ new_element.append({'x': data['start_date'],
+ 'y1': success_rate})
+
+ test_data.append({'name': "Tempest success rate",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Success rate'},
+ 'data_set': new_element})
+
+ return test_data
+
+
+def format_ODL_for_dashboard(results):
+ """
+ Post processing for the ODL test case
+ """
+ test_data = [{'description': 'ODL results for Dashboard'}]
+
+ # Graph 1: (Nb test, nb failure)=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ odl_results = data['details']['details']
+ nbFailures = 0
+ for odl in odl_results:
+ if (odl['test_status']['@status'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['start_date'],
+ 'y1': len(odl_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ODL nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+ return test_data
+
+
+def format_ONOS_for_dashboard(results):
+ """
+ Post processing for the ONOS test case
+ """
+ test_data = [{'description': 'ONOS results for Dashboard'}]
+ # Graph 1: (duration FUNCvirtNet)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['FUNCvirNet']['duration']
+ h, m, s = re.split(':', t)
+ s = round(float(s))
+ new_duration = int(datetime.timedelta(hours=int(h),
+ minutes=int(m),
+ seconds=int(s)).total_seconds())
+ new_element.append({'x': data['start_date'],
+ 'y': new_duration})
+
+ test_data.append({'name': "ONOS FUNCvirNet duration ",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)FuncvirtNet=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ onos_results = data['details']['FUNCvirNet']['status']
+ nbFailures = 0
+ for onos in onos_results:
+ if (onos['Case result'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['start_date'],
+ 'y1': len(onos_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ # Graph 3: (duration FUNCvirtNetL3)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['FUNCvirNetL3']['duration']
+ h, m, s = re.split(':', t)
+ s = round(float(s))
+ new_duration = int(datetime.timedelta(hours=int(h),
+ minutes=int(m),
+ seconds=int(s)).total_seconds())
+ new_element.append({'x': data['start_date'],
+ 'y': new_duration})
+
+ test_data.append({'name': "ONOS FUNCvirNetL3 duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 4: (Nb test, nb failure)FuncvirtNetL3=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ onos_results = data['details']['FUNCvirNetL3']['status']
+ nbFailures = 0
+ for onos in onos_results:
+ if (onos['Case result'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['start_date'],
+ 'y1': len(onos_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+ return test_data
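+
+
+# Illustrative helper sketch (an assumption, not used by the code
+# above): the h:m:s.us duration parsing duplicated in the two ONOS
+# graphs could be factored out; the function name is hypothetical.
+def _duration_to_seconds(duration):
+    # e.g. "0:00:08.999904" -> 9
+    h, m, s = re.split(':', duration)
+    delta = datetime.timedelta(hours=int(h), minutes=int(m),
+                               seconds=round(float(s)))
+    return int(delta.total_seconds())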
+
+
+def format_Rally_for_dashboard(results):
+ """
+ Post processing for the Rally test case
+ """
+ test_data = [{'description': 'Rally results for Dashboard'}]
+ # Graph 1: Test_Duration = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ summary_cursor = len(data['details']) - 1
+ new_element.append({'x': data['start_date'],
+ 'y': int(data['details'][summary_cursor]['summary']['duration'])})
+
+ test_data.append({'name': "rally duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: Success rate = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ summary_cursor = len(data['details']) - 1
+ new_element.append({'x': data['start_date'],
+ 'y': float(data['details'][summary_cursor]['summary']['nb success'])})
+
+ test_data.append({'name': "rally success rate",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'success rate (%)'},
+ 'data_set': new_element})
+
+ return test_data
+
+
+def format_vPing_for_dashboard(results):
+ """
+ Post processing for the vPing test case
+ """
+ test_data = [{'description': 'vPing results for Dashboard'}]
+
+ # Graph 1: Test_Duration = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ new_element.append({'x': data['start_date'],
+ 'y': data['details']['duration']})
+
+ test_data.append({'name': "vPing duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in results:
+ nbTest += 1
+ if data['details']['status'] == "OK":
+ nbTestOk += 1
+
+ test_data.append({'name': "vPing status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ return test_data
+
+
+def format_vPing_userdata_for_dashboard(results):
+ """
+ Post processing for the vPing_userdata test case
+ """
+ test_data = [{'description': 'vPing_userdata results for Dashboard'}]
+
+ # Graph 1: Test_Duration = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ new_element.append({'x': data['start_date'],
+ 'y': data['details']['duration']})
+
+ test_data.append({'name': "vPing_userdata duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in results:
+ nbTest += 1
+ if data['details']['status'] == "OK":
+ nbTestOk += 1
+
+ test_data.append({'name': "vPing_userdata status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
new file mode 100644
index 000000000..84f43a7d1
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard-ready json results.
+# It may be used for all the test cases of the Promise project:
+# to support a new test case, add a method format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with a method for the promise test case
+#
+import re
+import datetime
+
+
+def get_promise_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["promise"]
+
+
+def format_promise_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_promise_case_exist(case):
+ # note: we add _case because the testcase and the project have
+ # the same name
+ # TODO: refactor; this looked fine at the beginning with only
+ # 1 project, but it is rather ugly now and clearly not optimized
+ cmd = "format_" + case + "_case_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
+
+
+def check_promise_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ promise_cases = get_promise_cases()
+
+ if (case is None or case not in promise_cases):
+ return False
+ else:
+ return True
+
+
+def format_promise_case_for_dashboard(results):
+ """
+ Post processing for the promise test case
+ """
+ test_data = [{'description': 'Promise results for Dashboard'}]
+ # Graph 1: (duration)=f(time)
+ # ***************************************
+ new_element = []
+
+ # note: duration is used as provided by the test DB
+ for data in results:
+ t = data['details']['duration']
+ new_element.append({'x': data['creation_date'],
+ 'y': t})
+
+ test_data.append({'name': "Promise duration ",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ promise_results = data['details']
+ new_element.append({'x': data['creation_date'],
+ 'y1': promise_results['tests'],
+ 'y2': promise_results['failures']})
+
+ test_data.append({'name': "Promise nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py
new file mode 100644
index 000000000..6ceccd374
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+
+##############################################################################
+# Copyright (c) 2015 Dell Inc and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+def get_qtip_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["compute_test_suite","storage_test_suite","network_test_suite"]
+
+def check_qtip_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ qtip_cases = get_qtip_cases()
+ if (case is None or case not in qtip_cases):
+ return False
+ else:
+ return True
+
+def format_qtip_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_qtip_case_exist(case):
+ res = format_common_for_dashboard(case, results)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
+
+def format_common_for_dashboard(case, results):
+ """
+ Common post processing
+ """
+ test_data_description = case + " results for Dashboard"
+ test_data = [{'description': test_data_description}]
+
+ graph_name = ''
+ if "network_test_suite" in case:
+ graph_name = "Throughput index"
+ else:
+ graph_name = "Index"
+
+ # Graph 1:
+ # ********************************
+ new_element = []
+ for date, index in results:
+ new_element.append({'x': date,
+ 'y1': index,
+ })
+
+ test_data.append({'name': graph_name,
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Index Number'},
+ 'data_set': new_element})
+
+ return test_data
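+
+# Illustrative usage (an assumption): `results` is an iterable of
+# (start_date, index) pairs, as produced by _get_results() below, e.g.
+#   format_common_for_dashboard("compute_test_suite",
+#                               [("2016-05-31", 1.2)])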
+
+
+############################ For local test ################################
+import os
+import requests
+import json
+from collections import defaultdict
+
+def _get_results(db_url, testcase):
+
+ testproject = testcase["project"]
+ testcase = testcase["testcase"]
+ resultarray = {}
+ #header
+ header = {'Content-Type': 'application/json'}
+ #url
+ url = db_url + "/results?project="+testproject+"&case="+testcase
+ data = requests.get(url, headers=header)
+ datajson = data.json()
+ for x in range(0, len(datajson['test_results'])):
+
+ rawresults = datajson['test_results'][x]['details']
+ index = rawresults['index']
+ resultarray[str(datajson['test_results'][x]['start_date'])] = index
+
+ return resultarray
+
+def _test():
+
+ db_url = "http://testresults.opnfv.org/testapi"
+ raw_result = defaultdict()
+
+ raw_result = _get_results(db_url, {"project": "qtip", "testcase": "compute_test_suite"})
+ resultitems = raw_result.items()
+ result = format_qtip_for_dashboard("compute_test_suite", resultitems)
+ print result
+
+ raw_result = _get_results(db_url, {"project": "qtip", "testcase": "storage_test_suite"})
+ resultitems = raw_result.items()
+ result = format_qtip_for_dashboard("storage_test_suite", resultitems)
+ print result
+
+ raw_result = _get_results(db_url, {"project": "qtip", "testcase": "network_test_suite"})
+ resultitems = raw_result.items()
+ result = format_qtip_for_dashboard("network_test_suite", resultitems)
+ print result
+
+if __name__ == '__main__':
+ _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
new file mode 100755
index 000000000..5a6882da4
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+
+# Copyright 2015 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def get_vsperf_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["tput_ovsdpdk", "tput_ovs",
+ "b2b_ovsdpdk", "b2b_ovs",
+ "tput_mod_vlan_ovsdpdk", "tput_mod_vlan_ovs",
+ "cont_ovsdpdk", "cont_ovs",
+ "pvp_cont_ovsdpdkuser", "pvp_cont_ovsdpdkcuse", "pvp_cont_ovsvirtio",
+ "pvvp_cont_ovsdpdkuser", "pvvp_cont_ovsdpdkcuse", "pvvp_cont_ovsvirtio",
+ "scalability_ovsdpdk", "scalability_ovs",
+ "pvp_tput_ovsdpdkuser", "pvp_tput_ovsdpdkcuse", "pvp_tput_ovsvirtio",
+ "pvp_b2b_ovsdpdkuser", "pvp_b2b_ovsdpdkcuse", "pvp_b2b_ovsvirtio",
+ "pvvp_tput_ovsdpdkuser", "pvvp_tput_ovsdpdkcuse", "pvvp_tput_ovsvirtio",
+ "pvvp_b2b_ovsdpdkuser", "pvvp_b2b_ovsdpdkcuse", "pvvp_b2b_ovsvirtio",
+ "cpu_load_ovsdpdk", "cpu_load_ovs",
+ "mem_load_ovsdpdk", "mem_load_ovs"]
+
+
+def check_vsperf_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ vsperf_cases = get_vsperf_cases()
+
+ if (case is None or case not in vsperf_cases):
+ return False
+ else:
+ return True
+
+
+def format_vsperf_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_vsperf_case_exist(case):
+ res = format_common_for_dashboard(case, results)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
+
+
+def format_common_for_dashboard(case, results):
+ """
+ Common post processing
+ """
+ test_data_description = case + " results for Dashboard"
+ test_data = [{'description': test_data_description}]
+
+ graph_name = ''
+ if "b2b" in case:
+ graph_name = "B2B frames"
+ else:
+ graph_name = "Rx frames per second"
+
+ # Graph 1: Rx fps = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ new_element.append({'x': data['start_date'],
+ 'y1': data['details']['64'],
+ 'y2': data['details']['128'],
+ 'y3': data['details']['512'],
+ 'y4': data['details']['1024'],
+ 'y5': data['details']['1518']})
+
+ test_data.append({'name': graph_name,
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'frame size 64B',
+ 'y2label': 'frame size 128B',
+ 'y3label': 'frame size 512B',
+ 'y4label': 'frame size 1024B',
+ 'y5label': 'frame size 1518B'},
+ 'data_set': new_element})
+
+ return test_data
+
+
+
+
+############################ For local test ################################
+import os
+
+def _test():
+ ans = [{'start_date': '2015-09-12', 'project_name': 'vsperf', 'version': 'ovs_master', 'pod_name': 'pod1-vsperf', 'case_name': 'tput_ovsdpdk', 'installer': 'build_sie', 'details': {'64': '26.804', '1024': '1097.284', '512': '178.137', '1518': '12635.860', '128': '100.564'}},
+ {'start_date': '2015-09-33', 'project_name': 'vsperf', 'version': 'ovs_master', 'pod_name': 'pod1-vsperf', 'case_name': 'tput_ovsdpdk', 'installer': 'build_sie', 'details': {'64': '16.804', '1024': '1087.284', '512': '168.137', '1518': '12625.860', '128': '99.564'}}]
+
+ result = format_vsperf_for_dashboard("pvp_cont_ovsdpdkcuse", ans)
+ print result
+
+ result = format_vsperf_for_dashboard("b2b_ovsdpdk", ans)
+ print result
+
+ result = format_vsperf_for_dashboard("non_existing", ans)
+ print result
+
+if __name__ == '__main__':
+ _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
new file mode 100644
index 000000000..4f022d5b9
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+#
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# This script is used to build dashboard-ready json results.
+# It may be used for all the test cases of the Yardstick project:
+# to support a new test case, add a method format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with methods for Ping, Iperf, Netperf, Pktgen,
+# Fio, Lmbench, Perf, Cyclictest.
+#
+
+
+def get_yardstick_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["Ping", "Iperf", "Netperf", "Pktgen", "Fio", "Lmbench",
+ "Perf", "Cyclictest"]
+
+
+def format_yardstick_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_yardstick_case_exist(case):
+ cmd = "format_" + case + "_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
+
+
+def check_yardstick_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ yardstick_cases = get_yardstick_cases()
+
+ if (case is None or case not in yardstick_cases):
+ return False
+ else:
+ return True
+
+
+def _get_test_status_bar(results):
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in results:
+ nbTest += 1
+ records = [record for record in data['details']
+ if "benchmark" in record
+ and record["benchmark"]["errors"] != ""]
+ if len(records) == 0:
+ nbTestOk += 1
+ return nbTest, nbTestOk
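+
+# Illustrative note (an assumption about the record layout, inferred
+# from the filter above): a run counts as OK when no record carries a
+# non-empty benchmark error, e.g.
+#   {"benchmark": {"errors": "", "data": {"rtt": 0.5}}}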
+
+
+def format_Ping_for_dashboard(results):
+ """
+ Post processing for the Ping test case
+ """
+ test_data = [{'description': 'Ping results for Dashboard'}]
+
+ # Graph 1: Test_Duration = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ records = [record["benchmark"]["data"]["rtt"]
+ for record in data['details']
+ if "benchmark" in record]
+
+ avg_rtt = sum(records) / len(records)
+ new_element.append({'x': data['start_date'],
+ 'y': avg_rtt})
+
+ test_data.append({'name': "ping duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest, nbTestOk = _get_test_status_bar(results)
+
+ test_data.append({'name': "ping status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ return test_data
+
+
+def format_Iperf_for_dashboard(results):
+ """
+ Post processing for the Iperf test case
+ """
+ test_data = [{'description': 'Iperf results for Dashboard'}]
+ return test_data
+
+
+def format_Netperf_for_dashboard(results):
+ """
+ Post processing for the Netperf test case
+ """
+ test_data = [{'description': 'Netperf results for Dashboard'}]
+ return test_data
+
+
+def format_Pktgen_for_dashboard(results):
+ """
+ Post processing for the Pktgen test case
+ """
+ test_data = [{'description': 'Pktgen results for Dashboard'}]
+ return test_data
+
+
+def format_Fio_for_dashboard(results):
+ """
+ Post processing for the Fio test case
+ """
+ test_data = [{'description': 'Fio results for Dashboard'}]
+ return test_data
+
+
+def format_Lmbench_for_dashboard(results):
+ """
+ Post processing for the Lmbench test case
+ """
+ test_data = [{'description': 'Lmbench results for Dashboard'}]
+ return test_data
+
+
+def format_Perf_for_dashboard(results):
+ """
+ Post processing for the Perf test case
+ """
+ test_data = [{'description': 'Perf results for Dashboard'}]
+ return test_data
+
+
+def format_Cyclictest_for_dashboard(results):
+ """
+ Post processing for the Cyclictest test case
+ """
+ test_data = [{'description': 'Cyclictest results for Dashboard'}]
+ return test_data
+
+
+############################ For local test ################################
+import json
+import os
+import requests
+
+def _read_sample_output(filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ output = os.path.join(curr_path, filename)
+ with open(output) as f:
+ sample_output = f.read()
+
+ result = json.loads(sample_output)
+ return result
+
+# Copied from functest/testcases/Dashboard/dashboard_utils.py
+# with some minor modifications for local test.
+def _get_results(db_url, test_criteria):
+
+ test_project = test_criteria["project"]
+ testcase = test_criteria["testcase"]
+
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ # build the request
+ # if criteria is all => remove criteria
+ url = db_url + "/results?project=" + test_project + "&case=" + testcase
+
+ # Send Request to Test DB
+ myData = requests.get(url, headers=headers)
+
+ # Get result as a json object
+ myNewData = json.loads(myData.text)
+
+ # Get results
+ myDataResults = myNewData['test_results']
+
+ return myDataResults
+
+def _test():
+ db_url = "http://213.77.62.197"
+ result = _get_results(db_url,
+ {"project": "yardstick", "testcase": "Ping"})
+ print format_Ping_for_dashboard(result)
+
+if __name__ == '__main__':
+ _test()