summaryrefslogtreecommitdiffstats
path: root/utils/test/result_collection_api
diff options
context:
space:
mode:
Diffstat (limited to 'utils/test/result_collection_api')
-rwxr-xr-xutils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py38
-rw-r--r--utils/test/result_collection_api/dashboard/dashboard_utils.py4
-rw-r--r--utils/test/result_collection_api/dashboard/doctor2Dashboard.py105
-rw-r--r--utils/test/result_collection_api/dashboard/functest2Dashboard.py135
-rw-r--r--utils/test/result_collection_api/dashboard/promise2Dashboard.py103
-rw-r--r--utils/test/result_collection_api/resources/handlers.py9
-rw-r--r--utils/test/result_collection_api/resources/models.py397
-rw-r--r--utils/test/result_collection_api/samples/sample.json.postman_collection (renamed from utils/test/result_collection_api/tools/samples/sample.json.postman_collection)0
-rw-r--r--utils/test/result_collection_api/tools/backup-db.sh33
-rw-r--r--utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.css53
-rw-r--r--utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.html25
-rw-r--r--utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.js117
-rw-r--r--utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests_conf.json29
13 files changed, 573 insertions, 475 deletions
diff --git a/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py b/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
index 8d5326eb4..9a7e4ce1b 100755
--- a/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
+++ b/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
@@ -62,19 +62,35 @@ def format_rubbos_for_dashboard(results):
"""
test_data = [{'description': 'Rubbos results'}]
- # Graph 1:
+ # Graph 1:Rubbos maximal throughput
+ # ********************************
+ #new_element = []
+ #for each_result in results:
+ # throughput_data = [record['throughput'] for record in each_result['details']]
+ # new_element.append({'x': each_result['creation_date'],
+ # 'y': max(throughput_data)})
+
+ #test_data.append({'name': "Rubbos max throughput",
+ # 'info': {'type': "graph",
+ # 'xlabel': 'time',
+ # 'ylabel': 'maximal throughput'},
+ # 'data_set': new_element})
+
+ # Graph 2: Rubbos last record
# ********************************
new_element = []
- for each_result in results:
- throughput_data = [record['throughput'] for record in each_result['details']]
- new_element.append({'x': each_result['creation_date'],
- 'y': max(throughput_data)})
-
- test_data.append({'name': "Rubbos max throughput",
+ latest_result = results[-1]["details"]
+ for data in latest_result:
+ client_num = int(data["client"])
+ throughput = int(data["throughput"])
+ new_element.append({'x': client_num,
+ 'y': throughput})
+ test_data.append({'name': "Rubbos throughput vs client number",
'info': {'type': "graph",
- 'xlabel': 'time',
- 'ylabel': 'maximal throughput'},
+ 'xlabel': 'client number',
+ 'ylabel': 'throughput'},
'data_set': new_element})
+
return test_data
@@ -161,9 +177,9 @@ def _get_results(db_url, test_criteria):
myDataResults = myNewData['test_results']
return myDataResults
-
+#only for local test
def _test():
- db_url = "http://213.77.62.197"
+ db_url = "http://testresults.opnfv.org/testapi"
results = _get_results(db_url, {"project": "bottlenecks", "testcase": "rubbos"})
test_result = format_rubbos_for_dashboard(results)
print json.dumps(test_result, indent=4)
diff --git a/utils/test/result_collection_api/dashboard/dashboard_utils.py b/utils/test/result_collection_api/dashboard/dashboard_utils.py
index 8d83b006a..472bbc727 100644
--- a/utils/test/result_collection_api/dashboard/dashboard_utils.py
+++ b/utils/test/result_collection_api/dashboard/dashboard_utils.py
@@ -26,6 +26,10 @@ from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
check_bottlenecks_case_exist
from qtip2Dashboard import format_qtip_for_dashboard, \
check_qtip_case_exist
+from promise2Dashboard import format_promise_for_dashboard, \
+ check_promise_case_exist
+from doctor2Dashboard import format_doctor_for_dashboard, \
+ check_doctor_case_exist
# any project test project wishing to provide dashboard ready values
# must include at least 2 methods
diff --git a/utils/test/result_collection_api/dashboard/doctor2Dashboard.py b/utils/test/result_collection_api/dashboard/doctor2Dashboard.py
new file mode 100644
index 000000000..eba35b57b
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/doctor2Dashboard.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test case of the Doctor project
+# a new method format_<Test_case>_for_dashboard(results)
+#
+import re
+import datetime
+
+
+def get_doctor_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["doctor-notification","doctor-mark-down"]
+
+
+def format_doctor_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+
+ if check_doctor_case_exist(case):
+ # note we add _case because testcase and project had the same name
+ # TODO refactoring...looks fine at the beginning with only 1 project
+ # not very ugly now and clearly not optimized...
+ cmd = "format_" + case.replace('-','_') + "_case_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ return res
+
+
+def check_doctor_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ doctor_cases = get_doctor_cases()
+
+ if (case is None or case not in doctor_cases):
+ return False
+ else:
+ return True
+
+
+def format_doctor_mark_down_case_for_dashboard(results):
+ """
+ Post processing for the doctor test case
+ """
+ test_data = [{'description': 'doctor-mark-down results for Dashboard'}]
+ return test_data
+
+
+def format_doctor_notification_case_for_dashboard(results):
+ """
+ Post processing for the doctor-notification test case
+ """
+ test_data = [{'description': 'doctor results for Dashboard'}]
+ # Graph 1: (duration)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['duration']
+ new_element.append({'x': data['creation_date'],
+ 'y': t})
+
+ test_data.append({'name': "doctor-notification duration ",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in results:
+ nbTest += 1
+ if data['details']['status'] == "OK":
+ nbTestOk += 1
+
+ test_data.append({'name': "doctor-notification status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ return test_data
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
index 65dbca654..a2ed3085c 100644
--- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py
+++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
@@ -14,15 +14,17 @@
# a new method format_<Test_case>_for_dashboard(results)
# v0.1: basic example with methods for odl, Tempest, Rally and vPing
#
-import re
import datetime
+import re
+
def get_functest_cases():
"""
get the list of the supported test cases
TODO: update the list when adding a new test case for the dashboard
"""
- return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL", "ONOS", "Rally"]
+ return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL",
+ "ONOS", "Rally"]
def format_functest_for_dashboard(case, results):
@@ -66,7 +68,9 @@ def format_status_for_dashboard(results):
test_data.append({'nb test suite(s) run': len(testcases)-1})
test_data.append({'vPing': '100%'})
test_data.append({'VIM status': '82%'})
- test_data.append({'SDN Controllers': {'odl':'92%', 'onos':'95%', 'opencontrail':'93%'}})
+ test_data.append({'SDN Controllers': {'odl': '92%',
+ 'onos': '95%',
+ 'ocl': '93%'}})
test_data.append({'VNF deployment': '95%'})
return test_data
@@ -209,6 +213,25 @@ def format_Tempest_for_dashboard(results):
'data_set': [{'Run': nbTests,
'Failed': nbFailures}]})
+ # Graph 4: (Success rate)=f(time)
+ # ***************************************
+ new_element = []
+ for data in results:
+ try:
+ diff = (int(data['details']['tests']) - int(data['details']['failures']))
+ success_rate = 100*diff/int(data['details']['tests'])
+ except:
+ success_rate = 0
+
+ new_element.append({'x': data['creation_date'],
+ 'y1': success_rate})
+
+ test_data.append({'name': "Tempest success rate",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Success rate'},
+ 'data_set': new_element})
+
return test_data
@@ -227,7 +250,7 @@ def format_ODL_for_dashboard(results):
nbFailures = 0
for odl in odl_results:
if (odl['test_status']['@status'] == "FAIL"):
- nbFailures+=1
+ nbFailures += 1
new_element.append({'x': data['creation_date'],
'y1': len(odl_results),
'y2': nbFailures})
@@ -246,25 +269,91 @@ def format_ONOS_for_dashboard(results):
Post processing for the odl test case
"""
test_data = [{'description': 'ONOS results for Dashboard'}]
- # Graph 1: (duration)=f(time)
+ # Graph 1: (duration FUNCvirNet)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['FUNCvirNet']['duration']
+ h, m, s = re.split(':', t)
+ s = round(float(s))
+ new_duration = int(datetime.timedelta(hours=int(h),
+ minutes=int(m),
+ seconds=int(s)).total_seconds())
+ new_element.append({'x': data['creation_date'],
+ 'y': new_duration})
+
+ test_data.append({'name': "ONOS FUNCvirNet duration ",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)FuncvirtNet=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ onos_results = data['details']['FUNCvirNet']['status']
+ nbFailures = 0
+ for onos in onos_results:
+ if (onos['Case result'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['creation_date'],
+ 'y1': len(onos_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ # Graph 3: (duration FUNCvirNetL3)=f(time)
# ***************************************
new_element = []
# default duration 0:00:08.999904
# consider only seconds => 09
for data in results:
- t = data['details']['duration']
- h,m,s = re.split(':',t)
+ t = data['details']['FUNCvirNetL3']['duration']
+ h, m, s = re.split(':', t)
s = round(float(s))
- new_duration = int(datetime.timedelta(hours=int(h),minutes=int(m),seconds=int(s)).total_seconds())
+ new_duration = int(datetime.timedelta(hours=int(h),
+ minutes=int(m),
+ seconds=int(s)).total_seconds())
new_element.append({'x': data['creation_date'],
'y': new_duration})
- test_data.append({'name': "ONOS duration",
+ test_data.append({'name': "ONOS FUNCvirNetL3 duration",
'info': {'type': "graph",
'xlabel': 'time (s)',
'ylabel': 'duration (s)'},
'data_set': new_element})
+
+ # Graph 4: (Nb test, nb failure)FuncvirtNetL3=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ onos_results = data['details']['FUNCvirNetL3']['status']
+ nbFailures = 0
+ for onos in onos_results:
+ if (onos['Case result'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['creation_date'],
+ 'y1': len(onos_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
return test_data
@@ -273,6 +362,33 @@ def format_Rally_for_dashboard(results):
Post processing for the Rally test case
"""
test_data = [{'description': 'Rally results for Dashboard'}]
+ # Graph 1: Test_Duration = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ summary_cursor = len(data)
+ new_element.append({'x': data['creation_date'],
+ 'y': int(data['details'][summary_cursor]['summary']['duration'])})
+
+ test_data.append({'name': "rally duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: Success rate = f(time)
+ # ********************************
+ new_element = []
+ for data in results:
+ new_element.append({'x': data['creation_date'],
+ 'y': float(data['details'][summary_cursor]['summary']['nb success'])})
+
+ test_data.append({'name': "rally success rate",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'success rate (%)'},
+ 'data_set': new_element})
+
return test_data
@@ -312,6 +428,7 @@ def format_vPing_for_dashboard(results):
return test_data
+
def format_vPing_userdata_for_dashboard(results):
"""
Post processing for the vPing_userdata test case
diff --git a/utils/test/result_collection_api/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/dashboard/promise2Dashboard.py
new file mode 100644
index 000000000..84f43a7d1
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/promise2Dashboard.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test case of the Promise project
+# a new method format_<Test_case>_for_dashboard(results)
+# v0.1: basic example with methods for odl, Tempest, Rally and vPing
+#
+import re
+import datetime
+
+
+def get_promise_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["promise"]
+
+
+def format_promise_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_promise_case_exist(case):
+ # note we add _case because testcase and project had the same name
+ # TODO refactoring...looks fine at the beginning with only 1 project
+ # not very ugly now and clearly not optimized...
+ cmd = "format_" + case + "_case_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ print "Test cases not declared"
+ return res
+
+
+def check_promise_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ promise_cases = get_promise_cases()
+
+ if (case is None or case not in promise_cases):
+ return False
+ else:
+ return True
+
+
+
+
+
+def format_promise_case_for_dashboard(results):
+ """
+ Post processing for the promise test case
+ """
+ test_data = [{'description': 'Promise results for Dashboard'}]
+ # Graph 1: (duration)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['duration']
+ new_element.append({'x': data['creation_date'],
+ 'y': t})
+
+ test_data.append({'name': "Promise duration ",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ promise_results = data['details']
+ new_element.append({'x': data['creation_date'],
+ 'y1': promise_results['tests'],
+ 'y2': promise_results['failures']})
+
+ test_data.append({'name': "Promise nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ return test_data
diff --git a/utils/test/result_collection_api/resources/handlers.py b/utils/test/result_collection_api/resources/handlers.py
index be08c9791..1f4d0bb7b 100644
--- a/utils/test/result_collection_api/resources/handlers.py
+++ b/utils/test/result_collection_api/resources/handlers.py
@@ -508,6 +508,7 @@ class TestResultsHandler(GenericApiHandler):
- pod : pod name
- version : platform version (Arno-R1, ...)
- installer (fuel, ...)
+ - build_tag : Jenkins build tag name
- period : x (x last days)
@@ -524,6 +525,7 @@ class TestResultsHandler(GenericApiHandler):
pod_arg = self.get_query_argument("pod", None)
version_arg = self.get_query_argument("version", None)
installer_arg = self.get_query_argument("installer", None)
+ build_tag_arg = self.get_query_argument("build_tag", None)
period_arg = self.get_query_argument("period", None)
# prepare request
@@ -544,6 +546,9 @@ class TestResultsHandler(GenericApiHandler):
if installer_arg is not None:
get_request["installer"] = installer_arg
+ if build_tag_arg is not None:
+ get_request["build_tag"] = build_tag_arg
+
if period_arg is not None:
try:
period_arg = int(period_arg)
@@ -552,7 +557,7 @@ class TestResultsHandler(GenericApiHandler):
if period_arg > 0:
period = datetime.now() - timedelta(days=period_arg)
- obj = {"$gte": period}
+ obj = {"$gte": str(period)}
get_request["creation_date"] = obj
else:
get_request["_id"] = result_id
@@ -705,7 +710,7 @@ class DashboardHandler(GenericApiHandler):
raise HTTPError(HTTP_BAD_REQUEST)
if period_arg > 0:
period = datetime.now() - timedelta(days=period_arg)
- obj = {"$gte": period}
+ obj = {"$gte": str(period)}
get_request["creation_date"] = obj
else:
get_request["_id"] = result_id
diff --git a/utils/test/result_collection_api/resources/models.py b/utils/test/result_collection_api/resources/models.py
index a15d4e34b..6f9386208 100644
--- a/utils/test/result_collection_api/resources/models.py
+++ b/utils/test/result_collection_api/resources/models.py
@@ -1,196 +1,201 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-class Pod:
- """ describes a POD platform """
- def __init__(self):
- self._id = ""
- self.name = ""
- self.creation_date = ""
- self.mode = ""
- self.details = ""
-
- @staticmethod
- def pod_from_dict(pod_dict):
- if pod_dict is None:
- return None
-
- p = Pod()
- p._id = pod_dict.get('_id')
- p.creation_date = str(pod_dict.get('creation_date'))
- p.name = pod_dict.get('name')
- p.mode = pod_dict.get('mode')
- p.details = pod_dict.get('details')
- return p
-
- def format(self):
- return {
- "name": self.name,
- "mode": self.mode,
- "details": self.details,
- "creation_date": str(self.creation_date),
- }
-
- def format_http(self):
- return {
- "_id": str(self._id),
- "name": self.name,
- "mode": self.mode,
- "details": self.details,
- "creation_date": str(self.creation_date),
- }
-
-
-class TestProject:
- """ Describes a test project"""
-
- def __init__(self):
- self._id = None
- self.name = None
- self.description = None
- self.creation_date = None
-
- @staticmethod
- def testproject_from_dict(testproject_dict):
-
- if testproject_dict is None:
- return None
-
- t = TestProject()
- t._id = testproject_dict.get('_id')
- t.creation_date = testproject_dict.get('creation_date')
- t.name = testproject_dict.get('name')
- t.description = testproject_dict.get('description')
-
- return t
-
- def format(self):
- return {
- "name": self.name,
- "description": self.description,
- "creation_date": str(self.creation_date)
- }
-
- def format_http(self, test_cases=0):
- return {
- "_id": str(self._id),
- "name": self.name,
- "description": self.description,
- "creation_date": str(self.creation_date),
- }
-
-
-class TestCase:
- """ Describes a test case"""
-
- def __init__(self):
- self._id = None
- self.name = None
- self.project_name = None
- self.description = None
- self.url = None
- self.creation_date = None
-
- @staticmethod
- def test_case_from_dict(testcase_dict):
-
- if testcase_dict is None:
- return None
-
- t = TestCase()
- t._id = testcase_dict.get('_id')
- t.project_name = testcase_dict.get('project_name')
- t.creation_date = testcase_dict.get('creation_date')
- t.name = testcase_dict.get('name')
- t.description = testcase_dict.get('description')
- t.url = testcase_dict.get('url')
-
- return t
-
- def format(self):
- return {
- "name": self.name,
- "description": self.description,
- "project_name": self.project_name,
- "creation_date": str(self.creation_date),
- "url": self.url
- }
-
- def format_http(self, test_project=None):
- res = {
- "_id": str(self._id),
- "name": self.name,
- "description": self.description,
- "creation_date": str(self.creation_date),
- "url": self.url,
- }
- if test_project is not None:
- res["test_project"] = test_project
-
- return res
-
-
-class TestResult:
- """ Describes a test result"""
-
- def __init__(self):
- self._id = None
- self.case_name = None
- self.project_name = None
- self.pod_name = None
- self.installer = None
- self.version = None
- self.description = None
- self.creation_date = None
- self.details = None
-
- @staticmethod
- def test_result_from_dict(test_result_dict):
-
- if test_result_dict is None:
- return None
-
- t = TestResult()
- t._id = test_result_dict.get('_id')
- t.case_name = test_result_dict.get('case_name')
- t.pod_name = test_result_dict.get('pod_name')
- t.project_name = test_result_dict.get('project_name')
- t.description = test_result_dict.get('description')
- t.creation_date = str(test_result_dict.get('creation_date'))
- t.details = test_result_dict.get('details')
- t.version = test_result_dict.get('version')
- t.installer = test_result_dict.get('installer')
-
- return t
-
- def format(self):
- return {
- "case_name": self.case_name,
- "project_name": self.project_name,
- "pod_name": self.pod_name,
- "description": self.description,
- "creation_date": str(self.creation_date),
- "version": self.version,
- "installer": self.installer,
- "details": self.details,
- }
-
- def format_http(self):
- return {
- "_id": str(self._id),
- "case_name": self.case_name,
- "project_name": self.project_name,
- "pod_name": self.pod_name,
- "description": self.description,
- "creation_date": str(self.creation_date),
- "version": self.version,
- "installer": self.installer,
- "details": self.details,
- }
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class Pod:
+ """ describes a POD platform """
+ def __init__(self):
+ self._id = ""
+ self.name = ""
+ self.creation_date = ""
+ self.mode = ""
+ self.details = ""
+
+ @staticmethod
+ def pod_from_dict(pod_dict):
+ if pod_dict is None:
+ return None
+
+ p = Pod()
+ p._id = pod_dict.get('_id')
+ p.creation_date = str(pod_dict.get('creation_date'))
+ p.name = pod_dict.get('name')
+ p.mode = pod_dict.get('mode')
+ p.details = pod_dict.get('details')
+ return p
+
+ def format(self):
+ return {
+ "name": self.name,
+ "mode": self.mode,
+ "details": self.details,
+ "creation_date": str(self.creation_date),
+ }
+
+ def format_http(self):
+ return {
+ "_id": str(self._id),
+ "name": self.name,
+ "mode": self.mode,
+ "details": self.details,
+ "creation_date": str(self.creation_date),
+ }
+
+
+class TestProject:
+ """ Describes a test project"""
+
+ def __init__(self):
+ self._id = None
+ self.name = None
+ self.description = None
+ self.creation_date = None
+
+ @staticmethod
+ def testproject_from_dict(testproject_dict):
+
+ if testproject_dict is None:
+ return None
+
+ t = TestProject()
+ t._id = testproject_dict.get('_id')
+ t.creation_date = testproject_dict.get('creation_date')
+ t.name = testproject_dict.get('name')
+ t.description = testproject_dict.get('description')
+
+ return t
+
+ def format(self):
+ return {
+ "name": self.name,
+ "description": self.description,
+ "creation_date": str(self.creation_date)
+ }
+
+ def format_http(self, test_cases=0):
+ return {
+ "_id": str(self._id),
+ "name": self.name,
+ "description": self.description,
+ "creation_date": str(self.creation_date),
+ }
+
+
+class TestCase:
+ """ Describes a test case"""
+
+ def __init__(self):
+ self._id = None
+ self.name = None
+ self.project_name = None
+ self.description = None
+ self.url = None
+ self.creation_date = None
+
+ @staticmethod
+ def test_case_from_dict(testcase_dict):
+
+ if testcase_dict is None:
+ return None
+
+ t = TestCase()
+ t._id = testcase_dict.get('_id')
+ t.project_name = testcase_dict.get('project_name')
+ t.creation_date = testcase_dict.get('creation_date')
+ t.name = testcase_dict.get('name')
+ t.description = testcase_dict.get('description')
+ t.url = testcase_dict.get('url')
+
+ return t
+
+ def format(self):
+ return {
+ "name": self.name,
+ "description": self.description,
+ "project_name": self.project_name,
+ "creation_date": str(self.creation_date),
+ "url": self.url
+ }
+
+ def format_http(self, test_project=None):
+ res = {
+ "_id": str(self._id),
+ "name": self.name,
+ "description": self.description,
+ "creation_date": str(self.creation_date),
+ "url": self.url,
+ }
+ if test_project is not None:
+ res["test_project"] = test_project
+
+ return res
+
+
+class TestResult:
+ """ Describes a test result"""
+
+ def __init__(self):
+ self._id = None
+ self.case_name = None
+ self.project_name = None
+ self.pod_name = None
+ self.installer = None
+ self.version = None
+ self.description = None
+ self.creation_date = None
+ self.details = None
+ self.build_tag = None
+
+ @staticmethod
+ def test_result_from_dict(test_result_dict):
+
+ if test_result_dict is None:
+ return None
+
+ t = TestResult()
+ t._id = test_result_dict.get('_id')
+ t.case_name = test_result_dict.get('case_name')
+ t.pod_name = test_result_dict.get('pod_name')
+ t.project_name = test_result_dict.get('project_name')
+ t.description = test_result_dict.get('description')
+ t.creation_date = str(test_result_dict.get('creation_date'))
+ t.details = test_result_dict.get('details')
+ t.version = test_result_dict.get('version')
+ t.installer = test_result_dict.get('installer')
+ t.build_tag = test_result_dict.get('build_tag')
+
+ return t
+
+ def format(self):
+ return {
+ "case_name": self.case_name,
+ "project_name": self.project_name,
+ "pod_name": self.pod_name,
+ "description": self.description,
+ "creation_date": str(self.creation_date),
+ "version": self.version,
+ "installer": self.installer,
+ "details": self.details,
+ "build_tag": self.build_tag
+ }
+
+ def format_http(self):
+ return {
+ "_id": str(self._id),
+ "case_name": self.case_name,
+ "project_name": self.project_name,
+ "pod_name": self.pod_name,
+ "description": self.description,
+ "creation_date": str(self.creation_date),
+ "version": self.version,
+ "installer": self.installer,
+ "details": self.details,
+ "build_tag": self.build_tag
+ }
+
diff --git a/utils/test/result_collection_api/tools/samples/sample.json.postman_collection b/utils/test/result_collection_api/samples/sample.json.postman_collection
index 9ee35d15e..9ee35d15e 100644
--- a/utils/test/result_collection_api/tools/samples/sample.json.postman_collection
+++ b/utils/test/result_collection_api/samples/sample.json.postman_collection
diff --git a/utils/test/result_collection_api/tools/backup-db.sh b/utils/test/result_collection_api/tools/backup-db.sh
deleted file mode 100644
index aa36aa370..000000000
--- a/utils/test/result_collection_api/tools/backup-db.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Orange and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-echo "Backup Test collection DB"
-now=$(date +"%m_%d_%Y_%H_%M_%S")
-echo $now
-echo " ------------- "
-TARGET_DIR=./$now
-TEST_RESULT_DB_BACKUP="test_collection_db."$now".tar.gz"
-
-echo "Create Directory for backup"
-mkdir -p $TARGET_DIR
-
-echo "Export results"
-mongoexport -db test_results_collection -c test_results --out $TARGET_DIR/results.json
-echo "Export test cases"
-mongoexport --db test_results_collection -c test_cases --out $TARGET_DIR/backup-cases.json
-echo "Export projects"
-mongoexport --db test_results_collection -c test_projects --out $TARGET_DIR/backup-projects.json
-echo "Export pods"
-mongoexport --db test_results_collection -c pod --out $TARGET_DIR/backup-pod.json
-
-echo "Create tar.gz"
-tar -cvzf $TEST_RESULT_DB_BACKUP $TARGET_DIR
-
-echo "Delete temp directory"
-rm -Rf $TARGET_DIR
diff --git a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.css b/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.css
deleted file mode 100644
index fc7f248b9..000000000
--- a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.css
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Page CSS*/
-body {
- background-color:#000000;
- font: 10px "Comic Sans MS" ;
- color: orange;
-}
-#title {
- font-size: 30px;
-}
-
-#tests {
- font-size: 20px;
- color: white;
-}
-
-#test_unit {
- position: relative;
- left: 200px;
-}
-
-/* Chart CSS */
-.chart {
- border: 1px dashed orange;
- margin: 5px;
- padding: 2px;
- width: 600px;
- height:300px;
- float:left;
-}
-
-/* Dygraph CSS */
-/* This applies to the title, x-axis label and y-axis label */
-#div_g .dygraph-label {
- font-family: Arial, Helvetica, sans-serif;
-}
-/* This rule only applies to the chart title */
-#div_g .dygraph-title {
- font-size: 12px;
- color : orange;
-}
- /* This rule only applies to the y-axis label */
-#div_g .dygraph-ylabel {
- font-size: 10px;
- color : orange;
-}
-
-/* Overrides dygraph-legend */
-.dygraph-legend {
- font-size: 10px !important;
- width: 400px !important;
- text-align: left !important;
- left: 200px !important;
-}
diff --git a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.html b/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.html
deleted file mode 100644
index 908624afe..000000000
--- a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--
-##############################################################################
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
--->
-<!DOCTYPE html>
-<html>
- <head>
- <meta charset="utf-8">
- <meta http-equiv="X-UA-Compatible" content="IE=EmulateIE7; IE=EmulateIE9">
- <title>OPNFV Functest</title>
- <script type="text/javascript" src="http://dygraphs.com/dygraph-combined.js"></script>
- <script type="text/javascript" src="https://code.jquery.com/jquery-2.2.0.min.js"></script>
- <script type="text/javascript" src="opnfv_dashboard_tests_conf.json"></script>
- <link rel="stylesheet" href="opnfv_dashboard_tests.css">
- </head>
- <body>
- <div id="title">FuncTest</div>
- <div id="tests"></div>
- <script type="text/javascript" src="opnfv_dashboard_tests.js"></script>
- </body>
-</html>
diff --git a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.js b/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.js
deleted file mode 100644
index 93c75c3cb..000000000
--- a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests.js
+++ /dev/null
@@ -1,117 +0,0 @@
-/*#############################################################################
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-*/
-
-// Function to sort data to be ordered according to the time
-function sortFunction(a,b){
- var dateA = new Date(a.date).getTime();
- var dateB = new Date(b.date).getTime();
- return dateA > dateB ? 1 : -1;
-};
-
-// Function to format date according to JS
-function formatDate(inputDate){
- var input=inputDate.slice(0,-7);
- input=input.replace(' ','T');
- input+='Z';
- return new Date(Date.parse(input));
-}
-
-// Draw a single graph for a specific test for a specific installer
-function drawGraph(filename,installer,test_unit){
- $.getJSON( filename, function(data) {
- var serie=[];
- index_test=0;
- // find index mapping to the test_unit
- for (var i=0;i<data.dashboard.length;i++)
- if (data.dashboard[i].name==test_unit){index_test=i; break;}
-
- // build the data according to dygraph
- for (i=0;i<data.dashboard[index_test].data_set.length;i++) {
- var d=[];
- result=data.dashboard[index_test].data_set[i];
- d.push(formatDate(result.x));
-
- // push y data if available
- var keys=Object.keys(result);
- for (var y in opnfv_dashboard_ys)
- if ($.inArray(opnfv_dashboard_ys[y], keys)!=-1) d.push(result[opnfv_dashboard_ys[y]]);
- serie.push(d);
- };
-
- // sort by date/time
- serie.sort(function(a,b){
- return new Date(a[0]).getTime()-new Date(b[0]).getTime()
- });
-
- // Label management
- var yLabel='';
- if (test_unit.includes('nb'))
- yLabel='number';
- else if (test_unit.includes('duration'))
- yLabel='seconds';
- var labels=[];
- labels.push('time');
- var keys=Object.keys(data.dashboard[index_test].info);
- for (var y in opnfv_dashboard_y_labels)
- if ($.inArray(opnfv_dashboard_y_labels[y], keys)!=-1) labels.push(data.dashboard[index_test].info[opnfv_dashboard_y_labels[y]]);
-
- // Draw the graph
- g=new Dygraph(
- document.getElementById(installer),
- serie,
- {
- colors:[opnfv_dashboard_graph_color_ok, opnfv_dashboard_graph_color_nok, opnfv_dashboard_graph_color_other],
- fillGraph:true,
- legend:opnfv_dashboard_graph_legend,
- title:installer,
- titleHeight:opnfv_dashboard_graph_title_height,
- ylabel:yLabel,
- labelsDivStyles:{
- 'text-align': opnfv_dashboard_graph_text_align,
- 'background-color': opnfv_dashboard_graph_background_color
- },
- axisLabelColor:opnfv_dashboard_graph_axis_label_color,
- labels:labels,
- highlightSeriesOpts:{strokeWidth:opnfv_dashboard_graph_stroke_width},
- stepPlot:true
- }
- );
-});
-}
-
-// function to generate all the graphs for all installers
-function drawGraphsSerie(project,test,test_unit) {
- for (i=0;i<opnfv_dashboard_installers.length;i++){
- var filename='./'+opnfv_dashboard_file_directory+'/'+project+'/'+opnfv_dashboard_file_prefix+project+'_'+test+'_'+opnfv_dashboard_installers[i]+opnfv_dashboard_file_suffix;
- drawGraph(filename,opnfv_dashboard_installers[i],test_unit);
- }
-}
-
-// generate text and buttons for each test and unit test
-var text_html='';
-for (var i in opnfv_dashboard_projects)
- for (var project in opnfv_dashboard_projects[i])
- for (var test in opnfv_dashboard_projects[i][project]){
- text_html+=test+' ';
- for (var t in opnfv_dashboard_projects[i][project][test]){
- test_unit=opnfv_dashboard_projects[i][project][test][t];
- text_html+='<button onClick="drawGraphsSerie(\''+project+'\',\''+test +'\',\''+test_unit+'\')">'+test_unit+'</button>';
- }
- text_html+='<br>';
- }
-document.getElementById('tests').innerHTML=text_html;
-
-// debug
-console.log(text_html);
-
-// generate a div per installer (to host the graph)
-for (var i in opnfv_dashboard_installers){
- var div_installer='<div class= "chart" id="'+opnfv_dashboard_installers[i]+'"/>'
- var $newdiv=$(div_installer);
- $("body").append($newdiv);
-}
diff --git a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests_conf.json b/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests_conf.json
deleted file mode 100644
index 07c1a5e85..000000000
--- a/utils/test/result_collection_api/tools/dashboard/opnfv_dashboard_tests_conf.json
+++ /dev/null
@@ -1,29 +0,0 @@
-var opnfv_dashboard_installers=['apex','compass','fuel','joid'];
-var opnfv_dashboard_projects=[
- {
- 'functest':{
- 'tempest':['Tempest duration','Tempest nb tests/nb failures'],
- 'vPing':['vPing duration'],
- 'vPing_userdata':['vPing_userdata duration'],
- 'vIMS':['vIMS nb tests passed/failed/skipped','vIMS orchestrator/VNF/test duration']
- }
- }
-];
-
-var opnfv_dashboard_file_directory='res';
-var opnfv_dashboard_file_prefix='res_';
-var opnfv_dashboard_file_suffix='.json';
-
-var opnfv_dashboard_ys=['y','y1','y2','y3'];
-var opnfv_dashboard_y_labels=['ylabel','y1label','y2label','y3label'];
-
-var opnfv_dashboard_graph_color_ok="#00FF00";
-var opnfv_dashboard_graph_color_nok="#FF0000";
-var opnfv_dashboard_graph_color_other="#0000FF";
-
-var opnfv_dashboard_graph_legend='always'; // legend print
-var opnfv_dashboard_graph_title_height=30; // height for the graph title
-var opnfv_dashboard_graph_stroke_width=5; // line stroke when mouse over
-var opnfv_dashboard_graph_axis_label_color='orange';
-var opnfv_dashboard_graph_text_align='right';
-var opnfv_dashboard_graph_background_color='transparent';