Diffstat (limited to 'utils')
-rwxr-xr-x  utils/docs-build.sh                                                 2
-rwxr-xr-x  utils/fetch_os_creds.sh                                             2
-rw-r--r--  utils/push-test-logs.sh                                             4
-rw-r--r--  utils/test/result_collection_api/dashboard/dashboard_utils.py      2
-rw-r--r--  utils/test/result_collection_api/dashboard/functest2Dashboard.py  88
-rw-r--r--  utils/test/result_collection_api/dashboard/promise2Dashboard.py  103
6 files changed, 189 insertions(+), 12 deletions(-)
diff --git a/utils/docs-build.sh b/utils/docs-build.sh
index e5d6b4a7f..3c0e8bd80 100755
--- a/utils/docs-build.sh
+++ b/utils/docs-build.sh
@@ -41,7 +41,7 @@ revision="$(git rev-parse --short HEAD)"
rev_full="$(git rev-parse HEAD)"
version="$(git describe --abbrev=0 2> /dev/null || echo draft) ($revision)"
project="$(basename $(git rev-parse --show-toplevel))"
-html_notes="\n Revision: $rev_full\n\n Build date: |today|"
+html_notes=" Revision: $rev_full\n Build date: |today|"
default_conf='releng/docs/etc/conf.py'
opnfv_logo='releng/docs/etc/opnfv-logo.png'
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index d1b192dcb..a5c601b82 100755
--- a/utils/fetch_os_creds.sh
+++ b/utils/fetch_os_creds.sh
@@ -94,7 +94,7 @@ if [ "$installer_type" == "fuel" ]; then
#This file contains the mgmt keystone API, we need the public one for our rc file
admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
public_ip=$(sshpass -p r00tme ssh $ssh_options root@${installer_ip} \
- "ssh ${controller_ip} 'source openrc; keystone endpoint-list'" \
+ "ssh ${controller_ip} 'source openrc; openstack endpoint list --long'" \
| grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
#| grep http | head -1 | cut -d '|' -f 4 | sed 's/v1\/.*/v1\//' | sed 's/ //g') &> /dev/null
#NOTE: this is super ugly sed 's/v1\/.*/v1\//'OS_AUTH_URL
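For reference, the pipeline above keeps the row of the 'openstack endpoint list --long' output that mentions the admin auth URL, splits it on whitespace, and takes the first token starting with http. A minimal Python sketch of that extraction; the sample row and URLs are hypothetical, and the real column layout varies across CLI versions:

    # Sketch of: grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1
    sample_output = (
        "| 1 | RegionOne | http://172.30.10.2:5000/v2.0 "
        "| http://192.168.0.2:5000/v2.0 |\n"
    )
    admin_auth_url = "http://192.168.0.2:5000/v2.0"

    def first_public_http(output, admin_url):
        # grep $admin_ip: keep only rows mentioning the admin auth URL
        for line in output.splitlines():
            if admin_url in line:
                # sed 's/ /\n/g' | grep ^http | head -1: first
                # whitespace-separated token starting with "http"
                for token in line.split():
                    if token.startswith("http"):
                        return token
        return None

    print(first_public_http(sample_output, admin_auth_url))
    # -> http://172.30.10.2:5000/v2.0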
diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh
index 331cbd942..68f80874a 100644
--- a/utils/push-test-logs.sh
+++ b/utils/push-test-logs.sh
@@ -36,9 +36,9 @@ if [ -d "$dir_result" ]; then
if [ $? != 0 ]; then
echo "Not possible to push results to artifact: gsutil not installed";
else
- gsutil ls gs://artifacts.opnfv.org/"$project_artifact"/ &>/dev/null
+ gsutil ls gs://artifacts.opnfv.org/"$project"/ &>/dev/null
if [ $? != 0 ]; then
- echo "Not possible to push results to artifact: wrong credentials.";
+ echo "Not possible to push results to artifact: wrong credentials or missing bucket.";
else
echo "copy result files to artifact $project_artifact"
gsutil -m cp -r "$dir_result" gs://artifacts.opnfv.org/"$project_artifact"/
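The corrected messages keep the two failure modes distinct: the outer test checks that the gsutil binary exists, while the inner 'gsutil ls' probes the project bucket, so a failure there points at a missing bucket or bad credentials rather than a missing tool. A rough Python equivalent of the guard, for illustration only (the function name is hypothetical):

    import os
    import subprocess

    def push_results(dir_result, project, project_artifact):
        devnull = open(os.devnull, 'w')
        # outer check: is gsutil installed at all?
        if subprocess.call(['which', 'gsutil'],
                           stdout=devnull, stderr=devnull) != 0:
            print("Not possible to push results to artifact: gsutil not installed")
            return
        # inner check: can we list the project bucket with these credentials?
        bucket = 'gs://artifacts.opnfv.org/' + project + '/'
        if subprocess.call(['gsutil', 'ls', bucket],
                           stdout=devnull, stderr=devnull) != 0:
            print("Not possible to push results to artifact: "
                  "wrong credentials or missing bucket.")
            return
        print("copy result files to artifact " + project_artifact)
        subprocess.call(['gsutil', '-m', 'cp', '-r', dir_result,
                         'gs://artifacts.opnfv.org/' + project_artifact + '/'])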
diff --git a/utils/test/result_collection_api/dashboard/dashboard_utils.py b/utils/test/result_collection_api/dashboard/dashboard_utils.py
index 8d83b006a..3252c3af6 100644
--- a/utils/test/result_collection_api/dashboard/dashboard_utils.py
+++ b/utils/test/result_collection_api/dashboard/dashboard_utils.py
@@ -26,6 +26,8 @@ from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
check_bottlenecks_case_exist
from qtip2Dashboard import format_qtip_for_dashboard, \
check_qtip_case_exist
+from promise2Dashboard import format_promise_for_dashboard, \
+ check_promise_case_exist
# any test project wishing to provide dashboard ready values
# must include at least 2 methods
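As the comment above states, every <project>2Dashboard module follows the same two-method contract: check_<project>_case_exist(case) and format_<project>_for_dashboard(case, results). A hypothetical table-driven dispatcher sketches how a caller can route on the project name; the real dashboard_utils.py wires each project in explicitly:

    from promise2Dashboard import format_promise_for_dashboard, \
        check_promise_case_exist

    # (checker, formatter) per project; extend when a new project lands
    DASHBOARD_PLUGINS = {
        'promise': (check_promise_case_exist, format_promise_for_dashboard),
        # 'functest': (check_functest_case_exist, format_functest_for_dashboard),
    }

    def get_dashboard_result(project, case, results):
        checker, formatter = DASHBOARD_PLUGINS[project]
        if not checker(case):
            return []
        return formatter(case, results)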
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
index 65dbca654..a817f7597 100644
--- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py
+++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
@@ -17,12 +17,14 @@
import re
import datetime
+
def get_functest_cases():
"""
get the list of the supported test cases
TODO: update the list when adding a new test case for the dashboard
"""
- return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL", "ONOS", "Rally"]
+ return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL",
+ "ONOS", "Rally"]
def format_functest_for_dashboard(case, results):
@@ -66,7 +68,9 @@ def format_status_for_dashboard(results):
test_data.append({'nb test suite(s) run': len(testcases)-1})
test_data.append({'vPing': '100%'})
test_data.append({'VIM status': '82%'})
- test_data.append({'SDN Controllers': {'odl':'92%', 'onos':'95%', 'opencontrail':'93%'}})
+ test_data.append({'SDN Controllers': {'odl': '92%',
+ 'onos': '95%',
+ 'ocl': '93%'}})
test_data.append({'VNF deployment': '95%'})
return test_data
@@ -227,7 +231,7 @@ def format_ODL_for_dashboard(results):
nbFailures = 0
for odl in odl_results:
if (odl['test_status']['@status'] == "FAIL"):
- nbFailures+=1
+ nbFailures += 1
new_element.append({'x': data['creation_date'],
'y1': len(odl_results),
'y2': nbFailures})
@@ -246,25 +250,91 @@ def format_ONOS_for_dashboard(results):
Post processing for the ONOS test case
"""
test_data = [{'description': 'ONOS results for Dashboard'}]
- # Graph 1: (duration)=f(time)
+ # Graph 1: (duration FUNCvirNet)=f(time)
# ***************************************
new_element = []
# default duration 0:00:08.999904
# consider only seconds => 09
for data in results:
- t = data['details']['duration']
- h,m,s = re.split(':',t)
+ t = data['details']['FUNCvirNet']['duration']
+ h, m, s = re.split(':', t)
s = round(float(s))
- new_duration = int(datetime.timedelta(hours=int(h),minutes=int(m),seconds=int(s)).total_seconds())
+ new_duration = int(datetime.timedelta(hours=int(h),
+ minutes=int(m),
+ seconds=int(s)).total_seconds())
new_element.append({'x': data['creation_date'],
'y': new_duration})
- test_data.append({'name': "ONOS duration",
+ test_data.append({'name': "ONOS FUNCvirNet duration",
'info': {'type': "graph",
'xlabel': 'time (s)',
'ylabel': 'duration (s)'},
'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)FUNCvirNet=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ onos_results = data['details']['FUNCvirNet']['status']
+ nbFailures = 0
+ for onos in onos_results:
+ if (onos['Case result'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['creation_date'],
+ 'y1': len(onos_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ # Graph 3: (duration FUNCvirNetL3)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['FUNCvirNetL3']['duration']
+ h, m, s = re.split(':', t)
+ s = round(float(s))
+ new_duration = int(datetime.timedelta(hours=int(h),
+ minutes=int(m),
+ seconds=int(s)).total_seconds())
+ new_element.append({'x': data['creation_date'],
+ 'y': new_duration})
+
+ test_data.append({'name': "ONOS FUNCvirNetL3 duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 4: (Nb test, nb failure)FUNCvirNetL3=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ onos_results = data['details']['FUNCvirNetL3']['status']
+ nbFailures = 0
+ for onos in onos_results:
+ if (onos['Case result'] == "FAIL"):
+ nbFailures += 1
+ new_element.append({'x': data['creation_date'],
+ 'y1': len(onos_results),
+ 'y2': nbFailures})
+
+ test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
return test_data
@@ -312,6 +382,7 @@ def format_vPing_for_dashboard(results):
return test_data
+
def format_vPing_userdata_for_dashboard(results):
"""
Post processing for the vPing_userdata test case
@@ -347,3 +418,4 @@ def format_vPing_userdata_for_dashboard(results):
'Nb Success': nbTestOk}]})
return test_data
+
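The four new ONOS graphs assume result documents where data['details'] holds FUNCvirNet and FUNCvirNetL3 sub-objects, each with a duration string (e.g. '0:00:08.999904') and a status list of {'Case result': ...} entries. A hypothetical input showing the expected shape:

    sample_results = [{
        'creation_date': '2015-12-01 10:00',
        'details': {
            'FUNCvirNet': {'duration': '0:00:08.999904',
                           'status': [{'Case result': 'PASS'},
                                      {'Case result': 'FAIL'}]},
            'FUNCvirNetL3': {'duration': '0:01:12.500000',
                             'status': [{'Case result': 'PASS'}]},
        },
    }]

    test_data = format_ONOS_for_dashboard(sample_results)
    # test_data[0] is the description entry, followed by the 4 graph entries;
    # the first FUNCvirNet duration point is {'x': '2015-12-01 10:00', 'y': 9}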
diff --git a/utils/test/result_collection_api/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/dashboard/promise2Dashboard.py
new file mode 100644
index 000000000..84f43a7d1
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/promise2Dashboard.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test cases of the Promise project;
+# a new method format_<Test_case>_for_dashboard(results) is needed per case
+# v0.1: basic example with a method for the promise test case
+#
+import re
+import datetime
+
+
+def get_promise_cases():
+ """
+ get the list of the supported test cases
+ TODO: update the list when adding a new test case for the dashboard
+ """
+ return ["promise"]
+
+
+def format_promise_for_dashboard(case, results):
+ """
+ generic method calling the method corresponding to the test case
+ check that the testcase is properly declared first
+ then build the call to the specific method
+ """
+ if check_promise_case_exist(case):
+ # note we add _case because testcase and project had the same name
+ # TODO: refactor... this looked fine at the beginning with only 1 project,
+ # but it is now rather ugly and clearly not optimized...
+ cmd = "format_" + case + "_case_for_dashboard(results)"
+ res = eval(cmd)
+ else:
+ res = []
+ print "Test case not declared"
+ return res
+
+
+def check_promise_case_exist(case):
+ """
+ check if the testcase exists
+ if the test case is not defined or not declared in the list
+ return False
+ """
+ promise_cases = get_promise_cases()
+
+ if (case is None or case not in promise_cases):
+ return False
+ else:
+ return True
+
+
+
+
+
+def format_promise_case_for_dashboard(results):
+ """
+ Post processing for the promise test case
+ """
+ test_data = [{'description': 'Promise results for Dashboard'}]
+ # Graph 1: (duration)=f(time)
+ # ***************************************
+ new_element = []
+
+ # default duration 0:00:08.999904
+ # consider only seconds => 09
+ for data in results:
+ t = data['details']['duration']
+ new_element.append({'x': data['creation_date'],
+ 'y': t})
+
+ test_data.append({'name': "Promise duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time (s)',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure)=f(time)
+ # ***************************************
+ new_element = []
+
+ for data in results:
+ promise_results = data['details']
+ new_element.append({'x': data['creation_date'],
+ 'y1': promise_results['tests'],
+ 'y2': promise_results['failures']})
+
+ test_data.append({'name': "Promise nb tests/nb failures",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests',
+ 'y2label': 'Number of failures'},
+ 'data_set': new_element})
+
+ return test_data
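Unlike the ONOS case, the Promise formatter reads 'duration', 'tests' and 'failures' directly from data['details']. A hypothetical usage sketch:

    sample_results = [
        {'creation_date': '2015-12-01 10:00',
         'details': {'duration': 50, 'tests': 33, 'failures': 1}},
        {'creation_date': '2015-12-02 10:00',
         'details': {'duration': 48, 'tests': 33, 'failures': 0}},
    ]

    test_data = format_promise_for_dashboard("promise", sample_results)
    # test_data[1]['data_set'] -> [{'x': '2015-12-01 10:00', 'y': 50}, ...]
    # test_data[2]['data_set'] -> [{'x': '2015-12-01 10:00', 'y1': 33, 'y2': 1}, ...]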