Diffstat (limited to 'utils')
-rwxr-xr-x  utils/docu-build-new.sh                                             19
-rwxr-xr-x  utils/fetch_os_creds.sh                                             10
-rwxr-xr-x  utils/jenkins-jnlp-connect.sh                                        6
-rw-r--r--  utils/test/result_collection_api/dashboard/functest2Dashboard.py   109
-rw-r--r--  utils/test/result_collection_api/resources/handlers.py              21
5 files changed, 143 insertions, 22 deletions
diff --git a/utils/docu-build-new.sh b/utils/docu-build-new.sh
index 00d046479..67a62e381 100755
--- a/utils/docu-build-new.sh
+++ b/utils/docu-build-new.sh
@@ -29,17 +29,22 @@ while read -d $'\n'; do
done < <(find docs/ -name 'index.rst' -printf '%h\n' | sort -u )
for dir in "${directories[@]}"; do
+  _name="${dir##*/}"
+  _build="${dir}/build"
+  _output="docs/output/${_name}"
echo
- echo "#############################"
-  echo "Building DOCS in ${dir##*/}"
- echo "#############################"
+  echo "#################${_name//?/#}"
+  echo "Building DOCS in ${_name}"
+  echo "#################${_name//?/#}"
echo
-  if [[ ! -d docs/output/"${dir##*/}/" ]]; then
-    mkdir -p docs/output/"${dir##*/}/"
-  fi
+  mkdir -p "${_output}"
+
-  sphinx-build -b html -E -c docs/etc/ ""$dir"/" docs/output/"${dir##*/}/"
+  sphinx-build -b html -E -c docs/etc "${dir}" "${_output}"
+  sphinx-build -b latex -E -c docs/etc "${dir}" "${_build}"
+  make -C "${_build}" LATEXOPTS='--interaction=nonstopmode' all-pdf
+  mv "${_build}"/*.pdf "${_output}"
done
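
Note on the docu-build-new.sh hunk above: ${_name//?/#} expands to one '#' per character of the directory name, so the new banner width tracks the "Building DOCS in <name>" heading instead of being fixed. A minimal Python illustration of the same sizing rule (illustrative only, not part of the change):

    # One '#' per character of the fixed "Building DOCS in " prefix plus one
    # per character of the name -- the effect of "#################${_name//?/#}".
    def banner(name):
        return "#" * len("Building DOCS in ") + "#" * len(name)

    print(banner("opnfvdocs"))
    print("Building DOCS in opnfvdocs")
    print(banner("opnfvdocs"))
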
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index cefc85761..7a5f8121a 100755
--- a/utils/fetch_os_creds.sh
+++ b/utils/fetch_os_creds.sh
@@ -126,14 +126,12 @@ elif [ "$installer_type" == "foreman" ]; then
| grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
elif [ "$installer_type" == "compass" ]; then
- #ip_compass="10.1.0.12"
verify_connectivity $installer_ip
-
- # controller_ip='10.1.0.222'
- controller_ip=$(sshpass -p'root' ssh 2>/dev/null -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@10.1.0.12 \
- 'mysql -ucompass -pcompass -Dcompass -e"select package_config from cluster;"' \
- | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"ha_proxy\": {\"vip\":/)print $i}' \
+ controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
+ 'mysql -ucompass -pcompass -Dcompass -e"select * from cluster;"' \
+ | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"host1\"/) {print $(i+1);break;}}' \
| grep -oP "\d+.\d+.\d+.\d+")
+
if [ -z $controller_ip ]; then
error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
fi
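
Note on the fetch_os_creds.sh hunk above: the compass branch now queries the cluster table on the installer node itself and has awk print the field that follows the one containing "host1", then extracts the first IPv4 address from it. A rough Python sketch of that field-scan logic against a made-up sample row (the real mysql output format is not shown in this change):

    import re

    # Hypothetical sample row; only the scan logic mirrors the awk program:
    # find the comma-separated field containing "host1", take the next field,
    # and pull an IPv4 address out of it.
    sample_row = '..., "host1", "10.1.0.50", "host2", "10.1.0.51", ...'
    fields = sample_row.split(',')
    controller_ip = None
    for i, field in enumerate(fields[:-1]):
        if '"host1"' in field:
            match = re.search(r'\d+\.\d+\.\d+\.\d+', fields[i + 1])
            controller_ip = match.group(0) if match else None
            break
    print(controller_ip)  # -> 10.1.0.50 for the sample row above
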
diff --git a/utils/jenkins-jnlp-connect.sh b/utils/jenkins-jnlp-connect.sh
index 03e47b8b2..d263b198a 100755
--- a/utils/jenkins-jnlp-connect.sh
+++ b/utils/jenkins-jnlp-connect.sh
@@ -78,7 +78,7 @@ makemonit () {
echo "Writing the following as monit config:"
cat << EOF | tee $monitconfdir/jenkins
check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
-start program = "/bin/bash -c 'cd $dir; export started_monit=true; $0 $@'" as uid "$jenkinsuser" and gid "$jenkinsuser"
+start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@'"
stop program = "/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'"
EOF
}
@@ -87,7 +87,7 @@ if [[ -f $monitconfdir/jenkins ]]; then
#test for diff
if [[ "$(diff $monitconfdir/jenkins <(echo "\
check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
-start program = \"/bin/bash -c 'cd $dir; export started_monit=true; $0 $@'\" as uid \"$jenkinsuser\" and gid \"$jenkinsuser\"
+start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@'\"
stop program = \" /bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'\"\
") )" ]]; then
echo "Updating monit config..."
@@ -169,7 +169,7 @@ do
s ) slave_secret="$OPTARG";;
h ) usage; exit;;
t ) started_monit=true
- skip_monit=true
+ skip_monit=true
run_in_foreground=true ;;
f ) test_firewall ;;
\? ) echo "Unknown option: -$OPTARG" >&2; exit 1;;
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
index 688f0c28c..bfb7c8729 100644
--- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py
+++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
@@ -21,7 +21,7 @@ def get_functest_cases():
get the list of the supported test cases
TODO: update the list when adding a new test case for the dashboard
"""
- return ["vPing", "Tempest", "odl", "Rally"]
+ return ["status", "vPing", "vIMS", "Tempest", "odl", "Rally"]
def format_functest_for_dashboard(case, results):
@@ -53,6 +53,113 @@ def check_functest_case_exist(case):
return True
+def format_status_for_dashboard(results):
+ test_data = [{'description': 'Functest status'}]
+
+    # define the aggregation ("magic equation") for the overall status
+    # 5 suites: vPing, odl, Tempest, vIMS, Rally
+    # which overall KPIs make sense is still to be decided
+
+ # TODO to be done and discussed
+ testcases = get_functest_cases()
+ test_data.append({'nb test suite(s) run': len(testcases)-1})
+ # test_data.append({'nb test suite(s) failed':1})
+ # test_data.append({'test suite run': ['vPing', 'tempest', 'vIMS' ]})
+ # test_data.append({'average Openstack Tempest failure rate (%)': 10})
+ # test_data.append({'average odl failure rate (%)': 10})
+
+ return test_data
+
+
+def format_vIMS_for_dashboard(results):
+ """
+ Post processing for the vIMS test case
+ """
+ test_data = [{'description': 'vIMS results for Dashboard'}]
+
+ # Graph 1: (duration_deployment_orchestrator,
+ # duration_deployment_vnf,
+ # duration_test) = f(time)
+ # ********************************
+ new_element = []
+
+ for data in results:
+ new_element.append({'x': data['creation_date'],
+ 'y1': data['details']['orchestrator']['duration'],
+ 'y2': data['details']['vIMS']['duration'],
+ 'y3': data['details']['sig_test']['duration']})
+
+    test_data.append({'name': "vIMS orchestrator/deployment/test durations",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'orchestrator deployment duration',
+                               'y2label': 'vIMS deployment duration',
+                               'y3label': 'vIMS test duration'},
+ 'data_set': new_element})
+
+ # Graph 2: (Nb test, nb failure, nb skipped)=f(time)
+ # **************************************************
+ new_element = []
+
+ for data in results:
+ # Retrieve all the tests
+ nbTests = 0
+ nbFailures = 0
+ nbSkipped = 0
+ vIMS_test = data['details']['sig_test']['result']
+
+ for data_test in vIMS_test:
+ # Calculate nb of tests run and nb of tests failed
+ # vIMS_results = get_vIMSresults(vIMS_test)
+ # print vIMS_results
+ if data_test['result'] == "Passed":
+ nbTests += 1
+ elif data_test['result'] == "Failed":
+ nbFailures += 1
+ elif data_test['result'] == "Skipped":
+ nbSkipped += 1
+
+ new_element.append({'x': data['creation_date'],
+ 'y1': nbTests,
+ 'y2': nbFailures,
+ 'y3': nbSkipped})
+
+ test_data.append({'name': "vIMS nb tests passed/failed/skipped",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'y1label': 'Number of tests passed',
+ 'y2label': 'Number of tests failed',
+ 'y3label': 'Number of tests skipped'},
+ 'data_set': new_element})
+
+    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+ # ********************************************************
+ nbTests = 0
+ nbFailures = 0
+
+ for data in results:
+ vIMS_test = data['details']['sig_test']['result']
+
+ for data_test in vIMS_test:
+ nbTestsOK = 0
+ nbTestsKO = 0
+
+ if data_test['result'] == "Passed":
+ nbTestsOK += 1
+ elif data_test['result'] == "Failed":
+ nbTestsKO += 1
+
+ nbTests += nbTestsOK + nbTestsKO
+ nbFailures += nbTestsKO
+
+    test_data.append({'name': "Total number of tests run/failed",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Run': nbTests,
+ 'Failed': nbFailures}]})
+
+ return test_data
+
+
def format_Tempest_for_dashboard(results):
"""
Post processing for the Tempest test case
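
Note on format_vIMS_for_dashboard() above: it assumes each stored result carries durations under details.orchestrator, details.vIMS and details.sig_test, plus a per-test list under details.sig_test.result whose entries report "Passed", "Failed" or "Skipped". A minimal sketch of that input shape (key names taken from the code above; the values, units and import path are assumptions):

    # Assumes the module is importable as functest2Dashboard.
    from functest2Dashboard import format_vIMS_for_dashboard

    # Hypothetical result document; only the key layout is taken from the code.
    sample_result = {
        'creation_date': '2015-09-01 12:00:00',
        'details': {
            'orchestrator': {'duration': 1200},   # assumed to be seconds
            'vIMS': {'duration': 900},
            'sig_test': {
                'duration': 600,
                'result': [
                    {'result': 'Passed'},
                    {'result': 'Failed'},
                    {'result': 'Skipped'},
                ],
            },
        },
    }

    test_data = format_vIMS_for_dashboard([sample_result])
    # test_data[0] is the description entry, followed by the durations graph,
    # the passed/failed/skipped graph and the run/failed bar totals.
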
diff --git a/utils/test/result_collection_api/resources/handlers.py b/utils/test/result_collection_api/resources/handlers.py
index 85c6172a5..be08c9791 100644
--- a/utils/test/result_collection_api/resources/handlers.py
+++ b/utils/test/result_collection_api/resources/handlers.py
@@ -719,6 +719,21 @@ class DashboardHandler(GenericApiHandler):
"error:Project name missing")
elif check_dashboard_ready_project(project_arg, "./dashboard"):
res = []
+
+ if case_arg is None:
+ raise HTTPError(
+ HTTP_NOT_FOUND,
+ "error:Test case missing for project " + project_arg)
+
+ # special case of status for project
+ if case_arg == "status":
+ del get_request["case_name"]
+ # retention time to be agreed
+ # last five days by default?
+ # TODO move to DB
+ period = datetime.now() - timedelta(days=5)
+ get_request["creation_date"] = {"$gte": period}
+
# fetching results
cursor = self.db.test_results.find(get_request)
while (yield cursor.fetch_next):
@@ -726,11 +741,7 @@ class DashboardHandler(GenericApiHandler):
cursor.next_object())
res.append(test_result.format_http())
- if case_arg is None:
- raise HTTPError(
- HTTP_NOT_FOUND,
- "error:Test case missing for project " + project_arg)
- elif check_dashboard_ready_case(project_arg, case_arg):
+ if check_dashboard_ready_case(project_arg, case_arg):
dashboard = get_dashboard_result(project_arg, case_arg, res)
else:
raise HTTPError(
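
Note on the DashboardHandler change above: for the new "status" case the case_name selector is dropped and results are limited to the last five days through a MongoDB $gte filter on creation_date. A standalone sketch of the filter that ends up being passed to find() (the project selector below is a placeholder; the 5-day window and the creation_date filter come from the handler):

    from datetime import datetime, timedelta

    get_request = {'project_name': 'functest'}   # placeholder selector
    # the case_name key, normally present, is removed for "status"
    period = datetime.now() - timedelta(days=5)
    get_request['creation_date'] = {'$gte': period}

    # self.db.test_results.find(get_request) then yields every result stored
    # during the last five days for the project, across all test cases.
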