Diffstat (limited to 'utils')
-rwxr-xr-x   utils/fetch_os_creds.sh                                             10
-rwxr-xr-x   utils/jenkins-jnlp-connect.sh                                        6
-rw-r--r--   utils/test/result_collection_api/dashboard/functest2Dashboard.py   109
-rwxr-xr-x   utils/test/result_collection_api/dashboard/vsperf2Dashboard.py     121
-rw-r--r--   utils/test/result_collection_api/resources/handlers.py              21
5 files changed, 252 insertions, 15 deletions
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index cefc85761..7a5f8121a 100755
--- a/utils/fetch_os_creds.sh
+++ b/utils/fetch_os_creds.sh
@@ -126,14 +126,12 @@ elif [ "$installer_type" == "foreman" ]; then
         | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
 
 elif [ "$installer_type" == "compass" ]; then
-    #ip_compass="10.1.0.12"
     verify_connectivity $installer_ip
-
-    # controller_ip='10.1.0.222'
-    controller_ip=$(sshpass -p'root' ssh 2>/dev/null -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@10.1.0.12 \
-        'mysql -ucompass -pcompass -Dcompass -e"select package_config from cluster;"' \
-        | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"ha_proxy\": {\"vip\":/)print $i}' \
+    controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
+        'mysql -ucompass -pcompass -Dcompass -e"select * from cluster;"' \
+        | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"host1\"/) {print $(i+1);break;}}' \
         | grep -oP "\d+.\d+.\d+.\d+")
+
     if [ -z $controller_ip ]; then
         error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
     fi
diff --git a/utils/jenkins-jnlp-connect.sh b/utils/jenkins-jnlp-connect.sh
index 03e47b8b2..d263b198a 100755
--- a/utils/jenkins-jnlp-connect.sh
+++ b/utils/jenkins-jnlp-connect.sh
@@ -78,7 +78,7 @@ makemonit () {
 echo "Writing the following as monit config:"
 cat << EOF | tee $monitconfdir/jenkins
 check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
-start program = "/bin/bash -c 'cd $dir; export started_monit=true; $0 $@'" as uid "$jenkinsuser" and gid "$jenkinsuser"
+start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@'"
 stop program = "/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'"
 EOF
 }
@@ -87,7 +87,7 @@ if [[ -f $monitconfdir/jenkins ]]; then
 #test for diff
 if [[ "$(diff $monitconfdir/jenkins <(echo "\
 check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
-start program = \"/bin/bash -c 'cd $dir; export started_monit=true; $0 $@'\" as uid \"$jenkinsuser\" and gid \"$jenkinsuser\"
+start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@'\"
 stop program = \" /bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'\"\
 ") )" ]]; then
 echo "Updating monit config..."
@@ -169,7 +169,7 @@ do
     s ) slave_secret="$OPTARG";;
     h ) usage; exit;;
    t ) started_monit=true
-        skip_monit=true
+        skip_monit=true
         run_in_foreground=true ;;
    f ) test_firewall ;;
    \? ) echo "Unknown option: -$OPTARG" >&2; exit 1;;
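For reference, a minimal Python sketch of what the new awk/grep pipeline pulls out of the compass cluster table: split the row text on commas, locate the field tagged "host1", and take the first IPv4 address from the field that follows. The sample row string is made up for illustration; the real text comes from the compass MySQL database reached through $installer_ip.

#!/usr/bin/python
import re


def extract_controller_ip(cluster_row):
    # Mirror the awk program above: scan comma-separated fields and, on the
    # field containing "host1", return the first IPv4 found in the next field.
    fields = cluster_row.split(",")
    for i in range(len(fields) - 1):
        if '"host1"' in fields[i]:
            match = re.search(r"\d+\.\d+\.\d+\.\d+", fields[i + 1])
            return match.group(0) if match else None
    return None


if __name__ == "__main__":
    sample = '{"hosts": "host1", "10.1.0.50", "host2", "10.1.0.51"}'  # made-up row
    print(extract_controller_ip(sample))  # -> 10.1.0.50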
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
index 688f0c28c..bfb7c8729 100644
--- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py
+++ b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
@@ -21,7 +21,7 @@ def get_functest_cases():
     get the list of the supported test cases
     TODO: update the list when adding a new test case for the dashboard
     """
-    return ["vPing", "Tempest", "odl", "Rally"]
+    return ["status", "vPing", "vIMS", "Tempest", "odl", "Rally"]
 
 
 def format_functest_for_dashboard(case, results):
@@ -53,6 +53,113 @@ def check_functest_case_exist(case):
     return True
 
 
+def format_status_for_dashboard(results):
+    test_data = [{'description': 'Functest status'}]
+
+    # define a magic equation for the status....
+    # 5 suites: vPing, odl, Tempest, vIMS, Rally
+    # Which overall KPI makes sense...
+
+    # TODO to be done and discussed
+    testcases = get_functest_cases()
+    test_data.append({'nb test suite(s) run': len(testcases)-1})
+    # test_data.append({'nb test suite(s) failed': 1})
+    # test_data.append({'test suite run': ['vPing', 'tempest', 'vIMS']})
+    # test_data.append({'average Openstack Tempest failure rate (%)': 10})
+    # test_data.append({'average odl failure rate (%)': 10})
+
+    return test_data
+
+
+def format_vIMS_for_dashboard(results):
+    """
+    Post processing for the vIMS test case
+    """
+    test_data = [{'description': 'vIMS results for Dashboard'}]
+
+    # Graph 1: (duration_deployment_orchestrator,
+    #           duration_deployment_vnf,
+    #           duration_test) = f(time)
+    # ****************************************
+    new_element = []
+
+    for data in results:
+        new_element.append({'x': data['creation_date'],
+                            'y1': data['details']['orchestrator']['duration'],
+                            'y2': data['details']['vIMS']['duration'],
+                            'y3': data['details']['sig_test']['duration']})
+
+    test_data.append({'name': "vIMS orchestrator/deployment/test durations",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'orchestrator deployment duration',
+                               'y2label': 'vIMS deployment duration',
+                               'y3label': 'vIMS test duration'},
+                      'data_set': new_element})
+
+    # Graph 2: (nb tests, nb failures, nb skipped) = f(time)
+    # ******************************************************
+    new_element = []
+
+    for data in results:
+        # Retrieve all the tests
+        nbTests = 0
+        nbFailures = 0
+        nbSkipped = 0
+        vIMS_test = data['details']['sig_test']['result']
+
+        for data_test in vIMS_test:
+            # Calculate nb of tests run and nb of tests failed
+            # vIMS_results = get_vIMSresults(vIMS_test)
+            # print vIMS_results
+            if data_test['result'] == "Passed":
+                nbTests += 1
+            elif data_test['result'] == "Failed":
+                nbFailures += 1
+            elif data_test['result'] == "Skipped":
+                nbSkipped += 1
+
+        new_element.append({'x': data['creation_date'],
+                            'y1': nbTests,
+                            'y2': nbFailures,
+                            'y3': nbSkipped})
+
+    test_data.append({'name': "vIMS nb tests passed/failed/skipped",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Number of tests passed',
+                               'y2label': 'Number of tests failed',
+                               'y3label': 'Number of tests skipped'},
+                      'data_set': new_element})
+
+    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+    # **********************************************************
+    nbTests = 0
+    nbFailures = 0
+
+    for data in results:
+        vIMS_test = data['details']['sig_test']['result']
+
+        for data_test in vIMS_test:
+            nbTestsOK = 0
+            nbTestsKO = 0
+
+            if data_test['result'] == "Passed":
+                nbTestsOK += 1
+            elif data_test['result'] == "Failed":
+                nbTestsKO += 1
+
+            nbTests += nbTestsOK + nbTestsKO
+            nbFailures += nbTestsKO
+
+    test_data.append({'name': "Total number of tests run/failure tests",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Run': nbTests,
+                                    'Failed': nbFailures}]})
+
+    return test_data
+
+
 def format_Tempest_for_dashboard(results):
     """
     Post processing for the Tempest test case
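A quick local check of the new format_vIMS_for_dashboard, in the spirit of the _test() helper shipped in vsperf2Dashboard.py below. The single result record is a made-up example carrying only the fields the formatter reads (creation_date, the three durations, and the sig_test result list); real records come from the results database.

from functest2Dashboard import format_vIMS_for_dashboard

ans = [{'creation_date': '2015-09-12',
        'details': {'orchestrator': {'duration': 120},
                    'vIMS': {'duration': 600},
                    'sig_test': {'duration': 180,
                                 'result': [{'result': 'Passed'},
                                            {'result': 'Failed'},
                                            {'result': 'Skipped'}]}}}]

# Prints the description entry plus the two graphs and the bar chart built above.
print(format_vIMS_for_dashboard(ans))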
diff --git a/utils/test/result_collection_api/dashboard/vsperf2Dashboard.py b/utils/test/result_collection_api/dashboard/vsperf2Dashboard.py
new file mode 100755
index 000000000..323d3915c
--- /dev/null
+++ b/utils/test/result_collection_api/dashboard/vsperf2Dashboard.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+
+# Copyright 2015 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def get_vsperf_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["tput_ovsdpdk", "tput_ovs",
+            "b2b_ovsdpdk", "b2b_ovs",
+            "tput_mod_vlan_ovsdpdk", "tput_mod_vlan_ovs",
+            "cont_ovsdpdk", "cont_ovs",
+            "pvp_cont_ovsdpdkuser", "pvp_cont_ovsdpdkcuse", "pvp_cont_ovsvirtio",
+            "pvvp_cont_ovsdpdkuser", "pvvp_cont_ovsdpdkcuse", "pvvp_cont_ovsvirtio",
+            "scalability_ovsdpdk", "scalability_ovs",
+            "pvp_tput_ovsdpdkuser", "pvp_tput_ovsdpdkcuse", "pvp_tput_ovsvirtio",
+            "pvp_b2b_ovsdpdkuser", "pvp_b2b_ovsdpdkcuse", "pvp_b2b_ovsvirtio",
+            "pvvp_tput_ovsdpdkuser", "pvvp_tput_ovsdpdkcuse", "pvvp_tput_ovsvirtio",
+            "pvvp_b2b_ovsdpdkuser", "pvvp_b2b_ovsdpdkcuse", "pvvp_b2b_ovsvirtio",
+            "cpu_load_ovsdpdk", "cpu_load_ovs",
+            "mem_load_ovsdpdk", "mem_load_ovs"]
+
+
+def check_vsperf_case_exist(case):
+    """
+    check if the testcase exists
+    if the test case is not defined or not declared in the list
+    return False
+    """
+    vsperf_cases = get_vsperf_cases()
+
+    if (case is None or case not in vsperf_cases):
+        return False
+    else:
+        return True
+
+
+def format_vsperf_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+    if check_vsperf_case_exist(case):
+        res = format_common_for_dashboard(case, results)
+    else:
+        res = []
+        print "Test case not declared"
+    return res
+
+
+def format_common_for_dashboard(case, results):
+    """
+    Common post processing
+    """
+    test_data_description = case + " results for Dashboard"
+    test_data = [{'description': test_data_description}]
+
+    graph_name = ''
+    if "b2b" in case:
+        graph_name = "B2B frames"
+    else:
+        graph_name = "Rx frames per second"
+
+    # Graph 1: Rx fps = f(time)
+    # *************************
+    new_element = []
+    for data in results:
+        new_element.append({'x': data['creation_date'],
+                            'y1': data['details']['64'],
+                            'y2': data['details']['128'],
+                            'y3': data['details']['512'],
+                            'y4': data['details']['1024'],
+                            'y5': data['details']['1518']})
+
+    test_data.append({'name': graph_name,
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'frame size 64B',
+                               'y2label': 'frame size 128B',
+                               'y3label': 'frame size 512B',
+                               'y4label': 'frame size 1024B',
+                               'y5label': 'frame size 1518B'},
+                      'data_set': new_element})
+
+    return test_data
+
+
+############################ For local test ################################
+import os
+
+
+def _test():
+    ans = [{'creation_date': '2015-09-12', 'project_name': 'vsperf',
+            'version': 'ovs_master', 'pod_name': 'pod1-vsperf',
+            'case_name': 'tput_ovsdpdk', 'installer': 'build_sie',
+            'details': {'64': '26.804', '1024': '1097.284', '512': '178.137',
+                        '1518': '12635.860', '128': '100.564'}},
+           {'creation_date': '2015-09-33', 'project_name': 'vsperf',
+            'version': 'ovs_master', 'pod_name': 'pod1-vsperf',
+            'case_name': 'tput_ovsdpdk', 'installer': 'build_sie',
+            'details': {'64': '16.804', '1024': '1087.284', '512': '168.137',
+                        '1518': '12625.860', '128': '99.564'}}]
+
+    result = format_vsperf_for_dashboard("pvp_cont_ovsdpdkcuse", ans)
+    print result
+
+    result = format_vsperf_for_dashboard("b2b_ovsdpdk", ans)
+    print result
+
+    result = format_vsperf_for_dashboard("non_existing", ans)
+    print result
+
+
+if __name__ == '__main__':
+    _test()
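A possible simplification, not part of this patch: since None is never a member of the list, check_vsperf_case_exist reduces to a plain membership test.

def check_vsperf_case_exist(case):
    # Equivalent to the if/else above; 'None in vsperf_cases' is already False.
    return case in get_vsperf_cases()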
format_vsperf_for_dashboard("pvp_cont_ovsdpdkcuse", ans) + print result + + result = format_vsperf_for_dashboard("b2b_ovsdpdk", ans) + print result + + result = format_vsperf_for_dashboard("non_existing", ans) + print result + +if __name__ == '__main__': + _test() diff --git a/utils/test/result_collection_api/resources/handlers.py b/utils/test/result_collection_api/resources/handlers.py index 85c6172a5..be08c9791 100644 --- a/utils/test/result_collection_api/resources/handlers.py +++ b/utils/test/result_collection_api/resources/handlers.py @@ -719,6 +719,21 @@ class DashboardHandler(GenericApiHandler): "error:Project name missing") elif check_dashboard_ready_project(project_arg, "./dashboard"): res = [] + + if case_arg is None: + raise HTTPError( + HTTP_NOT_FOUND, + "error:Test case missing for project " + project_arg) + + # special case of status for project + if case_arg == "status": + del get_request["case_name"] + # retention time to be agreed + # last five days by default? + # TODO move to DB + period = datetime.now() - timedelta(days=5) + get_request["creation_date"] = {"$gte": period} + # fetching results cursor = self.db.test_results.find(get_request) while (yield cursor.fetch_next): @@ -726,11 +741,7 @@ class DashboardHandler(GenericApiHandler): cursor.next_object()) res.append(test_result.format_http()) - if case_arg is None: - raise HTTPError( - HTTP_NOT_FOUND, - "error:Test case missing for project " + project_arg) - elif check_dashboard_ready_case(project_arg, case_arg): + if check_dashboard_ready_case(project_arg, case_arg): dashboard = get_dashboard_result(project_arg, case_arg, res) else: raise HTTPError( |