-rwxr-xr-x  reporting/reporting/functest/reporting-tempest.py                      92
-rw-r--r--  reporting/reporting/qtip/reporting-status.py                            3
-rw-r--r--  reporting/reporting/qtip/template/index-status-tmpl.html                9
-rw-r--r--  testapi/3rd_party/static/testapi-ui/components/pods/pods.html           2
-rw-r--r--  testapi/3rd_party/static/testapi-ui/components/pods/podsController.js  35
-rw-r--r--  testapi/tools/watchdog/docker_watch.sh                                 178
6 files changed, 255 insertions, 64 deletions
diff --git a/reporting/reporting/functest/reporting-tempest.py b/reporting/reporting/functest/reporting-tempest.py
index bc28856..d78d9a1 100755
--- a/reporting/reporting/functest/reporting-tempest.py
+++ b/reporting/reporting/functest/reporting-tempest.py
@@ -8,58 +8,57 @@
# http://www.apache.org/licenses/LICENSE-2.0
# SPDX-license-identifier: Apache-2.0
-from urllib2 import Request, urlopen, URLError
from datetime import datetime
import json
-import jinja2
import os
-# manage conf
-import utils.reporting_utils as rp_utils
+from urllib2 import Request, urlopen, URLError
+import jinja2
+
+import reporting.utils.reporting_utils as rp_utils
-installers = rp_utils.get_config('general.installers')
-items = ["tests", "Success rate", "duration"]
+INSTALLERS = rp_utils.get_config('general.installers')
+ITEMS = ["tests", "Success rate", "duration"]
CURRENT_DIR = os.getcwd()
PERIOD = rp_utils.get_config('general.period')
-criteria_nb_test = 165
-criteria_duration = 1800
-criteria_success_rate = 90
+CRITERIA_NB_TEST = 100
+CRITERIA_DURATION = 1800
+CRITERIA_SUCCESS_RATE = 100
logger = rp_utils.getLogger("Tempest")
logger.info("************************************************")
logger.info("* Generating reporting Tempest_smoke_serial *")
-logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* Data retention = %s days *", PERIOD)
logger.info("* *")
logger.info("************************************************")
logger.info("Success criteria:")
-logger.info("nb tests executed > %s s " % criteria_nb_test)
-logger.info("test duration < %s s " % criteria_duration)
-logger.info("success rate > %s " % criteria_success_rate)
+logger.info("nb tests executed > %s s ", CRITERIA_NB_TEST)
+logger.info("test duration < %s s ", CRITERIA_DURATION)
+logger.info("success rate > %s ", CRITERIA_SUCCESS_RATE)
# For all the versions
for version in rp_utils.get_config('general.versions'):
- for installer in installers:
+ for installer in INSTALLERS:
# we consider the Tempest results of the last PERIOD days
url = ("http://" + rp_utils.get_config('testapi.url') +
- "?case=tempest_smoke_serial")
- request = Request(url + '&period=' + str(PERIOD) +
- '&installer=' + installer +
- '&version=' + version)
- logger.info("Search tempest_smoke_serial results for installer %s"
- " for version %s"
- % (installer, version))
+ "?case=tempest_smoke_serial&period=" + str(PERIOD) +
+ "&installer=" + installer + "&version=" + version)
+ request = Request(url)
+ logger.info(("Search tempest_smoke_serial results for installer %s"
+ " for version %s"), installer, version)
try:
response = urlopen(request)
k = response.read()
results = json.loads(k)
- except URLError as e:
- logger.error("Error code: %s" % e)
-
+ except URLError as err:
+ logger.error("Error code: %s", err)
+ logger.debug("request sent: %s", url)
+ logger.debug("Results from API: %s", results)
test_results = results['results']
-
+ logger.debug("Test results: %s", test_results)
scenario_results = {}
criteria = {}
errors = {}
@@ -72,27 +71,37 @@ for version in rp_utils.get_config('general.versions'):
scenario_results[r['scenario']] = []
scenario_results[r['scenario']].append(r)
+ logger.debug("Scenario results: %s", scenario_results)
+
for s, s_result in scenario_results.items():
scenario_results[s] = s_result[0:5]
# For each scenario, we build a result object to deal with
# results, criteria and error handling
for result in scenario_results[s]:
result["start_date"] = result["start_date"].split(".")[0]
+ logger.debug("start_date= %s", result["start_date"])
# retrieve results
# ****************
nb_tests_run = result['details']['tests']
nb_tests_failed = result['details']['failures']
- if nb_tests_run != 0:
- success_rate = 100 * ((int(nb_tests_run) -
+ logger.debug("nb_tests_run= %s", nb_tests_run)
+ logger.debug("nb_tests_failed= %s", nb_tests_failed)
+
+ try:
+ success_rate = (100 * (int(nb_tests_run) -
int(nb_tests_failed)) /
- int(nb_tests_run))
- else:
+ int(nb_tests_run))
+ except ZeroDivisionError:
success_rate = 0
result['details']["tests"] = nb_tests_run
result['details']["Success rate"] = str(success_rate) + "%"
+ logger.info("nb_tests_run= %s", result['details']["tests"])
+ logger.info("test rate = %s",
+ result['details']["Success rate"])
+
# Criteria management
# *******************
crit_tests = False
@@ -100,11 +109,11 @@ for version in rp_utils.get_config('general.versions'):
crit_time = False
# Expect that at least CRITERIA_NB_TEST tests are run
- if nb_tests_run >= criteria_nb_test:
+ if nb_tests_run >= CRITERIA_NB_TEST:
crit_tests = True
# Expect at least CRITERIA_SUCCESS_RATE % success
- if success_rate >= criteria_success_rate:
+ if success_rate >= CRITERIA_SUCCESS_RATE:
crit_rate = True
# Expect the suite duration to stay under 30 minutes
@@ -114,28 +123,27 @@ for version in rp_utils.get_config('general.versions'):
'%Y-%m-%d %H:%M:%S')
delta = stop_date - start_date
- if (delta.total_seconds() < criteria_duration):
+
+ if delta.total_seconds() < CRITERIA_DURATION:
crit_time = True
result['criteria'] = {'tests': crit_tests,
'Success rate': crit_rate,
'duration': crit_time}
try:
- logger.debug("Scenario %s, Installer %s"
- % (s_result[1]['scenario'], installer))
- logger.debug("Nb Test run: %s" % nb_tests_run)
- logger.debug("Test duration: %s"
- % result['details']['duration'])
- logger.debug("Success rate: %s" % success_rate)
- except:
+ logger.debug("Nb Test run: %s", nb_tests_run)
+ logger.debug("Test duration: %s", delta)
+ logger.debug("Success rate: %s", success_rate)
+ except Exception: # pylint: disable=broad-except
logger.error("Data format error")
# Error management
# ****************
try:
errors = result['details']['errors']
- result['errors'] = errors.replace('{0}', '')
- except:
+ logger.info("errors: %s", errors)
+ result['errors'] = errors
+ except Exception: # pylint: disable=broad-except
logger.error("Error field not present (Brahamputra runs?)")
templateLoader = jinja2.FileSystemLoader(".")
@@ -146,7 +154,7 @@ for version in rp_utils.get_config('general.versions'):
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_results=scenario_results,
- items=items,
+ items=ITEMS,
installer=installer)
with open("./display/" + version +
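
The guarded success-rate computation introduced above can be exercised on its own. A minimal Python sketch, with criteria constants mirroring the diff; the evaluate helper and the sample details dicts are invented for illustration:

    # Minimal sketch of the success-rate guard introduced above.
    # The sample `details` dicts below are invented for illustration.
    CRITERIA_NB_TEST = 100
    CRITERIA_SUCCESS_RATE = 100

    def evaluate(details):
        nb_tests_run = details['tests']
        nb_tests_failed = details['failures']
        try:
            success_rate = (100 * (int(nb_tests_run) -
                                   int(nb_tests_failed)) /
                            int(nb_tests_run))
        except ZeroDivisionError:
            # a run that executed no tests counts as a 0% success rate
            success_rate = 0
        return {'tests': nb_tests_run >= CRITERIA_NB_TEST,
                'Success rate': success_rate >= CRITERIA_SUCCESS_RATE}

    print(evaluate({'tests': 120, 'failures': 0}))  # {'tests': True, 'Success rate': True}
    print(evaluate({'tests': 0, 'failures': 0}))    # guarded: no ZeroDivisionError
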
diff --git a/reporting/reporting/qtip/reporting-status.py b/reporting/reporting/qtip/reporting-status.py
index f0127b5..56f9e0a 100644
--- a/reporting/reporting/qtip/reporting-status.py
+++ b/reporting/reporting/qtip/reporting-status.py
@@ -33,8 +33,7 @@ def prepare_profile_file(version):
if not os.path.exists(profile_dir):
os.makedirs(profile_dir)
- profile_file = "{}/{}/scenario_history.txt".format(profile_dir,
- version)
+ profile_file = "{}/scenario_history.txt".format(profile_dir)
if not os.path.exists(profile_file):
with open(profile_file, 'w') as f:
info = 'date,scenario,installer,details,score\n'
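
The one-line change above removes a doubled path segment: os.makedirs() creates profile_dir, but the old format string appended version a second time, pointing the history file into a subdirectory that was never created. A minimal sketch of the corrected behaviour; the base layout and the sample version value are assumptions for illustration:

    import os

    def prepare_profile_file(version, base="./display"):  # base path is assumed
        profile_dir = os.path.join(base, version, "qtip")  # stand-in for the real layout
        if not os.path.exists(profile_dir):
            os.makedirs(profile_dir)
        # profile_dir is the directory that was just created; formatting
        # version in again would point into a non-existent subdirectory
        return "{}/scenario_history.txt".format(profile_dir)

    print(prepare_profile_file("euphrates"))  # ./display/euphrates/qtip/scenario_history.txt
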
diff --git a/reporting/reporting/qtip/template/index-status-tmpl.html b/reporting/reporting/qtip/template/index-status-tmpl.html
index 26da36c..92f3395 100644
--- a/reporting/reporting/qtip/template/index-status-tmpl.html
+++ b/reporting/reporting/qtip/template/index-status-tmpl.html
@@ -46,10 +46,11 @@
<nav>
<ul class="nav nav-justified">
<li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
- <li><a href="index-status-apex.html">Apex</a></li>
- <li><a href="index-status-compass.html">Compass</a></li>
- <li><a href="index-status-fuel.html">Fuel</a></li>
- <li><a href="index-status-joid.html">Joid</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-daisy.html">Daisy</a></li>
+ <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
</ul>
</nav>
</div>
diff --git a/testapi/3rd_party/static/testapi-ui/components/pods/pods.html b/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
index cdfcfaf..7ce36ca 100644
--- a/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
+++ b/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
@@ -63,7 +63,7 @@
</tbody>
</table>
</div>
-
+<br>
<div ng-show="ctrl.showError" class="alert alert-danger" role="alert">
<span class="glyphicon glyphicon-exclamation-sign" aria-hidden="true"></span>
<span class="sr-only">Error:</span>
diff --git a/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js b/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js
index 53e8b1e..2012586 100644
--- a/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js
+++ b/testapi/3rd_party/static/testapi-ui/components/pods/podsController.js
@@ -31,7 +31,6 @@
function PodsController($scope, $http, $filter, $state, testapiApiUrl,
raiseAlert) {
var ctrl = this;
-
ctrl.url = testapiApiUrl + '/pods';
ctrl.create = create;
@@ -82,21 +81,27 @@
*/
function create() {
ctrl.showError = false;
- var pods_url = ctrl.url;
- var body = {
- name: ctrl.name,
- mode: ctrl.mode,
- role: ctrl.role,
- details: ctrl.details
- };
- ctrl.podsRequest =
- $http.post(pods_url, body).error(function (error) {
- ctrl.showError = true;
- ctrl.error =
- 'Error creating the new pod from server: ' +
- angular.toJson(error);
- });
+ if (ctrl.name) {
+ var pods_url = ctrl.url;
+ var body = {
+ name: ctrl.name,
+ mode: ctrl.mode,
+ role: ctrl.role,
+ details: ctrl.details
+ };
+ ctrl.podsRequest =
+ $http.post(pods_url, body).error(function (error) {
+ ctrl.showError = true;
+ ctrl.error =
+ 'Error creating the new pod from server: ' +
+ angular.toJson(error);
+ });
+ }
+ else {
+ ctrl.showError = true;
+ ctrl.error = 'Name is missing.';
+ }
}
/**
diff --git a/testapi/tools/watchdog/docker_watch.sh b/testapi/tools/watchdog/docker_watch.sh
new file mode 100644
index 0000000..d67e4b3
--- /dev/null
+++ b/testapi/tools/watchdog/docker_watch.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+# *
+# http://www.apache.org/licenses/LICENSE-2.0 *
+# *
+# Unless required by applicable law or agreed to in writing, *
+# software distributed under the License is distributed on an *
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+# KIND, either express or implied. See the License for the *
+# specific language governing permissions and limitations *
+# under the License. *
+
+# This script checks whether the deployments are working and
+# restarts the specified containers if one of them has
+# crashed. As of now, the only remedy is restarting docker.
+
+
+## List of modules
+modules=(testapi reporting)
+
+## Ports of the modules
+declare -A ports=( ["testapi"]="8082" ["reporting"]="8084")
+
+## URLs used to check whether the modules are deployed
+#declare -A urls=( ["testapi"]="http://testresults.opnfv.org/test/" \
+# ["reporting"]="http://testresults.opnfv.org/reporting2/reporting/index.html")
+
+declare -A urls=( ["testapi"]="http://localhost:8082/" \
+ ["reporting"]="http://testresults.opnfv.org/reporting2/reporting/index.html")
+
+
+### Functions related to checking.
+
+function is_deploying() {
+ echo -e "Checking job statuses"
+ for module in "${modules[@]}"
+ do
+ if get_status $module; then
+ return 1
+ fi
+ done
+}
+
+function get_status() {
+ xml=$(curl -m10 "https://build.opnfv.org/ci/job/${1}-automate-master/lastBuild/api/xml?depth=1")
+ building=$(grep -oPm1 "(?<=<building>)[^<]+" <<< "$xml")
+ if [[ $building == "false" ]]
+ then
+ return 1
+ else
+ return 0
+ fi
+}
+
+function get_docker_status() {
+ status=$(service docker status | sed -n 3p | cut -d ' ' -f5)
+ echo -e "Docker status: $status"
+ if [ "$status" = "active" ]
+ then
+ return 1
+ else
+ return 0
+ fi
+}
+
+function check_connectivity() {
+ echo "Checking $1 connection : $2"
+ curl --head -m10 --request GET "${2}" | grep '200 OK' > /dev/null
+ rc=$?
+ if [[ $rc == 0 ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+function check_modules() {
+ echo -e "Checking modules"
+ failed_modules=()
+ for module in "${modules[@]}"
+ do
+ if ! check_connectivity $module "${urls[$module]}"; then
+ echo -e "$module failed"
+ failed_modules+=($module)
+ fi
+ done
+ if [ ! -z "$failed_modules" ]; then
+ echo -e "Failed Modules: $failed_modules"
+ return 1
+ else
+ echo -e "All modules are working fine"
+ exit 0
+ fi
+}
+
+### Functions related to fixes.
+
+function restart_docker_fix() {
+ echo -e "Running restart_docker_fix"
+ service docker restart
+ start_containers_fix "${modules[@]}"
+}
+
+function docker_proxy_fix() {
+ echo -e "Running docker_proxy_fix"
+ fix_modules=("${@}")
+ for module in "${fix_modules[@]}"
+ do
+ echo -e "Kill docker proxy and restart containers"
+ pid=$(netstat -nlp | grep :${ports[$module]} | awk '{print $7}' | cut -d'/' -f1)
+ echo $pid
+ if [ ! -z "$pid" ]; then
+ kill $pid
+ start_containers_fix $module
+ fi
+ done
+}
+
+function start_containers_fix() {
+ echo "Running start_containers_fix"
+ start_modules=("${@}")
+ for module in "${start_modules[@]}"
+ do
+ echo -e "Starting a container $module"
+ sudo docker stop $module
+ sudo docker start $module
+ sleep 5
+ if ! check_connectivity $module "${urls[$module]}"; then
+ echo -e "Starting the old container ${module}_old"
+ sudo docker stop $module
+ sudo docker start $module"_old"
+ sleep 5
+ fi
+ done
+}
+
+### Main Flow
+
+echo -e
+echo -e "WatchDog Started"
+echo -e
+echo -e `date "+%Y-%m-%d %H:%M:%S.%N"`
+echo -e
+
+if ! is_deploying; then
+ echo -e "Jenkins Jobs running"
+ exit
+fi
+
+## If the problem is related to the docker daemon
+
+if get_docker_status; then
+ restart_docker_fix
+ if ! check_modules; then
+ echo -e "Watchdog failed during restart_docker_fix"
+ fi
+ exit
+fi
+
+## If the problem is related to docker containers
+
+if ! check_modules; then
+ start_containers_fix "${failed_modules[@]}"
+fi
+
+## If the problem is related to docker proxy
+
+if ! check_modules; then
+ docker_proxy_fix "${failed_modules[@]}"
+fi
+
+## If nothing works out
+
+if ! check_modules; then
+ echo -e "Watchdog failed"
+fi
+
+sudo docker ps
+sudo docker images
\ No newline at end of file
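
The script does not schedule itself, so it presumably runs from cron or a similar timer. A minimal sketch of such an entry, assuming a hypothetical install path of /usr/local/bin/docker_watch.sh:

    # hypothetical crontab entry: run the watchdog every 10 minutes,
    # appending its output to a log for later inspection
    */10 * * * * /usr/local/bin/docker_watch.sh >> /var/log/docker_watch.log 2>&1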