Diffstat (limited to 'utils/test')
-rwxr-xr-x [-rw-r--r--]  utils/test/reporting/functest/reporting-status.py              |   9
-rwxr-xr-x [-rw-r--r--]  utils/test/reporting/functest/reporting-tempest.py             | 204
-rwxr-xr-x [-rw-r--r--]  utils/test/reporting/functest/reporting-vims.py                | 158
-rw-r--r--               utils/test/reporting/functest/reportingConf.py                 |   9
-rw-r--r--               utils/test/reporting/functest/template/index-status-tmpl.html  |   4
-rw-r--r--               utils/test/reporting/functest/template/index-tempest-tmpl.html |   2
-rw-r--r--               utils/test/reporting/functest/template/index-vims-tmpl.html    |   2
-rw-r--r--               utils/test/reporting/functest/testCase.py                      |  16
-rw-r--r--               utils/test/result_collection_api/update/README.md              |  27
-rwxr-xr-x               utils/test/result_collection_api/update/playbook-update.sh     |  90
-rwxr-xr-x               utils/test/result_collection_api/update/templates/rm_images.sh |   8
-rw-r--r--               utils/test/result_collection_api/update/test.yml               |  12
-rw-r--r--               utils/test/result_collection_api/update/update.yml             |  11
13 files changed, 344 insertions, 208 deletions
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 622c375cc..7c943d8b3 100644..100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -99,8 +99,9 @@ for version in conf.versions:
for test_case in testValid:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- logger.debug("testcase %s is %s" %
+ logger.debug("testcase %s (%s) is %s" %
(test_case.getDisplayName(),
+ test_case.getName(),
test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
@@ -131,8 +132,10 @@ for version in conf.versions:
for test_case in otherTestCases:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- logger.info("testcase %s is %s" %
- (test_case.getName(), test_case.isRunnable))
+ logger.debug("testcase %s (%s) is %s" %
+ (test_case.getDisplayName(),
+ test_case.getName(),
+ test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
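The status report now logs both the display name and the internal case name, which makes it easier to match log lines against testcases.yaml. A hypothetical illustration of the resulting format (the names below are invented for the example):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("functest-reporting")

# hypothetical values: displayName comes from testCase.py's display_name_matrix,
# name is the internal case name used in testcases.yaml
display_name, name, runnable = "vPing (userdata)", "vping_userdata", True
logger.debug("testcase %s (%s) is %s" % (display_name, name, runnable))
# -> testcase vPing (userdata) (vping_userdata) is True
```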
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index e3f4e3306..0dc1dd343 100644..100755
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -24,104 +24,108 @@ logger.info("nb tests executed > %s s " % criteria_nb_test)
logger.info("test duration < %s s " % criteria_duration)
logger.info("success rate > %s " % criteria_success_rate)
-for installer in installers:
- # we consider the Tempest results of the last PERIOD days
- url = conf.URL_BASE + "?case=tempest_smoke_serial"
- request = Request(url + '&period=' + str(PERIOD) +
- '&installer=' + installer + '&version=master')
- logger.info("Search tempest_smoke_serial results for installer %s"
- % installer)
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError, e:
- logger.error("Error code: %s" % e)
-
- test_results = results['results']
-
- scenario_results = {}
- criteria = {}
- errors = {}
-
- for r in test_results:
- # Retrieve all the scenarios per installer
- # In Brahmaputra use version
- # Since Colorado use scenario
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- # For each scenario, we build a result object to deal with
- # results, criteria and error handling
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
-
- # retrieve results
- # ****************
- nb_tests_run = result['details']['tests']
- nb_tests_failed = result['details']['failures']
- if nb_tests_run != 0:
- success_rate = 100*(int(nb_tests_run) -
- int(nb_tests_failed)) / int(nb_tests_run)
- else:
- success_rate = 0
-
- result['details']["tests"] = nb_tests_run
- result['details']["Success rate"] = str(success_rate) + "%"
-
- # Criteria management
- # *******************
- crit_tests = False
- crit_rate = False
- crit_time = False
-
- # Expect that at least 165 tests are run
- if nb_tests_run >= criteria_nb_test:
- crit_tests = True
-
- # Expect that at least 90% of success
- if success_rate >= criteria_success_rate:
- crit_rate = True
-
- # Expect that the suite duration is inferior to 30m
- if result['details']['duration'] < criteria_duration:
- crit_time = True
-
- result['criteria'] = {'tests': crit_tests,
- 'Success rate': crit_rate,
- 'duration': crit_time}
- try:
- logger.debug("Scenario %s, Installer %s"
- % (s_result[1]['scenario'], installer))
- logger.debug("Nb Test run: %s" % nb_tests_run)
- logger.debug("Test duration: %s"
- % result['details']['duration'])
- logger.debug("Success rate: %s" % success_rate)
- except:
- logger.error("Data format error")
-
- # Error management
- # ****************
- try:
- errors = result['details']['errors']
- result['errors'] = errors.replace('{0}', '')
- except:
- logger.error("Error field not present (Brahamputra runs?)")
-
- templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
- templateEnv = jinja2.Environment(loader=templateLoader)
-
- TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- items=items,
- installer=installer)
-
- with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
- installer + ".html", "wb") as fh:
- fh.write(outputText)
+# For all the versions
+for version in conf.versions:
+ for installer in conf.installers:
+ # we consider the Tempest results of the last PERIOD days
+ url = conf.URL_BASE + "?case=tempest_smoke_serial"
+ request = Request(url + '&period=' + str(PERIOD) +
+ '&installer=' + installer +
+ '&version=' + version)
+ logger.info("Search tempest_smoke_serial results for installer %s"
+ " for version %s"
+ % (installer, version))
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError, e:
+ logger.error("Error code: %s" % e)
+
+ test_results = results['results']
+
+ scenario_results = {}
+ criteria = {}
+ errors = {}
+
+ for r in test_results:
+ # Retrieve all the scenarios per installer
+ # In Brahmaputra use version
+ # Since Colorado use scenario
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ # For each scenario, we build a result object to deal with
+ # results, criteria and error handling
+ for result in scenario_results[s]:
+ result["start_date"] = result["start_date"].split(".")[0]
+
+ # retrieve results
+ # ****************
+ nb_tests_run = result['details']['tests']
+ nb_tests_failed = result['details']['failures']
+ if nb_tests_run != 0:
+ success_rate = 100*(int(nb_tests_run) -
+ int(nb_tests_failed)) / int(nb_tests_run)
+ else:
+ success_rate = 0
+
+ result['details']["tests"] = nb_tests_run
+ result['details']["Success rate"] = str(success_rate) + "%"
+
+ # Criteria management
+ # *******************
+ crit_tests = False
+ crit_rate = False
+ crit_time = False
+
+ # Expect that at least 165 tests are run
+ if nb_tests_run >= criteria_nb_test:
+ crit_tests = True
+
+ # Expect that at least 90% of success
+ if success_rate >= criteria_success_rate:
+ crit_rate = True
+
+ # Expect that the suite duration is inferior to 30m
+ if result['details']['duration'] < criteria_duration:
+ crit_time = True
+
+ result['criteria'] = {'tests': crit_tests,
+ 'Success rate': crit_rate,
+ 'duration': crit_time}
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Nb Test run: %s" % nb_tests_run)
+ logger.debug("Test duration: %s"
+ % result['details']['duration'])
+ logger.debug("Success rate: %s" % success_rate)
+ except:
+ logger.error("Data format error")
+
+ # Error management
+ # ****************
+ try:
+ errors = result['details']['errors']
+ result['errors'] = errors.replace('{0}', '')
+ except:
+                logger.error("Error field not present (Brahmaputra runs?)")
+
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+ templateEnv = jinja2.Environment(loader=templateLoader)
+
+ TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ items=items,
+ installer=installer)
+
+ with open(conf.REPORTING_PATH + "/release/" + version +
+ "/index-tempest-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
logger.info("Tempest automatic reporting succesfully generated.")
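The scoring in this script reduces to three independent thresholds applied to each result's details field. A minimal standalone sketch of the same computation — the test-count and success-rate thresholds come from the comments above, while CRITERIA_DURATION assumes the duration is expressed in seconds:

```python
# Thresholds as described in the comments above; the duration value is an
# assumption (30 minutes expressed in seconds).
CRITERIA_NB_TEST = 165
CRITERIA_SUCCESS_RATE = 90
CRITERIA_DURATION = 30 * 60


def tempest_criteria(details):
    """Evaluate one tempest_smoke_serial result against the three criteria."""
    nb_run = int(details['tests'])
    nb_failed = int(details['failures'])
    success_rate = 100 * (nb_run - nb_failed) / nb_run if nb_run else 0
    return {'tests': nb_run >= CRITERIA_NB_TEST,
            'Success rate': success_rate >= CRITERIA_SUCCESS_RATE,
            'duration': details['duration'] < CRITERIA_DURATION}


print(tempest_criteria({'tests': 170, 'failures': 5, 'duration': 1200}))
# -> {'tests': True, 'Success rate': True, 'duration': True}
```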
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index d0436ed14..a83d92f0a 100644..100755
--- a/utils/test/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -33,81 +33,87 @@ logger.info("****************************************")
installers = conf.installers
step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
logger.info("Start processing....")
-for installer in installers:
- logger.info("Search vIMS results for installer %s" % installer)
- request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
-
- try:
- response = urlopen(request)
- k = response.read()
- results = json.loads(k)
- except URLError, e:
- logger.error("Error code: %s" % e)
-
- test_results = results['results']
-
- logger.debug("Results found: %s" % test_results)
-
- scenario_results = {}
- for r in test_results:
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- for s, s_result in scenario_results.items():
- scenario_results[s] = s_result[0:5]
- logger.debug("Search for success criteria")
- for result in scenario_results[s]:
- result["start_date"] = result["start_date"].split(".")[0]
- sig_test = result['details']['sig_test']['result']
- if not sig_test == "" and isinstance(sig_test, list):
- format_result = sig_test_format(sig_test)
- if format_result['failures'] > format_result['passed']:
- result['details']['sig_test']['duration'] = 0
- result['details']['sig_test']['result'] = format_result
- nb_step_ok = 0
- nb_step = len(result['details'])
-
- for step_name, step_result in result['details'].items():
- if step_result['duration'] != 0:
- nb_step_ok += 1
- m, s = divmod(step_result['duration'], 60)
- m_display = ""
- if int(m) != 0:
- m_display += str(int(m)) + "m "
- step_result['duration_display'] = m_display + str(int(s)) + "s"
-
- result['pr_step_ok'] = 0
- if nb_step != 0:
- result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
- try:
- logger.debug("Scenario %s, Installer %s"
- % (s_result[1]['scenario'], installer))
- logger.debug("Orchestrator deployment: %s s"
- % result['details']['orchestrator']['duration'])
- logger.debug("vIMS deployment: %s s"
- % result['details']['vIMS']['duration'])
- logger.debug("Signaling testing: %s s"
- % result['details']['sig_test']['duration'])
- logger.debug("Signaling testing results: %s"
- % format_result)
- except:
- logger.error("Data badly formatted")
- logger.debug("------------------------------------------------")
-
- templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
- templateEnv = jinja2.Environment(loader=templateLoader)
-
- TEMPLATE_FILE = "/template/index-vims-tmpl.html"
- template = templateEnv.get_template(TEMPLATE_FILE)
-
- outputText = template.render(scenario_results=scenario_results,
- step_order=step_order,
- installer=installer)
-
- with open(conf.REPORTING_PATH +
- "/release/master/index-vims-" +
- installer + ".html", "wb") as fh:
- fh.write(outputText)
+
+# For all the versions
+for version in conf.versions:
+ for installer in installers:
+ logger.info("Search vIMS results for installer: %s, version: %s"
+ % (installer, version))
+ request = Request(conf.URL_BASE + '?case=vims&installer=' +
+ installer + '&version=' + version)
+
+ try:
+ response = urlopen(request)
+ k = response.read()
+ results = json.loads(k)
+ except URLError, e:
+ logger.error("Error code: %s" % e)
+
+ test_results = results['results']
+
+ logger.debug("Results found: %s" % test_results)
+
+ scenario_results = {}
+ for r in test_results:
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
+
+ for s, s_result in scenario_results.items():
+ scenario_results[s] = s_result[0:5]
+ logger.debug("Search for success criteria")
+ for result in scenario_results[s]:
+ result["start_date"] = result["start_date"].split(".")[0]
+ sig_test = result['details']['sig_test']['result']
+ if not sig_test == "" and isinstance(sig_test, list):
+ format_result = sig_test_format(sig_test)
+ if format_result['failures'] > format_result['passed']:
+ result['details']['sig_test']['duration'] = 0
+ result['details']['sig_test']['result'] = format_result
+ nb_step_ok = 0
+ nb_step = len(result['details'])
+
+ for step_name, step_result in result['details'].items():
+ if step_result['duration'] != 0:
+ nb_step_ok += 1
+ m, s = divmod(step_result['duration'], 60)
+ m_display = ""
+ if int(m) != 0:
+ m_display += str(int(m)) + "m "
+
+ step_result['duration_display'] = m_display + str(int(s)) + "s"
+
+ result['pr_step_ok'] = 0
+ if nb_step != 0:
+ result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Orchestrator deployment: %s s"
+ % result['details']['orchestrator']['duration'])
+ logger.debug("vIMS deployment: %s s"
+ % result['details']['vIMS']['duration'])
+ logger.debug("Signaling testing: %s s"
+ % result['details']['sig_test']['duration'])
+ logger.debug("Signaling testing results: %s"
+ % format_result)
+ except:
+ logger.error("Data badly formatted")
+ logger.debug("----------------------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+ templateEnv = jinja2.Environment(loader=templateLoader)
+
+ TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ step_order=step_order,
+ installer=installer)
+
+ with open(conf.REPORTING_PATH +
+ "/release/" + version + "/index-vims-" +
+ installer + ".html", "wb") as fh:
+ fh.write(outputText)
logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index a58eeecc9..9230cb286 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -10,14 +10,13 @@
#
# ****************************************************
installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["apex"]
# list of test cases declared in testcases.yaml but that must not be
# taken into account for the scoring
-blacklist = ["odl", "ovno", "security_scan", "copper", "moon"]
+blacklist = ["ovno", "security_scan", "odl-sfc"]
# versions = ["brahmaputra", "master"]
-versions = ["master"]
-PERIOD = 10
-MAX_SCENARIO_CRITERIA = 18
+versions = ["master", "colorado"]
+PERIOD = 50
+MAX_SCENARIO_CRITERIA = 50
# get the last 5 test results to determinate the success criteria
NB_TESTS = 5
# REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
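With versions = ["master", "colorado"], every reporting script above now writes one page per (version, installer) pair under release/&lt;version&gt;/. A hedged sketch of the resulting layout — the os.makedirs call is an assumption, since the scripts open the output files directly and expect the directories to already exist:

```python
import os

# values from reportingConf.py; REPORTING_PATH is a placeholder here
REPORTING_PATH = "."
versions = ["master", "colorado"]
installers = ["apex", "compass", "fuel", "joid"]

for version in versions:
    release_dir = os.path.join(REPORTING_PATH, "release", version)
    if not os.path.isdir(release_dir):  # assumption: create missing dirs
        os.makedirs(release_dir)
    for installer in installers:
        print(os.path.join(release_dir, "index-tempest-%s.html" % installer))
```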
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 0c3fa9426..da2213bc0 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -21,7 +21,7 @@
<h3 class="text-muted">Functest status page ({{version}})</h3>
<nav>
<ul class="nav nav-justified">
- <li class="active"><a href="index.html">Home</a></li>
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
<li><a href="index-status-apex.html">Apex</a></li>
<li><a href="index-status-compass.html">Compass</a></li>
<li><a href="index-status-fuel.html">Fuel</a></li>
@@ -101,7 +101,7 @@
</div>
</div>
{%- endfor %}
- *: not used for scenario validation
+ see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
</div>
<div class="col-md-1"></div>
</div>
diff --git a/utils/test/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html
index c56214346..42d7ed339 100644
--- a/utils/test/reporting/functest/template/index-tempest-tmpl.html
+++ b/utils/test/reporting/functest/template/index-tempest-tmpl.html
@@ -21,7 +21,7 @@
<h3 class="text-muted">Tempest status page</h3>
<nav>
<ul class="nav nav-justified">
- <li class="active"><a href="index.html">Home</a></li>
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
<li><a href="index-tempest-apex.html">Apex</a></li>
<li><a href="index-tempest-compass.html">Compass</a></li>
<li><a href="index-tempest-fuel.html">Fuel</a></li>
diff --git a/utils/test/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html
index 25499dc46..3836be91f 100644
--- a/utils/test/reporting/functest/template/index-vims-tmpl.html
+++ b/utils/test/reporting/functest/template/index-vims-tmpl.html
@@ -21,7 +21,7 @@
<h3 class="text-muted">vIMS status page</h3>
<nav>
<ul class="nav nav-justified">
- <li class="active"><a href="index.html">Home</a></li>
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
<li><a href="index-vims-fuel.html">Fuel</a></li>
<li><a href="index-vims-compass.html">Compass</a></li>
<li><a href="index-vims-joid.html">JOID</a></li>
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index e19853a09..a906f0da8 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -35,7 +35,12 @@ class TestCase(object):
'promise': 'Promise',
'moon': 'moon',
'copper': 'copper',
- 'security_scan': 'security'
+ 'security_scan': 'security',
+ 'multisite': 'multisite',
+ 'domino': 'domino',
+ 'odl-sfc': 'SFC',
+ 'onos_sfc': 'SFC',
+                          'parser': 'parser'
}
try:
self.displayName = display_name_matrix[self.name]
@@ -122,8 +127,13 @@ class TestCase(object):
'doctor': 'doctor-notification',
'promise': 'promise',
'moon': 'moon',
- 'copper': 'copper',
- 'security_scan': 'security'
+ 'copper': 'copper-notification',
+ 'security_scan': 'security',
+ 'multisite': 'multisite',
+ 'domino': 'domino-multinode',
+ 'odl-sfc': 'odl-sfc',
+ 'onos_sfc': 'onos_sfc',
+                             'parser': 'parser-basics'
}
try:
return test_match_matrix[self.name]
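Both matrices in testCase.py follow the same pattern: a dict keyed by the internal case name with a try/except guard around the lookup. A minimal sketch — the KeyError fallback shown here is an assumption, as the real except branches are outside the hunks above:

```python
display_name_matrix = {
    'odl-sfc': 'SFC',
    'onos_sfc': 'SFC',
    'parser': 'parser',
}


def get_display_name(name):
    try:
        return display_name_matrix[name]
    except KeyError:
        return name  # assumption: fall back to the internal name

print(get_display_name('odl-sfc'))    # -> SFC
print(get_display_name('whatever'))   # -> whatever
```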
diff --git a/utils/test/result_collection_api/update/README.md b/utils/test/result_collection_api/update/README.md
index d3aef7efe..cb0e67b33 100644
--- a/utils/test/result_collection_api/update/README.md
+++ b/utils/test/result_collection_api/update/README.md
@@ -79,26 +79,21 @@ install ansible, please refer:
```
http://docs.ansible.com/ansible/intro_installation.html
```
-run update.yml
+
+playbook-update.sh
+
arguments:
-: host: remote server, must provide
-user: user used to access to remote server, default to root
-port: exposed port used to access to testapi, default to 8000
-image: testapi's docker image, default to opnfv/testapi:latest
-update_path: templates directory in remote server, default to /tmp/testapi
-mongodb_url: url of mongodb, default to 172.17.0.1, docker0 ip
-swagger_url: swagger access url, default to http://host:port
+: -h|--help show this help text
+-r|--remote remote server
+-u|--user ssh username used to access to remote server
+-i|--identity ssh PublicKey file used to access to remote server
+-e|--execute execute update, if not set just check the ansible connectivity
usage:
```
-ansible-playbook update.yml --extra-vars "
-host=10.63.243.17
-user=zte
-port=8000
-image=opnfv/testapi
-update_path=/tmp/testapi
-mongodb_url=mongodb://172.17.0.1:27017
-swagger_url=http://10.63.243.17:8000"```
+ssh-agent ./playbook-update.sh -r testresults.opnfv.org -u serena -i ~/.ssh/id_rsa -e
+```
+
> **Note:**
> - If documents need to be changed, please modify file
diff --git a/utils/test/result_collection_api/update/playbook-update.sh b/utils/test/result_collection_api/update/playbook-update.sh
new file mode 100755
index 000000000..86d30e4b2
--- /dev/null
+++ b/utils/test/result_collection_api/update/playbook-update.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+#
+# Author: Serena Feng (feng.xiaoewi@zte.com.cn)
+# Update testapi on remote server using ansible playbook automatically
+#
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+set -e
+
+usage="Script to trigger update automatically.
+
+usage:
+  bash $(basename "$0") [-h|--help] [-r <remote>] [-u username] [-i identityfile] [-e|--execute]
+
+where:
+ -h|--help show this help text
+ -r|--remote remote server
+ -u|--user ssh username used to access to remote server
+ -i|--identity ssh PublicKey file used to access to remote server
+ -e|--execute execute update, if not set just check the ansible connectivity"
+
+remote=testresults.opnfv.org
+user=root
+identity=~/.ssh/id_rsa
+hosts=./hosts
+execute=false
+
+# Parse parameters
+while [[ $# > 0 ]]
+ do
+ key="$1"
+ case $key in
+ -h|--help)
+ echo "$usage"
+ exit 0
+ shift
+ ;;
+ -r|--remote)
+ remote="$2"
+ shift
+ ;;
+ -u|--user)
+ user="$2"
+ shift
+ ;;
+ -i|--identity)
+ identity="$2"
+ shift
+ ;;
+ -e|--execute)
+ execute=true
+ ;;
+ *)
+ echo "unknown option"
+ exit 1
+ ;;
+ esac
+ shift # past argument or value
+done
+
+echo $remote > $hosts
+
+echo "add authentication"
+ssh-add $identity
+
+echo "test ansible connectivity"
+ansible -i ./hosts $remote -m ping -u $user
+
+echo "test playbook connectivity"
+ansible-playbook -i $hosts test.yml -e "host=$remote user=$user"
+
+if [ $execute == true ]; then
+ echo "do update"
+ ansible-playbook -i $hosts update.yml -e "host=$remote \
+ user=$user \
+ port=8082 \
+ image=opnfv/testapi \
+ update_path=/home/$user/testapi \
+ mongodb_url=mongodb://172.17.0.1:27017 \
+ swagger_url=http://testresults.opnfv.org/test"
+fi
+
+rm -fr $hosts
+ssh-agent -k
diff --git a/utils/test/result_collection_api/update/templates/rm_images.sh b/utils/test/result_collection_api/update/templates/rm_images.sh
new file mode 100755
index 000000000..6722573b4
--- /dev/null
+++ b/utils/test/result_collection_api/update/templates/rm_images.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+number=`docker images | awk 'NR != 1' | grep testapi | wc -l`
+if [ $number -gt 0 ]; then
+ images=`docker images -a | awk 'NR != 1' | grep testapi | awk '{print $1}'`
+ echo "begin to rm images $images"
+ docker images | awk 'NR != 1' | grep testapi | awk '{print $3}' | xargs docker rmi -f &>/dev/null
+fi
diff --git a/utils/test/result_collection_api/update/test.yml b/utils/test/result_collection_api/update/test.yml
new file mode 100644
index 000000000..a8868720d
--- /dev/null
+++ b/utils/test/result_collection_api/update/test.yml
@@ -0,0 +1,12 @@
+---
+- hosts: "{{ host }}"
+ remote_user: "{{ user }}"
+ become: yes
+ become_method: sudo
+ vars:
+ user: "root"
+ tasks:
+ - name: test connectivity
+ command: "echo hello {{ host }}"
+ register: result
+ - debug: msg="{{ result }}"
diff --git a/utils/test/result_collection_api/update/update.yml b/utils/test/result_collection_api/update/update.yml
index 08839564a..e6663d905 100644
--- a/utils/test/result_collection_api/update/update.yml
+++ b/utils/test/result_collection_api/update/update.yml
@@ -8,6 +8,7 @@
port: "8000"
update_path: "/tmp/testapi"
image: "opnfv/testapi"
+ mode: "pull"
mongodb_url: "mongodb://172.17.0.1:27017"
swagger_url: "http://{{ host }}:{{ port }}"
tasks:
@@ -19,6 +20,11 @@
copy:
src: templates/
dest: "{{ update_path }}"
+ - name: transfer Dockerfile
+ copy:
+ src: ../docker/Dockerfile
+ dest: "{{ update_path }}"
+ when: mode == "build"
- name: backup mongodb database
command: "python {{ update_path }}/backup_mongodb.py -u {{ mongodb_url }} -o {{ update_path }}"
- name: stop and remove old versions
@@ -26,10 +32,13 @@
register: rm_result
- debug: msg="{{ rm_result.stderr }}"
- name: delete old docker images
- command: docker rmi "{{ image }}"
+ command: bash "{{ update_path }}/rm_images.sh"
ignore_errors: true
- name: update mongodb
command: "python {{ update_path }}/update_mongodb.py -u {{ mongodb_url }}"
+ - name: docker build image
+ command: "docker build -t {{ image }} {{ update_path }}"
+ when: mode == "build"
- name: docker start testapi server
command: docker run -dti -p "{{ port }}:8000"
-e "mongodb_url={{ mongodb_url }}"