-rw-r--r--  ci/config_functest.yaml                            |   2
-rw-r--r--  docs/com/img/testapi0.png                          | bin 0 -> 37907 bytes
-rw-r--r--  docs/com/img/testapi1.png                          | bin 0 -> 63056 bytes
-rwxr-xr-x  docs/com/pres/conversation.html                    | 204
-rwxr-xr-x  docs/com/pres/testapi.html                         | 236
-rw-r--r--  testcases/Controllers/ODL/odlreport2db.py          |  25
-rw-r--r--  testcases/Controllers/ONOS/Teston/onosfunctest.py  |  21
-rwxr-xr-x  testcases/OpenStack/rally/run_rally-cert.py        |  47
-rw-r--r--  testcases/OpenStack/tempest/run_tempest.py         |  48
-rw-r--r--  testcases/OpenStack/vPing/vPing_ssh.py             |  58
-rw-r--r--  testcases/OpenStack/vPing/vPing_userdata.py        |  51
-rw-r--r--  testcases/features/bgpvpn.py                       |  26
-rw-r--r--  testcases/features/doctor.py                       |  22
-rw-r--r--  testcases/features/promise.py                      |  37
-rw-r--r--  testcases/security_scan/connect.py                 |   2
-rw-r--r--  testcases/vIMS/vIMS.py                             |  49
-rw-r--r--  utils/functest_utils.py                            |  30
17 files changed, 643 insertions(+), 215 deletions(-)
diff --git a/ci/config_functest.yaml b/ci/config_functest.yaml
index ab620f023..69f263f04 100644
--- a/ci/config_functest.yaml
+++ b/ci/config_functest.yaml
@@ -155,7 +155,7 @@ promise:
router_name: promise-router
results:
- test_db_url: http://testresults.opnfv.org/testapi
+ test_db_url: http://testresults.opnfv.org/test/api/v1
# to be maintained...
# the execution order is important as some tests may be more destructive than others
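
The new test_db_url points at the root of the refactored Test API; as the updated push_results_to_db() in utils/functest_utils.py further down shows, results are still POSTed to its /results sub-resource, now with start_date, stop_date, criteria and a details payload. A minimal sketch of such a submission, using placeholder values for the CI metadata that Functest normally resolves from its environment:

    import json
    import requests

    # Placeholder values: in Functest these come from the CI environment
    # (installer, scenario, version, pod_name, build_tag) and from the test run.
    params = {"project_name": "functest", "case_name": "vPing",
              "pod_name": "opnfv-jump-2", "installer": "fuel",
              "version": "colorado", "scenario": "os-odl_l2-ha",
              "criteria": "PASS", "build_tag": "jenkins-functest-fuel-daily-1",
              "start_date": 1466431200, "stop_date": 1466431260,
              "details": {"duration": 60.0, "status": "PASS"}}

    r = requests.post("http://testresults.opnfv.org/test/api/v1/results",
                      data=json.dumps(params),
                      headers={'Content-Type': 'application/json'})
    print(r.status_code)
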
diff --git a/docs/com/img/testapi0.png b/docs/com/img/testapi0.png
new file mode 100644
index 000000000..06df74d75
--- /dev/null
+++ b/docs/com/img/testapi0.png
Binary files differ
diff --git a/docs/com/img/testapi1.png b/docs/com/img/testapi1.png
new file mode 100644
index 000000000..e9a697449
--- /dev/null
+++ b/docs/com/img/testapi1.png
Binary files differ
diff --git a/docs/com/pres/conversation.html b/docs/com/pres/conversation.html
new file mode 100755
index 000000000..fab46247b
--- /dev/null
+++ b/docs/com/pres/conversation.html
@@ -0,0 +1,204 @@
+<!doctype html>
+<html lang="en">
+
+ <head>
+ <meta charset="utf-8">
+
+ <title>OPNFV presentation</title>
+
+ <meta name="description" content="Conversation with the Testing community">
+ <meta name="author" content="M.Richomme, ....">
+
+ <meta name="apple-mobile-web-app-capable" content="yes" />
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
+
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no, minimal-ui">
+
+ <link rel="stylesheet" href="../css/reveal.css">
+ <link rel="stylesheet" href="../css/theme/OPNFV.css" id="theme">
+
+ <!-- Code syntax highlighting -->
+ <link rel="stylesheet" href="../lib/css/zenburn.css">
+
+ <!-- Printing and PDF exports -->
+ <script>
+ var link = document.createElement( 'link' );
+ link.rel = 'stylesheet';
+ link.type = 'text/css';
+ link.href = window.location.search.match( /print-pdf/gi ) ? '../css/print/pdf.css' : '../css/print/paper.css';
+ document.getElementsByTagName( 'head' )[0].appendChild( link );
+ </script>
+
+ <!--[if lt IE 9]>
+ <script src="lib/js/html5shiv.js"></script>
+ <![endif]-->
+ </head>
+
+ <body>
+
+ <div class="reveal">
+ <!-- Any section element inside of this container is displayed as a slide -->
+ <div class="slides">
+
+ <section data-background="../img/title-bg.png" data-background-transition="none">
+ <br>
+ <h1>Conversation with the Testing Community</h1>
+ <br><br>
+ <h4>OPNFV testing community</h4>
+ <h5>OPNFV Design Summit, 20/6/2016, Berlin</h5>
+ <br>
+
+ </section>
+
+ <section data-markdown>
+ >“You make experiments and I make theories. Do you know the difference? A theory is something nobody believes, except the person who made it. An experiment is something everybody believes, except the person who made it.” A. Einstein
+ </section>
+
+ <section data-markdown>
+ # Agenda
+ * Who are we?
+ * Upstream, Upstream, Upstream
+ * Towards Telco Cloud KPI
+ </section>
+
+ <section data-markdown>
+ # Who are we?
+ ![alt text](https://wiki.opnfv.org/download/attachments/2926690/Testing.png "OPNFV testing group")
+ </section>
+ <section>
+ <section>
+ <h3>The test projects</h3>
+ <table>
+ <thead>
+ <tr>
+ <th>Test case</th>
+ <th>Description</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>Functest</td>
+ <td>Umbrella project for Functional testing</td>
+ </tr>
+ <tr>
+ <td>Yardstick</td>
+ <td>Umbrella project for performance/qualification testing</td>
+ </tr>
+ <tr>
+ <td>CPerf</td>
+ <td>SDN Controller performance testing</td>
+ </tr>
+ <tr>
+ <td>StorPerf</td>
+ <td>Storage performance testing</td>
+ </tr>
+ <tr>
+ <td>VSPerf</td>
+ <td>VSwitch qualification</td>
+ </tr>
+ <tr>
+ <td>Bottlenecks</td>
+ <td>Detect possible bottlenecks </td>
+ </tr>
+ </tbody>
+ </table>
+ </section>
+
+ <section data-markdown>
+ # Functest
+ * Functional testing to validate scenarios
+ * Contributors (raise your hands)
+ * Functest presentation (here... 1h ago :)), slides available here
+ * Breakout sessions
+ * Today 4PM (feature project integration), 5PM (Colorado status)
+ * Tomorrow 11AM (API, test collection), 1PM (work upstream), 3.15PM (D release) (to be confirmed...)
+ </section>
+ <section data-markdown>
+ # Yardstick
+ </section>
+ <section data-markdown>
+ # CPerf
+ </section>
+ <section data-markdown>
+ # StorPerf
+ </section>
+ <section data-markdown>
+ # VSPerf
+ </section>
+ <section data-markdown>
+ # Bottlenecks
+ </section>
+ </section>
+ <section>
+ <section data-markdown>
+ # Upstream, upstream, upstream..
+ </section>
+ <section data-markdown>
+ ## We stand on the shoulders of giants
+ * Rally (OpenStack)
+ * ODL, ONOS, ...
+ * Robot Framework, TestON
+ * .....
+ </section>
+ <section data-markdown>
+ ## Time to give back
+ </section>
+
+ <section data-markdown>
+ ## How to improve the way we work with upstream testing communities
+ </section>
+
+ </section>
+
+ <section>
+ <section data-markdown>
+ # Towards Telco Cloud API...
+ </section>
+
+ <section data-markdown>
+ ##
+ </section>
+
+ </section>
+
+ <section>
+ <h3>Thank you</h3>
+ <img width="600" data-src="../img/colorado.png" alt="tests">
+ </section>
+
+ </div>
+ <div class='footer'>
+ <img src="../img/logo-OPNFV.png" alt="OPNFV logo">
+ </div>
+ </div>
+
+ <script src="../lib/js/head.min.js"></script>
+ <script src="../js/reveal.js"></script>
+
+ <script>
+
+ // Full list of configuration options available at:
+ // https://github.com/hakimel/reveal.js#configuration
+ Reveal.initialize({
+ controls: true,
+ progress: true,
+ history: true,
+ center: true,
+
+ transition: 'slide', // none/fade/slide/convex/concave/zoom
+
+ // Optional reveal.js plugins
+ dependencies: [
+ { src: '../lib/js/classList.js', condition: function() { return !document.body.classList; } },
+ { src: '../plugin/markdown/marked.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
+ { src: '../plugin/markdown/markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
+ { src: '../plugin/highlight/highlight.js', async: true, condition: function() { return !!document.querySelector( 'pre code' ); }, callback: function() { hljs.initHighlightingOnLoad(); } },
+ { src: '../plugin/zoom-js/zoom.js', async: true },
+ { src: '../plugin/notes/notes.js', async: true }
+ ]
+ });
+
+ </script>
+
+ </body>
+</html>
diff --git a/docs/com/pres/testapi.html b/docs/com/pres/testapi.html
new file mode 100755
index 000000000..4479b93b4
--- /dev/null
+++ b/docs/com/pres/testapi.html
@@ -0,0 +1,236 @@
+<!doctype html>
+<html lang="en">
+
+ <head>
+ <meta charset="utf-8">
+
+ <title>OPNFV presentation</title>
+
+ <meta name="description" content="Test API">
+ <meta name="author" content="Serena Feng">
+
+ <meta name="apple-mobile-web-app-capable" content="yes" />
+ <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
+
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no, minimal-ui">
+
+ <link rel="stylesheet" href="../css/reveal.css">
+ <link rel="stylesheet" href="../css/theme/OPNFV.css" id="theme">
+
+ <!-- Code syntax highlighting -->
+ <link rel="stylesheet" href="../lib/css/zenburn.css">
+
+ <!-- Printing and PDF exports -->
+ <script>
+ var link = document.createElement( 'link' );
+ link.rel = 'stylesheet';
+ link.type = 'text/css';
+ link.href = window.location.search.match( /print-pdf/gi ) ? '../css/print/pdf.css' : '../css/print/paper.css';
+ document.getElementsByTagName( 'head' )[0].appendChild( link );
+ </script>
+
+ <!--[if lt IE 9]>
+ <script src="lib/js/html5shiv.js"></script>
+ <![endif]-->
+ </head>
+
+ <body>
+
+ <div class="reveal">
+ <!-- Any section element inside of this container is displayed as a slide -->
+ <div class="slides">
+
+ <section data-background="../img/title-bg.png" data-background-transition="none">
+ <h1>Test API</h1>
+ <h3>Clean, Easy, Complete</h3>
+ <br>
+ <h4>OPNFV testing community</h4>
+ <h5>OPNFV Design Summit, 20/6/2016, Berlin</h5>
+ <br>
+ </section>
+
+ <section data-markdown>
+ # Agenda
+ * A test API: what for?
+ * API overview
+ * API evolution
+ </section>
+
+ <section>
+ <section data-markdown>
+ # A test API: what for?
+ </section>
+ <section data-markdown>
+ ## Give a consistent view of
+ * Test projects
+ * Test cases
+ * Test resources (Pods)
+ * Test results
+ </section>
+
+ <section data-markdown>
+ ## Unify result display
+ * From many projects
+ * Many formats (log, HTML, JSON, ...)
+ * Many locations (in VM, Jumphost, external DB,...)
+ </section>
+
+ <section data-markdown>
+ ## Help building
+ * Dashboards
+ * Automatic reporting
+ </section>
+ </section>
+
+ <section>
+ <section data-markdown>
+ # API overview
+ </section>
+ <section data-markdown>
+ ## API in Brahmaputra
+ * Tornado + MongoDB
+ * Simple data models agreed with the testing group
+ * No unit tests
+ * Wiki and rst documentation
+ </section>
+
+ <section data-markdown>
+ ## API in Brahmaputra
+ ![testapi](https://wiki.opnfv.org/download/attachments/2926452/results_collection_structure.png?version=1&modificationDate=1459196347000&api=v2 "OPNFV API page")
+ </section>
+ <section data-markdown>
+ ## API in Brahmaputra
+ ![testapi](../img/testapi0.png)
+ https://wiki.opnfv.org/display/functest/Collection+Of+Test+Results
+ </section>
+
+ <section data-markdown>
+ ## Lessons learned in B.
+ * Wiki documentation is painful
+ * result model too simple
+ * version used for scenario
+ * no version
+ * no overall criteria (passed/failed)
+ * need unit tests for data model evolution
+ </section>
+ <section data-markdown>
+ ## Colorado refactoring
+ * Done by Serena Feng (ZTE)
+ * Update of the data model (based on lessons learned)
+ * Creation of a Swagger/Tornado framework for the documentation
+ * Creation of unit tests
+ </section>
+ </section>
+
+ <section>
+ <section data-markdown>
+ # API evolution
+ </section>
+<section>
+ <h3>Test API evolution</h3>
+ <table>
+ <thead>
+ <tr>
+ <th>Field</th>
+ <th>Brahmaputra</th>
+ <th>Colorado</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>date</td>
+ <td>creation_date</td>
+ <td>start_date and stop_date</td>
+ </tr>
+ <tr>
+ <td>version</td>
+ <td>used as scenario</td>
+ <td>git version</td>
+ </tr>
+ <tr>
+ <td>scenario</td>
+ <td>N.R</td>
+ <td>used for scenario</td>
+ </tr>
+ <tr>
+ <td>test_criteria</td>
+ <td>N.R</td>
+ <td>passed/failed</td>
+ </tr>
+ <tr>
+ <td>trust_indicator</td>
+ <td>N.R</td>
+ <td>between 0 and 1</td>
+ </tr>
+ <tr>
+ <td>last</td>
+ <td>N.R</td>
+ <td>get last N results</td>
+ </tr>
+ </tbody>
+ </table>
+ </section>
+ <section data-markdown>
+ ## Swagger doc
+ ![alt text](../img/testapi1.png "Test API swagger interface")
+ </section>
+<section>
+ <h2>unit tests</h2>
+ <pre><code class="hljs" data-trim contenteditable>
+umry8364@umry8364-Latitude-E6400:~/Dev/OPNFV/releng/utils/test/result_collection_api$ ./run_test.sh
+Tests running...
+WARNING:tornado.general:404 GET /dashboard/v1/results?case=vPing&pod=zte-pod1&version=C&installer=fuel&period=5 (127.0.0.1): Project name missing
+WARNING:tornado.access:404 GET /dashboard/v1/results?case=vPing&pod=zte-pod1&version=C&installer=fuel&period=5 (127.0.0.1) 2.30ms
+WARNING:tornado.general:400 POST /api/v1/projects (127.0.0.1): name missing
+............
+WARNING:tornado.access:400 POST /api/v1/projects (127.0.0.1) 1.13ms
+WARNING:tornado.access:403 PUT /api/v1/projects/functest/cases/vping_1 (127.0.0.1) 2.95ms
+WARNING:tornado.general:404 PUT /api/v1/projects/functest/cases/notFound (127.0.0.1): {'project_name': u'functest', 'name': u'notFound'} could not be found in table [testcases]
+WARNING:tornado.access:404 PUT /api/v1/projects/functest/cases/notFound (127.0.0.1) 2.85ms
+
+Ran 74 tests in 1.848s
+OK
+ </code></pre>
+ </section>
+ </section>
+ <section>
+ <h3>Thank you</h3>
+ <img width="600" data-src="../img/colorado.png" alt="tests">
+ </section>
+
+ </div>
+ <div class='footer'>
+ <img src="../img/logo-OPNFV.png" alt="OPNFV logo">
+ </div>
+ </div>
+
+ <script src="../lib/js/head.min.js"></script>
+ <script src="../js/reveal.js"></script>
+
+ <script>
+
+ // Full list of configuration options available at:
+ // https://github.com/hakimel/reveal.js#configuration
+ Reveal.initialize({
+ controls: true,
+ progress: true,
+ history: true,
+ center: true,
+
+ transition: 'slide', // none/fade/slide/convex/concave/zoom
+
+ // Optional reveal.js plugins
+ dependencies: [
+ { src: '../lib/js/classList.js', condition: function() { return !document.body.classList; } },
+ { src: '../plugin/markdown/marked.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
+ { src: '../plugin/markdown/markdown.js', condition: function() { return !!document.querySelector( '[data-markdown]' ); } },
+ { src: '../plugin/highlight/highlight.js', async: true, condition: function() { return !!document.querySelector( 'pre code' ); }, callback: function() { hljs.initHighlightingOnLoad(); } },
+ { src: '../plugin/zoom-js/zoom.js', async: true },
+ { src: '../plugin/notes/notes.js', async: true }
+ ]
+ });
+
+ </script>
+
+ </body>
+</html>
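
The evolution table and the unit-test log in this presentation also hint at how results can be read back from the refactored API. A small sketch, assuming the query parameters shown in the log (case, pod, installer, version) and the new "last" filter apply to the /api/v1/results resource; the parameter names are illustrative, not a formal API contract:

    import requests

    base_url = "http://testresults.opnfv.org/test/api/v1"

    # Fetch recent vPing results for one pod/installer/version combination.
    resp = requests.get(base_url + "/results",
                        params={"case": "vPing", "pod": "zte-pod1",
                                "installer": "fuel", "version": "C",
                                "last": 5})
    print(resp.status_code)
    print(resp.json())
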
diff --git a/testcases/Controllers/ODL/odlreport2db.py b/testcases/Controllers/ODL/odlreport2db.py
index 8eb78b19a..028808411 100644
--- a/testcases/Controllers/ODL/odlreport2db.py
+++ b/testcases/Controllers/ODL/odlreport2db.py
@@ -18,6 +18,7 @@
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
# 0.2: measure test duration and publish results under json format
+# 0.3: adapt push to DB after Test API refactoring
#
#
@@ -25,6 +26,7 @@ import getopt
import json
import os
import sys
+import time
import xmltodict
import yaml
@@ -125,19 +127,19 @@ def main(argv):
functest_yaml = yaml.safe_load(f)
f.close()
- database = functest_yaml.get("results").get("test_db_url")
- build_tag = functest_utils.get_build_tag()
-
try:
# example:
# python odlreport2db.py -x ~/Pictures/Perso/odl/output3.xml
# -i fuel
# -p opnfv-jump-2
# -s os-odl_l2-ha
- version = functest_utils.get_version()
# success criteria for ODL = 100% of tests OK
- status = "failed"
+ status = "FAIL"
+ # TODO: as part of the tests is executed beforehand in bash,
+ # start and stop times have no real meaning here
+ start_time = time.time()
+ stop_time = start_time
try:
tests_passed = 0
tests_failed = 0
@@ -148,19 +150,18 @@ def main(argv):
tests_failed += 1
if (tests_failed < 1):
- status = "passed"
+ status = "PASS"
except:
print("Unable to set criteria" % sys.exc_info()[0])
- functest_utils.push_results_to_db(database,
- "functest",
+
+ functest_utils.push_results_to_db("functest",
data['case_name'],
None,
- data['pod_name'],
- version,
- scenario,
+ start_time,
+ stop_time,
status,
- build_tag,
data)
+
except:
print("Error pushing results into Database '%s'" % sys.exc_info()[0])
diff --git a/testcases/Controllers/ONOS/Teston/onosfunctest.py b/testcases/Controllers/ONOS/Teston/onosfunctest.py
index 07ecacc5d..38935c5dd 100644
--- a/testcases/Controllers/ONOS/Teston/onosfunctest.py
+++ b/testcases/Controllers/ONOS/Teston/onosfunctest.py
@@ -164,7 +164,8 @@ def CleanOnosTest():
def main():
-
+ start_time = time.time()
+ stop_time = start_time
DownloadCodes()
if args.installer == "joid":
logger.debug("Installer is Joid")
@@ -175,11 +176,10 @@ def main():
RunScript("FUNCvirNetNBL3")
try:
- logger.debug("Push result into DB")
+ logger.debug("Push ONOS results into DB")
# TODO check path result for the file
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
result = GetResult()
+ stop_time = time.time()
# ONOS success criteria = all tests OK
# i.e. FUNCvirNet & FUNCvirNetL3
@@ -191,13 +191,14 @@ def main():
except:
logger.error("Unable to set ONOS criteria")
- pod_name = functest_utils.get_pod_name(logger)
- build_tag = functest_utils.get_build_tag(logger)
- functest_utils.push_results_to_db(TEST_DB,
- "functest",
+ functest_utils.push_results_to_db("functest",
"ONOS",
- logger, pod_name, version, scenario,
- status, build_tag, payload=result)
+ logger,
+ start_time,
+ stop_time,
+ status,
+ result)
+
except:
logger.error("Error pushing results into Database")
diff --git a/testcases/OpenStack/rally/run_rally-cert.py b/testcases/OpenStack/rally/run_rally-cert.py
index c3dd304ac..6bb29b8e0 100755
--- a/testcases/OpenStack/rally/run_rally-cert.py
+++ b/testcases/OpenStack/rally/run_rally-cert.py
@@ -18,7 +18,6 @@ import iniparse
import json
import os
import re
-import requests
import subprocess
import time
import yaml
@@ -125,26 +124,6 @@ CINDER_VOLUME_TYPE_NAME = "volume_test"
SUMMARY = []
-def push_results_to_db(case, payload, criteria):
-
- url = TEST_DB + "/results"
- installer = functest_utils.get_installer_type(logger)
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- pod_name = functest_utils.get_pod_name(logger)
-
- # evalutate success criteria
-
- params = {"project_name": "functest", "case_name": case,
- "pod_name": pod_name, "installer": installer,
- "version": version, "scenario": scenario,
- "criteria": criteria, "details": payload}
-
- headers = {'Content-Type': 'application/json'}
- r = requests.post(url, data=json.dumps(params), headers=headers)
- logger.debug(r)
-
-
def get_task_id(cmd_raw):
"""
get task id from command rally result
@@ -303,6 +282,8 @@ def run_task(test_name):
#
global SUMMARY
logger.info('Starting test scenario "{}" ...'.format(test_name))
+ start_time = time.time()
+ stop_time = start_time
task_file = '{}task.yaml'.format(RALLY_DIR)
if not os.path.exists(task_file):
@@ -376,13 +357,23 @@ def run_task(test_name):
# Push results in payload of testcase
if args.report:
- logger.debug("Push result into DB")
- push_results_to_db("Rally_details", json_data, status)
+ stop_time = time.time()
+ logger.debug("Push Rally detailed results into DB")
+ functest_utils.push_results_to_db("functest",
+ "Rally_details",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ json_data)
def main():
global SUMMARY
global network_dict
+ start_time = time.time()
+ stop_time = start_time
+
# configure script
if not (args.test_name in tests):
logger.error('argument not valid')
@@ -482,6 +473,7 @@ def main():
"+===================+============+===============+===========+"
"\n")
payload = []
+ stop_time = time.time()
# for each scenario we draw a row for the table
total_duration = 0.0
@@ -538,8 +530,13 @@ def main():
if args.report:
logger.debug("Pushing Rally summary into DB...")
- push_results_to_db("Rally", payload, status)
-
+ functest_utils.push_results_to_db("functest",
+ "Rally",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ payload)
if args.noclean:
exit(0)
diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py
index d8a8a1acb..46b01898f 100644
--- a/testcases/OpenStack/tempest/run_tempest.py
+++ b/testcases/OpenStack/tempest/run_tempest.py
@@ -14,12 +14,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
import argparse
-import json
import os
import re
-import requests
import shutil
import subprocess
+import sys
import time
import yaml
import ConfigParser
@@ -114,27 +113,6 @@ def get_info(file_result):
logger.debug("duration:" + duration)
-def push_results_to_db(case, payload, criteria):
-
- # TODO move DB creds into config file
- url = TEST_DB + "/results"
- installer = ft_utils.get_installer_type(logger)
- scenario = ft_utils.get_scenario(logger)
- version = ft_utils.get_version(logger)
- pod_name = ft_utils.get_pod_name(logger)
-
- logger.info("Pushing results to DB: '%s'." % url)
-
- params = {"project_name": "functest", "case_name": case,
- "pod_name": str(pod_name), 'installer': installer,
- "version": version, "scenario": scenario, "criteria": criteria,
- 'details': payload}
- headers = {'Content-Type': 'application/json'}
-
- r = requests.post(url, data=json.dumps(params), headers=headers)
- logger.debug(r)
-
-
def create_tempest_resources():
ks_creds = os_utils.get_credentials("keystone")
logger.debug("Creating tenant and user for Tempest suite")
@@ -253,6 +231,8 @@ def run_tempest(OPTION):
# :return: void
#
logger.info("Starting Tempest test suite: '%s'." % OPTION)
+ start_time = time.time()
+ stop_time = start_time
cmd_line = "rally verify start " + OPTION + " --system-wide"
header = ("Tempest environment:\n"
@@ -293,11 +273,12 @@ def run_tempest(OPTION):
dur_sec_float = float(duration.split(':')[2])
dur_sec_int = int(round(dur_sec_float, 0))
dur_sec_int = dur_sec_int + 60 * dur_min
-
+ stop_time = time.time()
# Push results in payload of testcase
if args.report:
+ logger.debug("Pushing tempest results into DB...")
# Note criteria hardcoded...TODO move to testcase.yaml
- status = "failed"
+ status = "FAIL"
try:
diff = (int(num_tests) - int(num_failures))
success_rate = 100 * diff / int(num_tests)
@@ -306,7 +287,7 @@ def run_tempest(OPTION):
# For Tempest we assume that the success rate is above 90%
if success_rate >= 90:
- status = "passed"
+ status = "PASS"
# add the test in error in the details sections
# should be possible to do it during the test
@@ -322,9 +303,18 @@ def run_tempest(OPTION):
"tests": int(num_tests), "failures": int(num_failures),
"errors": error_logs}
logger.info("Results: " + str(json_results))
-
- logger.debug("Push result into DB")
- push_results_to_db("Tempest", json_results, status)
+ # TODO split Tempest smoke and full
+ try:
+ ft_utils.push_results_to_db("functest",
+ "Tempest",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ json_results)
+ except:
+ logger.error("Error pushing results into Database '%s'"
+ % sys.exc_info()[0])
def main():
diff --git a/testcases/OpenStack/vPing/vPing_ssh.py b/testcases/OpenStack/vPing/vPing_ssh.py
index 11887845c..2a417eb3c 100644
--- a/testcases/OpenStack/vPing/vPing_ssh.py
+++ b/testcases/OpenStack/vPing/vPing_ssh.py
@@ -11,7 +11,7 @@
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
# 0.2: measure test duration and publish results under json format
-#
+# 0.3: adapt push to DB after Test API refactoring
#
import argparse
import datetime
@@ -19,6 +19,7 @@ import os
import paramiko
import pprint
import re
+import sys
import time
import yaml
from scp import SCPClient
@@ -176,30 +177,6 @@ def create_security_group(neutron_client):
return sg_id
-def push_results(start_time_ts, duration, status):
- try:
- logger.debug("Pushing result into DB...")
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- criteria = "failed"
- test_criteria = functest_utils.get_criteria_by_test("vping_ssh")
- if eval(test_criteria): # evaluates the regex 'status == "PASS"'
- criteria = "passed"
- pod_name = functest_utils.get_pod_name(logger)
- build_tag = functest_utils.get_build_tag(logger)
- functest_utils.push_results_to_db(TEST_DB,
- "functest",
- "vPing",
- logger, pod_name, version, scenario,
- criteria, build_tag,
- payload={'timestart': start_time_ts,
- 'duration': duration,
- 'status': status})
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
-
-
def main():
creds_nova = openstack_utils.get_credentials("nova")
@@ -268,10 +245,10 @@ def main():
server.delete()
# boot VM 1
- start_time_ts = time.time()
- end_time_ts = start_time_ts
+ start_time = time.time()
+ stop_time = start_time
logger.info("vPing Start Time:'%s'" % (
- datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ datetime.datetime.fromtimestamp(start_time).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("Creating instance '%s'..." % NAME_VM_1)
@@ -409,10 +386,12 @@ def main():
logger.info("Waiting for ping...")
sec = 0
+ stop_time = time.time()
duration = 0
cmd = '~/ping.sh ' + test_ip
flag = False
+
while True:
time.sleep(1)
(stdin, stdout, stderr) = ssh.exec_command(cmd)
@@ -423,8 +402,8 @@ def main():
logger.info("vPing detected!")
# we consider start time at VM1 booting
- end_time_ts = time.time()
- duration = round(end_time_ts - start_time_ts, 1)
+ stop_time = time.time()
+ duration = round(stop_time - start_time, 1)
logger.info("vPing duration:'%s' s." % duration)
EXIT_CODE = 0
flag = True
@@ -440,7 +419,9 @@ def main():
sec += 1
test_status = "FAIL"
- if EXIT_CODE == 0:
+ test_criteria = functest_utils.get_criteria_by_test("vping_ssh")
+
+ if eval(test_criteria):
logger.info("vPing OK")
test_status = "PASS"
else:
@@ -448,7 +429,20 @@ def main():
logger.error("vPing FAILED")
if args.report:
- push_results(start_time_ts, duration, test_status)
+ try:
+ logger.debug("Pushing vPing SSH results into DB...")
+ functest_utils.push_results_to_db("functest",
+ "vPing",
+ logger,
+ start_time,
+ stop_time,
+ test_status,
+ details={'timestart': start_time,
+ 'duration': duration,
+ 'status': test_status})
+ except:
+ logger.error("Error pushing results into Database '%s'"
+ % sys.exc_info()[0])
exit(EXIT_CODE)
diff --git a/testcases/OpenStack/vPing/vPing_userdata.py b/testcases/OpenStack/vPing/vPing_userdata.py
index 2b2963144..5b7d2d90f 100644
--- a/testcases/OpenStack/vPing/vPing_userdata.py
+++ b/testcases/OpenStack/vPing/vPing_userdata.py
@@ -11,6 +11,7 @@
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
# 0.2: measure test duration and publish results under json format
+# 0.3: adapt push to DB after Test API refactoring
#
#
@@ -18,6 +19,7 @@ import argparse
import datetime
import os
import pprint
+import sys
import time
import yaml
@@ -174,29 +176,6 @@ def create_security_group(neutron_client):
return sg_id
-def push_results(start_time_ts, duration, test_status):
- try:
- logger.debug("Pushing result into DB...")
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- criteria = "failed"
- if test_status == "OK":
- criteria = "passed"
- pod_name = functest_utils.get_pod_name(logger)
- build_tag = functest_utils.get_build_tag(logger)
- functest_utils.push_results_to_db(TEST_DB,
- "functest",
- "vPing_userdata",
- logger, pod_name, version, scenario,
- criteria, build_tag,
- payload={'timestart': start_time_ts,
- 'duration': duration,
- 'status': test_status})
- except:
- logger.error("Error pushing results into Database '%s'"
- % sys.exc_info()[0])
-
-
def main():
creds_nova = openstack_utils.get_credentials("nova")
@@ -268,10 +247,10 @@ def main():
# tune (e.g. flavor, images, network) to your specific
# openstack configuration here
# we consider start time at VM1 booting
- start_time_ts = time.time()
- end_time_ts = start_time_ts
+ start_time = time.time()
+ stop_time = start_time
logger.info("vPing Start Time:'%s'" % (
- datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ datetime.datetime.fromtimestamp(start_time).strftime(
'%Y-%m-%d %H:%M:%S')))
# create VM
@@ -336,6 +315,7 @@ def main():
metadata_tries = 0
console_log = vm2.get_console_output()
duration = 0
+ stop_time = time.time()
while True:
time.sleep(1)
@@ -346,8 +326,8 @@ def main():
logger.info("vPing detected!")
# we consider start time at VM1 booting
- end_time_ts = time.time()
- duration = round(end_time_ts - start_time_ts, 1)
+ stop_time = time.time()
+ duration = round(stop_time - start_time, 1)
logger.info("vPing duration:'%s'" % duration)
EXIT_CODE = 0
break
@@ -379,7 +359,20 @@ def main():
logger.error("vPing FAILED")
if args.report:
- push_results(start_time_ts, duration, test_status)
+ try:
+ logger.debug("Pushing vPing userdata results into DB...")
+ functest_utils.push_results_to_db("functest",
+ "vPing_userdata",
+ logger,
+ start_time,
+ stop_time,
+ test_status,
+ details={'timestart': start_time,
+ 'duration': duration,
+ 'status': test_status})
+ except:
+ logger.error("Error pushing results into Database '%s'"
+ % sys.exc_info()[0])
exit(EXIT_CODE)
diff --git a/testcases/features/bgpvpn.py b/testcases/features/bgpvpn.py
index e3db9209d..bac255913 100644
--- a/testcases/features/bgpvpn.py
+++ b/testcases/features/bgpvpn.py
@@ -12,6 +12,7 @@
import argparse
import os
import re
+import time
import yaml
import ConfigParser
@@ -41,6 +42,7 @@ logger = ft_logger.Logger("bgpvpn").getLogger()
def main():
logger.info("Running BGPVPN Tempest test case...")
+ start_time = time.time()
cmd = 'cd ' + BGPVPN_REPO + ';pip install --no-deps -e .'
ft_utils.execute_command(cmd, logger, exit_on_error=False)
@@ -93,8 +95,9 @@ def main():
"errors": error_logs}
logger.info("Results: " + str(json_results))
- criteria = "failed"
+ criteria = "FAIL"
# criteria = success rate = 100% (i.e all tests passed)
+ # TODO use criteria defined in config file
criteria_run = int(tests)
if not failed:
criteria_failed = 0
@@ -102,20 +105,19 @@ def main():
criteria_failed = int(failed)
if criteria_run > 0 and criteria_failed < 1:
- criteria = "passed"
+ criteria = "PASS"
# Push results in payload of testcase
if args.report:
- logger.debug("Push result into DB")
- url = TEST_DB_URL
- scenario = ft_utils.get_scenario(logger)
- version = ft_utils.get_version(logger)
- pod_name = ft_utils.get_pod_name(logger)
- build_tag = ft_utils.get_build_tag(logger)
-
- ft_utils.push_results_to_db(url, "sdnvpn", "bgpvpn_api", logger,
- pod_name, version, scenario, criteria,
- build_tag, json_results)
+ logger.debug("Push bgpvpn results into DB")
+ stop_time = time.time()
+ ft_utils.push_results_to_db("sdnvpn",
+ "bgpvpn_api",
+ logger,
+ start_time,
+ stop_time,
+ criteria,
+ json_results)
if __name__ == '__main__':
main()
diff --git a/testcases/features/doctor.py b/testcases/features/doctor.py
index ac68430a9..c7fc848c5 100644
--- a/testcases/features/doctor.py
+++ b/testcases/features/doctor.py
@@ -34,12 +34,12 @@ logger = ft_logger.Logger("doctor").getLogger()
def main():
cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
- start_time_ts = time.time()
+ start_time = time.time()
ret = functest_utils.execute_command(cmd, logger, exit_on_error=False)
- end_time_ts = time.time()
- duration = round(end_time_ts - start_time_ts, 1)
+ stop_time = time.time()
+ duration = round(stop_time - start_time, 1)
if ret:
logger.info("doctor OK")
test_status = 'OK'
@@ -48,7 +48,7 @@ def main():
test_status = 'NOK'
details = {
- 'timestart': start_time_ts,
+ 'timestart': start_time,
'duration': duration,
'status': test_status,
}
@@ -61,7 +61,7 @@ def main():
if details['status'] == "OK":
status = "passed"
- logger.info("Pushing result: TEST_DB_URL=%(db)s pod_name=%(pod)s "
+ logger.info("Pushing Doctor results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
"version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
'db': TEST_DB_URL,
'pod': pod_name,
@@ -71,11 +71,13 @@ def main():
'b': build_tag,
'd': details,
})
- functest_utils.push_results_to_db(TEST_DB_URL,
- 'doctor', 'doctor-notification',
- logger, pod_name, version, scenario,
- status, build_tag, details)
-
+ functest_utils.push_results_to_db("doctor",
+ "doctor-notification",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ details)
if __name__ == '__main__':
main()
diff --git a/testcases/features/promise.py b/testcases/features/promise.py
index c74c7cfa7..7f034643b 100644
--- a/testcases/features/promise.py
+++ b/testcases/features/promise.py
@@ -12,8 +12,8 @@
import argparse
import json
import os
-import requests
import subprocess
+import time
import yaml
import keystoneclient.v2_0.client as ksclient
@@ -71,6 +71,7 @@ logger = ft_logger.Logger("promise").getLogger()
def main():
+ start_time = time.time()
ks_creds = openstack_utils.get_credentials("keystone")
nv_creds = openstack_utils.get_credentials("nova")
nt_creds = openstack_utils.get_credentials("neutron")
@@ -235,33 +236,23 @@ def main():
start_time, end_time, duration))
if args.report:
- pod_name = functest_utils.get_pod_name(logger)
- installer = functest_utils.get_installer_type(logger)
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- build_tag = functest_utils.get_build_tag(logger)
- # git_version = functest_utils.get_git_branch(PROMISE_REPO)
- url = TEST_DB + "/results"
-
+ stop_time = time.time()
json_results = {"timestart": start_time, "duration": duration,
"tests": int(tests), "failures": int(failures)}
- logger.debug("Results json: " + str(json_results))
+ logger.debug("Promise Results json: " + str(json_results))
# criteria for Promise in Release B was 100% of tests OK
- status = "failed"
+ status = "FAIL"
if int(tests) > 32 and int(failures) < 1:
- status = "passed"
-
- params = {"project_name": "promise", "case_name": "promise",
- "pod_name": str(pod_name), 'installer': installer,
- "version": version, "scenario": scenario,
- "criteria": status, "build_tag": build_tag,
- 'details': json_results}
- headers = {'Content-Type': 'application/json'}
-
- logger.info("Pushing results to DB...")
- r = requests.post(url, data=json.dumps(params), headers=headers)
- logger.debug(r)
+ status = "PASS"
+
+ functest_utils.push_results_to_db("promise",
+ "promise",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ json_results)
if __name__ == '__main__':
diff --git a/testcases/security_scan/connect.py b/testcases/security_scan/connect.py
index 60caade09..e040de16f 100644
--- a/testcases/security_scan/connect.py
+++ b/testcases/security_scan/connect.py
@@ -24,6 +24,8 @@ logger = ft_logger.Logger("security_scan").getLogger()
paramiko.util.log_to_file("/var/log/paramiko.log")
+paramiko.util.log_to_file("/var/log/paramiko.log")
+
class novaManager:
def __init__(self, *args):
diff --git a/testcases/vIMS/vIMS.py b/testcases/vIMS/vIMS.py
index 2430af1a1..3cdbab9b4 100644
--- a/testcases/vIMS/vIMS.py
+++ b/testcases/vIMS/vIMS.py
@@ -115,33 +115,22 @@ def download_and_add_image_on_glance(glance, image_name, image_url):
def step_failure(step_name, error_msg):
logger.error(error_msg)
set_result(step_name, 0, error_msg)
- status = "failed"
+ status = "FAIL"
+ # in case of failure, start and stop times are not meaningful
+ start_time = time.time()
+ stop_time = start_time
if step_name == "sig_test":
- status = "passed"
- push_results(status)
+ status = "PASS"
+ functest_utils.push_results_to_db("functest",
+ "vIMS",
+ logger,
+ start_time,
+ stop_time,
+ status,
+ RESULTS)
exit(-1)
-def push_results(status):
- if args.report:
- logger.debug("Pushing results to DB....")
-
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- pod_name = functest_utils.get_pod_name(logger)
- build_tag = functest_utils.get_build_tag(logger)
-
- functest_utils.push_results_to_db(db_url=DB_URL,
- project="functest",
- case_name="vIMS",
- logger=logger, pod_name=pod_name,
- version=version,
- scenario=scenario,
- criteria=status,
- build_tag=build_tag,
- payload=RESULTS)
-
-
def set_result(step_name, duration=0, result=""):
RESULTS[step_name] = {'duration': duration, 'result': result}
@@ -245,14 +234,22 @@ def test_clearwater():
# success criteria for vIMS (for Brahmaputra)
# - orchestrator deployed
# - VNF deployed
- status = "failed"
+ # TODO use test criteria defined in config file
+ status = "FAIL"
try:
if (RESULTS['orchestrator']['duration'] > 0 and
RESULTS['vIMS']['duration'] > 0):
- status = "passed"
+ status = "PASS"
except:
logger.error("Unable to set test status")
- push_results(status)
+
+ functest_utils.push_results_to_db("functest",
+ "vIMS",
+ logger,
+ start_time_ts,
+ end_time_ts,
+ status,
+ RESULTS)
try:
os.remove(VIMS_TEST_DIR + "temp.json")
diff --git a/utils/functest_utils.py b/utils/functest_utils.py
index b43a63cb8..7b6824423 100644
--- a/utils/functest_utils.py
+++ b/utils/functest_utils.py
@@ -149,17 +149,35 @@ def get_build_tag(logger=None):
return build_tag
-def push_results_to_db(db_url, project, case_name, logger, pod_name,
- version, scenario, criteria, build_tag, payload):
+def get_db_url(logger=None):
+ """
+ Returns DB URL
+ """
+ with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
+ functest_yaml = yaml.safe_load(f)
+ f.close()
+ db_url = functest_yaml.get("results").get("test_db_url")
+ return db_url
+
+
+def push_results_to_db(project, case_name, logger,
+ start_date, stop_date, criteria, details):
"""
POST results to the Result target DB
"""
- url = db_url + "/results"
+ # Retrieve params from CI and conf
+ url = get_db_url(logger) + "/results"
installer = get_installer_type(logger)
+ scenario = get_scenario(logger)
+ version = get_version(logger)
+ pod_name = get_pod_name(logger)
+ build_tag = get_build_tag(logger)
+
params = {"project_name": project, "case_name": case_name,
"pod_name": pod_name, "installer": installer,
"version": version, "scenario": scenario, "criteria": criteria,
- "build_tag": build_tag, "details": payload}
+ "build_tag": build_tag, "start_date": start_date,
+ "stop_date": stop_date, "details": details}
headers = {'Content-Type': 'application/json'}
try:
@@ -170,8 +188,8 @@ def push_results_to_db(db_url, project, case_name, logger, pod_name,
except Exception, e:
print ("Error [push_results_to_db('%s', '%s', '%s', " +
"'%s', '%s', '%s', '%s', '%s', '%s')]:" %
- (db_url, project, case_name, pod_name, version,
- scenario, criteria, build_tag, payload)), e
+ (url, project, case_name, pod_name, version,
+ scenario, criteria, build_tag, details)), e
return False
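
With the helper above, pod, installer, scenario, version and build_tag are resolved inside push_results_to_db() itself, so a test case only reports its project, case name, timestamps, criteria and payload. A minimal sketch of a caller under those assumptions (the case name and payload are placeholders; CONFIG_FUNCTEST_YAML and the usual CI variables must be set for the helper to resolve the metadata):

    import time

    import functest_utils  # utils/functest_utils.py, as imported by the test cases above

    start_time = time.time()
    # ... run the actual test steps here ...
    status = "PASS"  # or "FAIL", according to the test's own success criteria
    stop_time = time.time()

    details = {"timestart": start_time,
               "duration": round(stop_time - start_time, 1),
               "status": status}

    functest_utils.push_results_to_db("functest",
                                      "my_case",   # placeholder case name
                                      None,        # or a functest logger instance
                                      start_time,
                                      stop_time,
                                      status,
                                      details)
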