Diffstat (limited to 'utils/test')
-rw-r--r--  utils/test/reporting/functest/reporting-status.py                              | 118
-rw-r--r--  utils/test/reporting/functest/reporting-tempest.py                             |  71
-rw-r--r--  utils/test/reporting/functest/reporting-vims.py                                |  68
-rw-r--r--  utils/test/reporting/functest/reportingConf.py                                 |  12
-rw-r--r--  utils/test/reporting/functest/reportingUtils.py                                |  47
-rw-r--r--  utils/test/reporting/functest/template/index-status-tmpl.html                  |   2
-rw-r--r--  utils/test/reporting/functest/testCase.py                                      |  58
-rw-r--r--  utils/test/result_collection_api/docker/Dockerfile                             |  52
-rwxr-xr-x  utils/test/result_collection_api/docker/prepare-env.sh                         |  16
-rwxr-xr-x  utils/test/result_collection_api/docker/start-server.sh                        |   4
-rw-r--r--  utils/test/result_collection_api/etc/config.ini                                |   2
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/handlers.py           |   5
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py    |  24
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/resources/result_models.py      | 104
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py      |   5
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py |   2
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py      |   4
-rw-r--r--  utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py       |  51
18 files changed, 468 insertions(+), 177 deletions(-)
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 2ce5efbd0..622c375cc 100644
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -8,7 +8,6 @@
#
import datetime
import jinja2
-import os
import requests
import sys
import time
@@ -19,7 +18,11 @@ import reportingConf as conf
import testCase as tc
import scenarioResult as sr
-testCases4Validation = []
+# Logger
+logger = utils.getLogger("Status")
+
+# Initialization
+testValid = []
otherTestCases = []
# init just tempest to get the list of scenarios
@@ -28,16 +31,16 @@ tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)
# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
-# cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
-cf = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
+cf = conf.TEST_CONF
response = requests.get(cf)
+
functest_yaml_config = yaml.load(response.text)
-print "****************************************"
-print "* Generating reporting..... *"
-print ("* Data retention = %s days *" % conf.PERIOD)
-print "* *"
-print "****************************************"
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % conf.PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")
@@ -50,19 +53,22 @@ config_tiers = functest_yaml_config.get("tiers")
for tier in config_tiers:
if tier['order'] > 0 and tier['order'] < 3:
for case in tier['testcases']:
- testCases4Validation.append(tc.TestCase(case['name'],
- "functest",
- case['dependencies']))
+ if case['name'] not in conf.blacklist:
+ testValid.append(tc.TestCase(case['name'],
+ "functest",
+ case['dependencies']))
elif tier['order'] == 3:
for case in tier['testcases']:
- testCases4Validation.append(tc.TestCase(case['name'],
- case['name'],
- case['dependencies']))
+ if case['name'] not in conf.blacklist:
+ testValid.append(tc.TestCase(case['name'],
+ case['name'],
+ case['dependencies']))
elif tier['order'] > 3:
for case in tier['testcases']:
- otherTestCases.append(tc.TestCase(case['name'],
- "functest",
- case['dependencies']))
+ if case['name'] not in conf.blacklist:
+ otherTestCases.append(tc.TestCase(case['name'],
+ "functest",
+ case['dependencies']))
# For all the versions
for version in conf.versions:
@@ -84,27 +90,32 @@ for version in conf.versions:
# Check if test case is runnable / installer, scenario
# for the test case used for Scenario validation
try:
- print ("---------------------------------")
- print ("installer %s, version %s, scenario %s:" %
- (installer, version, s))
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s:" %
+ (installer, version, s))
# 1) Manage the test cases for the scenario validation
# concretely Tiers 0-3
- for test_case in testCases4Validation:
+ for test_case in testValid:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- print ("testcase %s is %s" % (test_case.getName(),
- test_case.isRunnable))
+ logger.debug("testcase %s is %s" %
+ (test_case.getDisplayName(),
+ test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
+ displayName = test_case.getDisplayName()
project = test_case.getProject()
nb_test_runnable_for_this_scenario += 1
- print (" Searching results for case %s " %
- (dbName))
+ logger.info(" Searching results for case %s " %
+ (displayName))
result = utils.getResult(dbName, installer, s, version)
- print " >>>> Test result=" + str(result)
+ # if no result set the value to 0
+ if result < 0:
+ result = 0
+ logger.info(" >>>> Test score = " + str(result))
test_case.setCriteria(result)
test_case.setIsRunnable(True)
testCases2BeDisplayed.append(tc.TestCase(name,
@@ -120,30 +131,35 @@ for version in conf.versions:
for test_case in otherTestCases:
test_case.checkRunnable(installer, s,
test_case.getConstraints())
- print ("testcase %s is %s" % (test_case.getName(),
- test_case.isRunnable))
+ logger.info("testcase %s is %s" %
+ (test_case.getName(), test_case.isRunnable))
time.sleep(1)
if test_case.isRunnable:
dbName = test_case.getDbName()
name = test_case.getName()
+ displayName = test_case.getDisplayName()
project = test_case.getProject()
- print (" Searching results for case %s " %
- (dbName))
+ logger.info(" Searching results for case %s " %
+ (displayName))
result = utils.getResult(dbName, installer, s, version)
- test_case.setCriteria(result)
- test_case.setIsRunnable(True)
- testCases2BeDisplayed.append(tc.TestCase(name,
- project,
- "",
- result,
- True,
- 4))
+ # at least 1 result for the test
+ if result > -1:
+ test_case.setCriteria(result)
+ test_case.setIsRunnable(True)
+ testCases2BeDisplayed.append(tc.TestCase(name,
+ project,
+ "",
+ result,
+ True,
+ 4))
+ else:
+ logger.debug("No results found")
items[s] = testCases2BeDisplayed
except:
- print ("Error: installer %s, version %s, scenario %s" %
- (installer, version, s))
- print "No data available , error %s " % (sys.exc_info()[0])
+ logger.error("Error: installer %s, version %s, scenario %s" %
+ (installer, version, s))
+ logger.error("No data available: %s " % (sys.exc_info()[0]))
# **********************************************
# Evaluate the results for scenario validation
@@ -158,13 +174,13 @@ for version in conf.versions:
s_score = str(scenario_score) + "/" + str(scenario_criteria)
s_status = "KO"
if scenario_score < scenario_criteria:
- print (">>>> scenario not OK, score = %s/%s" %
- (scenario_score, scenario_criteria))
+ logger.info(">>>> scenario not OK, score = %s/%s" %
+ (scenario_score, scenario_criteria))
s_status = "KO"
else:
- print ">>>>> scenario OK, save the information"
+ logger.info(">>>>> scenario OK, save the information")
s_status = "OK"
- path_validation_file = ("./release/" + version +
+ path_validation_file = (conf.REPORTING_PATH + "/release/" + version +
"/validated_scenario_history.txt")
with open(path_validation_file, "a") as f:
time_format = "%Y-%m-%d %H:%M"
@@ -173,14 +189,12 @@ for version in conf.versions:
f.write(info)
scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
- print "--------------------------"
+ logger.info("--------------------------")
- templateLoader = jinja2.FileSystemLoader(os.path.dirname
- (os.path.abspath
- (__file__)))
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
templateEnv = jinja2.Environment(loader=templateLoader)
- TEMPLATE_FILE = "./template/index-status-tmpl.html"
+ TEMPLATE_FILE = "/template/index-status-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_stats=scenario_stats,
@@ -190,6 +204,6 @@ for version in conf.versions:
period=conf.PERIOD,
version=version)
- with open("./release/" + version +
+ with open(conf.REPORTING_PATH + "/release/" + version +
"/index-status-" + installer + ".html", "wb") as fh:
fh.write(outputText)
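
Reviewer note on the scoring change above: utils.getResult() can now return -1
("no run available"), and the two call sites treat that sentinel differently.
A minimal sketch of the convention, for reference only (not part of the patch):

    def score_for_validation(result):
        # Tier 0-3 cases always count towards the scenario score:
        # "no run available" (-1) is clamped to 0 and penalises the scenario.
        return max(result, 0)

    def is_displayable_other_case(result):
        # Tier >3 cases are informational only: with no run at all (-1)
        # the case is simply not displayed.
        return result > -1
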
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index a065ef442..e3f4e3306 100644
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -1,28 +1,44 @@
from urllib2 import Request, urlopen, URLError
import json
import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
-installers = ["apex", "compass", "fuel", "joid"]
+installers = conf.installers
items = ["tests", "Success rate", "duration"]
-PERIOD = 7
-print "Generate Tempest automatic reporting"
+PERIOD = conf.PERIOD
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("* Generating reporting Tempest_smoke_serial *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
+
for installer in installers:
# we consider the Tempest results of the last PERIOD days
- url = "http://testresults.opnfv.org/test/api/v1/results?case=tempest_smoke_serial"
- request = Request(url + '&period=' + str(PERIOD)
- + '&installer=' + installer + '&version=master')
-
+ url = conf.URL_BASE + "?case=tempest_smoke_serial"
+ request = Request(url + '&period=' + str(PERIOD) +
+ '&installer=' + installer + '&version=master')
+ logger.info("Search tempest_smoke_serial results for installer %s"
+ % installer)
try:
response = urlopen(request)
k = response.read()
results = json.loads(k)
except URLError, e:
- print 'No kittez. Got an error code:', e
+ logger.error("Error code: %s" % e)
test_results = results['results']
- test_results.reverse()
scenario_results = {}
criteria = {}
@@ -48,8 +64,8 @@ for installer in installers:
nb_tests_run = result['details']['tests']
nb_tests_failed = result['details']['failures']
if nb_tests_run != 0:
- success_rate = 100*(int(nb_tests_run)
- - int(nb_tests_failed))/int(nb_tests_run)
+ success_rate = 100*(int(nb_tests_run) -
+ int(nb_tests_failed)) / int(nb_tests_run)
else:
success_rate = 0
@@ -63,40 +79,49 @@ for installer in installers:
crit_time = False
# Expect that at least 165 tests are run
- if nb_tests_run >= 165:
+ if nb_tests_run >= criteria_nb_test:
crit_tests = True
# Expect that at least 90% of success
- if success_rate >= 90:
+ if success_rate >= criteria_success_rate:
crit_rate = True
# Expect that the suite duration is inferior to 30m
- if result['details']['duration'] < 1800:
+ if result['details']['duration'] < criteria_duration:
crit_time = True
result['criteria'] = {'tests': crit_tests,
'Success rate': crit_rate,
'duration': crit_time}
- # error management
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Nb Test run: %s" % nb_tests_run)
+ logger.debug("Test duration: %s"
+ % result['details']['duration'])
+ logger.debug("Success rate: %s" % success_rate)
+ except:
+ logger.error("Data format error")
+
+ # Error management
# ****************
try:
errors = result['details']['errors']
result['errors'] = errors.replace('{0}', '')
except:
- print "Error field not present (Brahamputra runs?)"
+ logger.error("Error field not present (Brahamputra runs?)")
- mypath = os.path.abspath(__file__)
- tplLoader = jinja2.FileSystemLoader(os.path.dirname(mypath))
- templateEnv = jinja2.Environment(loader=tplLoader)
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+ templateEnv = jinja2.Environment(loader=templateLoader)
- TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
+ TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(scenario_results=scenario_results,
items=items,
installer=installer)
- with open("./release/master/index-tempest-" +
+ with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
installer + ".html", "wb") as fh:
fh.write(outputText)
-print "Tempest automatic reporting Done"
+logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index 4033687e8..d0436ed14 100644
--- a/utils/test/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -1,7 +1,11 @@
from urllib2 import Request, urlopen, URLError
import json
import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
+
+logger = utils.getLogger("vIMS")
+
def sig_test_format(sig_test):
nbPassed = 0
@@ -9,7 +13,7 @@ def sig_test_format(sig_test):
nbSkipped = 0
for data_test in sig_test:
if data_test['result'] == "Passed":
- nbPassed+= 1
+ nbPassed += 1
elif data_test['result'] == "Failed":
nbFailures += 1
elif data_test['result'] == "Skipped":
@@ -20,21 +24,29 @@ def sig_test_format(sig_test):
total_sig_test_result['skipped'] = nbSkipped
return total_sig_test_result
-installers = ["fuel", "compass", "joid", "apex"]
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("****************************************")
+logger.info("* Generating reporting vIMS *")
+logger.info("* Data retention = %s days *" % conf.PERIOD)
+logger.info("* *")
+logger.info("****************************************")
+installers = conf.installers
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("Start processing....")
for installer in installers:
- request = Request('http://testresults.opnfv.org/test/api/v1/results?case=vims&installer=' + installer)
+ logger.info("Search vIMS results for installer %s" % installer)
+ request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
try:
response = urlopen(request)
k = response.read()
results = json.loads(k)
except URLError, e:
- print 'No kittez. Got an error code:', e
+ logger.error("Error code: %s" % e)
test_results = results['results']
- test_results.reverse()
+
+ logger.debug("Results found: %s" % test_results)
scenario_results = {}
for r in test_results:
@@ -44,6 +56,7 @@ for installer in installers:
for s, s_result in scenario_results.items():
scenario_results[s] = s_result[0:5]
+ logger.debug("Search for success criteria")
for result in scenario_results[s]:
result["start_date"] = result["start_date"].split(".")[0]
sig_test = result['details']['sig_test']['result']
@@ -67,17 +80,34 @@ for installer in installers:
result['pr_step_ok'] = 0
if nb_step != 0:
result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
-
-
- templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
- templateEnv = jinja2.Environment( loader=templateLoader )
-
- TEMPLATE_FILE = "./template/index-vims-tmpl.html"
- template = templateEnv.get_template( TEMPLATE_FILE )
-
- outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer)
-
- with open("./release/master/index-vims-" + installer + ".html", "wb") as fh:
+ try:
+ logger.debug("Scenario %s, Installer %s"
+ % (s_result[1]['scenario'], installer))
+ logger.debug("Orchestrator deployment: %s s"
+ % result['details']['orchestrator']['duration'])
+ logger.debug("vIMS deployment: %s s"
+ % result['details']['vIMS']['duration'])
+ logger.debug("Signaling testing: %s s"
+ % result['details']['sig_test']['duration'])
+ logger.debug("Signaling testing results: %s"
+ % format_result)
+ except:
+ logger.error("Data badly formatted")
+ logger.debug("------------------------------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+ templateEnv = jinja2.Environment(loader=templateLoader)
+
+ TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_results,
+ step_order=step_order,
+ installer=installer)
+
+ with open(conf.REPORTING_PATH +
+ "/release/master/index-vims-" +
+ installer + ".html", "wb") as fh:
fh.write(outputText)
-
+logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index 649246d64..a58eeecc9 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -10,9 +10,19 @@
#
# ****************************************************
installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["compass"]
+# installers = ["apex"]
+# list of test cases declared in testcases.yaml but that must not be
+# taken into account for the scoring
+blacklist = ["odl", "ovno", "security_scan", "copper", "moon"]
# versions = ["brahmaputra", "master"]
versions = ["master"]
PERIOD = 10
MAX_SCENARIO_CRITERIA = 18
+# get the last 5 test results to determine the success criteria
+NB_TESTS = 5
+# REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
+REPORTING_PATH = "."
URL_BASE = 'http://testresults.opnfv.org/test/api/v1/results'
+TEST_CONF = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
+LOG_LEVEL = "ERROR"
+LOG_FILE = REPORTING_PATH + "/reporting.log"
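
With the constants centralised here, every reporting script consumes the same
knobs. A minimal consumer sketch, assuming reportingConf is importable from
the working directory as in the scripts above:

    import reportingConf as conf

    for installer in conf.installers:
        url = (conf.URL_BASE + "?case=tempest_smoke_serial" +
               "&period=" + str(conf.PERIOD) +
               "&installer=" + installer +
               "&last=" + str(conf.NB_TESTS))
        # cases listed in conf.blacklist are skipped before scoring, and
        # generated pages land under conf.REPORTING_PATH + "/release/..."
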
diff --git a/utils/test/reporting/functest/reportingUtils.py b/utils/test/reporting/functest/reportingUtils.py
index 0db570f32..5051ffa95 100644
--- a/utils/test/reporting/functest/reportingUtils.py
+++ b/utils/test/reporting/functest/reportingUtils.py
@@ -7,8 +7,26 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
from urllib2 import Request, urlopen, URLError
+import logging
import json
-import reportingConf
+import reportingConf as conf
+
+
+def getLogger(module):
+ logFormatter = logging.Formatter("%(asctime)s [" +
+ module +
+ "] [%(levelname)-5.5s] %(message)s")
+ logger = logging.getLogger()
+
+ fileHandler = logging.FileHandler("{0}/{1}".format('.', conf.LOG_FILE))
+ fileHandler.setFormatter(logFormatter)
+ logger.addHandler(fileHandler)
+
+ consoleHandler = logging.StreamHandler()
+ consoleHandler.setFormatter(logFormatter)
+ logger.addHandler(consoleHandler)
+ logger.setLevel(conf.LOG_LEVEL)
+ return logger
def getApiResults(case, installer, scenario, version):
@@ -19,9 +37,10 @@ def getApiResults(case, installer, scenario, version):
# urllib2.install_opener(opener)
# url = "http://127.0.0.1:8000/results?case=" + case + \
# "&period=30&installer=" + installer
- url = (reportingConf.URL_BASE + "?case=" + case +
- "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
- "&scenario=" + scenario + "&version=" + version)
+ url = (conf.URL_BASE + "?case=" + case +
+ "&period=" + str(conf.PERIOD) + "&installer=" + installer +
+ "&scenario=" + scenario + "&version=" + version +
+ "&last=" + str(conf.NB_TESTS))
request = Request(url)
try:
@@ -37,9 +56,8 @@ def getApiResults(case, installer, scenario, version):
def getScenarios(case, installer, version):
case = case.getName()
- print case
- url = (reportingConf.URL_BASE + "?case=" + case +
- "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+ url = (conf.URL_BASE + "?case=" + case +
+ "&period=" + str(conf.PERIOD) + "&installer=" + installer +
"&version=" + version)
request = Request(url)
@@ -104,7 +122,7 @@ def getResult(testCase, installer, scenario, version):
# print "nb of results:" + str(len(test_results))
for r in test_results:
- # print r["creation_date"]
+ # print r["start_date"]
# print r["criteria"]
scenario_results.append({r["start_date"]: r["criteria"]})
# sort results
@@ -114,11 +132,16 @@ def getResult(testCase, installer, scenario, version):
# 2: <4 successful consecutive runs but passing the criteria
# 1: close to pass the success criteria
# 0: 0% success, not passing
+ # -1: no run available
test_result_indicator = 0
nbTestOk = getNbtestOk(scenario_results)
- # print "Nb test OK:"+ str(nbTestOk)
+
+ # print "Nb test OK (last 10 days):"+ str(nbTestOk)
# check that we have at least 4 runs
- if nbTestOk < 1:
+ if len(scenario_results) < 1:
+ # No results available
+ test_result_indicator = -1
+ elif nbTestOk < 1:
test_result_indicator = 0
elif nbTestOk < 2:
test_result_indicator = 1
@@ -126,7 +149,9 @@ def getResult(testCase, installer, scenario, version):
# Test the last 4 run
if (len(scenario_results) > 3):
last4runResults = scenario_results[-4:]
- if getNbtestOk(last4runResults):
+ nbTestOkLast4 = getNbtestOk(last4runResults)
+ # print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
+ if nbTestOkLast4 > 3:
test_result_indicator = 3
else:
test_result_indicator = 2
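
The indicator ladder above is easiest to read in isolation. A sketch that
restates it, where the inputs correspond to len(scenario_results),
getNbtestOk(scenario_results) and getNbtestOk(scenario_results[-4:]):

    def indicator(nb_runs, nb_ok, nb_ok_last4):
        if nb_runs < 1:
            return -1   # no run available
        if nb_ok < 1:
            return 0    # no successful run in the window
        if nb_ok < 2:
            return 1    # close to passing the success criteria
        if nb_runs > 3 and nb_ok_last4 > 3:
            return 3    # 4 successful consecutive runs
        return 2        # passing, but fewer than 4 consecutive OK runs
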
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 89a1d1527..0c3fa9426 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -76,7 +76,7 @@
{% for test in items[scenario] -%}
<th>
{% if test.getCriteria() > -1 -%}
- {{test.getDbName() }}
+ {{test.getDisplayName() }}
{%- endif %}
{% if test.getTier() > 3 -%}
*
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index f0e8f5995..e19853a09 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -19,6 +19,28 @@ class TestCase(object):
self.criteria = criteria
self.isRunnable = isRunnable
self.tier = tier
+ display_name_matrix = {'healthcheck': 'healthcheck',
+ 'vping_ssh': 'vPing (ssh)',
+ 'vping_userdata': 'vPing (userdata)',
+ 'odl': 'ODL',
+ 'onos': 'ONOS',
+ 'ocl': 'OCL',
+ 'tempest_smoke_serial': 'Tempest (smoke)',
+ 'tempest_full_parallel': 'Tempest (full)',
+ 'rally_sanity': 'Rally (smoke)',
+ 'bgpvpn': 'bgpvpn',
+ 'rally_full': 'Rally (full)',
+ 'vims': 'vIMS',
+ 'doctor': 'Doctor',
+ 'promise': 'Promise',
+ 'moon': 'moon',
+ 'copper': 'copper',
+ 'security_scan': 'security'
+ }
+ try:
+ self.displayName = display_name_matrix[self.name]
+ except:
+ self.displayName = "unknown"
def getName(self):
return self.name
@@ -74,10 +96,10 @@ class TestCase(object):
self.isRunnable = is_runnable
def toString(self):
- testcase = ("Name=" + self.name + ";Criteria=" + str(self.criteria)
- + ";Project=" + self.project + ";Constraints="
- + str(self.constraints) + ";IsRunnable"
- + str(self.isRunnable))
+ testcase = ("Name=" + self.name + ";Criteria=" +
+ str(self.criteria) + ";Project=" + self.project +
+ ";Constraints=" + str(self.constraints) +
+ ";IsRunnable" + str(self.isRunnable))
return testcase
def getDbName(self):
@@ -98,31 +120,15 @@ class TestCase(object):
'rally_full': 'rally_full',
'vims': 'vims',
'doctor': 'doctor-notification',
- 'promise': 'promise'
+ 'promise': 'promise',
+ 'moon': 'moon',
+ 'copper': 'copper',
+ 'security_scan': 'security'
}
try:
return test_match_matrix[self.name]
except:
return "unknown"
- def getTestDisplayName(self):
- # Correspondance name of the test case / name in the DB
- test_match_matrix = {'healthcheck': 'healthcheck',
- 'vping_ssh': 'vPing (ssh)',
- 'vping_userdata': 'vPing (userdata)',
- 'odl': 'ODL',
- 'onos': 'ONOS',
- 'ocl': 'OCL',
- 'tempest_smoke_serial': 'Tempest (smoke)',
- 'tempest_full_parallel': 'Tempest (full)',
- 'rally_sanity': 'Rally (smoke)',
- 'bgpvpn': 'bgpvpn',
- 'rally_full': 'Rally (full)',
- 'vims': 'vIMS',
- 'doctor': 'Doctor',
- 'promise': 'Promise'
- }
- try:
- return test_match_matrix[self.name]
- except:
- return "unknown"
+ def getDisplayName(self):
+ return self.displayName
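
Display names are now resolved once in the constructor, with an "unknown"
fallback, so templates can call getDisplayName() directly. For example, with
the constructor arguments used in the reporting scripts above:

    case = TestCase("vims", "functest", "")
    case.getDisplayName()                                     # 'vIMS'
    TestCase("not_listed", "functest", "").getDisplayName()   # 'unknown'
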
diff --git a/utils/test/result_collection_api/docker/Dockerfile b/utils/test/result_collection_api/docker/Dockerfile
new file mode 100644
index 000000000..ffee4c231
--- /dev/null
+++ b/utils/test/result_collection_api/docker/Dockerfile
@@ -0,0 +1,52 @@
+#######################################################
+# Docker container for OPNFV-TESTAPI
+#######################################################
+# Purpose: run opnfv-testapi for gathering test results
+#
+# Maintained by SerenaFeng
+# Build:
+# $ docker build -t opnfv/testapi:tag .
+#
+# Execution:
+# $ docker run -dti -p 8000:8000 \
+# -e "swagger_url=http://10.63.243.17:8000" \
+# -e "mongodb_url=mongodb://10.63.243.17:27017/" \
+# -e "api_port=8000"
+# opnfv/testapi:tag
+#
+# NOTE: providing swagger_url, api_port, mongodb_url is optional.
+#       If not provided, the defaults
+#       configured in config.ini are used
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+FROM ubuntu:14.04
+MAINTAINER SerenaFeng <feng.xiaowei@zte.com.cn>
+LABEL version="v1" description="OPNFV TestAPI Docker container"
+
+ENV HOME /home
+
+# Packaged dependencies
+RUN apt-get update && apt-get install -y \
+curl \
+git \
+gcc \
+wget \
+python-dev \
+python-pip \
+crudini \
+--no-install-recommends
+
+RUN pip install --upgrade pip
+
+RUN git config --global http.sslVerify false
+RUN git clone https://gerrit.opnfv.org/gerrit/releng /home/releng
+
+WORKDIR /home/releng/utils/test/result_collection_api/
+RUN pip install -r requirements.txt
+RUN python setup.py install
+CMD ["bash", "docker/start-server.sh"]
diff --git a/utils/test/result_collection_api/docker/prepare-env.sh b/utils/test/result_collection_api/docker/prepare-env.sh
new file mode 100755
index 000000000..99433cc8c
--- /dev/null
+++ b/utils/test/result_collection_api/docker/prepare-env.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+FILE=/etc/opnfv_testapi/config.ini
+
+
+if [ "$mongodb_url" != "" ]; then
+ sudo crudini --set --existing $FILE mongo url $mongodb_url
+fi
+
+if [ "$swagger_url" != "" ]; then
+ sudo crudini --set --existing $FILE swagger base_url $swagger_url
+fi
+
+if [ "$api_port" != "" ];then
+ sudo crudini --set --existing $FILE api port $api_port
+fi
+
diff --git a/utils/test/result_collection_api/docker/start-server.sh b/utils/test/result_collection_api/docker/start-server.sh
new file mode 100755
index 000000000..8bf6084ae
--- /dev/null
+++ b/utils/test/result_collection_api/docker/start-server.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+bash docker/prepare-env.sh
+opnfv-testapi
diff --git a/utils/test/result_collection_api/etc/config.ini b/utils/test/result_collection_api/etc/config.ini
index 16346bf36..0edb73a3f 100644
--- a/utils/test/result_collection_api/etc/config.ini
+++ b/utils/test/result_collection_api/etc/config.ini
@@ -13,4 +13,4 @@ port = 8000
debug = True
[swagger]
-base_url = http://testresults.opnfv.org/test
\ No newline at end of file
+base_url = http://localhost:8000
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
index 873701103..f98c35e8f 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
@@ -198,9 +198,8 @@ class GenericApiHandler(RequestHandler):
comparing values
"""
if not (new_value is None):
- if len(new_value) > 0:
- if new_value != old_value:
- edit_request[key] = new_value
+ if new_value != old_value:
+ edit_request[key] = new_value
return edit_request
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
index 56bed6c81..400b84ac1 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
@@ -45,7 +45,7 @@ class GenericResultHandler(GenericApiHandler):
obj = {"$gte": str(period)}
query['start_date'] = obj
elif k == 'trust_indicator':
- query[k] = float(v)
+ query[k + '.current'] = float(v)
elif k != 'last':
query[k] = v
return query
@@ -112,12 +112,12 @@ class ResultsCLHandler(GenericResultHandler):
@type period: L{string}
@in period: query
@required period: False
- @param last: last days
+ @param last: last records stored until now
@type last: L{string}
@in last: query
@required last: False
- @param trust_indicator: must be int/long/float
- @type trust_indicator: L{string}
+ @param trust_indicator: must be float
+ @type trust_indicator: L{float}
@in trust_indicator: query
@required trust_indicator: False
"""
@@ -180,3 +180,19 @@ class ResultsGURHandler(GenericResultHandler):
query = dict()
query["_id"] = ObjectId(result_id)
self._get_one(query)
+
+ @swagger.operation(nickname="update")
+ def put(self, result_id):
+ """
+ @description: update a single result by _id
+ @param body: fields to be updated
+ @type body: L{ResultUpdateRequest}
+ @in body: body
+ @rtype: L{Result}
+ @return 200: update success
+ @raise 404: result not exist
+ @raise 403: nothing to update
+ """
+ query = {'_id': ObjectId(result_id)}
+ db_keys = []
+ self._update(query, db_keys)
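
The new PUT handler lets a client bump the trust indicator of a stored result
by _id. A hedged client-side sketch (requests is an assumed client library,
the host and _id are hypothetical; the path and payload shape follow the
handler above and the ResultUpdateRequest model below):

    import json
    import requests

    result_id = "57493abc0000000000000000"  # hypothetical mongo _id
    url = "http://localhost:8000/api/v1/results/" + result_id
    body = {"trust_indicator": {"current": 0.65,
                                "histories": [{"date": "2016-05-24 07:16:19",
                                               "step": -0.05}]}}
    r = requests.put(url, data=json.dumps(body),
                     headers={"Content-Type": "application/json"})
    # 200 on success, 404 if the result does not exist,
    # 403 when there is nothing to update
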
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
index fb6a80961..dd1e3dc53 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
@@ -10,7 +10,69 @@ from opnfv_testapi.tornado_swagger import swagger
@swagger.model()
+class TIHistory(object):
+ """
+ @ptype step: L{float}
+ """
+ def __init__(self, date=None, step=0):
+ self.date = date
+ self.step = step
+
+ def format(self):
+ return {
+ "date": self.date,
+ "step": self.step
+ }
+
+ @staticmethod
+ def from_dict(a_dict):
+ if a_dict is None:
+ return None
+
+ return TIHistory(a_dict.get('date'), a_dict.get('step'))
+
+
+@swagger.model()
+class TI(object):
+ """
+ @property histories: trust_indicator update histories
+ @ptype histories: C{list} of L{TIHistory}
+ @ptype current: L{float}
+ """
+ def __init__(self, current=0):
+ self.current = current
+ self.histories = list()
+
+ def format(self):
+ hs = []
+ for h in self.histories:
+ hs.append(h.format())
+
+ return {
+ "current": self.current,
+ "histories": hs
+ }
+
+ @staticmethod
+ def from_dict(a_dict):
+ if a_dict is None:
+ return None
+ t = TI()
+ t.current = a_dict.get('current')
+ if 'histories' in a_dict.keys():
+ for history in a_dict.get('histories', None):
+ t.histories.append(TIHistory.from_dict(history))
+ else:
+ t.histories = []
+ return t
+
+
+@swagger.model()
class ResultCreateRequest(object):
+ """
+ @property trust_indicator:
+ @ptype trust_indicator: L{TI}
+ """
def __init__(self,
pod_name=None,
project_name=None,
@@ -50,15 +112,30 @@ class ResultCreateRequest(object):
"build_tag": self.build_tag,
"scenario": self.scenario,
"criteria": self.criteria,
- "trust_indicator": self.trust_indicator
+ "trust_indicator": self.trust_indicator.format()
+ }
+
+
+@swagger.model()
+class ResultUpdateRequest(object):
+ """
+ @property trust_indicator:
+ @ptype trust_indicator: L{TI}
+ """
+ def __init__(self, trust_indicator=None):
+ self.trust_indicator = trust_indicator
+
+ def format(self):
+ return {
+ "trust_indicator": self.trust_indicator.format(),
}
@swagger.model()
class TestResult(object):
"""
- @property trust_indicator: must be int/long/float
- @ptype trust_indicator: L{float}
+ @property trust_indicator: used for long duration test case
+ @ptype trust_indicator: L{TI}
"""
def __init__(self, _id=None, case_name=None, project_name=None,
pod_name=None, installer=None, version=None,
@@ -90,7 +167,6 @@ class TestResult(object):
t.case_name = a_dict.get('case_name')
t.pod_name = a_dict.get('pod_name')
t.project_name = a_dict.get('project_name')
- t.description = a_dict.get('description')
t.start_date = str(a_dict.get('start_date'))
t.stop_date = str(a_dict.get('stop_date'))
t.details = a_dict.get('details')
@@ -99,19 +175,7 @@ class TestResult(object):
t.build_tag = a_dict.get('build_tag')
t.scenario = a_dict.get('scenario')
t.criteria = a_dict.get('criteria')
- # 0 < trust indicator < 1
- # if bad value => set this indicator to 0
- t.trust_indicator = a_dict.get('trust_indicator')
- if t.trust_indicator is not None:
- if isinstance(t.trust_indicator, (int, long, float)):
- if t.trust_indicator < 0:
- t.trust_indicator = 0
- elif t.trust_indicator > 1:
- t.trust_indicator = 1
- else:
- t.trust_indicator = 0
- else:
- t.trust_indicator = 0
+ t.trust_indicator = TI.from_dict(a_dict.get('trust_indicator'))
return t
def format(self):
@@ -119,7 +183,6 @@ class TestResult(object):
"case_name": self.case_name,
"project_name": self.project_name,
"pod_name": self.pod_name,
- "description": self.description,
"start_date": str(self.start_date),
"stop_date": str(self.stop_date),
"version": self.version,
@@ -128,7 +191,7 @@ class TestResult(object):
"build_tag": self.build_tag,
"scenario": self.scenario,
"criteria": self.criteria,
- "trust_indicator": self.trust_indicator
+ "trust_indicator": self.trust_indicator.format()
}
def format_http(self):
@@ -137,7 +200,6 @@ class TestResult(object):
"case_name": self.case_name,
"project_name": self.project_name,
"pod_name": self.pod_name,
- "description": self.description,
"start_date": str(self.start_date),
"stop_date": str(self.stop_date),
"version": self.version,
@@ -146,7 +208,7 @@ class TestResult(object):
"build_tag": self.build_tag,
"scenario": self.scenario,
"criteria": self.criteria,
- "trust_indicator": self.trust_indicator
+ "trust_indicator": self.trust_indicator.format()
}
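
A quick round trip through the new models, assuming they are importable as in
the unit tests below (from opnfv_testapi.resources.result_models import TI,
TIHistory):

    ti = TI(0.7)
    ti.histories.append(TIHistory("2016-05-24 07:16:19", -0.05))
    d = ti.format()
    # {'current': 0.7,
    #  'histories': [{'date': '2016-05-24 07:16:19', 'step': -0.05}]}
    restored = TI.from_dict(d)
    assert restored.current == 0.7
    assert restored.histories[0].step == -0.05
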
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
index 6ab98c720..450969248 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
@@ -116,8 +116,8 @@ class MemDb(object):
if k == 'start_date':
if not MemDb._compare_date(v, content.get(k)):
return False
- elif k == 'trust_indicator':
- if float(content.get(k)) != float(v):
+ elif k == 'trust_indicator.current':
+ if content.get('trust_indicator').get('current') != v:
return False
elif content.get(k, None) != v:
return False
@@ -173,7 +173,6 @@ class MemDb(object):
def _check_keys(self, doc):
for key in doc.keys():
- print('key', key, 'value', doc.get(key))
if '.' in key:
raise NameError('key {} must not contain .'.format(key))
if key.startswith('$'):
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
index 27382f089..9a1253e94 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
@@ -8,9 +8,9 @@
##############################################################################
import unittest
-from tornado.web import Application
from tornado import gen
from tornado.testing import AsyncHTTPTestCase, gen_test
+from tornado.web import Application
import fake_pymongo
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py
index d47306093..327ddf7b2 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py
@@ -10,7 +10,7 @@ import unittest
from test_base import TestBase
from opnfv_testapi.resources.project_models import ProjectCreateRequest, \
- Project, Projects
+ Project, Projects, ProjectUpdateRequest
from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
HTTP_FORBIDDEN, HTTP_NOT_FOUND
@@ -112,7 +112,7 @@ class TestProjectUpdate(TestProjectBase):
code, body = self.get(self.req_d.name)
_id = body._id
- req = ProjectCreateRequest('newName', 'new description')
+ req = ProjectUpdateRequest('newName', 'new description')
code, body = self.update(req, self.req_d.name)
self.assertEqual(code, HTTP_OK)
self.assertEqual(_id, body._id)
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
index bba3b228f..98ef7c08c 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
@@ -6,15 +6,16 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import copy
+import unittest
+from datetime import datetime, timedelta
from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
HTTP_NOT_FOUND
from opnfv_testapi.resources.pod_models import PodCreateRequest
from opnfv_testapi.resources.project_models import ProjectCreateRequest
from opnfv_testapi.resources.result_models import ResultCreateRequest, \
- TestResult, TestResults
+ TestResult, TestResults, ResultUpdateRequest, TI, TIHistory
from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest
from test_base import TestBase
@@ -55,9 +56,11 @@ class TestResultBase(TestBase):
self.build_tag = 'v3.0'
self.scenario = 'odl-l2'
self.criteria = 'passed'
- self.trust_indicator = 0.7
+ self.trust_indicator = TI(0.7)
self.start_date = "2016-05-23 07:16:09.477097"
self.stop_date = "2016-05-23 07:16:19.477097"
+ self.update_date = "2016-05-24 07:16:19.477097"
+ self.update_step = -0.05
super(TestResultBase, self).setUp()
self.details = Details(timestart='0', duration='9s', status='OK')
self.req_d = ResultCreateRequest(pod_name=self.pod,
@@ -74,6 +77,7 @@ class TestResultBase(TestBase):
trust_indicator=self.trust_indicator)
self.get_res = TestResult
self.list_res = TestResults
+ self.update_res = TestResult
self.basePath = '/api/v1/results'
self.req_pod = PodCreateRequest(self.pod, 'metal', 'zte pod 1')
self.req_project = ProjectCreateRequest(self.project, 'vping test')
@@ -103,10 +107,19 @@ class TestResultBase(TestBase):
self.assertEqual(result.build_tag, req.build_tag)
self.assertEqual(result.scenario, req.scenario)
self.assertEqual(result.criteria, req.criteria)
- self.assertEqual(result.trust_indicator, req.trust_indicator)
self.assertEqual(result.start_date, req.start_date)
self.assertEqual(result.stop_date, req.stop_date)
self.assertIsNotNone(result._id)
+ ti = result.trust_indicator
+ self.assertEqual(ti.current, req.trust_indicator.current)
+ if ti.histories:
+ history = ti.histories[0]
+ self.assertEqual(history.date, self.update_date)
+ self.assertEqual(history.step, self.update_step)
+
+ def _create_d(self):
+ _, res = self.create_d()
+ return res.href.split('/')[-1]
class TestResultCreate(TestResultBase):
@@ -172,8 +185,7 @@ class TestResultCreate(TestResultBase):
class TestResultGet(TestResultBase):
def test_getOne(self):
- _, res = self.create_d()
- _id = res.href.split('/')[-1]
+ _id = self._create_d()
code, body = self.get(_id)
self.assert_res(code, body)
@@ -266,8 +278,6 @@ class TestResultGet(TestResultBase):
self.assert_res(code, result, req)
def _create_changed_date(self, **kwargs):
- import copy
- from datetime import datetime, timedelta
req = copy.deepcopy(self.req_d)
req.start_date = datetime.now() + timedelta(**kwargs)
req.stop_date = str(req.start_date + timedelta(minutes=10))
@@ -276,13 +286,36 @@ class TestResultGet(TestResultBase):
return req
def _set_query(self, *args):
+ def get_value(arg):
+ return eval('self.' + arg) \
+ if arg != 'trust_indicator' else self.trust_indicator.current
uri = ''
for arg in args:
if '=' in arg:
uri += arg + '&'
else:
- uri += '{}={}&'.format(arg, eval('self.' + arg))
+ uri += '{}={}&'.format(arg, get_value(arg))
return uri[0: -1]
+
+class TestResultUpdate(TestResultBase):
+ def test_success(self):
+ _id = self._create_d()
+
+ new_ti = copy.deepcopy(self.trust_indicator)
+ new_ti.current += self.update_step
+ new_ti.histories.append(TIHistory(self.update_date, self.update_step))
+ new_data = copy.deepcopy(self.req_d)
+ new_data.trust_indicator = new_ti
+ update = ResultUpdateRequest(trust_indicator=new_ti)
+ code, body = self.update(update, _id)
+ self.assertEqual(_id, body._id)
+ self.assert_res(code, body, new_data)
+
+ code, new_body = self.get(_id)
+ self.assertEqual(_id, new_body._id)
+ self.assert_res(code, new_body, new_data)
+
+
if __name__ == '__main__':
unittest.main()