Diffstat (limited to 'utils')
-rwxr-xr-x  utils/test/reporting/docker/reporting.sh                                      |   3
-rwxr-xr-x  utils/test/reporting/reporting/functest/reporting-status.py                   |  32
-rw-r--r--  utils/test/reporting/reporting/storperf/reporting-status.py                   |  10
-rw-r--r--  utils/test/reporting/reporting/utils/reporting_utils.py                       | 224
-rw-r--r--  utils/test/reporting/reporting/vsperf/__init__.py                             |   0
-rw-r--r--  utils/test/reporting/reporting/vsperf/reporting-status.py                     | 138
-rw-r--r--  utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html         | 114
-rw-r--r--  utils/test/reporting/reporting/yardstick/reporting-status.py                  |   9
-rw-r--r--  utils/test/testapi/.gitignore                                                  |   2
-rw-r--r--  utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profile.html |  11
-rw-r--r--  utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profileController.js |   2
-rw-r--r--  utils/test/testapi/etc/config.ini                                              |  48
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/check.py                               |  35
-rw-r--r--  utils/test/testapi/opnfv_testapi/common/constants.py                           |   5
-rw-r--r--  utils/test/testapi/opnfv_testapi/resources/result_handlers.py                  |  19
-rw-r--r--  utils/test/testapi/opnfv_testapi/router/url_mappings.py                        |   3
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini                 |  16
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini                  |  17
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini               |  11
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini              |  17
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini                  |  17
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/conftest.py                        |   2
-rw-r--r--  utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py             |   6
-rw-r--r--  utils/test/testapi/opnfv_testapi/ui/auth/base.py                               |  35
-rw-r--r--  utils/test/testapi/opnfv_testapi/ui/auth/constants.py                          |  18
-rw-r--r--  utils/test/testapi/opnfv_testapi/ui/auth/sign.py                               |  80
-rw-r--r--  utils/test/testapi/opnfv_testapi/ui/auth/user.py                               |  43
-rw-r--r--  utils/test/testapi/opnfv_testapi/ui/root.py                                    |   6
-rw-r--r--  utils/test/testapi/requirements.txt                                            |   1
29 files changed, 527 insertions(+), 397 deletions(-)
diff --git a/utils/test/reporting/docker/reporting.sh b/utils/test/reporting/docker/reporting.sh
index 076dc4719..d8db6201e 100755
--- a/utils/test/reporting/docker/reporting.sh
+++ b/utils/test/reporting/docker/reporting.sh
@@ -4,7 +4,7 @@ export PYTHONPATH="${PYTHONPATH}:./reporting"
export CONFIG_REPORTING_YAML=./reporting/reporting.yaml
declare -a versions=(danube master)
-declare -a projects=(functest storperf yardstick qtip)
+declare -a projects=(functest storperf yardstick qtip vsperf)
project=$1
reporting_type=$2
@@ -32,6 +32,7 @@ cp -Rf js display
# yardstick | status
# storperf | status
# qtip | status
+# vsperf | status
function report_project()
{
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
index c7c2051a3..02bf67d0e 100755
--- a/utils/test/reporting/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/reporting/functest/reporting-status.py
@@ -7,18 +7,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
-import jinja2
import os
import sys
import time
+import jinja2
+
import testCase as tc
import scenarioResult as sr
+import reporting.utils.reporting_utils as rp_utils
-# manage conf
-import utils.reporting_utils as rp_utils
-
-"""Functest reporting status"""
+"""
+Functest reporting status
+"""
# Logger
logger = rp_utils.getLogger("Functest-Status")
@@ -106,7 +107,8 @@ for version in versions:
for installer in installers:
# get scenarios
- scenario_results = rp_utils.getScenarios(healthcheck,
+ scenario_results = rp_utils.getScenarios("functest",
+ "connection_check",
installer,
version)
# get nb of supported architecture (x86, aarch64)
@@ -219,7 +221,7 @@ for version in versions:
logger.debug("No results found")
items[s] = testCases2BeDisplayed
- except:
+ except Exception:
logger.error("Error: installer %s, version %s, scenario %s"
% (installer, version, s))
logger.error("No data available: %s" % (sys.exc_info()[0]))
@@ -279,13 +281,13 @@ for version in versions:
template = templateEnv.get_template(TEMPLATE_FILE)
outputText = template.render(
- scenario_stats=scenario_stats,
- scenario_results=scenario_result_criteria,
- items=items,
- installer=installer_display,
- period=period,
- version=version,
- date=reportingDate)
+ scenario_stats=scenario_stats,
+ scenario_results=scenario_result_criteria,
+ items=items,
+ installer=installer_display,
+ period=period,
+ version=version,
+ date=reportingDate)
with open("./display/" + version +
"/functest/status-" +
@@ -298,8 +300,6 @@ for version in versions:
# Generate outputs for export
# pdf
- # TODO Change once web site updated...use the current one
- # to test pdf production
url_pdf = rp_utils.get_config('general.url')
pdf_path = ("./display/" + version +
"/functest/status-" + installer_display + ".html")
diff --git a/utils/test/reporting/reporting/storperf/reporting-status.py b/utils/test/reporting/reporting/storperf/reporting-status.py
index 0c188a338..103b80fd9 100644
--- a/utils/test/reporting/reporting/storperf/reporting-status.py
+++ b/utils/test/reporting/reporting/storperf/reporting-status.py
@@ -7,13 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
-import jinja2
import os
-# manage conf
-import utils.reporting_utils as rp_utils
+import jinja2
-import utils.scenarioResult as sr
+import reporting.utils.reporting_utils as rp_utils
+import reporting.utils.scenarioResult as sr
installers = rp_utils.get_config('general.installers')
versions = rp_utils.get_config('general.versions')
@@ -39,7 +38,8 @@ for version in versions:
for installer in installers:
# get scenarios results data
# for the moment we consider only 1 case snia_steady_state
- scenario_results = rp_utils.getScenarios("snia_steady_state",
+ scenario_results = rp_utils.getScenarios("storperf",
+ "snia_steady_state",
installer,
version)
# logger.info("scenario_results: %s" % scenario_results)
diff --git a/utils/test/reporting/reporting/utils/reporting_utils.py b/utils/test/reporting/reporting/utils/reporting_utils.py
index 62820914a..235bd6ef9 100644
--- a/utils/test/reporting/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/reporting/utils/reporting_utils.py
@@ -20,15 +20,15 @@ import yaml
# YAML UTILS
#
# -----------------------------------------------------------
-def get_parameter_from_yaml(parameter, file):
+def get_parameter_from_yaml(parameter, config_file):
"""
Returns the value of a given parameter in file.yaml
parameter must be given in string format with dots
Example: general.openstack.image_name
"""
- with open(file) as f:
- file_yaml = yaml.safe_load(f)
- f.close()
+ with open(config_file) as my_file:
+ file_yaml = yaml.safe_load(my_file)
+ my_file.close()
value = file_yaml
for element in parameter.split("."):
value = value.get(element)
@@ -39,6 +39,9 @@ def get_parameter_from_yaml(parameter, file):
def get_config(parameter):
+ """
+ Get configuration parameter from yaml configuration file
+ """
yaml_ = os.environ["CONFIG_REPORTING_YAML"]
return get_parameter_from_yaml(parameter, yaml_)
@@ -49,20 +52,23 @@ def get_config(parameter):
#
# -----------------------------------------------------------
def getLogger(module):
- logFormatter = logging.Formatter("%(asctime)s [" +
- module +
- "] [%(levelname)-5.5s] %(message)s")
+ """
+ Get Logger
+ """
+ log_formatter = logging.Formatter("%(asctime)s [" +
+ module +
+ "] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger()
log_file = get_config('general.log.log_file')
log_level = get_config('general.log.log_level')
- fileHandler = logging.FileHandler("{0}/{1}".format('.', log_file))
- fileHandler.setFormatter(logFormatter)
- logger.addHandler(fileHandler)
+ file_handler = logging.FileHandler("{0}/{1}".format('.', log_file))
+ file_handler.setFormatter(log_formatter)
+ logger.addHandler(file_handler)
- consoleHandler = logging.StreamHandler()
- consoleHandler.setFormatter(logFormatter)
- logger.addHandler(consoleHandler)
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(log_formatter)
+ logger.addHandler(console_handler)
logger.setLevel(log_level)
return logger
@@ -73,6 +79,9 @@ def getLogger(module):
#
# -----------------------------------------------------------
def getApiResults(case, installer, scenario, version):
+ """
+ Get Results by calling the API
+ """
results = json.dumps([])
# to remove proxy (to be removed at the end for local test only)
# proxy_handler = urllib2.ProxyHandler({})
@@ -94,29 +103,32 @@ def getApiResults(case, installer, scenario, version):
response = urlopen(request)
k = response.read()
results = json.loads(k)
- except URLError as e:
- print 'No kittez. Got an error code:'.format(e)
+ except URLError:
+ print "Error when retrieving results form API"
return results
-def getScenarios(case, installer, version):
-
- try:
- case = case.getName()
- except:
- # if case is not an object test case, try the string
- if type(case) == str:
- case = case
- else:
- raise ValueError("Case cannot be evaluated")
+def getScenarios(project, case, installer, version):
+ """
+ Get the list of Scenarios
+ """
period = get_config('general.period')
url_base = get_config('testapi.url')
- url = ("http://" + url_base + "?case=" + case +
- "&period=" + str(period) + "&installer=" + installer +
- "&version=" + version)
+ url = ("http://" + url_base +
+ "?installer=" + installer +
+ "&period=" + str(period))
+
+ if version is not None:
+ url += "&version=" + version
+
+ if project is not None:
+ url += "&project=" + project
+
+ if case is not None:
+ url += "&case=" + case
try:
request = Request(url)
@@ -136,7 +148,7 @@ def getScenarios(case, installer, version):
results = json.loads(k)
test_results += results['results']
except KeyError:
- print ('No pagination detected')
+ print "No pagination detected"
except URLError as err:
print 'Got an error code: {}'.format(err)
@@ -144,32 +156,38 @@ def getScenarios(case, installer, version):
test_results.reverse()
scenario_results = {}
- for r in test_results:
+ for my_result in test_results:
# Retrieve all the scenarios per installer
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
+ if not my_result['scenario'] in scenario_results.keys():
+ scenario_results[my_result['scenario']] = []
# Do we consider results from virtual pods ...
# Do we consider results for non HA scenarios...
exclude_virtual_pod = get_config('functest.exclude_virtual')
exclude_noha = get_config('functest.exclude_noha')
- if ((exclude_virtual_pod and "virtual" in r['pod_name']) or
- (exclude_noha and "noha" in r['scenario'])):
+ if ((exclude_virtual_pod and "virtual" in my_result['pod_name']) or
+ (exclude_noha and "noha" in my_result['scenario'])):
print "exclude virtual pod results..."
else:
- scenario_results[r['scenario']].append(r)
+ scenario_results[my_result['scenario']].append(my_result)
return scenario_results
def getScenarioStats(scenario_results):
+ """
+ Get the number of occurence of scenarios over the defined PERIOD
+ """
scenario_stats = {}
- for k, v in scenario_results.iteritems():
- scenario_stats[k] = len(v)
-
+ for res_k, res_v in scenario_results.iteritems():
+ scenario_stats[res_k] = len(res_v)
return scenario_stats
def getScenarioStatus(installer, version):
+ """
+ Get the status of a scenariofor Yardstick
+ they used criteria SUCCESS (default: PASS)
+ """
period = get_config('general.period')
url_base = get_config('testapi.url')
@@ -184,33 +202,37 @@ def getScenarioStatus(installer, version):
response.close()
results = json.loads(k)
test_results = results['results']
- except URLError as e:
- print 'Got an error code: {}'.format(e)
+ except URLError:
+ print "GetScenarioStatus: error when calling the API"
scenario_results = {}
result_dict = {}
if test_results is not None:
- for r in test_results:
- if r['stop_date'] != 'None' and r['criteria'] is not None:
- if not r['scenario'] in scenario_results.keys():
- scenario_results[r['scenario']] = []
- scenario_results[r['scenario']].append(r)
-
- for k, v in scenario_results.items():
+ for test_r in test_results:
+ if (test_r['stop_date'] != 'None' and
+ test_r['criteria'] is not None):
+ if not test_r['scenario'] in scenario_results.keys():
+ scenario_results[test_r['scenario']] = []
+ scenario_results[test_r['scenario']].append(test_r)
+
+ for scen_k, scen_v in scenario_results.items():
# scenario_results[k] = v[:LASTEST_TESTS]
s_list = []
- for element in v:
+ for element in scen_v:
if element['criteria'] == 'SUCCESS':
s_list.append(1)
else:
s_list.append(0)
- result_dict[k] = s_list
+ result_dict[scen_k] = s_list
# return scenario_results
return result_dict
def getQtipResults(version, installer):
+ """
+ Get QTIP results
+ """
period = get_config('qtip.period')
url_base = get_config('testapi.url')
@@ -240,19 +262,24 @@ def getQtipResults(version, installer):
def getNbtestOk(results):
+ """
+ based on default value (PASS) count the number of test OK
+ """
nb_test_ok = 0
- for r in results:
- for k, v in r.iteritems():
+ for my_result in results:
+ for res_k, res_v in my_result.iteritems():
try:
- if "PASS" in v:
+ if "PASS" in res_v:
nb_test_ok += 1
- except:
+ except Exception:
print "Cannot retrieve test status"
return nb_test_ok
def getResult(testCase, installer, scenario, version):
-
+ """
+ Get Result for a given Functest Testcase
+ """
# retrieve raw results
results = getApiResults(testCase, installer, scenario, version)
# let's concentrate on test results only
@@ -269,10 +296,10 @@ def getResult(testCase, installer, scenario, version):
# print " ---------------- "
# print "nb of results:" + str(len(test_results))
- for r in test_results:
+ for res_r in test_results:
# print r["start_date"]
# print r["criteria"]
- scenario_results.append({r["start_date"]: r["criteria"]})
+ scenario_results.append({res_r["start_date"]: res_r["criteria"]})
# sort results
scenario_results.sort()
# 4 levels for the results
@@ -295,7 +322,7 @@ def getResult(testCase, installer, scenario, version):
test_result_indicator = 1
else:
# Test the last 4 run
- if (len(scenario_results) > 3):
+ if len(scenario_results) > 3:
last4runResults = scenario_results[-4:]
nbTestOkLast4 = getNbtestOk(last4runResults)
# print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
@@ -309,19 +336,22 @@ def getResult(testCase, installer, scenario, version):
def getJenkinsUrl(build_tag):
- # e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246
- # id = 246
- # jenkins-functest-compass-huawei-pod5-daily-master-136
- # id = 136
- # note it is linked to jenkins format
- # if this format changes...function to be adapted....
+ """
+ Get Jenkins url_base corespoding to the last test CI run
+ e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246
+ id = 246
+ jenkins-functest-compass-huawei-pod5-daily-master-136
+ id = 136
+ note it is linked to jenkins format
+ if this format changes...function to be adapted....
+ """
url_base = get_config('functest.jenkins_url')
try:
build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
"/" + str(build_id[0]))
jenkins_url = url_base + url_id + "/console"
- except:
+ except Exception:
print 'Impossible to get jenkins url:'
if "jenkins-" not in build_tag:
@@ -331,10 +361,13 @@ def getJenkinsUrl(build_tag):
def getScenarioPercent(scenario_score, scenario_criteria):
+ """
+ Get success rate of the scenario (in %)
+ """
score = 0.0
try:
score = float(scenario_score) / float(scenario_criteria) * 100
- except:
+ except Exception:
print 'Impossible to calculate the percentage score'
return score
@@ -343,32 +376,41 @@ def getScenarioPercent(scenario_score, scenario_criteria):
# Functest
# *********
def getFunctestConfig(version=""):
+ """
+ Get Functest configuration
+ """
config_file = get_config('functest.test_conf') + version
response = requests.get(config_file)
return yaml.safe_load(response.text)
def getArchitectures(scenario_results):
+ """
+ Get software architecture (x86 or Aarch64)
+ """
supported_arch = ['x86']
- if (len(scenario_results) > 0):
+ if len(scenario_results) > 0:
for scenario_result in scenario_results.values():
for value in scenario_result:
- if ("armband" in value['build_tag']):
+ if "armband" in value['build_tag']:
supported_arch.append('aarch64')
return supported_arch
return supported_arch
def filterArchitecture(results, architecture):
+ """
+ Restrict the list of results based on given architecture
+ """
filtered_results = {}
- for name, results in results.items():
+ for name, res in results.items():
filtered_values = []
- for value in results:
- if (architecture is "x86"):
+ for value in res:
+ if architecture is "x86":
# drop aarch64 results
if ("armband" not in value['build_tag']):
filtered_values.append(value)
- elif(architecture is "aarch64"):
+ elif architecture is "aarch64":
# drop x86 results
if ("armband" in value['build_tag']):
filtered_values.append(value)
@@ -381,6 +423,9 @@ def filterArchitecture(results, architecture):
# Yardstick
# *********
def subfind(given_list, pattern_list):
+ """
+ Yardstick util function
+ """
LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
for i in range(len(given_list)):
if given_list[i] == pattern_list[0] and \
@@ -390,7 +435,9 @@ def subfind(given_list, pattern_list):
def _get_percent(status):
-
+ """
+ Yardstick util function to calculate success rate
+ """
if status * 100 % 6:
return round(float(status) * 100 / 6, 1)
else:
@@ -398,13 +445,16 @@ def _get_percent(status):
def get_percent(four_list, ten_list):
+ """
+ Yardstick util function to calculate success rate
+ """
four_score = 0
ten_score = 0
- for v in four_list:
- four_score += v
- for v in ten_list:
- ten_score += v
+ for res_v in four_list:
+ four_score += res_v
+ for res_v in ten_list:
+ ten_score += res_v
LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
if four_score == LASTEST_TESTS:
@@ -420,9 +470,12 @@ def get_percent(four_list, ten_list):
def _test():
+ """
+ Yardstick util function (test)
+ """
status = getScenarioStatus("compass", "master")
print "status:++++++++++++++++++++++++"
- print(json.dumps(status, indent=4))
+ print json.dumps(status, indent=4)
# ----------------------------------------------------------
@@ -432,8 +485,9 @@ def _test():
# -----------------------------------------------------------
def export_csv(scenario_file_name, installer, version):
- # csv
- # generate sub files based on scenario_history.txt
+ """
+ Generate sub files based on scenario_history.txt
+ """
scenario_installer_file_name = ("./display/" + version +
"/functest/scenario_history_" +
installer + ".csv")
@@ -443,21 +497,25 @@ def export_csv(scenario_file_name, installer, version):
for line in scenario_file:
if installer in line:
scenario_installer_file.write(line)
- scenario_installer_file.close
+ scenario_installer_file.close
def generate_csv(scenario_file):
+ """
+ Generate sub files based on scenario_history.txt
+ """
import shutil
- # csv
- # generate sub files based on scenario_history.txt
csv_file = scenario_file.replace('txt', 'csv')
shutil.copy2(scenario_file, csv_file)
def export_pdf(pdf_path, pdf_doc_name):
+ """
+ Export results to pdf
+ """
try:
pdfkit.from_file(pdf_path, pdf_doc_name)
except IOError:
print "Error but pdf generated anyway..."
- except:
+ except Exception:
print "impossible to generate PDF"
diff --git a/utils/test/reporting/reporting/vsperf/__init__.py b/utils/test/reporting/reporting/vsperf/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/reporting/reporting/vsperf/__init__.py
diff --git a/utils/test/reporting/reporting/vsperf/reporting-status.py b/utils/test/reporting/reporting/vsperf/reporting-status.py
new file mode 100644
index 000000000..fc4cc677d
--- /dev/null
+++ b/utils/test/reporting/reporting/vsperf/reporting-status.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import os
+
+import jinja2
+
+import reporting.utils.reporting_utils as rp_utils
+import reporting.utils.scenarioResult as sr
+
+installers = rp_utils.get_config('general.installers')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Storperf-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("* Generating reporting scenario status *")
+logger.info("* Data retention = %s days *" % PERIOD)
+logger.info("* *")
+logger.info("*******************************************")
+
+# retrieve the list of storperf tests
+versions = {'master'}
+
+# For all the versions
+for version in versions:
+ # For all the installers
+ for installer in installers:
+ scenario_results = rp_utils.getScenarios("vsperf",
+ None,
+ installer,
+ None)
+ items = {}
+ scenario_result_criteria = {}
+ logger.info("installer %s, version %s, scenario ", installer, version)
+
+ # From each scenarios get results list
+ for s, s_result in scenario_results.items():
+ logger.info("---------------------------------")
+ logger.info("installer %s, version %s, scenario %s", installer,
+ version, s)
+ ten_criteria = len(s_result)
+
+ ten_score = 0
+ for v in s_result:
+ if "PASS" in v['criteria']:
+ ten_score += 1
+
+ logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
+
+ four_score = 0
+ try:
+ LASTEST_TESTS = rp_utils.get_config(
+ 'general.nb_iteration_tests_success_criteria')
+ s_result.sort(key=lambda x: x['start_date'])
+ four_result = s_result[-LASTEST_TESTS:]
+ logger.debug("four_result: {}".format(four_result))
+ logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
+ # logger.debug("four_result: {}".format(four_result))
+ four_criteria = len(four_result)
+ for v in four_result:
+ if "PASS" in v['criteria']:
+ four_score += 1
+ logger.info("4 Score: %s / %s " % (four_score,
+ four_criteria))
+ except Exception:
+ logger.error("Impossible to retrieve the four_score")
+
+ try:
+ s_status = (four_score * 100) / four_criteria
+ except ZeroDivisionError:
+ s_status = 0
+ logger.info("Score percent = %s" % str(s_status))
+ s_four_score = str(four_score) + '/' + str(four_criteria)
+ s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+ s_score_percent = str(s_status)
+
+ logger.debug(" s_status: {}".format(s_status))
+ if s_status == 100:
+ logger.info(">>>>> scenario OK, save the information")
+ else:
+ logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+ last 10 days = %s" % (s_four_score, s_ten_score))
+
+ s_url = ""
+ if len(s_result) > 0:
+ build_tag = s_result[len(s_result)-1]['build_tag']
+ logger.debug("Build tag: %s" % build_tag)
+ s_url = s_url = rp_utils.getJenkinsUrl(build_tag)
+ logger.info("last jenkins url: %s" % s_url)
+
+ # Save daily results in a file
+ path_validation_file = ("./display/" + version +
+ "/vsperf/scenario_history.txt")
+
+ if not os.path.exists(path_validation_file):
+ with open(path_validation_file, 'w') as f:
+ info = 'date,scenario,installer,details,score\n'
+ f.write(info)
+
+ with open(path_validation_file, "a") as f:
+ info = (reportingDate + "," + s + "," + installer +
+ "," + s_ten_score + "," +
+ str(s_score_percent) + "\n")
+ f.write(info)
+
+ scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+ s_four_score,
+ s_ten_score,
+ s_score_percent,
+ s_url)
+
+ logger.info("--------------------------")
+
+ templateLoader = jinja2.FileSystemLoader(".")
+ templateEnv = jinja2.Environment(loader=templateLoader,
+ autoescape=True)
+
+ TEMPLATE_FILE = "./reporting/vsperf/template/index-status-tmpl.html"
+ template = templateEnv.get_template(TEMPLATE_FILE)
+
+ outputText = template.render(scenario_results=scenario_result_criteria,
+ installer=installer,
+ period=PERIOD,
+ version=version,
+ date=reportingDate)
+
+ with open("./display/" + version +
+ "/vsperf/status-" + installer + ".html", "wb") as fh:
+ fh.write(outputText)
diff --git a/utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html b/utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html
new file mode 100644
index 000000000..7e06ef66b
--- /dev/null
+++ b/utils/test/reporting/reporting/vsperf/template/index-status-tmpl.html
@@ -0,0 +1,114 @@
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <!-- Bootstrap core CSS -->
+ <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+ <link href="../../css/default.css" rel="stylesheet">
+ <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+ <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+ <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+ <script type="text/javascript" src="../../js/gauge.js"></script>
+ <script type="text/javascript" src="../../js/trend.js"></script>
+ <script>
+ function onDocumentReady() {
+ // Gauge management
+ {% for scenario in scenario_results.keys() -%}
+ var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+ {%- endfor %}
+ // assign success rate to the gauge
+ function updateReadings() {
+ {% for scenario in scenario_results.keys() -%}
+ gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+ {%- endfor %}
+ }
+ updateReadings();
+ }
+
+ // trend line management
+ d3.csv("./scenario_history.txt", function(data) {
+ // ***************************************
+ // Create the trend line
+ {% for scenario in scenario_results.keys() -%}
+ // for scenario {{scenario}}
+ // Filter results
+ var trend{{loop.index}} = data.filter(function(row) {
+ return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+ })
+ // Parse the date
+ trend{{loop.index}}.forEach(function(d) {
+ d.date = parseDate(d.date);
+ d.score = +d.score
+ });
+ // Draw the trend line
+ var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+ // ****************************************
+ {%- endfor %}
+ });
+ if ( !window.isLoaded ) {
+ window.addEventListener("load", function() {
+ onDocumentReady();
+ }, false);
+ } else {
+ onDocumentReady();
+ }
+ </script>
+ <script type="text/javascript">
+ $(document).ready(function (){
+ $(".btn-more").click(function() {
+ $(this).hide();
+ $(this).parent().find(".panel-default").show();
+ });
+ })
+ </script>
+ </head>
+ <body>
+ <div class="container">
+ <div class="masthead">
+ <h3 class="text-muted">Vsperf status page ({{version}}, {{date}})</h3>
+ <nav>
+ <ul class="nav nav-justified">
+ <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+ <li><a href="status-apex.html">Apex</a></li>
+ <li><a href="status-compass.html">Compass</a></li>
+ <li><a href="status-fuel.html">Fuel</a></li>
+ <li><a href="status-joid.html">Joid</a></li>
+ </ul>
+ </nav>
+ </div>
+<div class="row">
+ <div class="col-md-1"></div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h2>{{installer}}</h2>
+ </div>
+ <div><h1>Reported values represent the percentage of completed
+
+ CI tests during the reporting period, where results
+
+ were communicated to the Test Database.</h1></div>
+ <div class="scenario-overview">
+ <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+ <table class="table">
+ <tr>
+ <th width="40%">Scenario</th>
+ <th width="20%">Status</th>
+ <th width="20%">Trend</th>
+ <th width="10%">Last 4 Iterations</th>
+ <th width="10%">Last 10 Days</th>
+ </tr>
+ {% for scenario,result in scenario_results.iteritems() -%}
+ <tr class="tr-ok">
+ <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
+ <td><div id="gaugeScenario{{loop.index}}"></div></td>
+ <td><div id="trend_svg{{loop.index}}"></div></td>
+ <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
+ <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+ </tr>
+ {%- endfor %}
+ </table>
+ </div>
+
+
+ </div>
+ <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/yardstick/reporting-status.py b/utils/test/reporting/reporting/yardstick/reporting-status.py
index 85c386bf1..6584f4e8d 100644
--- a/utils/test/reporting/reporting/yardstick/reporting-status.py
+++ b/utils/test/reporting/reporting/yardstick/reporting-status.py
@@ -7,14 +7,13 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
-import jinja2
import os
-import utils.scenarioResult as sr
-from scenarios import config as cf
+import jinja2
-# manage conf
-import utils.reporting_utils as rp_utils
+import reporting.utils.scenarioResult as sr
+import reporting.utils.reporting_utils as rp_utils
+from scenarios import config as cf
installers = rp_utils.get_config('general.installers')
versions = rp_utils.get_config('general.versions')
diff --git a/utils/test/testapi/.gitignore b/utils/test/testapi/.gitignore
index 00f8a03d0..86ec0d2d5 100644
--- a/utils/test/testapi/.gitignore
+++ b/utils/test/testapi/.gitignore
@@ -4,4 +4,4 @@ setup.cfg-e
opnfv_testapi/static
build
*.egg-info
-
+3rd_party/static/static
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profile.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profile.html
index dc97c41e2..763f5d120 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profile.html
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profile.html
@@ -3,9 +3,16 @@
<div>
<table class="table table-striped table-hover">
<tbody>
- <tr> <td>User name</td> <td>{{auth.currentUser.fullname}}</td> </tr>
- <tr> <td>User OpenId</td> <td>{{auth.currentUser.openid}}</td> </tr>
+ <tr> <td>User</td> <td>{{auth.currentUser.user}}</td> </tr>
+ <tr> <td>Fullname</td> <td>{{auth.currentUser.fullname}}</td> </tr>
<tr> <td>Email</td> <td>{{auth.currentUser.email}}</td> </tr>
+ <tr> <td>Groups</td>
+ <td>
+ <div ng-repeat="group in auth.currentUser.groups">
+ {{group}}</br>
+ </div>
+ </td>
+ </tr>
</tbody>
</table>
</div>
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profileController.js b/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profileController.js
index 0660e19f6..5dbdf7b1a 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profileController.js
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/components/profile/profileController.js
@@ -26,7 +26,7 @@
* This is a provider for the user's uploaded public keys.
*/
function PubKeys($resource, testapiApiUrl) {
- return $resource(testapiApiUrl + '/profile/pubkeys/:id', null, null);
+ return $resource(testapiApiUrl + '/user/pubkeys/:id', null, null);
}
angular
diff --git a/utils/test/testapi/etc/config.ini b/utils/test/testapi/etc/config.ini
index 1ec899fcb..db0e191d1 100644
--- a/utils/test/testapi/etc/config.ini
+++ b/utils/test/testapi/etc/config.ini
@@ -21,48 +21,6 @@ authenticate = False
[ui]
url = http://localhost:8000
-[osid]
-
-# OpenStackID Auth Server URI. (string value)
-openstack_openid_endpoint = https://openstackid.org/accounts/openid2
-
-# OpenStackID logout URI. (string value)
-openid_logout_endpoint = https://openstackid.org/accounts/user/logout
-
-# Interaction mode. Specifies whether Openstack Id IdP may interact
-# with the user to determine the outcome of the request. (string
-# value)
-openid_mode = checkid_setup
-
-# Protocol version. Value identifying the OpenID protocol version
-# being used. This value should be "http://specs.openid.net/auth/2.0".
-# (string value)
-openid_ns = http://specs.openid.net/auth/2.0
-
-# Return endpoint in Refstack's API. Value indicating the endpoint
-# where the user should be returned to after signing in. Openstack Id
-# Idp only supports HTTPS address types. (string value)
-openid_return_to = v1/auth/signin_return
-
-# Claimed identifier. This value must be set to
-# "http://specs.openid.net/auth/2.0/identifier_select". or to user
-# claimed identity (user local identifier or user owned identity [ex:
-# custom html hosted on a owned domain set to html discover]). (string
-# value)
-openid_claimed_id = http://specs.openid.net/auth/2.0/identifier_select
-
-# Alternate identifier. This value must be set to
-# http://specs.openid.net/auth/2.0/identifier_select. (string value)
-openid_identity = http://specs.openid.net/auth/2.0/identifier_select
-
-# Indicates request for user attribute information. This value must be
-# set to "http://openid.net/extensions/sreg/1.1". (string value)
-openid_ns_sreg = http://openid.net/extensions/sreg/1.1
-
-# Comma-separated list of field names which, if absent from the
-# response, will prevent the Consumer from completing the registration
-# without End User interation. The field names are those that are
-# specified in the Response Format, with the "openid.sreg." prefix
-# removed. Valid values include: "country", "email", "firstname",
-# "language", "lastname" (string value)
-openid_sreg_required = email,fullname
+[lfid]
+# Linux Foundation cas URL
+cas_url = https://identity.linuxfoundation.org/cas/
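[Note, not part of the patch] A minimal sketch of reading the new [lfid] option; the handlers changed below consume it through CONF.lfid_cas_url, following the existing section_option attribute convention. The parser-based access here is an illustration, not the project's config loader.

```python
# Illustrative only: the [lfid] section added above, read with a plain ConfigParser.
from six.moves import configparser

parser = configparser.ConfigParser()
parser.read('utils/test/testapi/etc/config.ini')
cas_url = parser.get('lfid', 'cas_url')
print(cas_url)  # https://identity.linuxfoundation.org/cas/
```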
diff --git a/utils/test/testapi/opnfv_testapi/common/check.py b/utils/test/testapi/opnfv_testapi/common/check.py
index 24ba876a9..009d3d46c 100644
--- a/utils/test/testapi/opnfv_testapi/common/check.py
+++ b/utils/test/testapi/opnfv_testapi/common/check.py
@@ -8,14 +8,49 @@
##############################################################################
import functools
+import cas
from tornado import gen
from tornado import web
+from opnfv_testapi.common import constants
from opnfv_testapi.common import message
from opnfv_testapi.common import raises
+from opnfv_testapi.common.config import CONF
from opnfv_testapi.db import api as dbapi
+def login(method):
+ @web.asynchronous
+ @gen.coroutine
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ ticket = self.get_query_argument('ticket', default=None)
+ if ticket:
+ client = cas.CASClient(version='2',
+ server_url=CONF.lfid_cas_url,
+ service_url=CONF.ui_url)
+ (user, attrs, _) = client.verify_ticket(ticket=ticket)
+ print 'login user: {}'.format(user)
+ login_user = {
+ 'user': user,
+ 'email': attrs.get('mail'),
+ 'fullname': attrs.get('field_lf_full_name'),
+ 'groups': constants.TESTAPI_USERS + attrs.get('group', [])
+ }
+ q_user = {'user': user}
+ db_user = yield dbapi.db_find_one(constants.USER_TABLE, q_user)
+ if not db_user:
+ dbapi.db_save(constants.USER_TABLE, login_user)
+ else:
+ dbapi.db_update(constants.USER_TABLE, q_user, login_user)
+
+ self.clear_cookie(constants.TESTAPI_ID)
+ self.set_secure_cookie(constants.TESTAPI_ID, user)
+ ret = yield gen.coroutine(method)(self, *args, **kwargs)
+ raise gen.Return(ret)
+ return wrapper
+
+
def authenticate(method):
@web.asynchronous
@gen.coroutine
diff --git a/utils/test/testapi/opnfv_testapi/common/constants.py b/utils/test/testapi/opnfv_testapi/common/constants.py
new file mode 100644
index 000000000..b37ebb3d6
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/common/constants.py
@@ -0,0 +1,5 @@
+TESTAPI_ID = 'testapi_id'
+CSRF_TOKEN = 'csrf_token'
+ROLE = 'role'
+TESTAPI_USERS = ['opnfv-testapi-users']
+USER_TABLE = 'users'
diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
index 9389d266d..e202f5c2c 100644
--- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
@@ -6,20 +6,20 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import logging
-from datetime import datetime
-from datetime import timedelta
import json
+import logging
from bson import objectid
+from datetime import datetime
+from datetime import timedelta
-from opnfv_testapi.common.config import CONF
+from opnfv_testapi.common import constants
from opnfv_testapi.common import message
from opnfv_testapi.common import raises
+from opnfv_testapi.common.config import CONF
from opnfv_testapi.resources import handlers
from opnfv_testapi.resources import result_models
from opnfv_testapi.tornado_swagger import swagger
-from opnfv_testapi.ui.auth import constants as auth_const
class GenericResultHandler(handlers.GenericApiHandler):
@@ -59,13 +59,12 @@ class GenericResultHandler(handlers.GenericApiHandler):
elif k == 'to':
date_range.update({'$lt': str(v)})
elif k == 'signed':
- openid = self.get_secure_cookie(auth_const.OPENID)
- role = self.get_secure_cookie(auth_const.ROLE)
- logging.info('role:%s', role)
+ username = self.get_secure_cookie(constants.TESTAPI_ID)
+ role = self.get_secure_cookie(constants.ROLE)
if role:
del query['public']
if role != "reviewer":
- query['user'] = openid
+ query['user'] = username
elif k not in ['last', 'page', 'descend']:
query[k] = v
if date_range:
@@ -246,7 +245,7 @@ class ResultsUploadHandler(ResultsCLHandler):
self.json_args = json.loads(fileinfo['body']).copy()
self.json_args['public'] = is_public
- openid = self.get_secure_cookie(auth_const.OPENID)
+ openid = self.get_secure_cookie(constants.TESTAPI_ID)
if openid:
self.json_args['user'] = openid
diff --git a/utils/test/testapi/opnfv_testapi/router/url_mappings.py b/utils/test/testapi/opnfv_testapi/router/url_mappings.py
index 3e3ab87aa..be6240e70 100644
--- a/utils/test/testapi/opnfv_testapi/router/url_mappings.py
+++ b/utils/test/testapi/opnfv_testapi/router/url_mappings.py
@@ -76,8 +76,7 @@ mappings = [
(r'/', root.RootHandler),
(r'/api/v1/auth/signin', sign.SigninHandler),
- (r'/api/v1/auth/signin_return', sign.SigninReturnHandler),
(r'/api/v1/auth/signout', sign.SignoutHandler),
- (r'/api/v1/profile', user.ProfileHandler),
+ (r'/api/v1/profile', user.UserHandler),
]
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini
deleted file mode 100644
index be7f2b9f8..000000000
--- a/utils/test/testapi/opnfv_testapi/tests/unit/common/noparam.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-# to add a new parameter in the config file,
-# the CONF object in config.ini must be updated
-[mongo]
-# URL of the mongo DB
-# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
-url = mongodb://127.0.0.1:27017/
-
-[api]
-# Listening port
-port = 8000
-# With debug_on set to true, error traces will be shown in HTTP responses
-debug = True
-authenticate = False
-
-[ui]
-url = http://localhost:8000
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini
deleted file mode 100644
index c81c6c56a..000000000
--- a/utils/test/testapi/opnfv_testapi/tests/unit/common/normal.ini
+++ /dev/null
@@ -1,17 +0,0 @@
-# to add a new parameter in the config file,
-# the CONF object in config.ini must be updated
-[mongo]
-# URL of the mongo DB
-# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
-url = mongodb://127.0.0.1:27017/
-dbname = test_results_collection
-
-[api]
-# Listening port
-port = 8000
-# With debug_on set to true, error traces will be shown in HTTP responses
-debug = True
-authenticate = False
-
-[ui]
-url = http://localhost:8000
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini
deleted file mode 100644
index a9ed49c5c..000000000
--- a/utils/test/testapi/opnfv_testapi/tests/unit/common/nosection.ini
+++ /dev/null
@@ -1,11 +0,0 @@
-# to add a new parameter in the config file,
-# the CONF object in config.ini must be updated
-[api]
-# Listening port
-port = 8000
-# With debug_on set to true, error traces will be shown in HTTP responses
-debug = True
-authenticate = False
-
-[ui]
-url = http://localhost:8000
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini
deleted file mode 100644
index 3a11f9dd3..000000000
--- a/utils/test/testapi/opnfv_testapi/tests/unit/common/notboolean.ini
+++ /dev/null
@@ -1,17 +0,0 @@
-# to add a new parameter in the config file,
-# the CONF object in config.ini must be updated
-[mongo]
-# URL of the mongo DB
-# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
-url = mongodb://127.0.0.1:27017/
-dbname = test_results_collection
-
-[api]
-# Listening port
-port = 8000
-# With debug_on set to true, error traces will be shown in HTTP responses
-debug = True
-authenticate = notboolean
-
-[ui]
-url = http://localhost:8000
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini b/utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini
deleted file mode 100644
index 8180719b8..000000000
--- a/utils/test/testapi/opnfv_testapi/tests/unit/common/notint.ini
+++ /dev/null
@@ -1,17 +0,0 @@
-# to add a new parameter in the config file,
-# the CONF object in config.ini must be updated
-[mongo]
-# URL of the mongo DB
-# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
-url = mongodb://127.0.0.1:27017/
-dbname = test_results_collection
-
-[api]
-# Listening port
-port = notint
-# With debug_on set to true, error traces will be shown in HTTP responses
-debug = True
-authenticate = False
-
-[ui]
-url = http://localhost:8000
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py b/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py
index feff1daaa..75e621d0e 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/conftest.py
@@ -5,4 +5,4 @@ import pytest
@pytest.fixture
def config_normal():
- return path.join(path.dirname(__file__), 'common/normal.ini')
+ return path.join(path.dirname(__file__), '../../../etc/config.ini')
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py
index 77a8d18c1..39633e5f5 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py
@@ -37,7 +37,8 @@ class TestBase(testing.AsyncHTTPTestCase):
def _patch_server(self):
import argparse
- config = path.join(path.dirname(__file__), '../common/normal.ini')
+ config = path.join(path.dirname(__file__),
+ '../../../../etc/config.ini')
self.config_patcher = mock.patch(
'argparse.ArgumentParser.parse_known_args',
return_value=(argparse.Namespace(config_file=config), None))
@@ -46,9 +47,6 @@ class TestBase(testing.AsyncHTTPTestCase):
self.config_patcher.start()
self.db_patcher.start()
- def set_config_file(self):
- self.config_file = 'normal.ini'
-
def get_app(self):
from opnfv_testapi.cmd import server
return server.make_app()
diff --git a/utils/test/testapi/opnfv_testapi/ui/auth/base.py b/utils/test/testapi/opnfv_testapi/ui/auth/base.py
deleted file mode 100644
index bea87c4d9..000000000
--- a/utils/test/testapi/opnfv_testapi/ui/auth/base.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import random
-import string
-
-from six.moves.urllib import parse
-
-from opnfv_testapi.resources import handlers
-
-
-class BaseHandler(handlers.GenericApiHandler):
- def __init__(self, application, request, **kwargs):
- super(BaseHandler, self).__init__(application, request, **kwargs)
- self.table = 'users'
-
- def set_cookies(self, cookies):
- for cookie_n, cookie_v in cookies:
- self.set_secure_cookie(cookie_n, cookie_v)
-
-
-def get_token(length=30):
- """Get random token."""
- return ''.join(random.choice(string.ascii_lowercase)
- for i in range(length))
-
-
-def set_query_params(url, params):
- """Set params in given query."""
- url_parts = parse.urlparse(url)
- url = parse.urlunparse((
- url_parts.scheme,
- url_parts.netloc,
- url_parts.path,
- url_parts.params,
- parse.urlencode(params),
- url_parts.fragment))
- return url
diff --git a/utils/test/testapi/opnfv_testapi/ui/auth/constants.py b/utils/test/testapi/opnfv_testapi/ui/auth/constants.py
deleted file mode 100644
index 44ccb46d7..000000000
--- a/utils/test/testapi/opnfv_testapi/ui/auth/constants.py
+++ /dev/null
@@ -1,18 +0,0 @@
-OPENID = 'openid'
-ROLE = 'role'
-DEFAULT_ROLE = 'user'
-
-# OpenID parameters
-OPENID_MODE = 'openid.mode'
-OPENID_NS = 'openid.ns'
-OPENID_RETURN_TO = 'openid.return_to'
-OPENID_CLAIMED_ID = 'openid.claimed_id'
-OPENID_IDENTITY = 'openid.identity'
-OPENID_REALM = 'openid.realm'
-OPENID_NS_SREG = 'openid.ns.sreg'
-OPENID_NS_SREG_REQUIRED = 'openid.sreg.required'
-OPENID_NS_SREG_EMAIL = 'openid.sreg.email'
-OPENID_NS_SREG_FULLNAME = 'openid.sreg.fullname'
-OPENID_ERROR = 'openid.error'
-
-CSRF_TOKEN = 'csrf_token'
diff --git a/utils/test/testapi/opnfv_testapi/ui/auth/sign.py b/utils/test/testapi/opnfv_testapi/ui/auth/sign.py
index 462395225..01cd0f7c3 100644
--- a/utils/test/testapi/opnfv_testapi/ui/auth/sign.py
+++ b/utils/test/testapi/opnfv_testapi/ui/auth/sign.py
@@ -1,76 +1,22 @@
-from six.moves.urllib import parse
-from tornado import gen
-from tornado import web
+from cas import CASClient
+from opnfv_testapi.common import constants
from opnfv_testapi.common.config import CONF
-from opnfv_testapi.db import api as dbapi
-from opnfv_testapi.ui.auth import base
-from opnfv_testapi.ui.auth import constants as const
+from opnfv_testapi.resources import handlers
-class SigninHandler(base.BaseHandler):
+class SigninHandler(handlers.GenericApiHandler):
def get(self):
- csrf_token = base.get_token()
- return_endpoint = parse.urljoin(CONF.api_url,
- CONF.osid_openid_return_to)
- return_to = base.set_query_params(return_endpoint,
- {const.CSRF_TOKEN: csrf_token})
+ client = CASClient(version='2',
+ server_url=CONF.lfid_cas_url,
+ service_url=CONF.ui_url)
+ self.redirect(url=(client.get_login_url()))
- params = {
- const.OPENID_MODE: CONF.osid_openid_mode,
- const.OPENID_NS: CONF.osid_openid_ns,
- const.OPENID_RETURN_TO: return_to,
- const.OPENID_CLAIMED_ID: CONF.osid_openid_claimed_id,
- const.OPENID_IDENTITY: CONF.osid_openid_identity,
- const.OPENID_REALM: CONF.api_url,
- const.OPENID_NS_SREG: CONF.osid_openid_ns_sreg,
- const.OPENID_NS_SREG_REQUIRED: CONF.osid_openid_sreg_required,
- }
- url = CONF.osid_openstack_openid_endpoint
- url = base.set_query_params(url, params)
- self.redirect(url=url, permanent=False)
-
-class SigninReturnHandler(base.BaseHandler):
- @web.asynchronous
- @gen.coroutine
- def get(self):
- if self.get_query_argument(const.OPENID_MODE) == 'cancel':
- self._auth_failure('Authentication canceled.')
-
- openid = self.get_query_argument(const.OPENID_CLAIMED_ID)
- role = const.DEFAULT_ROLE
- new_user_info = {
- 'openid': openid,
- 'email': self.get_query_argument(const.OPENID_NS_SREG_EMAIL),
- 'fullname': self.get_query_argument(const.OPENID_NS_SREG_FULLNAME),
- const.ROLE: role
- }
- user = yield dbapi.db_find_one(self.table, {'openid': openid})
- if not user:
- dbapi.db_save(self.table, new_user_info)
- else:
- role = user.get(const.ROLE)
-
- self.clear_cookie(const.OPENID)
- self.clear_cookie(const.ROLE)
- self.set_secure_cookie(const.OPENID, openid)
- self.set_secure_cookie(const.ROLE, role)
- self.redirect(url=CONF.ui_url)
-
- def _auth_failure(self, message):
- params = {'message': message}
- url = parse.urljoin(CONF.ui_url,
- '/#/auth_failure?' + parse.urlencode(params))
- self.redirect(url)
-
-
-class SignoutHandler(base.BaseHandler):
+class SignoutHandler(handlers.GenericApiHandler):
def get(self):
"""Handle signout request."""
- self.clear_cookie(const.OPENID)
- self.clear_cookie(const.ROLE)
- params = {'openid_logout': CONF.osid_openid_logout_endpoint}
- url = parse.urljoin(CONF.ui_url,
- '/#/logout?' + parse.urlencode(params))
- self.redirect(url)
+ self.clear_cookie(constants.TESTAPI_ID)
+ client = CASClient(version='2',
+ server_url=CONF.lfid_cas_url)
+ self.redirect(url=(client.get_logout_url(redirect_url=CONF.ui_url)))
diff --git a/utils/test/testapi/opnfv_testapi/ui/auth/user.py b/utils/test/testapi/opnfv_testapi/ui/auth/user.py
index 955cdeead..ab86007f1 100644
--- a/utils/test/testapi/opnfv_testapi/ui/auth/user.py
+++ b/utils/test/testapi/opnfv_testapi/ui/auth/user.py
@@ -1,25 +1,26 @@
-from tornado import gen
-from tornado import web
-
+from opnfv_testapi.common import constants
from opnfv_testapi.common import raises
-from opnfv_testapi.db import api as dbapi
-from opnfv_testapi.ui.auth import base
+from opnfv_testapi.resources import handlers
+from opnfv_testapi.resources import models
+
+
+class User(models.ModelBase):
+ def __init__(self, user=None, email=None, fullname=None, groups=None):
+ self.user = user
+ self.email = email
+ self.fullname = fullname
+ self.groups = groups
+
+class UserHandler(handlers.GenericApiHandler):
+ def __init__(self, application, request, **kwargs):
+ super(UserHandler, self).__init__(application, request, **kwargs)
+ self.table = 'users'
+ self.table_cls = User
-class ProfileHandler(base.BaseHandler):
- @web.asynchronous
- @gen.coroutine
def get(self):
- openid = self.get_secure_cookie('openid')
- if openid:
- try:
- user = yield dbapi.db_find_one(self.table, {'openid': openid})
- self.finish_request({
- "openid": user.get('openid'),
- "email": user.get('email'),
- "fullname": user.get('fullname'),
- "role": user.get('role', 'user')
- })
- except Exception:
- pass
- raises.Unauthorized('Unauthorized')
+ username = self.get_secure_cookie(constants.TESTAPI_ID)
+ if username:
+ self._get_one(query={'user': username})
+ else:
+ raises.Unauthorized('Unauthorized')
diff --git a/utils/test/testapi/opnfv_testapi/ui/root.py b/utils/test/testapi/opnfv_testapi/ui/root.py
index 5b2c922d7..069ad5e93 100644
--- a/utils/test/testapi/opnfv_testapi/ui/root.py
+++ b/utils/test/testapi/opnfv_testapi/ui/root.py
@@ -1,10 +1,12 @@
-from opnfv_testapi.resources.handlers import GenericApiHandler
+from opnfv_testapi.common import check
from opnfv_testapi.common.config import CONF
+from opnfv_testapi.resources import handlers
-class RootHandler(GenericApiHandler):
+class RootHandler(handlers.GenericApiHandler):
def get_template_path(self):
return CONF.static_path
+ @check.login
def get(self):
self.render('testapi-ui/index.html')
diff --git a/utils/test/testapi/requirements.txt b/utils/test/testapi/requirements.txt
index 4b6f75c10..fbd2e0ede 100644
--- a/utils/test/testapi/requirements.txt
+++ b/utils/test/testapi/requirements.txt
@@ -8,3 +8,4 @@ tornado>=3.1,<=4.3 # Apache-2.0
epydoc>=0.3.1
six>=1.9.0 # MIT
motor # Apache-2.0
+python-cas
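[Note, not part of the patch] A minimal sketch of the CAS round-trip that the new python-cas dependency enables, using the same CASClient calls as check.login, sign.SigninHandler and sign.SignoutHandler above. The server and service URLs are the defaults from etc/config.ini; the ticket value is a placeholder.

```python
# Illustrative only: how the handlers above drive python-cas.
from cas import CASClient

client = CASClient(version='2',
                   server_url='https://identity.linuxfoundation.org/cas/',
                   service_url='http://localhost:8000')

login_url = client.get_login_url()    # the browser is redirected here to sign in
# After signing in, CAS redirects back with ?ticket=ST-..., which is then validated:
user, attrs, _ = client.verify_ticket('ST-example-ticket')   # placeholder ticket
logout_url = client.get_logout_url(redirect_url='http://localhost:8000')
```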