Diffstat (limited to 'utils')
18 files changed, 423 insertions, 157 deletions
diff --git a/utils/jenkins-jnlp-connect.sh b/utils/jenkins-jnlp-connect.sh
index 8c41620d6..d268a28de 100755
--- a/utils/jenkins-jnlp-connect.sh
+++ b/utils/jenkins-jnlp-connect.sh
@@ -48,6 +48,14 @@ main () {
         exit 1
     fi
 
+    if [[ $(whoami) != "root" ]]; then
+        if grep "^Defaults requiretty" /etc/sudoers
+        then echo "please comment out Defaults requiretty from /etc/sudoers"
+            exit 1
+        fi
+    fi
+
+
     if [ -d /etc/monit/conf.d ]; then
         monitconfdir="/etc/monit/conf.d/"
     elif [ -d /etc/monit.d ]; then
@@ -87,7 +95,7 @@ main () {
             echo "Writing the following as monit config:"
         cat << EOF | tee $monitconfdir/jenkins
 check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
-start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@'"
+start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@' with timeout 60 seconds"
 stop program = "/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'"
 EOF
         }
@@ -96,7 +104,7 @@ EOF
         #test for diff
         if [[ "$(diff $monitconfdir/jenkins <(echo "\
 check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
-start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@'\"
+start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $dir; export started_monit=true; $0 $@' with timeout 60 seconds\"
 stop program = \"/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'\"\
 ") )" ]]; then
             echo "Updating monit config..."
diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh
index 7486adb40..964b41908 100644
--- a/utils/push-test-logs.sh
+++ b/utils/push-test-logs.sh
@@ -17,7 +17,7 @@ res_build_date=${1:-$(date -u +"%Y-%m-%d_%H-%M-%S")}
 project=$PROJECT
 branch=${GIT_BRANCH##*/}
 testbed=$NODE_NAME
-dir_result="${HOME}/opnfv/$project/results"
+dir_result="${HOME}/opnfv/$project/results/${branch}"
 # src: https://wiki.opnfv.org/display/INF/Hardware+Infrastructure
 # + intel-pod3 (vsperf)
 node_list=(\
diff --git a/utils/retention_script.sh b/utils/retention_script.sh
new file mode 100755
index 000000000..7e50623ca
--- /dev/null
+++ b/utils/retention_script.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 The Linux Foundation and others
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################################################
+
+PATH=$PATH:/usr/local/bin/
+
+#These are the only projects that generate artifacts
+for x in armband ovsnfv fuel apex compass4nfv
+do
+
+    echo "Looking at artifacts for project $x"
+
+    while IFS= read -r artifact; do
+
+        artifact_date="$(gsutil ls -L $artifact | grep "Creation time:" | awk '{print $4,$5,$6}')"
+        age=$(($(date +%s)-$(date -d"$artifact_date" +%s)))
+        daysold=$(($age/86400))
+
+        if [[ "$daysold" -gt "10" ]]; then
+            echo "$daysold Days old deleting: $(basename $artifact)"
+        else
+            echo "$daysold Days old retaining: $(basename $artifact)"
+        fi
+
+    done < <(gsutil ls gs://artifacts.opnfv.org/"$x" |grep -v "/$")
+done
diff --git a/utils/test-sign-artifact.sh b/utils/test-sign-artifact.sh
new file mode 100755
index 000000000..f09b7f4e2
--- /dev/null
+++ b/utils/test-sign-artifact.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+export PATH=$PATH:/usr/local/bin/
+
+# clone releng repository
+echo "Cloning releng repository..."
+[ -d releng ] && rm -rf releng
+git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
+#this is where we import the siging key
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
+    source $WORKSPACE/releng/utils/gpg_import_key.sh
+fi
+
+artifact="foo"
+echo foo > foo
+
+testsign () {
+    echo "Signing artifact: ${artifact}"
+    gpg2 -vvv --batch \
+        --default-key opnfv-helpdesk@rt.linuxfoundation.org \
+        --passphrase besteffort \
+        --detach-sig $artifact
+}
+
+testsign
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index adbee36aa..622c375cc 100644
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -8,8 +8,6 @@
 #
 import datetime
 import jinja2
-import logging
-import os
 import requests
 import sys
 import time
@@ -21,17 +19,7 @@ import testCase as tc
 import scenarioResult as sr
 
 # Logger
-logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
-logger = logging.getLogger()
-
-fileHandler = logging.FileHandler("{0}/{1}".format('.', conf.LOG_FILE))
-fileHandler.setFormatter(logFormatter)
-logger.addHandler(fileHandler)
-
-consoleHandler = logging.StreamHandler()
-consoleHandler.setFormatter(logFormatter)
-logger.addHandler(consoleHandler)
-logger.setLevel(conf.LOG_LEVEL)
+logger = utils.getLogger("Status")
 
 # Initialization
 testValid = []
@@ -48,11 +36,11 @@ response = requests.get(cf)
 
 functest_yaml_config = yaml.load(response.text)
 
-logger.info("****************************************")
-logger.info("*   Generating reporting.....          *")
-logger.info("*   Data retention = %s days           *" % conf.PERIOD)
-logger.info("*                                      *")
-logger.info("****************************************")
+logger.info("*******************************************")
+logger.info("*   Generating reporting scenario status  *")
+logger.info("*   Data retention = %s days              *" % conf.PERIOD)
+logger.info("*                                         *")
+logger.info("*******************************************")
 
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
@@ -111,17 +99,22 @@ for version in conf.versions:
             for test_case in testValid:
                 test_case.checkRunnable(installer, s,
                                         test_case.getConstraints())
-                logger.debug("testcase %s is %s" % (test_case.getName(),
-                                                    test_case.isRunnable))
+                logger.debug("testcase %s is %s" %
+                             (test_case.getDisplayName(),
+                              test_case.isRunnable))
                 time.sleep(1)
                 if test_case.isRunnable:
                     dbName = test_case.getDbName()
                     name = test_case.getName()
+                    displayName = test_case.getDisplayName()
                     project = test_case.getProject()
                     nb_test_runnable_for_this_scenario += 1
                     logger.info(" Searching results for case %s " %
-                                (dbName))
+                                (displayName))
                     result = utils.getResult(dbName, installer, s, version)
+                    # if no result set the value to 0
+                    if result < 0:
+                        result = 0
                     logger.info(" >>>> Test score = " + str(result))
                     test_case.setCriteria(result)
                     test_case.setIsRunnable(True)
@@ -144,18 +137,23 @@ for version in conf.versions:
                 if test_case.isRunnable:
                     dbName = test_case.getDbName()
                     name = test_case.getName()
+                    displayName = test_case.getDisplayName()
                     project = test_case.getProject()
                     logger.info(" Searching results for case %s " %
-                                (dbName))
+                                (displayName))
                     result = utils.getResult(dbName, installer, s, version)
-                    test_case.setCriteria(result)
-                    test_case.setIsRunnable(True)
-                    testCases2BeDisplayed.append(tc.TestCase(name,
-                                                             project,
-                                                             "",
-                                                             result,
-                                                             True,
-                                                             4))
+                    # at least 1 result for the test
+                    if result > -1:
+                        test_case.setCriteria(result)
+                        test_case.setIsRunnable(True)
+                        testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                 project,
+                                                                 "",
+                                                                 result,
+                                                                 True,
+                                                                 4))
+                    else:
+                        logger.debug("No results found")
 
             items[s] = testCases2BeDisplayed
         except:
@@ -182,7 +180,7 @@ for version in conf.versions:
             else:
                 logger.info(">>>>> scenario OK, save the information")
                 s_status = "OK"
-                path_validation_file = ("./release/" + version +
+                path_validation_file = (conf.REPORTING_PATH + "/release/" + version +
                                         "/validated_scenario_history.txt")
                 with open(path_validation_file, "a") as f:
                     time_format = "%Y-%m-%d %H:%M"
@@ -193,12 +191,10 @@ for version in conf.versions:
             scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
             logger.info("--------------------------")
 
-        templateLoader = jinja2.FileSystemLoader(os.path.dirname
-                                                 (os.path.abspath
-                                                  (__file__)))
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
         templateEnv = jinja2.Environment(loader=templateLoader)
 
-        TEMPLATE_FILE = "./template/index-status-tmpl.html"
+        TEMPLATE_FILE = "/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_stats=scenario_stats,
@@ -208,6 +204,6 @@ for version in conf.versions:
                                      period=conf.PERIOD,
                                      version=version)
 
-        with open("./release/" + version +
+        with open(conf.REPORTING_PATH + "/release/" + version +
                   "/index-status-" + installer + ".html", "wb") as fh:
             fh.write(outputText)
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
index a065ef442..e3f4e3306 100644
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ b/utils/test/reporting/functest/reporting-tempest.py
@@ -1,28 +1,44 @@
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
 
-installers = ["apex", "compass", "fuel", "joid"]
+installers = conf.installers
 items = ["tests", "Success rate", "duration"]
 
-PERIOD = 7
-print "Generate Tempest automatic reporting"
+PERIOD = conf.PERIOD
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("*   Generating reporting Tempest_smoke_serial  *")
+logger.info("*   Data retention = %s days                   *" % PERIOD)
+logger.info("*                                              *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
+
 for installer in installers:
     # we consider the Tempest results of the last PERIOD days
-    url = "http://testresults.opnfv.org/test/api/v1/results?case=tempest_smoke_serial"
-    request = Request(url + '&period=' + str(PERIOD)
-                      + '&installer=' + installer + '&version=master')
-
+    url = conf.URL_BASE + "?case=tempest_smoke_serial"
+    request = Request(url + '&period=' + str(PERIOD) +
+                      '&installer=' + installer + '&version=master')
+    logger.info("Search tempest_smoke_serial results for installer %s"
+                % installer)
     try:
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
     except URLError, e:
-        print 'No kittez. Got an error code:', e
+        logger.error("Error code: %s" % e)
 
     test_results = results['results']
-    test_results.reverse()
 
     scenario_results = {}
     criteria = {}
@@ -48,8 +64,8 @@ for installer in installers:
             nb_tests_run = result['details']['tests']
             nb_tests_failed = result['details']['failures']
             if nb_tests_run != 0:
-                success_rate = 100*(int(nb_tests_run) -
-                                    int(nb_tests_failed))/int(nb_tests_run)
+                success_rate = 100*(int(nb_tests_run) -
+                                    int(nb_tests_failed)) / int(nb_tests_run)
             else:
                 success_rate = 0
 
@@ -63,40 +79,49 @@ for installer in installers:
             crit_time = False
 
             # Expect that at least 165 tests are run
-            if nb_tests_run >= 165:
+            if nb_tests_run >= criteria_nb_test:
                 crit_tests = True
 
             # Expect that at least 90% of success
-            if success_rate >= 90:
+            if success_rate >= criteria_success_rate:
                 crit_rate = True
 
             # Expect that the suite duration is inferior to 30m
-            if result['details']['duration'] < 1800:
+            if result['details']['duration'] < criteria_duration:
                 crit_time = True
 
             result['criteria'] = {'tests': crit_tests,
                                   'Success rate': crit_rate,
                                   'duration': crit_time}
-            # error management
+            try:
+                logger.debug("Scenario %s, Installer %s"
+                             % (s_result[1]['scenario'], installer))
+                logger.debug("Nb Test run: %s" % nb_tests_run)
+                logger.debug("Test duration: %s"
+                             % result['details']['duration'])
+                logger.debug("Success rate: %s" % success_rate)
+            except:
+                logger.error("Data format error")
+
+            # Error management
             # ****************
             try:
                 errors = result['details']['errors']
                 result['errors'] = errors.replace('{0}', '')
             except:
-                print "Error field not present (Brahamputra runs?)"
+                logger.error("Error field not present (Brahamputra runs?)")
 
-    mypath = os.path.abspath(__file__)
-    tplLoader = jinja2.FileSystemLoader(os.path.dirname(mypath))
-    templateEnv = jinja2.Environment(loader=tplLoader)
+    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+    templateEnv = jinja2.Environment(loader=templateLoader)
 
-    TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
+    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
     template = templateEnv.get_template(TEMPLATE_FILE)
 
     outputText = template.render(scenario_results=scenario_results,
                                  items=items,
                                  installer=installer)
 
-    with open("./release/master/index-tempest-" +
+    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
               installer + ".html", "wb") as fh:
         fh.write(outputText)
-print "Tempest automatic reporting Done"
+logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
index 4033687e8..d0436ed14 100644
--- a/utils/test/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/functest/reporting-vims.py
@@ -1,7 +1,11 @@
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
+
+logger = utils.getLogger("vIMS")
+
 
 def sig_test_format(sig_test):
     nbPassed = 0
@@ -9,7 +13,7 @@ def sig_test_format(sig_test):
     nbSkipped = 0
     for data_test in sig_test:
         if data_test['result'] == "Passed":
-            nbPassed+= 1
+            nbPassed += 1
         elif data_test['result'] == "Failed":
             nbFailures += 1
         elif data_test['result'] == "Skipped":
@@ -20,21 +24,29 @@ def sig_test_format(sig_test):
     total_sig_test_result['skipped'] = nbSkipped
     return total_sig_test_result
 
-installers = ["fuel", "compass", "joid", "apex"]
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("****************************************")
+logger.info("*   Generating reporting vIMS          *")
+logger.info("*   Data retention = %s days           *" % conf.PERIOD)
+logger.info("*                                      *")
+logger.info("****************************************")
 
+installers = conf.installers
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("Start processing....")
 for installer in installers:
-    request = Request('http://testresults.opnfv.org/test/api/v1/results?case=vims&installer=' + installer)
+    logger.info("Search vIMS results for installer %s" % installer)
+    request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
 
     try:
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
     except URLError, e:
-        print 'No kittez. Got an error code:', e
+        logger.error("Error code: %s" % e)
 
     test_results = results['results']
-    test_results.reverse()
+
+    logger.debug("Results found: %s" % test_results)
 
     scenario_results = {}
     for r in test_results:
@@ -44,6 +56,7 @@ for installer in installers:
 
     for s, s_result in scenario_results.items():
         scenario_results[s] = s_result[0:5]
+        logger.debug("Search for success criteria")
         for result in scenario_results[s]:
             result["start_date"] = result["start_date"].split(".")[0]
             sig_test = result['details']['sig_test']['result']
@@ -67,17 +80,34 @@ for installer in installers:
             result['pr_step_ok'] = 0
             if nb_step != 0:
                 result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
-
-
-    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
-    templateEnv = jinja2.Environment( loader=templateLoader )
-
-    TEMPLATE_FILE = "./template/index-vims-tmpl.html"
-    template = templateEnv.get_template( TEMPLATE_FILE )
-
-    outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer)
-
-    with open("./release/master/index-vims-" + installer + ".html", "wb") as fh:
+            try:
+                logger.debug("Scenario %s, Installer %s"
+                             % (s_result[1]['scenario'], installer))
+                logger.debug("Orchestrator deployment: %s s"
+                             % result['details']['orchestrator']['duration'])
+                logger.debug("vIMS deployment: %s s"
+                             % result['details']['vIMS']['duration'])
+                logger.debug("Signaling testing: %s s"
+                             % result['details']['sig_test']['duration'])
+                logger.debug("Signaling testing results: %s"
+                             % format_result)
+            except:
+                logger.error("Data badly formatted")
+            logger.debug("------------------------------------------------")
+
+    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+    templateEnv = jinja2.Environment(loader=templateLoader)
+
+    TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+    template = templateEnv.get_template(TEMPLATE_FILE)
+
+    outputText = template.render(scenario_results=scenario_results,
+                                 step_order=step_order,
+                                 installer=installer)
+
+    with open(conf.REPORTING_PATH +
+              "/release/master/index-vims-" +
+              installer + ".html", "wb") as fh:
         fh.write(outputText)
-
+logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/reportingConf.py b/utils/test/reporting/functest/reportingConf.py
index 61410b414..a58eeecc9 100644
--- a/utils/test/reporting/functest/reportingConf.py
+++ b/utils/test/reporting/functest/reportingConf.py
@@ -13,14 +13,16 @@ installers = ["apex", "compass", "fuel", "joid"]
 # installers = ["apex"]
 # list of test cases declared in testcases.yaml but that must not be
 # taken into account for the scoring
-blacklist = ["odl", "ovno", "security_scan"]
+blacklist = ["odl", "ovno", "security_scan", "copper", "moon"]
 # versions = ["brahmaputra", "master"]
 versions = ["master"]
 PERIOD = 10
 MAX_SCENARIO_CRITERIA = 18
 # get the last 5 test results to determinate the success criteria
 NB_TESTS = 5
+# REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
+REPORTING_PATH = "."
 URL_BASE = 'http://testresults.opnfv.org/test/api/v1/results'
 TEST_CONF = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
-LOG_LEVEL = "INFO"
-LOG_FILE = "reporting.log"
+LOG_LEVEL = "ERROR"
+LOG_FILE = REPORTING_PATH + "/reporting.log"
diff --git a/utils/test/reporting/functest/reportingUtils.py b/utils/test/reporting/functest/reportingUtils.py
index 2f06b8449..5051ffa95 100644
--- a/utils/test/reporting/functest/reportingUtils.py
+++ b/utils/test/reporting/functest/reportingUtils.py
@@ -7,8 +7,26 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 from urllib2 import Request, urlopen, URLError
+import logging
 import json
-import reportingConf
+import reportingConf as conf
+
+
+def getLogger(module):
+    logFormatter = logging.Formatter("%(asctime)s [" +
+                                     module +
+                                     "] [%(levelname)-5.5s] %(message)s")
+    logger = logging.getLogger()
+
+    fileHandler = logging.FileHandler("{0}/{1}".format('.', conf.LOG_FILE))
+    fileHandler.setFormatter(logFormatter)
+    logger.addHandler(fileHandler)
+
+    consoleHandler = logging.StreamHandler()
+    consoleHandler.setFormatter(logFormatter)
+    logger.addHandler(consoleHandler)
+    logger.setLevel(conf.LOG_LEVEL)
+    return logger
 
 
 def getApiResults(case, installer, scenario, version):
@@ -19,10 +37,10 @@ def getApiResults(case, installer, scenario, version):
     # urllib2.install_opener(opener)
     # url = "http://127.0.0.1:8000/results?case=" + case + \
     #       "&period=30&installer=" + installer
-    url = (reportingConf.URL_BASE + "?case=" + case +
-           "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+    url = (conf.URL_BASE + "?case=" + case +
+           "&period=" + str(conf.PERIOD) + "&installer=" + installer +
            "&scenario=" + scenario + "&version=" + version +
-           "&last=" + str(reportingConf.NB_TESTS))
+           "&last=" + str(conf.NB_TESTS))
     request = Request(url)
 
     try:
@@ -38,9 +56,8 @@ def getScenarios(case, installer, version):
 
     case = case.getName()
-    print case
-    url = (reportingConf.URL_BASE + "?case=" + case +
-           "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+    url = (conf.URL_BASE + "?case=" + case +
+           "&period=" + str(conf.PERIOD) + "&installer=" + installer +
            "&version=" + version)
     request = Request(url)
 
@@ -115,11 +132,16 @@ def getResult(testCase, installer, scenario, version):
     #      2: <4 successful consecutive runs but passing the criteria
     #      1: close to pass the success criteria
     #      0: 0% success, not passing
+    #      -1: no run available
     test_result_indicator = 0
     nbTestOk = getNbtestOk(scenario_results)
+    # print "Nb test OK (last 10 days):"+ str(nbTestOk)
     # check that we have at least 4 runs
-    if nbTestOk < 1:
+    if len(scenario_results) < 1:
+        # No results available
+        test_result_indicator = -1
+    elif nbTestOk < 1:
         test_result_indicator = 0
     elif nbTestOk < 2:
         test_result_indicator = 1
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index 89a1d1527..0c3fa9426 100644
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ b/utils/test/reporting/functest/template/index-status-tmpl.html
@@ -76,7 +76,7 @@
                                 {% for test in items[scenario] -%}
                                     <th>
                                     {% if test.getCriteria() > -1 -%}
-                                        {{test.getDbName() }}
+                                        {{test.getDisplayName() }}
                                     {%- endif %}
                                     {% if test.getTier() > 3 -%}
                                         *
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index f0e8f5995..e19853a09 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -19,6 +19,28 @@ class TestCase(object):
         self.criteria = criteria
         self.isRunnable = isRunnable
         self.tier = tier
+        display_name_matrix = {'healthcheck': 'healthcheck',
+                               'vping_ssh': 'vPing (ssh)',
+                               'vping_userdata': 'vPing (userdata)',
+                               'odl': 'ODL',
+                               'onos': 'ONOS',
+                               'ocl': 'OCL',
+                               'tempest_smoke_serial': 'Tempest (smoke)',
+                               'tempest_full_parallel': 'Tempest (full)',
+                               'rally_sanity': 'Rally (smoke)',
+                               'bgpvpn': 'bgpvpn',
+                               'rally_full': 'Rally (full)',
+                               'vims': 'vIMS',
+                               'doctor': 'Doctor',
+                               'promise': 'Promise',
+                               'moon': 'moon',
+                               'copper': 'copper',
+                               'security_scan': 'security'
+                               }
+        try:
+            self.displayName = display_name_matrix[self.name]
+        except:
+            self.displayName = "unknown"
 
     def getName(self):
         return self.name
@@ -74,10 +96,10 @@ class TestCase(object):
         self.isRunnable = is_runnable
 
     def toString(self):
-        testcase = ("Name=" + self.name + ";Criteria=" + str(self.criteria)
-                    + ";Project=" + self.project + ";Constraints="
-                    + str(self.constraints) + ";IsRunnable"
-                    + str(self.isRunnable))
+        testcase = ("Name=" + self.name + ";Criteria=" +
+                    str(self.criteria) + ";Project=" + self.project +
+                    ";Constraints=" + str(self.constraints) +
+                    ";IsRunnable" + str(self.isRunnable))
         return testcase
 
     def getDbName(self):
@@ -98,31 +120,15 @@ class TestCase(object):
                              'rally_full': 'rally_full',
                              'vims': 'vims',
                              'doctor': 'doctor-notification',
-                             'promise': 'promise'
+                             'promise': 'promise',
+                             'moon': 'moon',
+                             'copper': 'copper',
+                             'security_scan': 'security'
                              }
         try:
             return test_match_matrix[self.name]
         except:
             return "unknown"
 
-    def getTestDisplayName(self):
-        # Correspondance name of the test case / name in the DB
-        test_match_matrix = {'healthcheck': 'healthcheck',
-                             'vping_ssh': 'vPing (ssh)',
-                             'vping_userdata': 'vPing (userdata)',
-                             'odl': 'ODL',
-                             'onos': 'ONOS',
-                             'ocl': 'OCL',
-                             'tempest_smoke_serial': 'Tempest (smoke)',
-                             'tempest_full_parallel': 'Tempest (full)',
-                             'rally_sanity': 'Rally (smoke)',
-                             'bgpvpn': 'bgpvpn',
-                             'rally_full': 'Rally (full)',
-                             'vims': 'vIMS',
-                             'doctor': 'Doctor',
-                             'promise': 'Promise'
-                             }
-        try:
-            return test_match_matrix[self.name]
-        except:
-            return "unknown"
+    def getDisplayName(self):
+        return self.displayName
diff --git a/utils/test/result_collection_api/etc/config.ini b/utils/test/result_collection_api/etc/config.ini
index 16346bf36..0edb73a3f 100644
--- a/utils/test/result_collection_api/etc/config.ini
+++ b/utils/test/result_collection_api/etc/config.ini
@@ -13,4 +13,4 @@ port = 8000
 debug = True
 
 [swagger]
-base_url = http://testresults.opnfv.org/test
\ No newline at end of file
+base_url = http://localhost:8000
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
index 873701103..f98c35e8f 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
@@ -198,9 +198,8 @@ class GenericApiHandler(RequestHandler):
             comparing values
         """
         if not (new_value is None):
-            if len(new_value) > 0:
-                if new_value != old_value:
-                    edit_request[key] = new_value
+            if new_value != old_value:
+                edit_request[key] = new_value
         return edit_request
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
index 5198ba355..400b84ac1 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
@@ -45,7 +45,7 @@ class GenericResultHandler(GenericApiHandler):
                 obj = {"$gte": str(period)}
                 query['start_date'] = obj
             elif k == 'trust_indicator':
-                query[k] = float(v)
+                query[k + '.current'] = float(v)
             elif k != 'last':
                 query[k] = v
         return query
@@ -116,8 +116,8 @@ class ResultsCLHandler(GenericResultHandler):
             @type last: L{string}
             @in last: query
             @required last: False
-            @param trust_indicator: must be int/long/float
-            @type trust_indicator: L{string}
+            @param trust_indicator: must be float
+            @type trust_indicator: L{float}
             @in trust_indicator: query
             @required trust_indicator: False
         """
@@ -180,3 +180,19 @@ class ResultsGURHandler(GenericResultHandler):
         query = dict()
         query["_id"] = ObjectId(result_id)
         self._get_one(query)
+
+    @swagger.operation(nickname="update")
+    def put(self, result_id):
+        """
+            @description: update a single result by _id
+            @param body: fields to be updated
+            @type body: L{ResultUpdateRequest}
+            @in body: body
+            @rtype: L{Result}
+            @return 200: update success
+            @raise 404: result not exist
+            @raise 403: nothing to update
+        """
+        query = {'_id': ObjectId(result_id)}
+        db_keys = []
+        self._update(query, db_keys)
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
index fdd80593a..dd1e3dc53 100644
--- a/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
@@ -10,7 +10,69 @@ from opnfv_testapi.tornado_swagger import swagger
 
 
 @swagger.model()
+class TIHistory(object):
+    """
+        @ptype step: L{float}
+    """
+    def __init__(self, date=None, step=0):
+        self.date = date
+        self.step = step
+
+    def format(self):
+        return {
+            "date": self.date,
+            "step": self.step
+        }
+
+    @staticmethod
+    def from_dict(a_dict):
+        if a_dict is None:
+            return None
+
+        return TIHistory(a_dict.get('date'), a_dict.get('step'))
+
+
+@swagger.model()
+class TI(object):
+    """
+        @property histories: trust_indicator update histories
+        @ptype histories: C{list} of L{TIHistory}
+        @ptype current: L{float}
+    """
+    def __init__(self, current=0):
+        self.current = current
+        self.histories = list()
+
+    def format(self):
+        hs = []
+        for h in self.histories:
+            hs.append(h.format())
+
+        return {
+            "current": self.current,
+            "histories": hs
+        }
+
+    @staticmethod
+    def from_dict(a_dict):
+        if a_dict is None:
+            return None
+        t = TI()
+        t.current = a_dict.get('current')
+        if 'histories' in a_dict.keys():
+            for history in a_dict.get('histories', None):
+                t.histories.append(TIHistory.from_dict(history))
+        else:
+            t.histories = []
+        return t
+
+
+@swagger.model()
 class ResultCreateRequest(object):
+    """
+        @property trust_indicator:
+        @ptype trust_indicator: L{TI}
+    """
     def __init__(self,
                  pod_name=None,
                  project_name=None,
@@ -50,15 +112,30 @@ class ResultCreateRequest(object):
             "build_tag": self.build_tag,
             "scenario": self.scenario,
             "criteria": self.criteria,
-            "trust_indicator": self.trust_indicator
+            "trust_indicator": self.trust_indicator.format()
+        }
+
+
+@swagger.model()
+class ResultUpdateRequest(object):
+    """
+        @property trust_indicator:
+        @ptype trust_indicator: L{TI}
+    """
+    def __init__(self, trust_indicator=None):
+        self.trust_indicator = trust_indicator
+
+    def format(self):
+        return {
+            "trust_indicator": self.trust_indicator.format(),
         }
 
 
 @swagger.model()
 class TestResult(object):
     """
-        @property trust_indicator: must be int/long/float
-        @ptype trust_indicator: L{float}
+        @property trust_indicator: used for long duration test case
+        @ptype trust_indicator: L{TI}
     """
     def __init__(self, _id=None, case_name=None, project_name=None,
                  pod_name=None, installer=None, version=None,
@@ -98,19 +175,7 @@ class TestResult(object):
         t.build_tag = a_dict.get('build_tag')
         t.scenario = a_dict.get('scenario')
         t.criteria = a_dict.get('criteria')
-        # 0 < trust indicator < 1
-        # if bad value => set this indicator to 0
-        t.trust_indicator = a_dict.get('trust_indicator')
-        if t.trust_indicator is not None:
-            if isinstance(t.trust_indicator, (int, long, float)):
-                if t.trust_indicator < 0:
-                    t.trust_indicator = 0
-                elif t.trust_indicator > 1:
-                    t.trust_indicator = 1
-            else:
-                t.trust_indicator = 0
-        else:
-            t.trust_indicator = 0
+        t.trust_indicator = TI.from_dict(a_dict.get('trust_indicator'))
         return t
 
     def format(self):
@@ -126,7 +191,7 @@ class TestResult(object):
             "build_tag": self.build_tag,
             "scenario": self.scenario,
             "criteria": self.criteria,
-            "trust_indicator": self.trust_indicator
+            "trust_indicator": self.trust_indicator.format()
         }
 
     def format_http(self):
@@ -143,7 +208,7 @@ class TestResult(object):
             "build_tag": self.build_tag,
             "scenario": self.scenario,
             "criteria": self.criteria,
-            "trust_indicator": self.trust_indicator
+            "trust_indicator": self.trust_indicator.format()
         }
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
index 6ab98c720..450969248 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
@@ -116,8 +116,8 @@ class MemDb(object):
             if k == 'start_date':
                 if not MemDb._compare_date(v, content.get(k)):
                     return False
-            elif k == 'trust_indicator':
-                if float(content.get(k)) != float(v):
+            elif k == 'trust_indicator.current':
+                if content.get('trust_indicator').get('current') != v:
                     return False
             elif content.get(k, None) != v:
                 return False
@@ -173,7 +173,6 @@ class MemDb(object):
 
     def _check_keys(self, doc):
         for key in doc.keys():
-            print('key', key, 'value', doc.get(key))
            if '.' in key:
                 raise NameError('key {} must not contain .'.format(key))
             if key.startswith('$'):
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
index 27382f089..9a1253e94 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
@@ -8,9 +8,9 @@
 ##############################################################################
 import unittest
 
-from tornado.web import Application
 from tornado import gen
 from tornado.testing import AsyncHTTPTestCase, gen_test
+from tornado.web import Application
 
 import fake_pymongo
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
index bba3b228f..98ef7c08c 100644
--- a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
@@ -6,15 +6,16 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import unittest
 import copy
+import unittest
+from datetime import datetime, timedelta
 
 from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
     HTTP_NOT_FOUND
 from opnfv_testapi.resources.pod_models import PodCreateRequest
 from opnfv_testapi.resources.project_models import ProjectCreateRequest
 from opnfv_testapi.resources.result_models import ResultCreateRequest, \
-    TestResult, TestResults
+    TestResult, TestResults, ResultUpdateRequest, TI, TIHistory
 from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest
 from test_base import TestBase
 
@@ -55,9 +56,11 @@ class TestResultBase(TestBase):
         self.build_tag = 'v3.0'
         self.scenario = 'odl-l2'
         self.criteria = 'passed'
-        self.trust_indicator = 0.7
+        self.trust_indicator = TI(0.7)
         self.start_date = "2016-05-23 07:16:09.477097"
         self.stop_date = "2016-05-23 07:16:19.477097"
+        self.update_date = "2016-05-24 07:16:19.477097"
+        self.update_step = -0.05
         super(TestResultBase, self).setUp()
         self.details = Details(timestart='0', duration='9s', status='OK')
         self.req_d = ResultCreateRequest(pod_name=self.pod,
@@ -74,6 +77,7 @@ class TestResultBase(TestBase):
                                          trust_indicator=self.trust_indicator)
         self.get_res = TestResult
         self.list_res = TestResults
+        self.update_res = TestResult
         self.basePath = '/api/v1/results'
         self.req_pod = PodCreateRequest(self.pod, 'metal', 'zte pod 1')
         self.req_project = ProjectCreateRequest(self.project, 'vping test')
@@ -103,10 +107,19 @@ class TestResultBase(TestBase):
         self.assertEqual(result.build_tag, req.build_tag)
         self.assertEqual(result.scenario, req.scenario)
         self.assertEqual(result.criteria, req.criteria)
-        self.assertEqual(result.trust_indicator, req.trust_indicator)
         self.assertEqual(result.start_date, req.start_date)
         self.assertEqual(result.stop_date, req.stop_date)
         self.assertIsNotNone(result._id)
+        ti = result.trust_indicator
+        self.assertEqual(ti.current, req.trust_indicator.current)
+        if ti.histories:
+            history = ti.histories[0]
+            self.assertEqual(history.date, self.update_date)
+            self.assertEqual(history.step, self.update_step)
+
+    def _create_d(self):
+        _, res = self.create_d()
+        return res.href.split('/')[-1]
 
 
 class TestResultCreate(TestResultBase):
@@ -172,8 +185,7 @@
 
 class TestResultGet(TestResultBase):
     def test_getOne(self):
-        _, res = self.create_d()
-        _id = res.href.split('/')[-1]
+        _id = self._create_d()
         code, body = self.get(_id)
         self.assert_res(code, body)
 
@@ -266,8 +278,6 @@ class TestResultGet(TestResultBase):
             self.assert_res(code, result, req)
 
     def _create_changed_date(self, **kwargs):
-        import copy
-        from datetime import datetime, timedelta
         req = copy.deepcopy(self.req_d)
         req.start_date = datetime.now() + timedelta(**kwargs)
         req.stop_date = str(req.start_date + timedelta(minutes=10))
@@ -276,13 +286,36 @@ class TestResultGet(TestResultBase):
         return req
 
     def _set_query(self, *args):
+        def get_value(arg):
+            return eval('self.' + arg) \
+                if arg != 'trust_indicator' else self.trust_indicator.current
         uri = ''
         for arg in args:
             if '=' in arg:
                 uri += arg + '&'
             else:
-                uri += '{}={}&'.format(arg, eval('self.' + arg))
+                uri += '{}={}&'.format(arg, get_value(arg))
         return uri[0: -1]
 
 
+class TestResultUpdate(TestResultBase):
+    def test_success(self):
+        _id = self._create_d()
+
+        new_ti = copy.deepcopy(self.trust_indicator)
+        new_ti.current += self.update_step
+        new_ti.histories.append(TIHistory(self.update_date, self.update_step))
+        new_data = copy.deepcopy(self.req_d)
+        new_data.trust_indicator = new_ti
+        update = ResultUpdateRequest(trust_indicator=new_ti)
+        code, body = self.update(update, _id)
+        self.assertEqual(_id, body._id)
+        self.assert_res(code, body, new_data)
+
+        code, new_body = self.get(_id)
+        self.assertEqual(_id, new_body._id)
+        self.assert_res(code, new_body, new_data)
+
+
 if __name__ == '__main__':
     unittest.main()
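A minimal sketch of how a client might exercise the new PUT endpoint added to ResultsGURHandler above. This is an illustration only, not part of the patch: the result _id is hypothetical, a testapi instance is assumed to listen locally on the port 8000 configured in etc/config.ini, and the payload simply mirrors the format() output of the TI/TIHistory models defined in result_models.py.

import requests  # assumed available; reporting-status.py already uses it

# Hypothetical _id of a previously created result document.
result_id = "57430a6a9377c14c6e812a55"

# ResultUpdateRequest body: trust_indicator is the only updatable field.
body = {
    "trust_indicator": {
        "current": 0.65,
        "histories": [
            {"date": "2016-05-24 07:16:19.477097", "step": -0.05}
        ]
    }
}

resp = requests.put(
    "http://localhost:8000/api/v1/results/" + result_id, json=body)
# Per the handler docstring: 200 on success, 403 if there is
# nothing to update, 404 if the result does not exist.
print(resp.status_code)

The shape of the request matches what TestResultUpdate.test_success drives through the test client: the current value is bumped by a step, and that step is recorded in the histories list so long-duration test cases keep an audit trail of trust_indicator changes.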