summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--ci/config_functest.yaml1
-rwxr-xr-xci/exec_test.sh18
-rw-r--r--ci/generate_report.py17
-rwxr-xr-xci/run_tests.py15
-rw-r--r--ci/testcases.yaml12
-rw-r--r--testcases/Controllers/ONOS/Sfc/Sfc.py5
-rwxr-xr-xtestcases/Controllers/ONOS/Teston/onosfunctest.py24
-rwxr-xr-xtestcases/OpenStack/rally/run_rally-cert.py23
-rw-r--r--testcases/OpenStack/tempest/custom_tests/blacklist.txt17
-rwxr-xr-xtestcases/OpenStack/tempest/run_tempest.py26
-rwxr-xr-xtestcases/features/doctor.py4
-rwxr-xr-xtestcases/features/promise.py4
-rw-r--r--testcases/vnf/RNC/parser.py27
-rw-r--r--utils/functest_utils.py40
14 files changed, 159 insertions, 74 deletions
diff --git a/ci/config_functest.yaml b/ci/config_functest.yaml
index 953d69023..e4468208e 100644
--- a/ci/config_functest.yaml
+++ b/ci/config_functest.yaml
@@ -23,6 +23,7 @@ general:
dir_repo_copper: /home/opnfv/repos/copper
dir_repo_ovno: /home/opnfv/repos/ovno
dir_repo_parser: /home/opnfv/repos/parser
+ dir_repo_domino: /home/opnfv/repos/domino
dir_functest: /home/opnfv/functest
dir_results: /home/opnfv/functest/results
dir_functest_conf: /home/opnfv/functest/conf
diff --git a/ci/exec_test.sh b/ci/exec_test.sh
index 800b44ab4..cd5ae1822 100755
--- a/ci/exec_test.sh
+++ b/ci/exec_test.sh
@@ -116,24 +116,14 @@ function run_test(){
$clean_flag --sanity all $report
;;
"bgpvpn")
- sdnvpn_repo_dir=${repos_dir}/sdnvpn/test/functest/
- # Copy blacklist from sdnvpn repo to the proper place to execute functest
- src=${sdnvpn_repo_dir}/tempest_blacklist.txt
- dst=${FUNCTEST_REPO_DIR}/testcases/OpenStack/tempest/custom_tests/blacklist.txt
- cp $src $dst
- # Execute tempest smoke with blacklist
- python ${FUNCTEST_REPO_DIR}/testcases/OpenStack/tempest/run_tempest.py \
- $clean_flag -s -m smoke $report
- # Remove blacklist file
- rm $dst
-
- # Execute SDNVPN test cases
python ${sdnvpn_repo_dir}/run_tests.py $report
-
;;
"onos")
python ${FUNCTEST_REPO_DIR}/testcases/Controllers/ONOS/Teston/onosfunctest.py
- ;;
+ ;;
+ "onos_sfc")
+ python ${FUNCTEST_REPO_DIR}/testcases/Controllers/ONOS/Teston/onosfunctest.py -t sfc
+ ;;
"promise")
python ${FUNCTEST_REPO_DIR}/testcases/features/promise.py $report
sleep 10 # to let the instances terminate
diff --git a/ci/generate_report.py b/ci/generate_report.py
index 53aef0c9e..3ca2847bd 100644
--- a/ci/generate_report.py
+++ b/ci/generate_report.py
@@ -35,8 +35,12 @@ def get_results_from_db():
url = 'http://testresults.opnfv.org/test/api/v1/results?build_tag=' + \
BUILD_TAG
logger.debug("Query to rest api: %s" % url)
- data = json.load(urllib2.urlopen(url))
- return data['results']
+ try:
+ data = json.load(urllib2.urlopen(url))
+ return data['results']
+ except:
+ logger.error("Cannot read content from the url: %s" % url)
+ return None
def get_data(test, results):
@@ -90,10 +94,11 @@ def main(args):
if IS_CI_RUN:
results = get_results_from_db()
- for test in executed_test_cases:
- data = get_data(test, results)
- test.update({"url": data['url'],
- "result": data['result']})
+ if results is not None:
+ for test in executed_test_cases:
+ data = get_data(test, results)
+ test.update({"url": data['url'],
+ "result": data['result']})
TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN
if IS_CI_RUN:
diff --git a/ci/run_tests.py b/ci/run_tests.py
index 383a20f9f..758a87c2d 100755
--- a/ci/run_tests.py
+++ b/ci/run_tests.py
@@ -128,8 +128,6 @@ def run_test(test, tier_name):
update_test_info(test_name, result_str, duration_str)
- return result
-
def run_tier(tier):
tier_name = tier.get_name()
@@ -144,11 +142,7 @@ def run_tier(tier):
print_separator("#")
logger.debug("\n%s" % tier)
for test in tests:
- res = run_test(test, tier_name)
- if res != 0:
- return res
-
- return 0
+ run_test(test, tier_name)
def run_all(tiers):
@@ -173,12 +167,9 @@ def run_all(tiers):
logger.info("Tests to be executed:%s" % summary)
EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
for tier in tiers_to_run:
- res = run_tier(tier)
- if res != 0:
- return res
- generate_report.main(EXECUTED_TEST_CASES)
+ run_tier(tier)
- return 0
+ generate_report.main(EXECUTED_TEST_CASES)
def main():
diff --git a/ci/testcases.yaml b/ci/testcases.yaml
index 634d041b0..315969ae7 100644
--- a/ci/testcases.yaml
+++ b/ci/testcases.yaml
@@ -60,7 +60,7 @@ tiers:
the OpenStack deplopyment.
dependencies:
installer: ''
- scenario: '^(?!.*bgpvpn).*$'
+ scenario: ''
-
name: rally_sanity
@@ -198,6 +198,16 @@ tiers:
dependencies:
installer: 'fuel'
scenario: 'odl_l2-sfc'
+ -
+ name: onos_sfc
+ criteria: 'status == "PASS"'
+ blocking: true
+ description: >-
+ Test Suite for onos-sfc to test sfc function.
+ dependencies:
+ installer: ''
+ scenario: 'onos-sfc'
+
-
name: openstack
order: 4
diff --git a/testcases/Controllers/ONOS/Sfc/Sfc.py b/testcases/Controllers/ONOS/Sfc/Sfc.py
index 6b1973ef0..a52019875 100644
--- a/testcases/Controllers/ONOS/Sfc/Sfc.py
+++ b/testcases/Controllers/ONOS/Sfc/Sfc.py
@@ -226,4 +226,9 @@ class Sfc:
'status': status})
except:
logger.error("Error pushing results into Database")
+
+ if status == "FAIL":
+ EXIT_CODE = -1
+ exit(EXIT_CODE)
+
print("############################END OF SCRIPT ######################")
diff --git a/testcases/Controllers/ONOS/Teston/onosfunctest.py b/testcases/Controllers/ONOS/Teston/onosfunctest.py
index 35ced61c5..2790e6941 100755
--- a/testcases/Controllers/ONOS/Teston/onosfunctest.py
+++ b/testcases/Controllers/ONOS/Teston/onosfunctest.py
@@ -18,6 +18,7 @@ import datetime
import os
import re
import time
+import argparse
from neutronclient.v2_0 import client as neutronclient
@@ -25,6 +26,11 @@ import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils
+parser = argparse.ArgumentParser()
+parser.add_argument("-t", "--testcase", help="Testcase name")
+args = parser.parse_args()
+
+
""" logging configuration """
logger = ft_logger.Logger("onos").getLogger()
@@ -215,11 +221,9 @@ def SetSfcConf():
logger.info("Modify configuration for SFC")
-def main():
+def OnosTest():
start_time = time.time()
stop_time = start_time
- # DownloadCodes()
- # if args.installer == "joid":
if INSTALLER_TYPE == "joid":
logger.debug("Installer is Joid")
SetOnosIpForJoid()
@@ -254,13 +258,19 @@ def main():
except:
logger.error("Error pushing results into Database")
- if DEPLOY_SCENARIO == "os-onos-sfc-ha":
+ if status == "FAIL":
+ EXIT_CODE = -1
+ exit(EXIT_CODE)
+
+
+def main():
+
+ if args.testcase == "sfc":
CreateImage()
SetSfcConf()
SfcTest()
-
- # CleanOnosTest()
-
+ else:
+ OnosTest()
if __name__ == '__main__':
main()
diff --git a/testcases/OpenStack/rally/run_rally-cert.py b/testcases/OpenStack/rally/run_rally-cert.py
index 2f696ec30..1f1214e03 100755
--- a/testcases/OpenStack/rally/run_rally-cert.py
+++ b/testcases/OpenStack/rally/run_rally-cert.py
@@ -504,23 +504,18 @@ def main():
'nb tests': total_nb_tests,
'nb success': success_rate}})
- # Generate json results for DB
- # json_results = {"timestart": time_start, "duration": total_duration,
- # "tests": int(total_nb_tests),
- # "success": int(total_success)}
- # logger.info("Results: "+str(json_results))
-
- # Evaluation of the success criteria
- status = "FAIL"
- # for Rally we decided that the overall success rate must be above 90%
- if float(success_rate) >= 90:
- status = "PASS"
-
if args.sanity:
case_name = "rally_sanity"
else:
case_name = "rally_full"
+ # Evaluation of the success criteria
+ status = functest_utils.check_success_rate(case_name, success_rate)
+
+ exit_code = -1
+ if status == "PASS":
+ exit_code = 0
+
if args.report:
logger.debug("Pushing Rally summary into DB...")
functest_utils.push_results_to_db("functest",
@@ -531,7 +526,7 @@ def main():
status,
payload)
if args.noclean:
- exit(0)
+ exit(exit_code)
if not image_exists:
logger.debug("Deleting image '%s' with ID '%s'..."
@@ -545,6 +540,8 @@ def main():
if not os_utils.delete_volume_type(cinder_client, volume_type):
logger.error("Error in deleting volume type...")
+ exit(exit_code)
+
if __name__ == '__main__':
main()
diff --git a/testcases/OpenStack/tempest/custom_tests/blacklist.txt b/testcases/OpenStack/tempest/custom_tests/blacklist.txt
new file mode 100644
index 000000000..49605ca7b
--- /dev/null
+++ b/testcases/OpenStack/tempest/custom_tests/blacklist.txt
@@ -0,0 +1,17 @@
+-
+ scenarios:
+ - os-odl_l2-bgpvpn-ha
+ - os-odl_l2-bgpvpn-noha
+ installers:
+ - fuel
+ - apex
+ tests:
+ - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
+ - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
+ - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
+ - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
+ - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke]
+ - tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
+ - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke]
+ - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern[compute,id-557cd2c2-4eb8-4dce-98be-f86765ff311b,image,smoke,volume]
+ - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern[compute,id-557cd2c2-4eb8-4dce-98be-f86765ff311b,image,smoke,volume]
diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py
index f99678df4..8d23b7415 100755
--- a/testcases/OpenStack/tempest/run_tempest.py
+++ b/testcases/OpenStack/tempest/run_tempest.py
@@ -21,11 +21,12 @@ import shutil
import subprocess
import sys
import time
+import yaml
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
-import yaml
+
modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
'identity', 'image', 'network', 'object_storage', 'orchestration',
@@ -330,13 +331,30 @@ def apply_tempest_blacklist():
logger.debug("Applying tempest blacklist...")
cases_file = read_file(TEMPEST_RAW_LIST)
result_file = open(TEMPEST_LIST, 'w')
+ black_tests = []
try:
- black_file = read_file(TEMPEST_BLACKLIST)
+ installer_type = os.getenv('INSTALLER_TYPE')
+ deploy_scenario = os.getenv('DEPLOY_SCENARIO')
+ if (bool(installer_type) * bool(deploy_scenario)):
+ # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the file
+ black_list_file = open(TEMPEST_BLACKLIST)
+ black_list_yaml = yaml.safe_load(black_list_file)
+ black_list_file.close()
+ for item in black_list_yaml:
scenarios = item['scenarios']
+ installers = item['installers']
+ if (deploy_scenario in scenarios and
+ installer_type in installers):
+ tests = item['tests']
+ for test in tests:
+ black_tests.append(test)
+ break
except:
- black_file = ''
+ black_tests = []
logger.debug("Tempest blacklist file does not exist.")
+
for line in cases_file:
- if line not in black_file:
+ if line not in black_tests:
result_file.write(str(line) + '\n')
result_file.close()
diff --git a/testcases/features/doctor.py b/testcases/features/doctor.py
index ef55506af..bdf3ddc35 100755
--- a/testcases/features/doctor.py
+++ b/testcases/features/doctor.py
@@ -33,6 +33,7 @@ logger = ft_logger.Logger("doctor").getLogger()
def main():
+ exit_code = -1
cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
start_time = time.time()
@@ -43,6 +44,7 @@ def main():
if ret == 0:
logger.info("doctor OK")
test_status = 'OK'
+ exit_code = 0
else:
logger.info("doctor FAILED")
test_status = 'NOK'
@@ -79,5 +81,7 @@ def main():
status,
details)
+ exit(exit_code)
+
if __name__ == '__main__':
main()
diff --git a/testcases/features/promise.py b/testcases/features/promise.py
index 74c1ad782..170f75494 100755
--- a/testcases/features/promise.py
+++ b/testcases/features/promise.py
@@ -70,6 +70,7 @@ logger = ft_logger.Logger("promise").getLogger()
def main():
+ exit_code = -1
start_time = time.time()
ks_creds = openstack_utils.get_credentials("keystone")
nv_creds = openstack_utils.get_credentials("nova")
@@ -241,6 +242,7 @@ def main():
status = "FAIL"
if int(tests) > 32 and int(failures) < 1:
status = "PASS"
+ exit_code = 0
functest_utils.push_results_to_db("promise",
"promise",
@@ -250,6 +252,8 @@ def main():
status,
json_results)
+ exit(exit_code)
+
if __name__ == '__main__':
main()
diff --git a/testcases/vnf/RNC/parser.py b/testcases/vnf/RNC/parser.py
index 7def5bf82..485af0e14 100644
--- a/testcases/vnf/RNC/parser.py
+++ b/testcases/vnf/RNC/parser.py
@@ -27,13 +27,14 @@ with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
dirs = functest_yaml.get('general').get('directories')
FUNCTEST_REPO = dirs.get('dir_repo_functest')
PARSER_REPO = dirs.get('dir_repo_parser')
-TEST_DB_URL = functest_yaml.get('results').get('test_db_url')
logger = ft_logger.Logger("parser").getLogger()
def main():
EXIT_CODE = -1
+ project = 'parser'
+ case_name = 'parser-basics'
cmd = 'cd %s/tests && ./functest_run.sh' % PARSER_REPO
start_time = time.time()
@@ -54,27 +55,19 @@ def main():
'duration': duration,
'status': test_status,
}
- pod_name = functest_utils.get_pod_name(logger)
- scenario = functest_utils.get_scenario(logger)
- version = functest_utils.get_version(logger)
- build_tag = functest_utils.get_build_tag(logger)
status = "FAIL"
if details['status'] == "OK":
status = "PASS"
- logger.info("Pushing Parser results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
- "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
- 'db': TEST_DB_URL,
- 'pod': pod_name,
- 'v': version,
- 's': scenario,
- 'c': status,
- 'b': build_tag,
- 'd': details,
- })
- functest_utils.push_results_to_db("parser",
- "parser-basics",
+ functest_utils.logger_test_results(logger,
+ project,
+ case_name,
+ status,
+ details)
+
+ functest_utils.push_results_to_db(project,
+ case_name,
logger,
start_time,
stop_time,
diff --git a/utils/functest_utils.py b/utils/functest_utils.py
index b46dc7dda..b0014308e 100644
--- a/utils/functest_utils.py
+++ b/utils/functest_utils.py
@@ -163,6 +163,27 @@ def get_db_url(logger=None):
return db_url
+def logger_test_results(logger, project, case_name, status, details):
+ pod_name = get_pod_name(logger)
+ scenario = get_scenario(logger)
+ version = get_version(logger)
+ build_tag = get_build_tag(logger)
+
+ logger.info("Pushing %(p)s/%(n)s results: TEST_DB_URL=%(db)s "
+ "pod_name=%(pod)s version=%(v)s scenario=%(s)s "
+ "criteria=%(c)s details=%(d)s" % {
+ 'p': project,
+ 'n': case_name,
+ 'db': get_db_url(),
+ 'pod': pod_name,
+ 'v': version,
+ 's': scenario,
+ 'c': status,
+ 'b': build_tag,
+ 'd': details,
+ })
+
+
def push_results_to_db(project, case_name, logger,
start_date, stop_date, criteria, details):
"""
@@ -326,3 +347,22 @@ def get_parameter_from_yaml(parameter, file=None):
raise ValueError("The parameter %s is not defined in"
" config_functest.yaml" % parameter)
return value
+
+
+def check_success_rate(case_name, success_rate):
+ success_rate = float(success_rate)
+ criteria = get_criteria_by_test(case_name)
+
+ def get_value(op):
+ return float(criteria.split(op)[1].rstrip('%'))
+
+ status = 'FAIL'
+ ops = ['==', '>=']
+ for op in ops:
+ if op in criteria:
+ c_value = get_value(op)
+ if eval("%s %s %s" % (success_rate, op, c_value)):
+ status = 'PASS'
+ break
+
+ return status