author | Morgan Richomme <morgan.richomme@orange.com> | 2016-01-14 11:45:19 +0100
---|---|---
committer | Morgan Richomme <morgan.richomme@orange.com> | 2016-01-14 13:44:20 +0100
commit | cdf5e6a2fd05f07a5db92175777113d732258a94 (patch) |
tree | fb62a35e92d7618be1a8018d03eac527749926d7 |
parent | 04d586e1563dd263bf1e7aeb052c9d397ecf5c8a (diff) |
set up mechanism to run only runnable tests in CI based on scenario
JIRA: FUNCTEST-119
Change-Id: I342c027c79fab1cc9fa65ddf8222e7b12b946af8
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
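The mechanism is simple: in CI, config_functest.py asks functest_utils.generateTestcaseList() to write the runnable, priority-ordered test names to a file, and docker/run_tests.sh reads that file into arr_test when DEPLOY_SCENARIO is set. A minimal sketch of the resulting flow, using only names that appear in the diff below (an illustration, not the shipped code; the scenario value is an example):

```python
# Sketch of the CI flow set up by this commit (illustration only).
import os

# Written by functest_utils.generateTestcaseList() inside the CI container.
LIST_FILE = "/home/opnfv/functest/conf/testcase-list.txt"


def tests_to_run():
    # Jenkins exports DEPLOY_SCENARIO (e.g. "os-odl_l2-ovs-ha") in CI mode;
    # without it, run_tests.sh keeps its static default list.
    if os.environ.get("DEPLOY_SCENARIO"):
        with open(LIST_FILE) as f:
            return f.read().split()
    return ["vping", "odl", "tempest", "vims", "rally"]
```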
-rwxr-xr-x | docker/run_tests.sh | 9
-rwxr-xr-x | testcases/config_functest.py | 5
-rw-r--r-- | testcases/config_functest.yaml | 75
-rw-r--r-- | testcases/functest_utils.py | 146
-rw-r--r-- | testcases/tests/TestFunctestUtils.py | 41
5 files changed, 147 insertions, 129 deletions
diff --git a/docker/run_tests.sh b/docker/run_tests.sh
index bc026d92..46fe6cd5 100755
--- a/docker/run_tests.sh
+++ b/docker/run_tests.sh
@@ -30,7 +30,14 @@ examples:
 # NOTE: Still not 100% working when running the tests
 offline=false
 report=""
-arr_test=(vping odl tempest vims rally)
+# Get the list of runnable tests
+# Check if we are in CI mode
+if [ -n "$DEPLOY_SCENARIO" ]; then
+    testcase=`cat /home/opnfv/functest/conf/testcase-list.txt`
+    arr_test=("$testcase")
+else
+    arr_test=(vping odl tempest vims rally)
+fi
 
 function clean_openstack(){
     python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py \
diff --git a/testcases/config_functest.py b/testcases/config_functest.py
index d392d557..19d894fb 100755
--- a/testcases/config_functest.py
+++ b/testcases/config_functest.py
@@ -128,6 +128,11 @@ def action_start():
     if not os.path.exists(RALLY_RESULT_DIR):
         os.makedirs(RALLY_RESULT_DIR)
 
+    try:
+        functest_utils.generateTestcaseList(functest_yaml)
+    except:
+        logger.info("Not CI mode. Test cases list not generated.")
+
     exit(0)
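Outside the CI container the conf directory and the CI environment variables are missing, so the config_functest.py hunk above deliberately treats a failing generateTestcaseList() as "not CI mode" rather than as an error. A sketch of that guard (the explicit exception class, logger name and wrapper function here are assumptions; the committed code uses a bare except and the module logger inline):

```python
# Sketch of the non-CI fallback added to action_start() -- illustration only.
import logging

import functest_utils  # repo module: testcases/functest_utils.py

logger = logging.getLogger("config_functest")


def write_ci_testcase_list(functest_yaml):
    try:
        # Needs DEPLOY_SCENARIO/INSTALLER_TYPE and /home/opnfv/functest/conf,
        # which only exist inside the CI container.
        functest_utils.generateTestcaseList(functest_yaml)
    except Exception:
        logger.info("Not CI mode. Test cases list not generated.")
```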
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index 5c457612..be7687ac 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -148,10 +148,29 @@ ONOS:
 results:
     test_db_url: http://213.77.62.197
 
+# to be maintained...
+# the execution order is important as some tests may be more destructive than others
+# and if vPing fails it is usually not worth continuing...
+test_exec_priority:
+    1: vping
+    2: tempest
+    3: odl
+    #4: onos
+    #5: ovno
+    #6: doctor
+    #7: promise
+    #8: policy-test
+    #9: odl-vpn_service-tests
+    #10: opnfv-yardstick-tc026-sdnvpn
+    #11: openstack-neutron-bgpvpn-api-extension-tests
+    12: vims
+    13: rally
+
+
 ########################################################################
 # This part lists the dependencies of the tests
 #
-# it is used to managed the complexity of the possible combination
+# it is used to manage the complexity of the possible combinations
 #
 # 17 projects have been declared for Brahmaputra (D Milestone)
 # 89 testcases are associated with these 17 projects
@@ -168,51 +187,55 @@ results:
 #
 # By default we consider that all the tests can be run on any configuration
 #
-# we defined 3 constraints
+# we defined 2 constraints
 # - installer (e.g. my test can be run only with installer Compass)
 #   possible values: apex, compass, fuel, joid
 #
-# - the controller (odl, onos, opencontrail)
+# - the scenario: it describes a specific installation
+#   os-<controller>-<nfvfeature>-<mode>[-<extrastuff>]
+#   With parameters:
+#     controller=(nosdn|odl_l3|odl_l2|onos|ocl)
+#       No odl_l3 today
+#     nfvfeature=(kvm|ovs|dpdk|nofeature)
+#       '_'-separated list.
+#     mode=(ha|noha)
+#     extrastuff=(none)
+#       Optional field - Not used today
 #
-# - the scenario (ovs, kvm, QEMU, proc) based on Fuel proposal
-#   see https://git.opnfv.org/cgit/fuel/tree/deploy/scenario/scenario.yaml
+# ref: https://gerrit.opnfv.org/gerrit/#/c/6323/7/jjb/joid/joid-deploy.sh (L72-82)
 # e.g my test is only possible with OVS 2.3.2 on odl
-# not fully clear as the controller may be included with the scenario
 #
-# In a first step, our main need is to trigger ad hox controller suite
-# In second step we may focus with scenario parameter
-# but no so far
-# - either controller suite
-# or
-# - suites that should be runnable on any conf, any scenario
+# in functest, we indicate the regex pattern to be checked against the scenario
+# e.g. odl-vpn_service-tests can be run if and only if
+# - installer is fuel
+# - scenario contains the names ovs and odl
 #
 #######################################################################
 test-dependencies:
     doctor:
+        installer: 'fuel'
     functest:
-        vping:
         vims:
+        vping:
         tempest:
         rally:
         odl:
-            controller: 'odl'
+            scenario: 'odl'
         onos:
-            controller: 'onos'
-        onos-ovsdb:
-            controller: 'onos'
+            scenario: 'onos'
     promise:
+        installer: '(fuel)|(joid)'
     ovno:
-        controller: 'opencontrail'
+        scenario: 'ocl'
     policy-test:
-        controller: 'odl'
+        scenario: 'odl'
     sdnvpn:
         opnfv-yardstick-tc026-sdnvpn:
-            controller: 'nosdn'
-            scenario: 'os_ovh_ha'
+            installer: 'fuel'
+            scenario: '(ovs)*(nosdn)'
         odl-vpn_service-tests:
-            controller: 'odl'
-            scenario: 'os_ovh_ha'
+            installer: 'fuel'
+            scenario: '(ovs)*(odl)'
         openstack-neutron-bgpvpn-api-extension-tests:
-            controller: 'nosdn'
-            scenario: 'os_ovh_ha'
-
+            installer: 'fuel'
+            scenario: '(ovs)*(nosdn)'
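The dependency section now stores regex patterns instead of exact controller names; as the next hunk shows, isTestRunnable() simply calls re.search() with each declared pattern against the corresponding CI value. A small self-contained illustration, with patterns copied from the file above and an example set of CI values:

```python
# How the installer/scenario patterns above are evaluated (sketch).
import re

ci_env = {"installer": "fuel", "scenario": "os-odl_l2-ovs-ha"}  # example CI values

# patterns copied from test-dependencies above
deps = {
    "functest/odl": {"scenario": "odl"},
    "functest/onos": {"scenario": "onos"},
    "promise": {"installer": "(fuel)|(joid)"},
    "sdnvpn/odl-vpn_service-tests": {"installer": "fuel", "scenario": "(ovs)*(odl)"},
}

for test, constraints in deps.items():
    runnable = all(re.search(pattern, ci_env[criteria])
                   for criteria, pattern in constraints.items())
    print(test, "->", "runnable" if runnable else "skipped")
# functest/odl, promise and sdnvpn/odl-vpn_service-tests are runnable here;
# functest/onos is skipped because "onos" does not appear in the scenario.
```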
diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py
index 401078b7..e7641b01 100644
--- a/testcases/functest_utils.py
+++ b/testcases/functest_utils.py
@@ -18,6 +18,7 @@ import requests
 import json
 import shutil
 import re
+import yaml
 
 from git import Repo
 
@@ -652,6 +653,17 @@ def push_results_to_db(db_url, case_name, logger, pod_name,
         return False
 
 
+def get_resolvconf_ns():
+    nameservers = []
+    rconf = open("/etc/resolv.conf", "r")
+    line = rconf.readline()
+    while line:
+        ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", line)
+        if ip:
+            nameservers.append(ip.group())
+        line = rconf.readline()
+    return nameservers
+
 def getTestEnv(test, functest_yaml):
     # get the config of the testcase based on functest_config.yaml
     # 2 options
@@ -683,87 +695,69 @@ def get_ci_envvars():
     """
     ci_env_var = {
         "installer": os.environ.get('INSTALLER_TYPE'),
-        "controller": os.environ.get('SDN_CONTROLLER'),
-        "options": os.environ.get("OPNFV_FEATURE")}
+        "scenario": os.environ.get('DEPLOY_SCENARIO')}
     return ci_env_var
 
 
-def get_resolvconf_ns():
-    nameservers = []
-    rconf = open("/etc/resolv.conf", "r")
-    line = rconf.readline()
-    while line:
-        ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", line)
-        if ip:
-            nameservers.append(ip.group())
-        line = rconf.readline()
-    return nameservers
-
-
 def isTestRunnable(test, functest_yaml):
-    # check getTestEnv(test) and CI env var
-    # check installer, controller and options
-    # e.g. if test needs onos => do not run odl suite
-    try:
-        # By default we assume that all the tests are always runnable...
-        is_runnable = True
-        # Retrieve CI environment
-        ci_env = get_ci_envvars()
-
-        # Retrieve test environement from config file
-        test_env = getTestEnv(test, functest_yaml)
-
-        # if test_env not empty => dependencies to be checked
-        if test_env is not None and len(test_env) > 0:
-            # possible criteria = ["installer", "controller", "options"]
-            # consider test criteria from config file
-            # compare towards CI env through CI en variable
-            for criteria in test_env:
-                if test_env[criteria] != ci_env[criteria]:
-                    # print "Test "+ test + " cannot be run on the environment"
-                    is_runnable = False
-    except:
-        print "Error isTestRunnable:", sys.exc_info()[0]
+    # By default we assume that all the tests are always runnable...
+    is_runnable = True
+    # Retrieve CI environment
+    ci_env = get_ci_envvars()
+    # Retrieve test environment from config file
+    test_env = getTestEnv(test, functest_yaml)
+
+    # if test_env not empty => dependencies to be checked
+    if test_env is not None and len(test_env) > 0:
+        # possible criteria = ["installer", "scenario"]
+        # consider test criteria from config file
+        # compare towards CI env through CI env variable
+        for criteria in test_env:
+            if re.search(test_env[criteria], ci_env[criteria]) is None:
+                # print "Test "+ test + " cannot be run on the environment"
+                is_runnable = False
     return is_runnable
 
 
 def generateTestcaseList(functest_yaml):
-    try:
-        test_list = ""
-        # Retrieve CI environment
-        get_ci_envvars()
-
-        # get testcases
-        testcase_list = functest_yaml.get("test-dependencies")
-        projects = testcase_list.keys()
-        for project in projects:
-            testcases = testcase_list[project]
-            # 1 or 2 levels for testcases project[/case]
-            # if only project name without controller or scenario
-            # => shall be runnable on any controller/scenario
-            if testcases is None:
-                test_list += project + " "
-            else:
-                for testcase in testcases:
-                    if testcase == "controller" or testcase == "scenario":
-                        # project (1 level)
-                        if isTestRunnable(project, functest_yaml):
-                            test_list += project + " "
-                    else:
-                        # project/testcase (2 levels)
-                        thetest = project + "/" + testcase
-                        if isTestRunnable(thetest, functest_yaml):
-                            test_list += testcase + " "
-
-        # create a file that could be consumed by run-test.sh
-        file = open("testcase-list.txt", 'w')
-        file.write(test_list)
-        file.close()
-
-        return test_list
-
-        # test for each testcase if it is runnable
-        # towards the declared configuration
-        # generate the test config file
-    except:
-        print "Error generateTestcaseList:", sys.exc_info()[0]
+    test_list = ""
+    # get testcases
+    testcase_list = functest_yaml.get("test-dependencies")
+    projects = testcase_list.keys()
+
+    for project in projects:
+        testcases = testcase_list[project]
+        # 1 or 2 levels for testcases project[/case]
+        # if only project name without installer or scenario
+        # => shall be runnable on any installer/scenario
+        if testcases is None:
+            test_list += project + " "
+        else:
+            for testcase in testcases:
+                if testcase == "installer" or testcase == "scenario":
+                    # project (1 level)
+                    if isTestRunnable(project, functest_yaml):
+                        test_list += project + " "
+                else:
+                    # project/testcase (2 levels)
+                    thetest = project + "/" + testcase
+                    if isTestRunnable(thetest, functest_yaml):
+                        test_list += testcase + " "
+
+    # sort the list to execute the tests in the right order
+    test_order_list = functest_yaml.get("test_exec_priority")
+    test_sorted_list = ""
+    for test in test_order_list:
+        if test_order_list[test] in test_list:
+            test_sorted_list += test_order_list[test] + " "
+
+    # create a file that could be consumed by run-test.sh
+    # this method is used only for CI
+    # so it can be run only in the container
+    # reuse the default conf directory to store the list of runnable tests
+    file = open("/home/opnfv/functest/conf/testcase-list.txt", 'w')
+    file.write(test_sorted_list)
+    file.close()
+
+    return test_sorted_list
+
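After filtering, generateTestcaseList() above re-orders the surviving tests with the test_exec_priority map before writing the file. A condensed sketch of that ordering step (priorities copied from config_functest.yaml above; the unordered input string is an example):

```python
# Sketch of the priority-ordering step in generateTestcaseList().
test_exec_priority = {1: "vping", 2: "tempest", 3: "odl", 12: "vims", 13: "rally"}

# e.g. the space-separated list built from the runnable projects/cases
test_list = "odl rally vping tempest vims "

test_sorted_list = ""
for prio in sorted(test_exec_priority):   # the real code iterates the dict directly
    if test_exec_priority[prio] in test_list:
        test_sorted_list += test_exec_priority[prio] + " "

print(test_sorted_list)  # "vping tempest odl vims rally "
```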
diff --git a/testcases/tests/TestFunctestUtils.py b/testcases/tests/TestFunctestUtils.py
index 46da50cb..6f12e603 100644
--- a/testcases/tests/TestFunctestUtils.py
+++ b/testcases/tests/TestFunctestUtils.py
@@ -11,25 +11,24 @@ class TestFunctestUtils(unittest.TestCase):
 
     def setUp(self):
         os.environ["INSTALLER_TYPE"] = "fuel"
-        os.environ["SDN_CONTROLLER"] = "odl"
-        os.environ["OPNFV_FEATURE"] = "ovs2.4"
+        os.environ["DEPLOY_SCENARIO"] = "os-odl_l3-ovs-ha"
 
         global functest_yaml
-        with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+        with open("../config_functest.yaml") as f:
             functest_yaml = yaml.safe_load(f)
         f.close()
 
     def test_getTestEnv(self):
         env_test = getTestEnv('ovno', functest_yaml)
-        self.assertEqual(env_test, {'controller': 'opencontrail'})
+        self.assertEqual(env_test, {'scenario': 'ocl'})
 
         env_test = getTestEnv('doctor', functest_yaml)
-        self.assertEqual(env_test, None)
+        self.assertEqual(env_test, {'installer': 'fuel'})
 
         env_test = getTestEnv('promise', functest_yaml)
-        self.assertEqual(env_test, None)
+        self.assertEqual(env_test, {'installer': '(fuel)|(joid)'})
 
         env_test = getTestEnv('functest/tempest', functest_yaml)
         self.assertEqual(env_test, None)
@@ -38,30 +37,22 @@ class TestFunctestUtils(unittest.TestCase):
         self.assertEqual(env_test, None)
 
         env_test = getTestEnv('functest/odl', functest_yaml)
-        self.assertEqual(env_test, {'controller': 'odl'})
+        self.assertEqual(env_test, {'scenario': 'odl'})
 
         env_test = getTestEnv('functest/onos', functest_yaml)
-        self.assertEqual(env_test, {'controller': 'onos'})
-
-        env_test = getTestEnv('functest/onos-ovsdb', functest_yaml)
-        self.assertEqual(env_test, {'controller': 'onos'})
+        self.assertEqual(env_test, {'scenario': 'onos'})
 
         env_test = getTestEnv('policy-test', functest_yaml)
-        self.assertEqual(env_test, {'controller': 'odl'})
+        self.assertEqual(env_test, {'scenario': 'odl'})
 
         env_test = getTestEnv('sdnvpn/odl-vpn_service-tests', functest_yaml)
         self.assertEqual(env_test,
-                         {'controller': 'odl', 'scenario': 'os_ovh_ha'})
+                         {'installer': 'fuel', 'scenario': '(ovs)*(odl)'})
 
         env_test = getTestEnv('sdnvpn/opnfv-yardstick-tc026-sdnvpn',
                               functest_yaml)
         self.assertEqual(env_test,
-                         {'controller': 'nosdn', 'scenario': 'os_ovh_ha'})
-
-        env_test = getTestEnv('sdnvpn/openstack-neutron-bgpvpn-api-extension-tests',
-                              functest_yaml)
-        self.assertEqual(env_test,
-                         {'controller': 'nosdn', 'scenario': 'os_ovh_ha'})
+                         {'installer': 'fuel', 'scenario': '(ovs)*(nosdn)'})
 
         env_test = getTestEnv('foo', functest_yaml)
         self.assertEqual(env_test, '')
@@ -106,17 +97,15 @@ class TestFunctestUtils(unittest.TestCase):
 
     def test_generateTestcaseList(self):
         test = generateTestcaseList(functest_yaml)
-        test = sorted(test.split(' '))
-
-        expected_list = "doctor vims odl rally vping tempest promise policy-test odl-vpn_service-tests "
-        expected_list_array = sorted(expected_list.split(' '))
-
-        self.assertEqual(test, expected_list_array)
+
+        expected_list = "vping tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
+        self.assertEqual(test, expected_list)
 
     def tearDown(self):
         os.environ["INSTALLER_TYPE"] = ""
-        os.environ["SDN_CONTROLLER"] = ""
-        os.environ["OPNFV_FEATURE"] = ""
+        os.environ["DEPLOY_SCENARIO"] = ""
 
 if __name__ == '__main__':
     unittest.main()
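For a quick local check of the new helpers, the same environment as the unit tests above can be reproduced in a few lines. This is a sketch: it assumes the script is run from testcases/tests/ (so the relative config path resolves) and that testcases/ is on the Python path so functest_utils can be imported; env values, config path and expected results are taken from the test file above.

```python
# Sketch: exercising the new helpers the way TestFunctestUtils does.
import os

import yaml
from functest_utils import getTestEnv, isTestRunnable  # assumes testcases/ on PYTHONPATH

os.environ["INSTALLER_TYPE"] = "fuel"
os.environ["DEPLOY_SCENARIO"] = "os-odl_l3-ovs-ha"

with open("../config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

print(getTestEnv("functest/odl", functest_yaml))       # {'scenario': 'odl'}
print(isTestRunnable("functest/odl", functest_yaml))   # True: scenario contains "odl"
print(isTestRunnable("functest/onos", functest_yaml))  # False: no "onos" in the scenario
```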