Diffstat (limited to 'functest')
-rwxr-xr-x   functest/ci/prepare_env.py   71
-rwxr-xr-x   functest/ci/run_tests.py     76
2 files changed, 76 insertions, 71 deletions
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index cca9ac73..6b24fe08 100755
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -47,7 +47,8 @@ class PrepareEnvParser():
 
     def __init__(self):
         self.parser = argparse.ArgumentParser()
         self.parser.add_argument("action", help="Possible actions are: "
-                                 "'{d[0]}|{d[1]}' ".format(d=actions))
+                                 "'{d[0]}|{d[1]}' ".format(d=actions),
+                                 choices=actions)
         self.parser.add_argument("-d", "--debug", help="Debug mode",
                                  action="store_true")
@@ -140,14 +141,14 @@ def source_rc_file():
         if CONST.INSTALLER_IP is None:
             logger.error("The env variable CI_INSTALLER_IP must be provided in"
                          " order to fetch the credentials from the installer.")
-            sys.exit("Missing CI_INSTALLER_IP.")
+            raise Exception("Missing CI_INSTALLER_IP.")
 
         if CONST.INSTALLER_TYPE not in opnfv_constants.INSTALLERS:
             logger.error("Cannot fetch credentials. INSTALLER_TYPE=%s is "
                          "not a valid OPNFV installer. Available "
                          "installers are : %s." % (CONST.INSTALLER_TYPE,
                                                    opnfv_constants.INSTALLERS))
-            sys.exit("Wrong INSTALLER_TYPE.")
+            raise Exception("Wrong INSTALLER_TYPE.")
 
         cmd = ("/home/opnfv/repos/releng/utils/fetch_os_creds.sh "
                "-d %s -i %s -a %s"
@@ -159,15 +160,12 @@ def source_rc_file():
         output = p.communicate()[0]
         logger.debug("\n%s" % output)
         if p.returncode != 0:
-            logger.error("Failed to fetch credentials from installer.")
-            sys.exit(1)
+            raise Exception("Failed to fetch credentials from installer.")
     else:
         logger.info("RC file provided in %s." % CONST.openstack_creds)
         if os.path.getsize(CONST.openstack_creds) == 0:
-            logger.error("The file %s is empty."
-                         % CONST.openstack_creds)
-            sys.exit(1)
+            raise Exception("The file %s is empty." % CONST.openstack_creds)
 
     logger.info("Sourcing the OpenStack RC file...")
     os_utils.source_credentials(
@@ -211,7 +209,7 @@ def verify_deployment():
         line = p.stdout.readline().rstrip()
         if "ERROR" in line:
             logger.error(line)
-            sys.exit("Problem while running 'check_os.sh'.")
+            raise Exception("Problem while running 'check_os.sh'.")
         logger.info(line)
 
 
@@ -270,46 +268,43 @@ def create_flavor():
 def check_environment():
     msg_not_active = "The Functest environment is not installed."
     if not os.path.isfile(CONST.env_active):
-        logger.error(msg_not_active)
-        sys.exit(1)
+        raise Exception(msg_not_active)
 
     with open(CONST.env_active, "r") as env_file:
         s = env_file.read()
         if not re.search("1", s):
-            logger.error(msg_not_active)
-            sys.exit(1)
+            raise Exception(msg_not_active)
 
     logger.info("Functest environment is installed.")
 
 
 def main(**kwargs):
-    if not (kwargs['action'] in actions):
-        logger.error('Argument not valid.')
-        sys.exit()
-
-    if kwargs['action'] == "start":
-        logger.info("######### Preparing Functest environment #########\n")
-        check_env_variables()
-        create_directories()
-        source_rc_file()
-        patch_config_file()
-        verify_deployment()
-        install_rally()
-        install_tempest()
-        create_flavor()
-
-        with open(CONST.env_active, "w") as env_file:
-            env_file.write("1")
-
-        check_environment()
-
-    if kwargs['action'] == "check":
-        check_environment()
-
-    exit(0)
+    try:
+        if not (kwargs['action'] in actions):
+            logger.error('Argument not valid.')
+            return -1
+        elif kwargs['action'] == "start":
+            logger.info("######### Preparing Functest environment #########\n")
+            check_env_variables()
+            create_directories()
+            source_rc_file()
+            patch_config_file()
+            verify_deployment()
+            install_rally()
+            install_tempest()
+            create_flavor()
+            with open(CONST.env_active, "w") as env_file:
+                env_file.write("1")
+            check_environment()
+        elif kwargs['action'] == "check":
+            check_environment()
+    except Exception as e:
+        logger.error(e)
+        return -1
+    return 0
 
 
 if __name__ == '__main__':
     parser = PrepareEnvParser()
     args = parser.parse_args(sys.argv[1:])
-    main(**args)
+    sys.exit(main(**args))
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 6a6516ab..b1ab9169 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -10,6 +10,7 @@
 
 import argparse
 import datetime
+import enum
 import importlib
 import os
 import re
@@ -35,7 +36,16 @@ logger = ft_logger.Logger("run_tests").getLogger()
 EXEC_SCRIPT = ("%s/functest/ci/exec_test.sh" % CONST.dir_repo_functest)
 
 # This will be the return code of this script. If any of the tests fails,
-# this variable will change to -1
+# this variable will change to Result.EX_ERROR
+
+
+class Result(enum.Enum):
+    EX_OK = os.EX_OK
+    EX_ERROR = -1
+
+
+class BlockingTestFailed(Exception):
+    pass
 
 
 class RunTestsParser():
@@ -60,7 +70,7 @@ class RunTestsParser():
 
 class GlobalVariables:
     EXECUTED_TEST_CASES = []
-    OVERALL_RESULT = 0
+    OVERALL_RESULT = Result.EX_OK
     CLEAN_FLAG = True
     REPORT_FLAG = False
 
@@ -75,8 +85,7 @@ def print_separator(str, count=45):
 def source_rc_file():
     rc_file = CONST.openstack_creds
     if not os.path.isfile(rc_file):
-        logger.error("RC file %s does not exist..." % rc_file)
-        sys.exit(1)
+        raise Exception("RC file %s does not exist..." % rc_file)
     logger.debug("Sourcing the OpenStack RC file...")
     os_utils.source_credentials(rc_file)
     for key, value in os.environ.iteritems():
@@ -179,19 +188,16 @@ def run_test(test, tier_name, testcases=None):
 
     if result != 0:
         logger.error("The test case '%s' failed. " % test_name)
-        GlobalVariables.OVERALL_RESULT = -1
+        GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
         result_str = "FAIL"
 
         if test.is_blocking():
             if not testcases or testcases == "all":
-                logger.info("This test case is blocking. Aborting overall "
-                            "execution.")
                 # if it is a single test we don't print the whole results table
                 update_test_info(test_name, result_str, duration_str)
                 generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
-                logger.info("Execution exit value: %s" %
-                            GlobalVariables.OVERALL_RESULT)
-                sys.exit(GlobalVariables.OVERALL_RESULT)
+            raise BlockingTestFailed("The test case {} failed and is blocking"
+                                     .format(test.get_name()))
 
     update_test_info(test_name, result_str, duration_str)
 
@@ -246,33 +252,37 @@ def main(**kwargs):
     if kwargs['report']:
         GlobalVariables.REPORT_FLAG = True
 
-    if kwargs['test']:
-        source_rc_file()
-        if _tiers.get_tier(kwargs['test']):
-            run_tier(_tiers.get_tier(kwargs['test']))
-
-        elif _tiers.get_test(kwargs['test']):
-            run_test(_tiers.get_test(kwargs['test']),
-                     _tiers.get_tier(kwargs['test']),
-                     kwargs['test'])
-
-        elif kwargs['test'] == "all":
-            run_all(_tiers)
-
+    try:
+        if kwargs['test']:
+            source_rc_file()
+            if _tiers.get_tier(kwargs['test']):
+                GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
+                    [_tiers.get_tier(kwargs['test'])])
+                run_tier(_tiers.get_tier(kwargs['test']))
+            elif _tiers.get_test(kwargs['test']):
+                run_test(_tiers.get_test(kwargs['test']),
+                         _tiers.get_tier(kwargs['test']),
+                         kwargs['test'])
+            elif kwargs['test'] == "all":
+                run_all(_tiers)
+            else:
+                logger.error("Unknown test case or tier '%s', "
+                             "or not supported by "
+                             "the given scenario '%s'."
+                             % (kwargs['test'], CI_SCENARIO))
+                logger.debug("Available tiers are:\n\n%s"
+                             % _tiers)
+                return Result.EX_ERROR
         else:
-            logger.error("Unknown test case or tier '%s', or not supported by "
-                         "the given scenario '%s'."
-                         % (kwargs['test'], CI_SCENARIO))
-            logger.debug("Available tiers are:\n\n%s"
-                         % _tiers)
-    else:
-        run_all(_tiers)
-
+            run_all(_tiers)
+    except Exception as e:
+        logger.error(e)
+        GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
     logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
-    sys.exit(GlobalVariables.OVERALL_RESULT)
+    return GlobalVariables.OVERALL_RESULT
 
 
 if __name__ == '__main__':
     parser = RunTestsParser()
     args = parser.parse_args(sys.argv[1:])
-    main(**args)
+    sys.exit(main(**args).value)
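Both files follow the same refactoring: helpers raise exceptions instead of calling sys.exit() deep inside library code, main() catches them and maps the outcome to a return value (an enum.Enum in run_tests.py), and only the "if __name__ == '__main__'" block converts that value into a process exit code. Below is a minimal, self-contained sketch of that pattern using only standard-library modules; the names prepare and Result here are illustrative placeholders, not functest APIs.

import enum
import logging
import os
import sys

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("sketch")


class Result(enum.Enum):
    EX_OK = os.EX_OK    # 0 on POSIX systems
    EX_ERROR = -1


def prepare(action):
    # Helpers signal failure by raising instead of calling sys.exit(),
    # so they stay importable and unit-testable.
    if action not in ("start", "check"):
        raise Exception("Unknown action: %s" % action)
    logger.info("Running action '%s'", action)


def main(action):
    # A single top-level handler turns any failure into a log line plus
    # an error enum, mirroring the patched main() functions above.
    try:
        prepare(action)
    except Exception as e:
        logger.error(e)
        return Result.EX_ERROR
    return Result.EX_OK


if __name__ == '__main__':
    # Only the entry point terminates the process, using the enum's value.
    sys.exit(main(sys.argv[1] if len(sys.argv) > 1 else "check").value)

Keeping sys.exit() out of the helpers means a caller (for example a CI wrapper that imports main) can inspect the returned Result instead of having the interpreter killed from inside a library call.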