Diffstat (limited to 'functest/ci/run_tests.py')
-rwxr-xr-x  functest/ci/run_tests.py | 104
1 file changed, 54 insertions(+), 50 deletions(-)
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 37b90f92..76760096 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -9,26 +9,26 @@
#
import argparse
-import datetime
import enum
import importlib
+import logging
+import logging.config
import os
import re
import sys
-import functest.ci.generate_report as generate_report
+import prettytable
+
import functest.ci.tier_builder as tb
import functest.core.testcase as testcase
-import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_clean as os_clean
import functest.utils.openstack_snapshot as os_snapshot
import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
-
-""" logging configuration """
-logger = ft_logger.Logger("run_tests").getLogger()
+# __name__ cannot be used here
+logger = logging.getLogger('functest.ci.run_tests')
class Result(enum.Enum):
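The hard-coded logger name above is deliberate: executed as a script, run_tests.py sees __name__ == '__main__', which would place its logger outside the 'functest' hierarchy that logging.config.fileConfig() configures at the bottom of this file. A standalone, stdlib-only sketch of the difference:

    import logging

    # Simulate what the functest logging config does for the hierarchy root.
    logging.getLogger('functest').setLevel(logging.DEBUG)

    script_logger = logging.getLogger('__main__')                # getLogger(__name__) in a script
    package_logger = logging.getLogger('functest.ci.run_tests')  # the hard-coded name

    print(script_logger.getEffectiveLevel())   # 30: falls back to the root logger (WARNING)
    print(package_logger.getEffectiveLevel())  # 10: inherited from 'functest' (DEBUG)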
@@ -40,6 +40,10 @@ class BlockingTestFailed(Exception):
pass
+class TestNotEnabled(Exception):
+    pass
+
+
class RunTestsParser(object):
def __init__(self):
@@ -75,7 +79,7 @@ def print_separator(str, count=45):
def source_rc_file():
- rc_file = CONST.openstack_creds
+ rc_file = CONST.__getattribute__('openstack_creds')
if not os.path.isfile(rc_file):
raise Exception("RC file %s does not exist..." % rc_file)
logger.debug("Sourcing the OpenStack RC file...")
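At runtime CONST.__getattribute__('openstack_creds') is exactly CONST.openstack_creds; the explicit form presumably exists because the constants object acquires its attributes dynamically, which static checkers cannot see. A sketch of the equivalence, with an assumed stand-in for the Constants class:

    class Constants(object):  # assumed stand-in for functest.utils.constants
        def __init__(self):
            # attributes attached dynamically, invisible to static analysis
            setattr(self, 'openstack_creds', '/path/to/openstack.creds')

    CONST = Constants()
    assert CONST.openstack_creds == getattr(CONST, 'openstack_creds')
    assert CONST.openstack_creds == CONST.__getattribute__('openstack_creds')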
@@ -83,13 +87,13 @@ def source_rc_file():
for key, value in os.environ.iteritems():
if re.search("OS_", key):
if key == 'OS_AUTH_URL':
- CONST.OS_AUTH_URL = value
+ CONST.__setattr__('OS_AUTH_URL', value)
elif key == 'OS_USERNAME':
- CONST.OS_USERNAME = value
+ CONST.__setattr__('OS_USERNAME', value)
elif key == 'OS_TENANT_NAME':
- CONST.OS_TENANT_NAME = value
+ CONST.__setattr__('OS_TENANT_NAME', value)
elif key == 'OS_PASSWORD':
- CONST.OS_PASSWORD = value
+ CONST.__setattr__('OS_PASSWORD', value)
def generate_os_snapshot():
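Two details in the loop above are easy to miss: re.search("OS_", key) matches the substring anywhere in the variable name, not only as a prefix, and the if/elif ladder copies a fixed set of four keys one by one. An equivalent table-driven sketch (illustrative only, not what the patch does):

    import os

    # The four credentials the ladder above copies onto CONST.
    OS_KEYS = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_TENANT_NAME', 'OS_PASSWORD')

    def copy_os_credentials(const):
        for key in OS_KEYS:
            value = os.environ.get(key)
            if value is not None:
                setattr(const, key, value)  # same as CONST.__setattr__(key, value)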
@@ -100,13 +104,6 @@ def cleanup():
os_clean.main()
-def update_test_info(test_name, result, duration):
- for test in GlobalVariables.EXECUTED_TEST_CASES:
- if test['test_name'] == test_name:
- test.update({"result": result,
- "duration": duration})
-
-
def get_run_dict(testname):
try:
dict = ft_utils.get_dict_by_test(testname)
@@ -121,8 +118,9 @@ def get_run_dict(testname):
def run_test(test, tier_name, testcases=None):
- result_str = "PASS"
- start = datetime.datetime.now()
+ if not test.is_enabled():
+ raise TestNotEnabled("The test case {} is not enabled"
+ .format(test.get_name()))
test_name = test.get_name()
logger.info("\n") # blank line
print_separator("=")
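With the datetime bookkeeping removed (duration is now read back from the TestCase instance in the final report), run_test starts by rejecting disabled cases outright. A self-contained sketch of the guard, using an assumed stand-in for a tier entry:

    class TestNotEnabled(Exception):
        pass

    class TierTest(object):  # assumed stand-in for a tier_builder test entry
        def __init__(self, name, enabled=True):
            self._name, self._enabled = name, enabled

        def get_name(self):
            return self._name

        def is_enabled(self):
            return self._enabled

    def run_test(test):
        if not test.is_enabled():
            raise TestNotEnabled("The test case {} is not enabled"
                                 .format(test.get_name()))
        # ... import, instantiate and run the case, as in the hunks below

    run_test(TierTest('connection_check'))            # proceeds
    # run_test(TierTest('vping_ssh', enabled=False))  # raises TestNotEnabled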
@@ -144,8 +142,9 @@ def run_test(test, tier_name, testcases=None):
try:
module = importlib.import_module(run_dict['module'])
cls = getattr(module, run_dict['class'])
- test_case = cls()
-
+ test_dict = ft_utils.get_dict_by_test(test_name)
+ test_case = cls(**test_dict)
+ GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
try:
kwargs = run_dict['args']
result = test_case.run(**kwargs)
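The class is no longer instantiated bare: the whole testcases.yaml entry feeds the constructor, so metadata such as case_name and project_name ends up on the instance, where the report in main() later reads it back. A minimal sketch, with assumed dict keys shaped after a typical entry:

    class TestCase(object):  # assumed stand-in for functest.core.testcase.TestCase
        def __init__(self, **kwargs):
            self.case_name = kwargs.get('case_name')
            self.project_name = kwargs.get('project_name', 'functest')

    test_dict = {'case_name': 'vping_ssh', 'project_name': 'functest'}
    test_case = TestCase(**test_dict)
    assert (test_case.case_name, test_case.project_name) == ('vping_ssh', 'functest')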
@@ -154,7 +153,8 @@ def run_test(test, tier_name, testcases=None):
if result == testcase.TestCase.EX_OK:
if GlobalVariables.REPORT_FLAG:
test_case.push_to_db()
- result = test_case.check_criteria()
+ result = test_case.is_successful()
+ logger.info("Test result:\n\n%s\n", test_case)
except ImportError:
logger.exception("Cannot import module {}".format(
run_dict['module']))
@@ -166,27 +166,13 @@ def run_test(test, tier_name, testcases=None):
if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
cleanup()
-
- end = datetime.datetime.now()
- duration = (end - start).seconds
- duration_str = ("%02d:%02d" % divmod(duration, 60))
- logger.info("Test execution time: %s" % duration_str)
-
- if result != 0:
+ if result != testcase.TestCase.EX_OK:
logger.error("The test case '%s' failed. " % test_name)
GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
- result_str = "FAIL"
-
if test.is_blocking():
- if not testcases or testcases == "all":
- # if it is a single test we don't print the whole results table
- update_test_info(test_name, result_str, duration_str)
- generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
raise BlockingTestFailed("The test case {} failed and is blocking"
.format(test.get_name()))
- update_test_info(test_name, result_str, duration_str)
-
def run_tier(tier):
tier_name = tier.get_name()
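A failing blocking test still aborts the run, but the report plumbing around it is gone: the exception simply propagates up to main(), whose except clause turns it into Result.EX_ERROR. A condensed sketch of that control flow:

    class BlockingTestFailed(Exception):
        pass

    def run_test(test_name, passed, blocking):
        if not passed and blocking:
            raise BlockingTestFailed("The test case {} failed and is blocking"
                                     .format(test_name))

    try:
        run_test('vping_ssh', passed=False, blocking=True)
    except Exception as e:  # as in main() below
        print(e)            # real code logs it and sets Result.EX_ERROR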
@@ -210,27 +196,24 @@ def run_all(tiers):
for tier in tiers.get_tiers():
if (len(tier.get_tests()) != 0 and
- re.search(CONST.CI_LOOP, tier.get_ci_loop()) is not None):
+ re.search(CONST.__getattribute__('CI_LOOP'),
+ tier.get_ci_loop()) is not None):
tiers_to_run.append(tier)
summary += ("\n - %s:\n\t %s"
% (tier.get_name(),
tier.get_test_names()))
logger.info("Tests to be executed:%s" % summary)
- GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
for tier in tiers_to_run:
run_tier(tier)
- generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
-
def main(**kwargs):
- CI_INSTALLER_TYPE = CONST.INSTALLER_TYPE
- CI_SCENARIO = CONST.DEPLOY_SCENARIO
-
file = CONST.functest_testcases_yaml
- _tiers = tb.TierBuilder(CI_INSTALLER_TYPE, CI_SCENARIO, file)
+ _tiers = tb.TierBuilder(CONST.__getattribute__('INSTALLER_TYPE'),
+ CONST.__getattribute__('DEPLOY_SCENARIO'),
+ file)
if kwargs['noclean']:
GlobalVariables.CLEAN_FLAG = False
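The tier filter in run_all() above works by searching the current loop name inside each tier's ci_loop field, i.e. CONST.CI_LOOP is the regex pattern and tier.get_ci_loop() the searched string; with a field like '(daily)|(weekly)', a 'daily' run therefore matches. A quick sketch:

    import re

    def tier_selected(ci_loop, tier_ci_loop):
        # mirrors: re.search(CONST.__getattribute__('CI_LOOP'), tier.get_ci_loop())
        return re.search(ci_loop, tier_ci_loop) is not None

    assert tier_selected('daily', '(daily)|(weekly)')
    assert not tier_selected('weekly', 'daily')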
@@ -242,12 +225,10 @@ def main(**kwargs):
if kwargs['test']:
source_rc_file()
if _tiers.get_tier(kwargs['test']):
- GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
- [_tiers.get_tier(kwargs['test'])])
run_tier(_tiers.get_tier(kwargs['test']))
elif _tiers.get_test(kwargs['test']):
run_test(_tiers.get_test(kwargs['test']),
- _tiers.get_tier(kwargs['test']),
+ _tiers.get_tier_name(kwargs['test']),
kwargs['test'])
elif kwargs['test'] == "all":
run_all(_tiers)
@@ -255,7 +236,8 @@ def main(**kwargs):
logger.error("Unknown test case or tier '%s', "
"or not supported by "
"the given scenario '%s'."
- % (kwargs['test'], CI_SCENARIO))
+ % (kwargs['test'],
+ CONST.__getattribute__('DEPLOY_SCENARIO')))
logger.debug("Available tiers are:\n\n%s"
% _tiers)
return Result.EX_ERROR
@@ -264,11 +246,33 @@ def main(**kwargs):
except Exception as e:
logger.error(e)
GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+ 'CI_LOOP']:
+ msg.add_row([env_var, CONST.__getattribute__(env_var)])
+ logger.info("Deployment description: \n\n%s\n", msg)
+
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['test case', 'project', 'tier', 'duration', 'result'])
+ for test_case in GlobalVariables.EXECUTED_TEST_CASES:
+ result = 'PASS' if(test_case.is_successful(
+ ) == test_case.EX_OK) else 'FAIL'
+ msg.add_row([test_case.case_name, test_case.project_name,
+ _tiers.get_tier_name(test_case.case_name),
+ test_case.get_duration(), result])
+ logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
return GlobalVariables.OVERALL_RESULT
if __name__ == '__main__':
+ logging.config.fileConfig(
+ CONST.__getattribute__('dir_functest_logging_cfg'))
parser = RunTestsParser()
args = parser.parse_args(sys.argv[1:])
sys.exit(main(**args).value)
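Two notes on the new tail of the file. First, the summary tables in main() rely on the third-party prettytable package imported at the top of the patch; a minimal standalone sketch of the calls used, with made-up row data:

    import prettytable  # pip install prettytable

    msg = prettytable.PrettyTable(
        header_style='upper', padding_width=5,
        field_names=['test case', 'project', 'tier', 'duration', 'result'])
    msg.add_row(['connection_check', 'functest', 'healthcheck', '00:02', 'PASS'])
    print(msg)

Second, the entry point now configures stdlib logging from the ini file referenced by CONST.dir_functest_logging_cfg. The sketch below feeds fileConfig() an inline, purely illustrative equivalent (the real file ships with functest and is more elaborate):

    import io
    import logging
    import logging.config
    import textwrap

    LOGGING_INI = textwrap.dedent(u"""
        [loggers]
        keys = root,functest

        [handlers]
        keys = console

        [formatters]
        keys = standard

        [logger_root]
        level = WARNING
        handlers = console

        [logger_functest]
        level = DEBUG
        handlers = console
        qualname = functest
        propagate = 0

        [handler_console]
        class = StreamHandler
        level = INFO
        formatter = standard
        args = (sys.stdout,)

        [formatter_standard]
        format = %(asctime)s %(name)s %(levelname)s: %(message)s
        """)

    logging.config.fileConfig(io.StringIO(LOGGING_INI))
    logging.getLogger('functest.ci.run_tests').info("logging configured")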