path: root/functest/ci/run_tests.py
author    Linda Wang <wangwulin@huawei.com>    2017-12-04 08:33:59 +0000
committer Linda Wang <wangwulin@huawei.com>    2017-12-14 07:23:00 +0000
commit    3f29dd2f11e3bb847fce5ac56060758d6076e8e7 (patch)
tree      d8fce3daf5820ed65082a2451700e2cc9742c858 /functest/ci/run_tests.py
parent    3306da5522f2576f2cd8431aac7fd4f3f4b32ca3 (diff)
Improve the pylint score of functest-core
It also modifies the file list so that the ci modules are rated as well.

Change-Id: Ia1e414be5364cb3da3d54882db428024ed6bd99f
Signed-off-by: Linda Wang <wangwulin@huawei.com>
Diffstat (limited to 'functest/ci/run_tests.py')
-rw-r--r--    functest/ci/run_tests.py    108
1 file changed, 67 insertions(+), 41 deletions(-)
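Most of the pylint gain below comes from two idioms: renaming the module-level logger to LOGGER (invalid-name) and switching log calls from str.format() to lazy %-interpolation (logging-format-interpolation). A minimal before/after sketch, illustrative only and not part of the commit:

    import logging

    LOGGER = logging.getLogger('functest.ci.run_tests')  # UPPER_CASE satisfies invalid-name
    testname = 'vping_ssh'  # hypothetical test name, for illustration only

    # Flagged by pylint: the message is formatted even when the level is disabled.
    LOGGER.error("Cannot get {}'s config options".format(testname))
    # Preferred lazy form: interpolation is deferred to the logging framework.
    LOGGER.error("Cannot get %s's config options", testname)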
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index b8da3be62..481931695 100644
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -7,16 +7,22 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
+""" The entry of running tests:
+1) Parses functest/ci/testcases.yaml to check which testcase(s) to be run
+2) Execute the common operations on every testcase (run, push results to db...)
+3) Return the right status code
+"""
+
import argparse
import enum
import importlib
import logging
import logging.config
import os
-import pkg_resources
import re
import sys
import textwrap
+import pkg_resources
import prettytable
import six
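The import reshuffle above answers pylint's wrong-import-order check: standard-library modules first, then third-party packages (pkg_resources ships with setuptools, alongside prettytable and six). A minimal sketch of the expected grouping, illustrative only:

    # standard library
    import logging
    import os
    import re

    # third party
    import pkg_resources
    import prettytable
    import six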
@@ -28,26 +34,32 @@ import functest.utils.openstack_utils as os_utils
from functest.utils.constants import CONST
# __name__ cannot be used here
-logger = logging.getLogger('functest.ci.run_tests')
+LOGGER = logging.getLogger('functest.ci.run_tests')
CONFIG_FUNCTEST_PATH = pkg_resources.resource_filename(
'functest', 'ci/config_functest.yaml')
class Result(enum.Enum):
+ """The overall result in enumerated type"""
+ # pylint: disable=too-few-public-methods
EX_OK = os.EX_OK
EX_ERROR = -1
class BlockingTestFailed(Exception):
+ """Exception when the blocking test fails"""
pass
class TestNotEnabled(Exception):
+ """Exception when the test is not enabled"""
pass
class RunTestsParser(object):
+ """Parser to run tests"""
+ # pylint: disable=too-few-public-methods
def __init__(self):
self.parser = argparse.ArgumentParser()
@@ -63,11 +75,19 @@ class RunTestsParser(object):
"database (default=false).",
action="store_true")
- def parse_args(self, argv=[]):
+ def parse_args(self, argv=None):
+ """Parse arguments.
+
+ It can call sys.exit if arguments are incorrect.
+
+ Returns:
+ the arguments from cmdline
+ """
return vars(self.parser.parse_args(argv))
class Runner(object):
+ """Runner class"""
def __init__(self):
self.executed_test_cases = {}
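In the parse_args hunk above, replacing the mutable default argv=[] with argv=None clears pylint's dangerous-default-value warning, and argparse already treats None as "read sys.argv[1:]". A hedged usage sketch; the option names mirror what the full parser presumably defines outside this hunk:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--test", help="test case or tier to run")
    parser.add_argument("-n", "--noclean", action="store_true")

    # An explicit list is handy in unit tests; None falls back to sys.argv[1:].
    args = vars(parser.parse_args(["--test", "healthcheck", "--noclean"]))
    print(args["test"], args["noclean"])  # healthcheck True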
@@ -81,10 +101,12 @@ class Runner(object):
@staticmethod
def source_rc_file():
+ """Set the environmental vars from openstack.creds"""
+
rc_file = CONST.__getattribute__('openstack_creds')
if not os.path.isfile(rc_file):
raise Exception("RC file %s does not exist..." % rc_file)
- logger.debug("Sourcing the OpenStack RC file...")
+ LOGGER.debug("Sourcing the OpenStack RC file...")
os_utils.source_credentials(rc_file)
for key, value in six.iteritems(os.environ):
if re.search("OS_", key):
@@ -101,22 +123,24 @@ class Runner(object):
@staticmethod
def get_run_dict(testname):
+ """Obtain the the 'run' block of the testcase from testcases.yaml"""
try:
- dict = ft_utils.get_dict_by_test(testname)
- if not dict:
- logger.error("Cannot get {}'s config options".format(testname))
- elif 'run' in dict:
- return dict['run']
+ dic_testcase = ft_utils.get_dict_by_test(testname)
+ if not dic_testcase:
+ LOGGER.error("Cannot get %s's config options", testname)
+ elif 'run' in dic_testcase:
+ return dic_testcase['run']
return None
- except Exception:
- logger.exception("Cannot get {}'s config options".format(testname))
+ except Exception: # pylint: disable=broad-except
+ LOGGER.exception("Cannot get %s's config options", testname)
return None
def run_test(self, test):
+ """Run one test case"""
if not test.is_enabled():
raise TestNotEnabled(
"The test case {} is not enabled".format(test.get_name()))
- logger.info("Running test case '%s'...", test.get_name())
+ LOGGER.info("Running test case '%s'...", test.get_name())
result = testcase.TestCase.EX_RUN_ERROR
run_dict = self.get_run_dict(test.get_name())
if run_dict:
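For context, get_run_dict() returns the 'run' block of a testcase entry in testcases.yaml, and run_test() imports the module and class named there (hence the ImportError/AttributeError handlers in the next hunk). A sketch with hypothetical module and class names:

    import importlib

    # Hypothetical shape of Runner.get_run_dict("vping_ssh"); the real values
    # live in functest/ci/testcases.yaml.
    run_dict = {
        'module': 'functest.opnfv_tests.openstack.vping.vping_ssh',
        'class': 'VPingSSH',
    }
    module = importlib.import_module(run_dict['module'])  # may raise ImportError
    cls = getattr(module, run_dict['class'])              # may raise AttributeError
    test_case = cls()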
@@ -140,33 +164,32 @@ class Runner(object):
result = test_case.is_successful()
else:
result = testcase.TestCase.EX_OK
- logger.info("Test result:\n\n%s\n", test_case)
+ LOGGER.info("Test result:\n\n%s\n", test_case)
if self.clean_flag:
test_case.clean()
except ImportError:
- logger.exception("Cannot import module {}".format(
- run_dict['module']))
+ LOGGER.exception("Cannot import module %s", run_dict['module'])
except AttributeError:
- logger.exception("Cannot get class {}".format(
- run_dict['class']))
+ LOGGER.exception("Cannot get class %s", run_dict['class'])
else:
raise Exception("Cannot import the class for the test case.")
return result
def run_tier(self, tier):
+ """Run one tier"""
tier_name = tier.get_name()
tests = tier.get_tests()
- if tests is None or len(tests) == 0:
- logger.info("There are no supported test cases in this tier "
+ if not tests:
+ LOGGER.info("There are no supported test cases in this tier "
"for the given scenario")
self.overall_result = Result.EX_ERROR
else:
- logger.info("Running tier '%s'" % tier_name)
+ LOGGER.info("Running tier '%s'", tier_name)
for test in tests:
self.run_test(test)
test_case = self.executed_test_cases[test.get_name()]
if test_case.is_successful() != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed.", test.get_name())
+ LOGGER.error("The test case '%s' failed.", test.get_name())
if test.get_project() == "functest":
self.overall_result = Result.EX_ERROR
if test.is_blocking():
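The "if not tests:" rewrite folds the None check and the empty-list check into one truth test, which is also what pylint's len-as-condition check asks for. A minimal sketch:

    def has_tests(tests):
        # bool([]) and bool(None) are both False, so one test covers both cases.
        return bool(tests)

    assert has_tests(['vping_ssh']) is True
    assert has_tests([]) is False
    assert has_tests(None) is False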
@@ -176,13 +199,14 @@ class Runner(object):
return self.overall_result
def run_all(self):
+ """Run all available testcases"""
tiers_to_run = []
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
field_names=['tiers', 'order', 'CI Loop', 'description',
'testcases'])
for tier in self._tiers.get_tiers():
- if (len(tier.get_tests()) != 0 and
+ if (tier.get_tests() and
re.search(CONST.__getattribute__('CI_LOOP'),
tier.get_ci_loop()) is not None):
tiers_to_run.append(tier)
@@ -191,11 +215,12 @@ class Runner(object):
textwrap.fill(tier.description, width=40),
textwrap.fill(' '.join([str(x.get_name(
)) for x in tier.get_tests()]), width=40)])
- logger.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
+ LOGGER.info("TESTS TO BE EXECUTED:\n\n%s\n", msg)
for tier in tiers_to_run:
self.run_tier(tier)
def main(self, **kwargs):
+ """Entry point of class Runner"""
if 'noclean' in kwargs:
self.clean_flag = not kwargs['noclean']
if 'report' in kwargs:
@@ -203,59 +228,59 @@ class Runner(object):
try:
if 'test' in kwargs:
self.source_rc_file()
- logger.debug("Test args: %s", kwargs['test'])
+ LOGGER.debug("Test args: %s", kwargs['test'])
if self._tiers.get_tier(kwargs['test']):
self.run_tier(self._tiers.get_tier(kwargs['test']))
elif self._tiers.get_test(kwargs['test']):
result = self.run_test(
self._tiers.get_test(kwargs['test']))
if result != testcase.TestCase.EX_OK:
- logger.error("The test case '%s' failed.",
+ LOGGER.error("The test case '%s' failed.",
kwargs['test'])
self.overall_result = Result.EX_ERROR
elif kwargs['test'] == "all":
self.run_all()
else:
- logger.error("Unknown test case or tier '%s', "
- "or not supported by "
- "the given scenario '%s'."
- % (kwargs['test'],
- CONST.__getattribute__('DEPLOY_SCENARIO')))
- logger.debug("Available tiers are:\n\n%s",
+ LOGGER.error("Unknown test case or tier '%s', or not "
+ "supported by the given scenario '%s'.",
+ kwargs['test'],
+ CONST.__getattribute__('DEPLOY_SCENARIO'))
+ LOGGER.debug("Available tiers are:\n\n%s",
self._tiers)
return Result.EX_ERROR
else:
self.run_all()
except BlockingTestFailed:
pass
- except Exception:
- logger.exception("Failures when running testcase(s)")
+ except Exception: # pylint: disable=broad-except
+ LOGGER.exception("Failures when running testcase(s)")
self.overall_result = Result.EX_ERROR
if not self._tiers.get_test(kwargs['test']):
self.summary(self._tiers.get_tier(kwargs['test']))
- logger.info("Execution exit value: %s" % self.overall_result)
+ LOGGER.info("Execution exit value: %s", self.overall_result)
return self.overall_result
def summary(self, tier=None):
+ """To generate functest report showing the overall results"""
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
field_names=['env var', 'value'])
for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
'CI_LOOP']:
msg.add_row([env_var, CONST.__getattribute__(env_var)])
- logger.info("Deployment description:\n\n%s\n", msg)
+ LOGGER.info("Deployment description:\n\n%s\n", msg)
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
field_names=['test case', 'project', 'tier',
'duration', 'result'])
tiers = [tier] if tier else self._tiers.get_tiers()
- for tier in tiers:
- for test in tier.get_tests():
+ for each_tier in tiers:
+ for test in each_tier.get_tests():
try:
test_case = self.executed_test_cases[test.get_name()]
except KeyError:
msg.add_row([test.get_name(), test.get_project(),
- tier.get_name(), "00:00", "SKIP"])
+ each_tier.get_name(), "00:00", "SKIP"])
else:
result = 'PASS' if(test_case.is_successful(
) == test_case.EX_OK) else 'FAIL'
@@ -263,13 +288,14 @@ class Runner(object):
[test_case.case_name, test_case.project_name,
self._tiers.get_tier_name(test_case.case_name),
test_case.get_duration(), result])
- for test in tier.get_skipped_test():
+ for test in each_tier.get_skipped_test():
msg.add_row([test.get_name(), test.get_project(),
- tier.get_name(), "00:00", "SKIP"])
- logger.info("FUNCTEST REPORT:\n\n%s\n", msg)
+ each_tier.get_name(), "00:00", "SKIP"])
+ LOGGER.info("FUNCTEST REPORT:\n\n%s\n", msg)
def main():
+ """Entry point"""
logging.config.fileConfig(pkg_resources.resource_filename(
'functest', 'ci/logging.ini'))
logging.captureWarnings(True)
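End to end, the module wires RunTestsParser into Runner and maps the Result enum onto the process exit code. A hedged sketch of a typical invocation; the exact tail of main() and the flag names are assumptions, as they sit outside this diff:

    import sys

    from functest.ci import run_tests

    args = run_tests.RunTestsParser().parse_args(["--test", "healthcheck"])
    # Runner.main() returns Result.EX_OK (os.EX_OK) or Result.EX_ERROR (-1).
    sys.exit(run_tests.Runner().main(**args).value)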