author | Cédric Ollivier <cedric.ollivier@orange.com> | 2017-05-11 13:01:16 +0200
---|---|---
committer | Cédric Ollivier <cedric.ollivier@orange.com> | 2017-05-16 15:01:57 +0200
commit | 8cfa8d15a572cbae8bd46dae2a19f9b764684a12 (patch) |
tree | c63685f22453173856a940ba2a676868f81e6da4 /functest/ci |
parent | 5a9e8f0924daf487c872aab077400df70451813e (diff) |
Switch from generate_report to PrettyTable
run_tests.py now relies on PrettyTable, as most of the OpenStack clients do.
generate_report.py and its related unit tests are simply removed.
It also sets padding_width=5 in testcase.py to conform with
run_tests.py.
The report is now printed in every case.
Change-Id: Id9ce93f984503f25d6a2150482f397853fa3dd64
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
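
As context for the switch, here is a minimal sketch of the table style the commit standardizes on (header_style='upper' and padding_width=5, as in the new run_tests.py code). The environment values are illustrative placeholders, not data from a real deployment:

```python
# Minimal sketch of the PrettyTable style adopted by this commit.
# The values below are placeholders, not real deployment data.
import prettytable

msg = prettytable.PrettyTable(
    header_style='upper', padding_width=5,
    field_names=['env var', 'value'])
msg.add_row(['INSTALLER_TYPE', 'fuel'])                    # placeholder
msg.add_row(['DEPLOY_SCENARIO', 'os-nosdn-nofeature-ha'])  # placeholder
print(msg)
```

header_style='upper' renders the field names in capitals and padding_width=5 widens each cell, which gives the report its spaced-out look.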
Diffstat (limited to 'functest/ci')
-rw-r--r-- | functest/ci/generate_report.py | 149
-rwxr-xr-x | functest/ci/run_tests.py | 50
2 files changed, 24 insertions, 175 deletions
```diff
diff --git a/functest/ci/generate_report.py b/functest/ci/generate_report.py
deleted file mode 100644
index e400b1b6..00000000
--- a/functest/ci/generate_report.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import json
-import logging
-import re
-import urllib2
-
-import functest.utils.functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-COL_1_LEN = 25
-COL_2_LEN = 15
-COL_3_LEN = 12
-COL_4_LEN = 15
-COL_5_LEN = 75
-
-# If we run from CI (Jenkins) we will push the results to the DB
-# and then we can print the url to the specific test result
-
-
-logger = logging.getLogger(__name__)
-
-
-def init(tiers_to_run=[]):
-    test_cases_arr = []
-    for tier in tiers_to_run:
-        for test in tier.get_tests():
-            test_cases_arr.append({'test_name': test.get_name(),
-                                   'tier_name': tier.get_name(),
-                                   'result': 'Not executed',
-                                   'duration': '0',
-                                   'url': ''})
-    return test_cases_arr
-
-
-def get_results_from_db():
-    url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                               CONST.BUILD_TAG)
-    logger.debug("Query to rest api: %s" % url)
-    try:
-        data = json.load(urllib2.urlopen(url))
-        return data['results']
-    except:
-        logger.error("Cannot read content from the url: %s" % url)
-        return None
-
-
-def get_data(test, results):
-    test_result = test['result']
-    url = ''
-    for test_db in results:
-        if test['test_name'] in test_db['case_name']:
-            id = test_db['_id']
-            url = ft_utils.get_db_url() + '/' + id
-            test_result = test_db['criteria']
-
-    return {"url": url, "result": test_result}
-
-
-def print_line(w1, w2='', w3='', w4='', w5=''):
-    str = ('| ' + w1.ljust(COL_1_LEN - 1) +
-           '| ' + w2.ljust(COL_2_LEN - 1) +
-           '| ' + w3.ljust(COL_3_LEN - 1) +
-           '| ' + w4.ljust(COL_4_LEN - 1))
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += ('| ' + w5.ljust(COL_5_LEN - 1))
-    str += '|\n'
-    return str
-
-
-def print_line_no_columns(str):
-    TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN + 2
-    if CONST.__getattribute__('IS_CI_RUN'):
-        TOTAL_LEN += COL_5_LEN + 1
-    return ('| ' + str.ljust(TOTAL_LEN) + "|\n")
-
-
-def print_separator(char="=", delimiter="+"):
-    str = ("+" + char * COL_1_LEN +
-           delimiter + char * COL_2_LEN +
-           delimiter + char * COL_3_LEN +
-           delimiter + char * COL_4_LEN)
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += (delimiter + char * COL_5_LEN)
-    str += '+\n'
-    return str
-
-
-def main(args=[]):
-    executed_test_cases = args
-
-    if CONST.__getattribute__('IS_CI_RUN'):
-        results = get_results_from_db()
-        if results is not None:
-            for test in executed_test_cases:
-                data = get_data(test, results)
-                test.update({"url": data['url'],
-                             "result": data['result']})
-
-    TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN
-    if CONST.__getattribute__('IS_CI_RUN'):
-        TOTAL_LEN += COL_5_LEN
-    MID = TOTAL_LEN / 2
-
-    if CONST.__getattribute__('BUILD_TAG') is not None:
-        if re.search("daily", CONST.__getattribute__('BUILD_TAG')) is not None:
-            CONST.__setattr__('CI_LOOP', 'daily')
-        else:
-            CONST.__setattr__('CI_LOOP', 'weekly')
-
-    str = ''
-    str += print_separator('=', delimiter="=")
-    str += print_line_no_columns(' ' * (MID - 8) + 'FUNCTEST REPORT')
-    str += print_separator('=', delimiter="=")
-    str += print_line_no_columns(' ')
-    str += print_line_no_columns(" Deployment description:")
-    str += print_line_no_columns("   INSTALLER: %s"
-                                 % CONST.__getattribute__('INSTALLER_TYPE'))
-    if CONST.__getattribute__('DEPLOY_SCENARIO') is not None:
-        str += print_line_no_columns("   SCENARIO: %s"
-                                     % CONST.__getattribute__(
-                                         'DEPLOY_SCENARIO'))
-    if CONST.__getattribute__('BUILD_TAG') is not None:
-        str += print_line_no_columns("   BUILD TAG: %s"
-                                     % CONST.__getattribute__('BUILD_TAG'))
-    if CONST.__getattribute__('CI_LOOP') is not None:
-        str += print_line_no_columns("   CI LOOP: %s"
-                                     % CONST.__getattribute__('CI_LOOP'))
-    str += print_line_no_columns(' ')
-    str += print_separator('=')
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT', 'URL')
-    else:
-        str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT')
-    str += print_separator('=')
-    for test in executed_test_cases:
-        str += print_line(test['test_name'],
-                          test['tier_name'],
-                          test['duration'],
-                          test['result'],
-                          test['url'])
-        str += print_separator('-')
-
-    logger.info("\n\n\n%s" % str)
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index 7035992b..493d5f9c 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -17,7 +17,8 @@ import os
 import re
 import sys
 
-import functest.ci.generate_report as generate_report
+import prettytable
+
 import functest.ci.tier_builder as tb
 import functest.core.testcase as testcase
 import functest.utils.functest_utils as ft_utils
@@ -99,13 +100,6 @@ def cleanup():
     os_clean.main()
 
 
-def update_test_info(test_name, result, duration):
-    for test in GlobalVariables.EXECUTED_TEST_CASES:
-        if test['test_name'] == test_name:
-            test.update({"result": result,
-                         "duration": duration})
-
-
 def get_run_dict(testname):
     try:
         dict = ft_utils.get_dict_by_test(testname)
@@ -120,8 +114,6 @@ def get_run_dict(testname):
 
 
 def run_test(test, tier_name, testcases=None):
-    duration = "XX:XX"
-    result_str = "PASS"
     test_name = test.get_name()
     logger.info("\n")  # blank line
     print_separator("=")
@@ -145,6 +137,7 @@ def run_test(test, tier_name, testcases=None):
             cls = getattr(module, run_dict['class'])
             test_dict = ft_utils.get_dict_by_test(test_name)
             test_case = cls(**test_dict)
+            GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
             try:
                 kwargs = run_dict['args']
                 result = test_case.run(**kwargs)
@@ -154,8 +147,7 @@ def run_test(test, tier_name, testcases=None):
                 if GlobalVariables.REPORT_FLAG:
                     test_case.push_to_db()
                 result = test_case.is_successful()
-            duration = test_case.get_duration()
-            logger.info("\n%s\n", test_case)
+            logger.info("Test result:\n\n%s\n", test_case)
         except ImportError:
             logger.exception("Cannot import module {}".format(
                 run_dict['module']))
@@ -167,22 +159,13 @@ def run_test(test, tier_name, testcases=None):
     if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
         cleanup()
-
     if result != testcase.TestCase.EX_OK:
         logger.error("The test case '%s' failed. " % test_name)
         GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-        result_str = "FAIL"
-
         if test.is_blocking():
-            if not testcases or testcases == "all":
-                # if it is a single test we don't print the whole results table
-                update_test_info(test_name, result_str, duration)
-                generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
             raise BlockingTestFailed("The test case {} failed and is blocking"
                                      .format(test.get_name()))
-    update_test_info(test_name, result_str, duration)
-
 
 def run_tier(tier):
     tier_name = tier.get_name()
@@ -214,12 +197,9 @@ def run_all(tiers):
                    tier.get_test_names()))
     logger.info("Tests to be executed:%s" % summary)
-    GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
     for tier in tiers_to_run:
         run_tier(tier)
 
-    generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
-
 
 def main(**kwargs):
@@ -238,8 +218,6 @@ def main(**kwargs):
         if kwargs['test']:
             source_rc_file()
             if _tiers.get_tier(kwargs['test']):
-                GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
-                    [_tiers.get_tier(kwargs['test'])])
                 run_tier(_tiers.get_tier(kwargs['test']))
             elif _tiers.get_test(kwargs['test']):
                 run_test(_tiers.get_test(kwargs['test']),
@@ -261,6 +239,26 @@ def main(**kwargs):
     except Exception as e:
         logger.error(e)
         GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
+
+    msg = prettytable.PrettyTable(
+        header_style='upper', padding_width=5,
+        field_names=['env var', 'value'])
+    for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+                    'CI_LOOP']:
+        msg.add_row([env_var, CONST.__getattribute__(env_var)])
+    logger.info("Deployment description: \n\n%s\n", msg)
+
+    msg = prettytable.PrettyTable(
+        header_style='upper', padding_width=5,
+        field_names=['test case', 'project', 'tier', 'duration', 'result'])
+    for test_case in GlobalVariables.EXECUTED_TEST_CASES:
+        result = 'PASS' if(test_case.is_successful(
+            ) == test_case.EX_OK) else 'FAIL'
+        msg.add_row([test_case.case_name, test_case.project_name,
+                     _tiers.get_tier_name(test_case.case_name),
+                     test_case.get_duration(), result])
+    logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
     logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
     return GlobalVariables.OVERALL_RESULT
```