author | Georg Kunz <georg.kunz@ericsson.com> | 2018-06-22 09:26:36 +0200
---|---|---
committer | Georg Kunz <georg.kunz@ericsson.com> | 2018-07-24 16:38:25 +0200
commit | 947f1bf0147c40971fdae36feecd477ab3caf3b8 (patch) |
tree | 5cad79bea5cb0490df591445c64017c12203ef88 |
parent | 69500f77438eb6fccbb37921e612a3179c311313 (diff) |
Making the results file a proper json file
The results.json file is currently not a properly formatted json file,
but contains one json structure per line/testcase. With this change, all
test results are collected at runtime and then written to a
properly formatted json file at the end of a test run.
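For illustration, a minimal sketch of the write-side difference; the file name and helper names below are placeholders, while in Dovetail the actual path is built from dovetail_config['result_dir'] and dovetail_config['result_file']:

```python
import json

# Old behaviour (the removed Crawler.add_result_to_file): every crawler
# appended one JSON object per line, so the file as a whole was not valid JSON.
def write_result_old(result, path='results.json'):
    with open(path, 'a') as f:
        f.write(json.dumps(result) + '\n')

# New behaviour (Report.save_json_results): results are collected in memory
# during the run and dumped once as a single JSON document at the end.
def write_results_new(report_obj, path='results.json'):
    with open(path, 'w') as f:
        f.write(json.dumps(report_obj) + '\n')
```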
Moreover, this patch introduces 'mandatory' and 'optional' sections to
the test suite definition files in order to clearly indicate whether a
test is a mandatory or an optional part of the corresponding test scope.
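For illustration only, a sketch of how such a suite definition maps onto the new is_mandatory flag; the exact layout of the 'mandatory' and 'optional' sections below is an assumption, only the marking logic mirrors the patch:

```python
# Hypothetical suite definition; the precise YAML layout is an assumption,
# not taken verbatim from the patch.
testsuite = {
    'testcases_list': {
        'mandatory': ['dovetail.tempest.compute'],
        'optional': ['dovetail.example.optional_case'],
    }
}

mandatory = testsuite['testcases_list']['mandatory']
optional = testsuite['testcases_list']['optional']

# Mirrors the logic added to Testcase.get_testcases_for_testsuite(): a test
# case is flagged as mandatory only if it is listed in the 'mandatory' section.
for name in mandatory + optional:
    is_mandatory = name in mandatory
    print('{}: mandatory={}'.format(name, is_mandatory))
```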
The web portal needs to be adapted accordingly to be able to read the
new results file.
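A sketch of the read-side change a consumer such as the web portal would make, again with a placeholder file path:

```python
import json

# Old format: one JSON object per line/testcase, so the file had to be
# parsed line by line.
def load_results_old(path):
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]

# New format: the whole file is a single, valid JSON document.
def load_results_new(path):
    with open(path) as f:
        return json.load(f)
```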
JIRA: DOVETAIL-672
Change-Id: Id0706c7e82622fc9c2c2fece26b2d6c07c1580be
Signed-off-by: Georg Kunz <georg.kunz@ericsson.com>
mode | file | lines changed
---|---|---
-rw-r--r-- | dovetail/report.py | 59
-rwxr-xr-x | dovetail/run.py | 11
-rw-r--r-- | dovetail/testcase.py | 14
-rw-r--r-- | etc/compliance/proposed_tests.yml | 2
-rw-r--r-- | setup.cfg | 2

5 files changed, 51 insertions, 37 deletions
```diff
diff --git a/dovetail/report.py b/dovetail/report.py
index 8d157559..f1f47c25 100644
--- a/dovetail/report.py
+++ b/dovetail/report.py
@@ -18,8 +18,6 @@ import datetime
 import tarfile
 import time
 
-from pbr import version
-
 import utils.dovetail_logger as dt_logger
 
 from utils.dovetail_config import DovetailConfig as dt_cfg
@@ -70,17 +68,18 @@ class Report(object):
     @classmethod
     def generate_json(cls, testcase_list, duration):
         report_obj = {}
-        report_obj['version'] = \
-            version.VersionInfo('dovetail').version_string()
+        # egeokun: using a hardcoded string instead of pbr version for
+        # versioning the result file. The version of the results.json is
+        # logically independent of the release of Dovetail.
+        report_obj['version'] = '2018.08'
         report_obj['build_tag'] = dt_cfg.dovetail_config['build_tag']
-        report_obj['upload_date'] =\
+        report_obj['test_date'] =\
            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
         report_obj['duration'] = duration
 
         report_obj['testcases_list'] = []
         if not testcase_list:
             return report_obj
-
         for testcase_name in testcase_list:
             testcase = Testcase.get(testcase_name)
             testcase_inreport = {}
@@ -89,11 +88,13 @@ class Report(object):
                 testcase_inreport['result'] = 'Undefined'
                 testcase_inreport['objective'] = ''
                 testcase_inreport['sub_testcase'] = []
+                testcase_inreport['mandatory'] = False
                 report_obj['testcases_list'].append(testcase_inreport)
                 continue
 
             testcase_inreport['result'] = testcase.passed()
             testcase_inreport['objective'] = testcase.objective()
+            testcase_inreport['mandatory'] = testcase.is_mandatory
             testcase_inreport['sub_testcase'] = []
             if testcase.sub_testcase() is not None:
                 for sub_test in testcase.sub_testcase():
@@ -108,11 +109,13 @@ class Report(object):
     @classmethod
     def generate(cls, testcase_list, duration):
         report_data = cls.generate_json(testcase_list, duration)
+        cls.save_json_results(report_data)
+
         report_txt = ''
         report_txt += '\n\nDovetail Report\n'
         report_txt += 'Version: %s\n' % report_data['version']
         report_txt += 'Build Tag: %s\n' % report_data['build_tag']
-        report_txt += 'Upload Date: %s\n' % report_data['upload_date']
+        report_txt += 'Test Date: %s\n' % report_data['test_date']
         report_txt += 'Duration: %.2f s\n\n' % report_data['duration']
 
         total_num = 0
@@ -173,6 +176,18 @@ class Report(object):
         return report_txt
 
     @classmethod
+    def save_json_results(cls, results):
+        result_file = os.path.join(dt_cfg.dovetail_config['result_dir'],
+                                   dt_cfg.dovetail_config['result_file'])
+
+        try:
+            with open(result_file, 'w') as f:
+                f.write(json.dumps(results) + '\n')
+        except Exception as e:
+            cls.logger.exception("Failed to add result to file {}, "
+                                 "exception: {}".format(result_file, e))
+
+    @classmethod
     def save_logs(cls):
         file_suffix = time.strftime('%Y%m%d_%H%M', time.localtime())
         logs_gz = "logs_{}.tar.gz".format(file_suffix)
@@ -197,14 +212,10 @@ class Report(object):
             cls.logger.error('Crawler is None: {}'.format(testcase.name()))
             return None
 
-        # if validate_testcase in cls.results[type]:
-        #     return cls.results[type][validate_testcase]
-
         result = crawler.crawl(testcase, check_results_file)
 
         if result is not None:
             cls.results[type][validate_testcase] = result
-            # testcase.script_result_acquired(True)
             cls.logger.debug(
                 'Test case: {} -> result acquired'.format(validate_testcase))
         else:
@@ -215,18 +226,7 @@ class Report(object):
 
 
 class Crawler(object):
-
-    def add_result_to_file(self, result):
-        result_file = os.path.join(dt_cfg.dovetail_config['result_dir'],
-                                   dt_cfg.dovetail_config['result_file'])
-        try:
-            with open(result_file, 'a') as f:
-                f.write(json.dumps(result) + '\n')
-            return True
-        except Exception as e:
-            self.logger.exception("Failed to add result to file {}, "
-                                  "exception: {}".format(result_file, e))
-            return False
+    pass
 
 
 class FunctestCrawler(Crawler):
@@ -268,7 +268,6 @@ class FunctestCrawler(Crawler):
                 if (testcase_name == data['case_name'] or
                         data['project_name'] == "sdnvpn") and \
                         build_tag == data['build_tag']:
-                    self.add_result_to_file(data)
                     criteria = data['criteria']
                     timestart = data['start_date']
                     timestop = data['stop_date']
@@ -296,6 +295,7 @@ class FunctestCrawler(Crawler):
                         'timestop': timestop, 'duration': duration,
                         'details': details}
 
+        testcase.set_results(json_results)
         return json_results
 
 
@@ -323,7 +323,6 @@ class YardstickCrawler(Crawler):
         with open(file_path, 'r') as f:
             for jsonfile in f:
                 data = json.loads(jsonfile)
-                self.add_result_to_file(data, testcase.name())
                 try:
                     criteria = data['result']['criteria']
                     if criteria == 'PASS':
@@ -335,13 +334,9 @@ class YardstickCrawler(Crawler):
                 except KeyError as e:
                     self.logger.exception('Pass flag not found {}'.format(e))
         json_results = {'criteria': criteria}
-        return json_results
 
-    def add_result_to_file(self, result, tc_name):
-        build_tag = '{}-{}'.format(dt_cfg.dovetail_config['build_tag'],
-                                   tc_name)
-        result['build_tag'] = build_tag
-        super(YardstickCrawler, self).add_result_to_file(result)
+        testcase.set_results(json_results)
+        return json_results
 
 
 class BottlenecksCrawler(Crawler):
@@ -377,6 +372,8 @@ class BottlenecksCrawler(Crawler):
                 except KeyError as e:
                     self.logger.exception('Pass flag not found {}'.format(e))
         json_results = {'criteria': criteria}
+
+        testcase.set_results(json_results)
         return json_results
 
 
diff --git a/dovetail/run.py b/dovetail/run.py
index 84a448f6..8d4b7f8d 100755
--- a/dovetail/run.py
+++ b/dovetail/run.py
@@ -221,11 +221,15 @@ def check_testcase_list(testcase_list, logger=None):
     return None
 
 
-# If specify 'testcase' with CLI, ignore 'testsuite' and 'testarea'
-# If not specify 'testcase', check combination of 'testsuite' and 'testarea'
 def get_testcase_list(logger=None, **kwargs):
     Testcase.load()
     testcase_list = kwargs['testcase']
+
+    # If specify 'testcase' on the CLI, ignore 'testsuite' and 'testarea'. In
+    # this case, all test cases are marked as mandatory=false in the result
+    # file because there is no testsuite to relate to.
+    # If 'testcase' is not specified on the CLI, check the combination of
+    # 'testsuite' and 'testarea'
     if testcase_list:
         return check_testcase_list(testcase_list, logger)
 
@@ -238,7 +242,8 @@ def get_testcase_list(logger=None, **kwargs):
 
     if testsuite_validation and testarea_validation:
         testsuite_yaml = load_testsuite(testsuite)
-        testcase_list = Testcase.get_testcase_list(testsuite_yaml, testarea)
+        testcase_list = Testcase.get_testcases_for_testsuite(testsuite_yaml,
+                                                             testarea)
         return check_testcase_list(testcase_list, logger)
     elif not testsuite_validation:
         logger.error('Test suite {} is not defined.'.format(testsuite))
diff --git a/dovetail/testcase.py b/dovetail/testcase.py
index ff716292..8eec9388 100644
--- a/dovetail/testcase.py
+++ b/dovetail/testcase.py
@@ -30,6 +30,8 @@ class Testcase(object):
         self.cmds = []
         self.sub_testcase_status = {}
         self.update_validate_testcase(self.validate_testcase())
+        self.is_mandatory = False
+        self.results = None
 
     @classmethod
     def create_log(cls):
@@ -101,6 +103,12 @@ class Testcase(object):
         self.testcase['passed'] = passed
         return self.testcase['passed']
 
+    def set_results(self, results):
+        self.results = results
+
+    def get_results(self):
+        return self.results
+
     def script_result_acquired(self, acquired=None):
         return self._result_acquired(self.validate_testcase(), acquired)
 
@@ -270,7 +278,7 @@ class Testcase(object):
         return False
 
     @classmethod
-    def get_testcase_list(cls, testsuite, testarea):
+    def get_testcases_for_testsuite(cls, testsuite, testarea):
         testcase_list = []
         selected_tests = []
         testcases = dt_utils.get_value_from_dict('testcases_list', testsuite)
@@ -303,6 +311,10 @@ class Testcase(object):
             for area in testarea:
                 if cls.check_testcase_area(value, area):
                     testcase_list.append(value)
+                    if value in mandatory:
+                        Testcase.testcase_list[value].is_mandatory = True
+                    else:
+                        Testcase.testcase_list[value].is_mandatory = False
                     break
         return testcase_list
 
diff --git a/etc/compliance/proposed_tests.yml b/etc/compliance/proposed_tests.yml
index e893fa19..2ded05d8 100644
--- a/etc/compliance/proposed_tests.yml
+++ b/etc/compliance/proposed_tests.yml
@@ -2,7 +2,7 @@
 proposed_tests:
   name: proposed_tests
   testcases_list:
-    # proposed test cases for 2nd release
+    # proposed test cases for 2nd release
     mandatory:
       # tempest
       - dovetail.tempest.compute
diff --git a/setup.cfg b/setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = dovetail
-version = 1.0.0
+version = 2018.08.0
 home-page = https://wiki.opnfv.org/display/dovetail
 
 [files]
```
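For reference, a rough sketch of the document written by the new generate_json()/save_json_results() pair, based on the keys visible in the hunks above; all values are placeholders and per-testcase entries may carry further keys set outside the changed lines:

```python
# Illustrative only: values are placeholders, not real results.
example_report = {
    'version': '2018.08',              # hardcoded in generate_json()
    'build_tag': 'example-build-tag',
    'test_date': '2018-08-01 00:00:00 UTC',
    'duration': 123.45,
    'testcases_list': [
        {
            'result': 'PASS',          # value returned by testcase.passed()
            'objective': 'example objective text',
            'mandatory': True,         # new flag introduced by this patch
            'sub_testcase': [],
        },
    ],
}
```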