Diffstat (limited to 'dovetail')
-rw-r--r--   dovetail/compliance/debug.yml        |  6
-rw-r--r--   dovetail/conf/dovetail_config.yml    | 20
-rw-r--r--   dovetail/conf/functest_config.yml    |  9
-rw-r--r--   dovetail/report.py                   | 81
-rw-r--r--   dovetail/testcase.py                 | 30
-rw-r--r--   dovetail/testcase/example.tc001.yml  |  2
-rw-r--r--   dovetail/testcase/ipv6.tc001.yml     |  2
-rw-r--r--   dovetail/testcase/nfvi.tc101.yml     |  8
-rw-r--r--   dovetail/testcase/nfvi.tc102.yml     |  8
-rw-r--r--   dovetail/utils/dovetail_utils.py     | 27
10 files changed, 130 insertions, 63 deletions
diff --git a/dovetail/compliance/debug.yml b/dovetail/compliance/debug.yml
index 6fc4f0f1..87003c47 100644
--- a/dovetail/compliance/debug.yml
+++ b/dovetail/compliance/debug.yml
@@ -6,6 +6,10 @@ debug:
   name: debug
   testcases_list:
     - dovetail.example.tc002
-    - dovetail.ipv6.tc001
+    - dovetail.ipv6.tc008
+    - dovetail.ipv6.tc009
+    - dovetail.ipv6.tc018
+    - dovetail.ipv6.tc019
     - dovetail.nfvi.tc001
     - dovetail.nfvi.tc002
+    - dovetail.nfvi.tc101
diff --git a/dovetail/conf/dovetail_config.yml b/dovetail/conf/dovetail_config.yml
index b6f7b016..f8f18e46 100644
--- a/dovetail/conf/dovetail_config.yml
+++ b/dovetail/conf/dovetail_config.yml
@@ -25,6 +25,20 @@ testarea_supported:
   - ipv6
   - example
 
+functest_testsuite:
+  - tempest_smoke_serial
+  - tempest_full_parallel
+  - rally_sanity
+  - promise
+
+functest_testcase:
+  - healthcheck
+  - vping_ssh
+  - vping_userdata
+  - doctor
+  - copper
+  - cloudify_ims
+
 # used for testcase cmd template in jinja2 format
 # we have two variables available now
 # parameter path, use this path to walk through python object and get value
@@ -53,3 +67,9 @@ validate_input:
   valid_docker_tag:
     - 'stable'
     - 'latest'
+    - 'colorado.1.0'
+    - 'colorado.2.0'
+    - 'colorado.3.0'
+    - 'danube.1.0'
+    - 'danube.2.0'
+    - 'danube.3.0'
diff --git a/dovetail/conf/functest_config.yml b/dovetail/conf/functest_config.yml
index f20d1b7e..2c702cdb 100644
--- a/dovetail/conf/functest_config.yml
+++ b/dovetail/conf/functest_config.yml
@@ -4,18 +4,21 @@ functest:
   docker_tag: latest
   envs: '-e INSTALLER_TYPE=compass -e INSTALLER_IP=192.168.200.2
         -e NODE_NAME=dovetail-pod -e DEPLOY_SCENARIO=ha_nosdn
-        -e BUILD_TAG=dovetail -e CI_DEBUG=true -e DEPLOY_TYPE=baremetal'
+        -e BUILD_TAG=dovetail -e CI_DEBUG=true
+        -e DEPLOY_TYPE=baremetal
+        -e RESULTS_STORE=file:///home/opnfv/functest/results/functest_result.json'
   opts: '-id --privileged=true'
   pre_condition:
     - 'echo test for precondition in functest'
   cmds:
     - 'functest env prepare'
-    - 'functest testcase run {{validate_testcase}}'
+    - 'functest testcase run {{validate_testcase}} -r'
   post_condition:
     - 'echo test for postcondition in functest'
   result:
     dir: '/home/opnfv/functest/results'
     store_type: 'file'
-    file_path: 'tempest/tempest.log'
+    file_path: 'functest_result.json'
+    tp_path: 'tempest/tempest.log'
     db_url: 'http://testresults.opnfv.org/test/api/v1/results?case=%s&last=1'
   creds: '/home/opnfv/functest/conf/openstack.creds'
diff --git a/dovetail/report.py b/dovetail/report.py
index 11e3c244..b7b27930 100644
--- a/dovetail/report.py
+++ b/dovetail/report.py
@@ -214,23 +214,44 @@ class FunctestCrawler(object):
 
     def crawl_from_file(self, testcase=None):
         dovetail_config = dt_cfg.dovetail_config
-        file_path = \
-            os.path.join(dovetail_config['result_dir'],
-                         dovetail_config[self.type]['result']['file_path'])
-        if not os.path.exists(file_path):
-            self.logger.info('result file not found: %s', file_path)
-            return None
-
-        try:
+        criteria = 'FAIL'
+        timestart = 0
+        testcase_duration = 0
+        testcase_name = testcase.validate_testcase()
+        json_results = {}
+        if testcase_name in dt_cfg.dovetail_config['functest_testcase']:
+            file_path = \
+                os.path.join(dovetail_config['result_dir'],
+                             dovetail_config[self.type]['result']['file_path'])
+            if not os.path.exists(file_path):
+                self.logger.info('result file not found: %s', file_path)
+                return None
+            with open(file_path, 'r') as f:
+                for jsonfile in f:
+                    data = json.loads(jsonfile)
+                    if testcase_name == data['case_name']:
+                        criteria = data['details']['status']
+                        timestart = data['details']['timestart']
+                        testcase_duration = data['details']['duration']
+
+            json_results = {'criteria': criteria,
+                            'details': {"timestart": timestart,
+                                        "duration": testcase_duration,
+                                        "tests": '', "failures": ''}}
+        elif 'tempest' in testcase_name:
+            file_path = \
+                os.path.join(dovetail_config['result_dir'],
+                             dovetail_config[self.type]['result']['tp_path'])
+            if not os.path.exists(file_path):
+                self.logger.info('result file not found: %s', file_path)
+                return None
             with open(file_path, 'r') as myfile:
                 output = myfile.read()
-                error_logs = ""
-                for match in re.findall('(.*?)[. ]*FAILED', output):
-                    error_logs += match
+            error_logs = " ".join(re.findall('(.*?)[. ]*fail ', output))
+            skipped = " ".join(re.findall('(.*?)[. ]*skip:', output))
 
-            criteria = 'PASS'
-            failed_num = int(re.findall(' - Failed: (\d*)', output)[0])
+            failed_num = int(re.findall(' - Failures: (\d*)', output)[0])
             if failed_num != 0:
                 criteria = 'FAIL'
@@ -239,13 +260,11 @@ class FunctestCrawler(object):
             json_results = {'criteria': criteria,
                             'details': {"timestart": '', "duration": int(dur_sec_int),
                                         "tests": int(num_tests), "failures": failed_num,
-                                        "errors": error_logs}}
-            self.logger.debug('Results: %s', str(json_results))
-            return json_results
-        except Exception as e:
-            self.logger.error('Cannot read content from the file: %s, '
-                              'exception: %s', file_path, e)
-            return None
+                                        "errors": error_logs,
+                                        "skipped": skipped}}
+
+        self.logger.debug('Results: %s', str(json_results))
+        return json_results
 
     def crawl_from_url(self, testcase=None):
         url = \
@@ -289,17 +308,15 @@ class YardstickCrawler(object):
         if not os.path.exists(file_path):
            self.logger.info('result file not found: %s', file_path)
            return None
-        try:
-            with open(file_path, 'r') as myfile:
-                myfile.read()
-            criteria = 'PASS'
-            json_results = {'criteria': criteria}
-            self.logger.debug('Results: %s', str(json_results))
-            return json_results
-        except Exception as e:
-            self.logger.error('Cannot read content from the file: %s, '
-                              'exception: %s', file_path, e)
-            return None
+        criteria = 'FAIL'
+        with open(file_path, 'r') as f:
+            for jsonfile in f:
+                data = json.loads(jsonfile)
+                if 1 == data['status']:
+                    criteria = 'PASS'
+        json_results = {'criteria': criteria}
+        self.logger.debug('Results: %s', str(json_results))
+        return json_results
 
     def crawl_from_url(self, testcase=None):
         return None
@@ -378,6 +395,8 @@ class FunctestChecker(object):
         all_passed = True
         for sub_testcase in sub_testcase_list:
             self.logger.debug('check sub_testcase:%s', sub_testcase)
+            # TO DO: should think the test case when skipped, should think
+            # together with the "dovetail report"
             if sub_testcase in db_result['details']['errors']:
                 testcase.sub_testcase_passed(sub_testcase, False)
                 all_passed = False
diff --git a/dovetail/testcase.py b/dovetail/testcase.py
index 5ca23c4b..4ad2b361 100644
--- a/dovetail/testcase.py
+++ b/dovetail/testcase.py
@@ -105,24 +105,28 @@ class Testcase(object):
     def pre_condition(self):
         try:
             pre_condition = self.testcase['validate']['pre_condition']
-            if pre_condition == '':
-                pre_condition = self.pre_condition_cls(self.validate_type())
+        except KeyError:
+            pre_condition = ''
+        if pre_condition:
             return pre_condition
-        except:
+        pre_condition = self.pre_condition_cls(self.validate_type())
+        if not pre_condition:
             self.logger.debug('testcase:%s pre_condition is empty',
                               self.name())
-            return ''
+        return pre_condition
 
     def post_condition(self):
         try:
             post_condition = self.testcase['validate']['post_condition']
-            if post_condition == '':
-                post_condition = self.post_condition_cls(self.validate_type())
+        except KeyError:
+            post_condition = ''
+        if post_condition:
             return post_condition
-        except:
+        post_condition = self.post_condition_cls(self.validate_type())
+        if not post_condition:
             self.logger.debug('testcae:%s post_condition is empty',
                               self.name())
-            return ''
+        return post_condition
 
     def run(self):
         runner = TestRunnerFactory.create(self)
@@ -151,11 +155,17 @@ class Testcase(object):
 
     @staticmethod
     def pre_condition_cls(validate_type):
-        return dt_cfg.dovetail_config[validate_type]['pre_condition']
+        try:
+            return dt_cfg.dovetail_config[validate_type]['pre_condition']
+        except KeyError:
+            return None
 
     @staticmethod
     def post_condition_cls(validate_type):
-        return dt_cfg.dovetail_config[validate_type]['post_condition']
+        try:
+            return dt_cfg.dovetail_config[validate_type]['post_condition']
+        except KeyError:
+            return None
 
     @classmethod
     def update_validate_testcase(cls, testcase_name):
diff --git a/dovetail/testcase/example.tc001.yml b/dovetail/testcase/example.tc001.yml
index e389a00f..0ba297a8 100644
--- a/dovetail/testcase/example.tc001.yml
+++ b/dovetail/testcase/example.tc001.yml
@@ -9,7 +9,7 @@ dovetail.example.tc001:
       - 'echo test for precondition'
     cmds:
       - 'functest env prepare'
-      - 'functest testcase run {{validate_testcase}}'
+      - 'functest testcase run {{validate_testcase}} -r'
     post_condition:
       - 'echo test for precondition'
   report:
diff --git a/dovetail/testcase/ipv6.tc001.yml b/dovetail/testcase/ipv6.tc001.yml
index f9edf069..598e1cad 100644
--- a/dovetail/testcase/ipv6.tc001.yml
+++ b/dovetail/testcase/ipv6.tc001.yml
@@ -9,7 +9,7 @@ dovetail.ipv6.tc001:
      - 'echo test for precondition in testcase'
    cmds:
      - 'functest env prepare'
-      - 'functest testcase run {{validate_testcase}}'
+      - 'functest testcase run {{validate_testcase}} -r'
    post_condition:
      - 'echo test for precondition in testcase'
  report:
diff --git a/dovetail/testcase/nfvi.tc101.yml b/dovetail/testcase/nfvi.tc101.yml
new file mode 100644
index 00000000..7c8fb3ec
--- /dev/null
+++ b/dovetail/testcase/nfvi.tc101.yml
@@ -0,0 +1,8 @@
+dovetail.nfvi.tc101:
+  name: dovetail.nfvi.tc101
+  objective: measure number of cores and threads, available memory size and cache size
+  validate:
+    type: yardstick
+    testcase: opnfv_yardstick_tc001
+  report:
+    sub_testcase_list:
diff --git a/dovetail/testcase/nfvi.tc102.yml b/dovetail/testcase/nfvi.tc102.yml
new file mode 100644
index 00000000..7ce0435e
--- /dev/null
+++ b/dovetail/testcase/nfvi.tc102.yml
@@ -0,0 +1,8 @@
+dovetail.nfvi.tc102:
+  name: dovetail.nfvi.tc102
+  objective: measure number of cores and threads, available memory size and cache size
+  validate:
+    type: yardstick
+    testcase: opnfv_yardstick_tc002
+  report:
+    sub_testcase_list:
diff --git a/dovetail/utils/dovetail_utils.py b/dovetail/utils/dovetail_utils.py
index 960801a8..a54081f5 100644
--- a/dovetail/utils/dovetail_utils.py
+++ b/dovetail/utils/dovetail_utils.py
@@ -10,7 +10,6 @@
 #
 
 import sys
-import time
 import subprocess
 
 from collections import Mapping, Set, Sequence
@@ -43,25 +42,21 @@ def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
 
     exec_log(verbose, logger, msg_exec, level)
     p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
-    seconds = 0
-    while p.poll() is None:
-        seconds += 1
-        if seconds > 3:
-            show_progress_bar(seconds)
-        time.sleep(1)
-
-    (stdout, stderr) = p.communicate()
-    if p.returncode == 0:
-        for line in stdout.strip().splitlines():
-            exec_log(verbose, logger, line, level, True)
-    else:
-        exec_log(verbose, logger, stderr, 'error')
+                         stderr=subprocess.STDOUT)
+    stdout = ''
+    for line in iter(p.stdout.readline, b''):
+        exec_log(verbose, logger, line.strip(), level, True)
+        stdout += line
+    stdout = stdout.strip()
+    returncode = p.wait()
+    p.stdout.close()
+
+    if returncode != 0:
         exec_log(verbose, logger, msg_err, 'error')
         if exit_on_error:
             sys.exit(1)
 
-    return p.returncode, stdout.strip()
+    return returncode, stdout
 
 
 # walkthrough the object, yield path and value
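Note on the dovetail_utils.py change: exec_cmd() now merges stderr into stdout and streams the child's output line by line instead of polling with a progress bar. Below is a minimal standalone sketch of that pattern; the helper name run_and_stream is illustrative, not part of dovetail, and universal_newlines=True is an assumption so the sketch runs on both Python 2 and 3, whereas the patch itself reads byte strings and uses b'' as the readline sentinel.

import subprocess
import sys


def run_and_stream(cmd):
    """Run a shell command, echo its output as it appears, return (rc, output)."""
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, universal_newlines=True)
    captured = []
    # readline() returns '' only when the child closes stdout, so each line is
    # forwarded as soon as it is written instead of waiting for the process to exit.
    for line in iter(p.stdout.readline, ''):
        sys.stdout.write(line)
        captured.append(line)
    p.stdout.close()
    returncode = p.wait()
    return returncode, ''.join(captured).strip()


if __name__ == '__main__':
    rc, out = run_and_stream('echo hello; echo world')
    print('rc=%d, %d chars captured' % (rc, len(out)))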
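Note on the report.py change: with RESULTS_STORE pointing at functest_result.json, functest writes one JSON record per line, and the new FunctestCrawler.crawl_from_file() scans those lines for the record whose case_name matches the test case. An illustrative sketch of that lookup follows; the sample records and the lookup_result helper are made up for demonstration, and only the case_name / details keys come from the patch.

import json

# Two made-up records in the one-JSON-object-per-line layout of the result file.
SAMPLE = (
    '{"case_name": "vping_ssh", "details": {"status": "PASS", "timestart": 0, "duration": 42}}\n'
    '{"case_name": "doctor", "details": {"status": "FAIL", "timestart": 0, "duration": 7}}\n'
)


def lookup_result(text, case_name):
    """Return (criteria, timestart, duration) for case_name, defaulting to FAIL."""
    criteria, timestart, duration = 'FAIL', 0, 0
    for line in text.splitlines():
        data = json.loads(line)  # each line is an independent JSON object
        if data['case_name'] == case_name:
            criteria = data['details']['status']
            timestart = data['details']['timestart']
            duration = data['details']['duration']
    return criteria, timestart, duration


print(lookup_result(SAMPLE, 'vping_ssh'))   # ('PASS', 0, 42)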