-rw-r--r--  dovetail/report.py       |  35
-rwxr-xr-x  dovetail/run.py          | 108
-rw-r--r--  dovetail/test_runner.py  |   3
-rw-r--r--  etc/conf/cmd_config.yml  |  13
4 files changed, 86 insertions(+), 73 deletions(-)
diff --git a/dovetail/report.py b/dovetail/report.py
index 835fed3f..20475a3b 100644
--- a/dovetail/report.py
+++ b/dovetail/report.py
@@ -42,20 +42,19 @@ class Report(object):
checker.check(testcase, db_result)
@classmethod
- def generate_json(cls, testsuite_yaml, testarea, duration):
+ def generate_json(cls, testcase_list, duration):
report_obj = {}
report_obj['version'] = \
version.VersionInfo('dovetail').version_string()
- report_obj['testsuite'] = testsuite_yaml['name']
- # TO DO: once dashboard url settled, adjust accordingly
- report_obj['dashboard'] = None
report_obj['build_tag'] = dt_cfg.dovetail_config['build_tag']
report_obj['upload_date'] =\
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
report_obj['duration'] = duration
report_obj['testcases_list'] = []
- testcase_list = Testcase.get_testcase_list(testsuite_yaml, testarea)
+ if not testcase_list:
+ return report_obj
+
for testcase_name in testcase_list:
testcase = Testcase.get(testcase_name)
testcase_inreport = {}
@@ -81,19 +80,14 @@ class Report(object):
return report_obj
@classmethod
- def generate(cls, testsuite_yaml, testarea, duration):
- report_data = cls.generate_json(testsuite_yaml, testarea, duration)
+ def generate(cls, testcase_list, duration):
+ report_data = cls.generate_json(testcase_list, duration)
report_txt = ''
report_txt += '\n\nDovetail Report\n'
report_txt += 'Version: %s\n' % report_data['version']
- report_txt += 'TestSuite: %s\n' % report_data['testsuite']
- report_txt += 'Result Dashboard: %s\n' % report_data['dashboard']
report_txt += 'Build Tag: %s\n' % report_data['build_tag']
report_txt += 'Upload Date: %s\n' % report_data['upload_date']
- if report_data['duration'] == 0:
- report_txt += 'Duration: %s\n\n' % 'N/A'
- else:
- report_txt += 'Duration: %.2f s\n\n' % report_data['duration']
+ report_txt += 'Duration: %.2f s\n\n' % report_data['duration']
total_num = 0
pass_num = 0
@@ -135,21 +129,13 @@ class Report(object):
pass_rate = pass_num / total_num
report_txt += 'Pass Rate: %.2f%% (%s/%s)\n' %\
(pass_rate * 100, pass_num, total_num)
- report_txt += 'Assessed test areas:\n'
else:
report_txt += \
- 'no testcase or all testcases are skipped in this testsuite'
+ 'no testcase or all testcases are skipped in this testsuite\n'
for key in sub_report:
if testcase_num[key] != 0:
pass_rate = testcase_passnum[key] / testcase_num[key]
- report_txt += '-%-25s pass %.2f%%\n' %\
- (key + ' results:', pass_rate * 100)
- elif key in testarea_scope:
- report_txt += '-%-25s all skipped\n' % key
- for key in sub_report:
- if testcase_num[key] != 0:
- pass_rate = testcase_passnum[key] / testcase_num[key]
report_txt += '%-25s pass rate %.2f%%\n' %\
(key + ':', pass_rate * 100)
report_txt += sub_report[key]
@@ -158,7 +144,6 @@ class Report(object):
report_txt += sub_report[key]
cls.logger.info(report_txt)
- # cls.save(report_txt)
return report_txt
@classmethod
@@ -292,7 +277,6 @@ class FunctestCrawler(Crawler):
'timestop': timestop, 'duration': duration,
'details': details}
- self.logger.debug('Results: {}'.format(str(json_results)))
return json_results
@@ -334,7 +318,6 @@ class YardstickCrawler(Crawler):
except KeyError as e:
self.logger.exception('Pass flag not found {}'.format(e))
json_results = {'criteria': criteria}
- self.logger.debug('Results: {}'.format(str(json_results)))
return json_results
def add_result_to_file(self, result, tc_name):
@@ -379,7 +362,6 @@ class BottlenecksCrawler(Crawler):
except KeyError as e:
self.logger.exception('Pass flag not found {}'.format(e))
json_results = {'criteria': criteria}
- self.logger.debug('Results: {}'.format(str(json_results)))
return json_results
@@ -436,7 +418,6 @@ class VnftestCrawler(Crawler):
except KeyError as e:
self.logger.exception('Pass flag not found {}'.format(e))
json_results = {'criteria': criteria}
- self.logger.debug('Results: {}'.format(str(json_results)))
return json_results
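
Note: after this change generate_json() no longer emits the 'testsuite' and 'dashboard' keys. For illustration only, a report built by the new code would be shaped roughly like this (the field values below are hypothetical placeholders, not taken from a real run):

    report_obj = {
        'version': '...',                          # version.VersionInfo('dovetail').version_string()
        'build_tag': 'daily-master-<uuid>',        # dt_cfg.dovetail_config['build_tag']
        'upload_date': '2018-01-01 00:00:00 UTC',  # UTC timestamp, "%Y-%m-%d %H:%M:%S UTC"
        'duration': 123.45,                        # seconds, as returned by run_test()
        'testcases_list': [],                      # stays empty when testcase_list is empty or None
    }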
diff --git a/dovetail/run.py b/dovetail/run.py
index 21e7b4c2..512565c3 100755
--- a/dovetail/run.py
+++ b/dovetail/run.py
@@ -38,21 +38,16 @@ def load_testsuite(testsuite):
return Testsuite.get(testsuite)
-def load_testcase():
- Testcase.load()
-
-
-def run_test(testsuite, testarea, logger, kwargs):
- testcase_list = Testcase.get_testcase_list(testsuite, testarea)
+def run_test(testcase_list, logger):
duration = 0
+ if not testcase_list:
+ logger.warning("No test case will be executed.")
+ return duration
+
start_time = time.time()
for testcase_name in testcase_list:
logger.info('>>[testcase]: {}'.format(testcase_name))
testcase = Testcase.get(testcase_name)
- if testcase is None:
- logger.error('Test case {} is not defined in testcase folder, '
- 'skipping.'.format(testcase_name))
- continue
run_testcase = True
# if testcase.exceed_max_retry_times():
@@ -67,7 +62,7 @@ def run_test(testsuite, testarea, logger, kwargs):
stop_on_fail = check_tc_result(testcase, logger)
try:
if (not stop_on_fail or stop_on_fail['criteria'] == "FAIL") \
- and kwargs['stop']:
+ and dt_cfg.dovetail_config['stop']:
return "stop_on_fail"
except KeyError as e:
logger.error("There is no key {}.".format(e))
@@ -235,6 +230,57 @@ def check_hosts_file(logger):
"domain name resolution.".format(hosts_file))
+def parse_cli(logger=None, **kwargs):
+ validate_input(kwargs, dt_cfg.dovetail_config['validate_input'], logger)
+ configs = filter_config(kwargs, logger)
+ if configs is not None:
+ dt_cfg.update_config(configs)
+ dt_cfg.dovetail_config['offline'] = True if kwargs['offline'] else False
+ dt_cfg.dovetail_config['noclean'] = True if kwargs['no_clean'] else False
+ dt_cfg.dovetail_config['stop'] = True if kwargs['stop'] else False
+ if kwargs['no_api_validation']:
+ dt_cfg.dovetail_config['no_api_validation'] = True
+ logger.warning('Strict API response validation DISABLED.')
+ else:
+ dt_cfg.dovetail_config['no_api_validation'] = False
+
+
+def check_testcase_list(testcase_list, logger=None):
+ if testcase_list:
+ for tc in testcase_list:
+ if tc not in Testcase.testcase_list:
+ logger.error('Test case {} is not defined.'.format(tc))
+ return None
+ return testcase_list
+ return None
+
+
+# If 'testcase' is specified on the CLI, ignore 'testsuite' and 'testarea'.
+# Otherwise, check the combination of 'testsuite' and 'testarea'.
+def get_testcase_list(logger=None, **kwargs):
+ Testcase.load()
+ testcase_list = kwargs['testcase']
+ if testcase_list:
+ return check_testcase_list(testcase_list, logger)
+
+ testsuite_validation = False
+ testsuite = kwargs['testsuite']
+ if testsuite in dt_cfg.dovetail_config['testsuite_supported']:
+ testsuite_validation = True
+ origin_testarea = kwargs['testarea']
+ testarea_validation, testarea = Testcase.check_testarea(origin_testarea)
+
+ if testsuite_validation and testarea_validation:
+ testsuite_yaml = load_testsuite(testsuite)
+ testcase_list = Testcase.get_testcase_list(testsuite_yaml, testarea)
+ return check_testcase_list(testcase_list, logger)
+ elif not testsuite_validation:
+ logger.error('Test suite {} is not defined.'.format(testsuite))
+ else:
+ logger.error('Test area {} is not defined.'.format(origin_testarea))
+ return None
+
+
def main(*args, **kwargs):
"""Dovetail compliance test entry!"""
build_tag = "daily-master-%s" % str(uuid.uuid1())
@@ -251,47 +297,21 @@ def main(*args, **kwargs):
logger.info('Dovetail compliance: {}!'.format(kwargs['testsuite']))
logger.info('================================================')
logger.info('Build tag: {}'.format(dt_cfg.dovetail_config['build_tag']))
+ parse_cli(logger, **kwargs)
env_init(logger)
copy_userconfig_files(logger)
copy_patch_files(logger)
dt_utils.check_docker_version(logger)
dt_utils.get_openstack_endpoint(logger)
- validate_input(kwargs, dt_cfg.dovetail_config['validate_input'], logger)
check_hosts_file(logger)
- configs = filter_config(kwargs, logger)
-
- if configs is not None:
- dt_cfg.update_config(configs)
-
- if kwargs['offline']:
- dt_cfg.dovetail_config['offline'] = True
- else:
- dt_cfg.dovetail_config['offline'] = False
-
- if kwargs['no_api_validation']:
- dt_cfg.dovetail_config['no_api_validation'] = True
- logger.warning('Strict API response validation DISABLED.')
- else:
- dt_cfg.dovetail_config['no_api_validation'] = False
-
dt_utils.get_hardware_info(logger)
- origin_testarea = kwargs['testarea']
- testsuite_validation = False
- if kwargs['testsuite'] in dt_cfg.dovetail_config['testsuite_supported']:
- testsuite_validation = True
- testarea_validation, testarea = Testcase.check_testarea(origin_testarea)
- if testsuite_validation and testarea_validation:
- testsuite_yaml = load_testsuite(kwargs['testsuite'])
- load_testcase()
- duration = run_test(testsuite_yaml, testarea, logger, kwargs)
- if (duration != "stop_on_fail"):
- Report.generate(testsuite_yaml, testarea, duration)
- if (kwargs['report']):
- Report.save_logs()
- else:
- logger.error('Invalid input commands, testsuite {} testarea {}'
- .format(kwargs['testsuite'], origin_testarea))
+ testcase_list = get_testcase_list(logger, **kwargs)
+ duration = run_test(testcase_list, logger)
+ if (duration != "stop_on_fail"):
+ Report.generate(testcase_list, duration)
+ if (kwargs['report']):
+ Report.save_logs()
dt_cfg.load_config_files(constants.CONF_PATH)
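
Note: the refactored run.py moves CLI handling out of main() into parse_cli() and get_testcase_list(). A minimal sketch of how the new helpers compose (function names come from the hunks above; the kwargs values are hypothetical placeholders):

    # '--testcase' wins: 'testsuite' and 'testarea' are ignored and the names are
    # only validated against Testcase.testcase_list via check_testcase_list().
    kwargs = {'testcase': ('dovetail.example.tc001',),   # hypothetical test case name
              'testsuite': 'some_suite', 'testarea': ('full',)}
    testcase_list = get_testcase_list(logger, **kwargs)

    # Without '--testcase', the suite/area pair is validated and expanded via
    # Testcase.get_testcase_list(testsuite_yaml, testarea).
    kwargs = {'testcase': (), 'testsuite': 'some_suite', 'testarea': ('full',)}
    testcase_list = get_testcase_list(logger, **kwargs)

    # run_test() now takes the resolved list; it warns and returns 0 when the list is empty.
    duration = run_test(testcase_list, logger)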
diff --git a/dovetail/test_runner.py b/dovetail/test_runner.py
index f43b4739..7370c81b 100644
--- a/dovetail/test_runner.py
+++ b/dovetail/test_runner.py
@@ -113,7 +113,8 @@ class DockerRunner(object):
ret, msg = Container.exec_cmd(container_id, cmd)
self.testcase.cleaned(True)
- Container.clean(container_id, self.type)
+ if not dt_cfg.dovetail_config['noclean']:
+ Container.clean(container_id, self.type)
def save_logs(self):
pass
diff --git a/etc/conf/cmd_config.yml b/etc/conf/cmd_config.yml
index 52192acd..ae097185 100644
--- a/etc/conf/cmd_config.yml
+++ b/etc/conf/cmd_config.yml
@@ -53,7 +53,12 @@ cli:
flags:
- '--testarea'
multiple: 'True'
- help: 'compliance testarea within testsuite'
+ help: 'Compliance testarea within testsuite. Specify the option multiple times to include multiple test areas.'
+ testcase:
+ flags:
+ - '--testcase'
+ multiple: 'True'
+ help: 'Compliance testcase. Specify the option multiple times to include multiple test cases.'
debug:
flags:
- '--debug'
@@ -82,3 +87,9 @@ cli:
- '--no-api-validation'
is_flag: 'True'
help: 'disable strict API response validation'
+ noclean:
+ flags:
+ - '--no-clean'
+ - '-n'
+ is_flag: 'True'
+ help: 'Keep all containers created for debugging.'
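
Note: with the two options added above, an invocation along the lines of 'dovetail run --testcase <name> --testcase <another-name> --no-clean' (a hypothetical example; the exact entry point and test case names depend on the deployment) would run only the named test cases and, per the test_runner.py change, keep their containers afterwards for debugging. Giving '--testcase' at least once causes '--testsuite' and '--testarea' to be ignored, as implemented in get_testcase_list() above.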