Diffstat (limited to 'testcases/VIM/OpenStack/CI/libraries')

 -rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py | 66
 -rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally.py      |  6
 -rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_tempest.py    | 20
 3 files changed, 66 insertions(+), 26 deletions(-)
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index ade4385b3..0d1992604 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -54,6 +54,9 @@ parser.add_argument("-s", "--smoke",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -208,15 +211,13 @@ def get_output(proc, test_name):
     nb_tests = 0
     overall_duration = 0.0
     success = 0.0
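+    # count of rally "total" rows seen, used below to average the success percentage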
+    nb_totals = 0
-    if args.verbose:
-        while proc.poll() is None:
-            line = proc.stdout.readline()
-            print line.replace('\n', '')
+    while proc.poll() is None:
+        line = proc.stdout.readline()
+        if args.verbose:
             result += line
-    else:
-        while proc.poll() is None:
-            line = proc.stdout.readline()
+        else:
             if "Load duration" in line or \
                "started" in line or \
                "finished" in line or \
@@ -224,28 +225,41 @@ def get_output(proc, test_name):
"+-" in line or \
"|" in line:
result += line
- if "| " in line and \
- "| action" not in line and \
- "| " not in line and \
- "| total" not in line:
- nb_tests += 1
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- success += float(percentage)
-
elif "test scenario" in line:
result += "\n" + line
elif "Full duration" in line:
result += line + "\n\n"
- overall_duration += float(line.split(': ')[1])
- logger.info("\n" + result)
- overall_duration = "{:10.2f}".format(overall_duration)
- success_avg = success / nb_tests
+
+        # parse output for summary report
+        if "| " in line and \
+           "| action" not in line and \
+           "| Starting" not in line and \
+           "| Completed" not in line and \
+           "| ITER" not in line and \
+           "|   " not in line and \
+           "| total" not in line:
+            nb_tests += 1
+        elif "| total" in line:
+            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+            success += float(percentage)
+            nb_totals += 1
+        elif "Full duration" in line:
+            overall_duration += float(line.split(': ')[1])
+
+    overall_duration = "{:10.2f}".format(overall_duration)
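+    # guard against division by zero when rally printed no "total" rows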
+    if nb_totals == 0:
+        success_avg = 0
+    else:
+        success_avg = "{:0.2f}".format(success / nb_totals)
+
     scenario_summary = {'test_name': test_name,
                         'overall_duration': overall_duration,
                         'nb_tests': nb_tests,
                         'success': success_avg}
-
     SUMMARY.append(scenario_summary)
+
+    logger.info("\n" + result)
+
     return result
@@ -255,6 +269,7 @@ def run_task(test_name):
     # :param test_name: name for the rally test
     # :return: void
     #
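+    # SUMMARY is the module-level list of per-scenario results that get_output() appends to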
+    global SUMMARY
     logger.info('Starting test scenario "{}" ...'.format(test_name))
     task_file = '{}task.yaml'.format(SCENARIOS_DIR)
@@ -282,12 +297,12 @@ def run_task(test_name):
     logger.debug('task_id : {}'.format(task_id))
     if task_id is None:
-        logger.error("failed to retrieve task_id")
+        logger.error("Failed to retrieve task_id.")
         exit(-1)
     # check for result directory and create it otherwise
     if not os.path.exists(RESULTS_DIR):
-        logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
         os.makedirs(RESULTS_DIR)
     # write html report file
@@ -388,7 +403,7 @@ def main():
                     test_name == 'vm'):
                 run_task(test_name)
     else:
-        print(args.test_name)
+        logger.debug("Test name: " + args.test_name)
         run_task(args.test_name)
     report = "\n"\
@@ -424,7 +439,7 @@ def main():
     total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
     total_duration_str2 = "{0:<10}".format(total_duration_str)
     total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-    total_success = total_success / len(SUMMARY)
+    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
     total_success_str = "{0:<10}".format(str(total_success)+'%')
     report += "+===================+============+===============+===========+\n"
     report += "| TOTAL:            | " + total_duration_str2 + " | " + \
@@ -445,6 +460,9 @@ def main():
logger.debug("Pushing Rally summary into DB...")
push_results_to_db("Rally", payload)
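+    # --noclean: leave the glance image and other created resources in place for debugging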
+    if args.noclean:
+        exit(0)
+
     logger.debug("Deleting image '%s' with ID '%s'..." \
                  % (GLANCE_IMAGE_NAME, image_id))
     if not functest_utils.delete_glance_image(nova_client, image_id):
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 18f60acc1..6b1aae2eb 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -47,6 +47,9 @@ parser.add_argument("-r", "--report",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -271,6 +274,9 @@ def main():
         print(args.test_name)
         run_task(args.test_name)
+    if args.noclean:
+        exit(0)
+
     logger.debug("Deleting image '%s' with ID '%s'..." \
                  % (GLANCE_IMAGE_NAME, image_id))
     if not functest_utils.delete_glance_image(nova_client, image_id):
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
index b8ed2716e..294669182 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
@@ -33,12 +33,21 @@ modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
""" tests configuration """
parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-m", "--mode", help="Tempest test mode [smoke, all]",
+parser.add_argument("-d", "--debug",
+ help="Debug mode",
+ action="store_true")
+parser.add_argument("-s", "--serial",
+ help="Run tests in one thread",
+ action="store_true")
+parser.add_argument("-m", "--mode",
+ help="Tempest test mode [smoke, all]",
default="smoke")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -289,12 +298,19 @@ def main():
     else:
         MODE = "--set "+args.mode
+    if args.serial:
+        MODE = "--concur 1 "+MODE
+
     if not os.path.exists(TEMPEST_RESULTS_DIR):
         os.makedirs(TEMPEST_RESULTS_DIR)
     create_tempest_resources()
     configure_tempest()
     run_tempest(MODE)
+
+    if args.noclean:
+        exit(0)
+
     free_tempest_resources()