Diffstat (limited to 'testcases')
 -rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py       | 66
 -rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally.py            |  6
 -rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_tempest.py          | 20
 -rw-r--r--  testcases/config_functest.yaml                               |  4
 -rw-r--r--  testcases/tests/TestFunctestUtils.py                         |  7
 -rw-r--r--  testcases/vIMS/CI/vIMS.py                                    |  5
 -rw-r--r--  testcases/vPing/CI/libraries/{vPing2.py => vPing_ssh.py}     |  8
 -rw-r--r--  testcases/vPing/CI/libraries/{vPing.py => vPing_userdata.py} |  8
8 files changed, 92 insertions(+), 32 deletions(-)
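
In short: every CI entry script gains a -n/--noclean flag that skips the resource teardown, run_tempest.py additionally gains -s/--serial, the Rally output parsing is reworked to build a per-scenario summary, and the two vPing scripts are renamed to match their test names. A minimal sketch of the shared --noclean pattern, pieced together from the hunks below (the teardown step stands in for each script's own cleanup code):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--noclean",
                        help="Don't clean the created resources for this test.",
                        action="store_true")
    args = parser.parse_args()

    def main():
        # ... run the test scenario ...
        if args.noclean:
            exit(0)  # leave the created OpenStack resources in place
        # ... otherwise delete the created images, networks, VMs ...
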
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index ade4385b3..0d1992604 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -54,6 +54,9 @@ parser.add_argument("-s", "--smoke",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -208,15 +211,13 @@ def get_output(proc, test_name):
nb_tests = 0
overall_duration = 0.0
success = 0.0
+ nb_totals = 0
- if args.verbose:
- while proc.poll() is None:
- line = proc.stdout.readline()
- print line.replace('\n', '')
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if args.verbose:
result += line
- else:
- while proc.poll() is None:
- line = proc.stdout.readline()
+ else:
if "Load duration" in line or \
"started" in line or \
"finished" in line or \
@@ -224,28 +225,41 @@ def get_output(proc, test_name):
"+-" in line or \
"|" in line:
result += line
- if "| " in line and \
- "| action" not in line and \
- "|   " not in line and \
- "| total" not in line:
- nb_tests += 1
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- success += float(percentage)
-
elif "test scenario" in line:
result += "\n" + line
elif "Full duration" in line:
result += line + "\n\n"
- overall_duration += float(line.split(': ')[1])
- logger.info("\n" + result)
- overall_duration = "{:10.2f}".format(overall_duration)
- success_avg = success / nb_tests
+
+ # parse output for summary report
+ if "| " in line and \
+ "| action" not in line and \
+ "| Starting" not in line and \
+ "| Completed" not in line and \
+ "| ITER" not in line and \
+ "|   " not in line and \
+ "| total" not in line:
+ nb_tests += 1
+ elif "| total" in line:
+ percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+ success += float(percentage)
+ nb_totals += 1
+ elif "Full duration" in line:
+ overall_duration += float(line.split(': ')[1])
+
+ overall_duration = "{:10.2f}".format(overall_duration)
+ if nb_totals == 0:
+ success_avg = 0
+ else:
+ success_avg = "{:0.2f}".format(success / nb_totals)
+
scenario_summary = {'test_name': test_name,
'overall_duration': overall_duration,
'nb_tests': nb_tests,
'success': success_avg}
-
SUMMARY.append(scenario_summary)
+
+ logger.info("\n" + result)
+
return result
@@ -255,6 +269,7 @@ def run_task(test_name):
# :param test_name: name for the rally test
# :return: void
#
+ global SUMMARY
logger.info('Starting test scenario "{}" ...'.format(test_name))
task_file = '{}task.yaml'.format(SCENARIOS_DIR)
@@ -282,12 +297,12 @@ def run_task(test_name):
logger.debug('task_id : {}'.format(task_id))
if task_id is None:
- logger.error("failed to retrieve task_id")
+ logger.error("Failed to retrieve task_id.")
exit(-1)
# check for result directory and create it otherwise
if not os.path.exists(RESULTS_DIR):
- logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+ logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
os.makedirs(RESULTS_DIR)
# write html report file
@@ -388,7 +403,7 @@ def main():
test_name == 'vm'):
run_task(test_name)
else:
- print(args.test_name)
+ logger.debug("Test name: " + args.test_name)
run_task(args.test_name)
report = "\n"\
@@ -424,7 +439,7 @@ def main():
total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
total_duration_str2 = "{0:<10}".format(total_duration_str)
total_nb_tests_str = "{0:<13}".format(total_nb_tests)
- total_success = total_success / len(SUMMARY)
+ total_success = "{:0.2f}".format(total_success / len(SUMMARY))
total_success_str = "{0:<10}".format(str(total_success)+'%')
report += "+===================+============+===============+===========+\n"
report += "| TOTAL: | " + total_duration_str2 + " | " + \
@@ -445,6 +460,9 @@ def main():
logger.debug("Pushing Rally summary into DB...")
push_results_to_db("Rally", payload)
+ if args.noclean:
+ exit(0)
+
logger.debug("Deleting image '%s' with ID '%s'..." \
% (GLANCE_IMAGE_NAME, image_id))
if not functest_utils.delete_glance_image(nova_client, image_id):
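
The reworked get_output() now counts one test per data row of Rally's ASCII result table and averages the success percentage over the "| total |" rows, guarding against division by zero via nb_totals. A minimal sketch of the field extraction, using a hypothetical Rally table row (the real column layout comes from Rally's output):

    # hypothetical "total" row of a Rally results table; the success
    # percentage is the 9th pipe-separated field (index 8)
    line = "| total | 0.5 | 0.6 | 0.8 | 0.9 | 1.2 | 0.7 | 100.0% | 10 |"

    percentage = ((line.split('|')[8]).strip(' ')).strip('%')
    assert percentage == "100.0"

    # averaged over all scenarios once the read loop is done
    success, nb_totals = float(percentage), 1
    success_avg = "{:0.2f}".format(success / nb_totals) if nb_totals else 0
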
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 18f60acc1..6b1aae2eb 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -47,6 +47,9 @@ parser.add_argument("-r", "--report",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -271,6 +274,9 @@ def main():
print(args.test_name)
run_task(args.test_name)
+ if args.noclean:
+ exit(0)
+
logger.debug("Deleting image '%s' with ID '%s'..." \
% (GLANCE_IMAGE_NAME, image_id))
if not functest_utils.delete_glance_image(nova_client, image_id):
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
index b8ed2716e..294669182 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
@@ -33,12 +33,21 @@ modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
""" tests configuration """
parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-m", "--mode", help="Tempest test mode [smoke, all]",
+parser.add_argument("-d", "--debug",
+ help="Debug mode",
+ action="store_true")
+parser.add_argument("-s", "--serial",
+ help="Run tests in one thread",
+ action="store_true")
+parser.add_argument("-m", "--mode",
+ help="Tempest test mode [smoke, all]",
default="smoke")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -289,12 +298,19 @@ def main():
else:
MODE = "--set "+args.mode
+ if args.serial:
+ MODE = "--concur 1 "+MODE
+
if not os.path.exists(TEMPEST_RESULTS_DIR):
os.makedirs(TEMPEST_RESULTS_DIR)
create_tempest_resources()
configure_tempest()
run_tempest(MODE)
+
+ if args.noclean:
+ exit(0)
+
free_tempest_resources()
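
The new -s/--serial switch simply prepends "--concur 1" to the option string handed to the Tempest runner, limiting the run to a single worker thread; a minimal sketch of the string assembly (the parsed values are hypothetical, run_tempest(MODE) itself is untouched):

    mode = "smoke"    # hypothetical parsed value of --mode
    serial = True     # hypothetical parsed value of --serial

    MODE = "--set " + mode
    if serial:
        MODE = "--concur 1 " + MODE  # run tests in one thread
    assert MODE == "--concur 1 --set smoke"
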
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index 2f034f940..7d5f21360 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -166,7 +166,7 @@ results:
# the execution order is important as some tests may be more destructive than others
# and if vPing fails, it is usually not worth continuing...
test_exec_priority:
- 1: vping
+ 1: vping_ssh
2: vping_userdata
3: tempest
4: odl
@@ -231,7 +231,7 @@ test-dependencies:
functest:
vims:
scenario: '(ocl)|(odl)|(nosdn)'
- vping:
+ vping_ssh:
vping_userdata:
scenario: '(ocl)|(odl)|(nosdn)'
tempest:
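
After the rename, anything reading config_functest.yaml sees vping_ssh at priority 1 and vping_userdata at priority 2. A minimal sketch of consuming the priority map, assuming PyYAML and that test_exec_priority resolves to a plain rank-to-name mapping (the surrounding nesting follows the file, which is only partially shown here):

    import yaml

    with open("testcases/config_functest.yaml") as f:
        functest_yaml = yaml.safe_load(f)

    # hypothetical lookup; adjust to the file's actual nesting
    priority = functest_yaml["test_exec_priority"]
    for rank in sorted(priority):
        print(rank, priority[rank])  # 1 vping_ssh, 2 vping_userdata, 3 tempest, ...
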
diff --git a/testcases/tests/TestFunctestUtils.py b/testcases/tests/TestFunctestUtils.py
index 17bc958e3..fd83ed6f5 100644
--- a/testcases/tests/TestFunctestUtils.py
+++ b/testcases/tests/TestFunctestUtils.py
@@ -65,7 +65,10 @@ class TestFunctestUtils(unittest.TestCase):
test = isTestRunnable('functest/odl', functest_yaml)
self.assertTrue(test)
- test = isTestRunnable('functest/vping', functest_yaml)
+ test = isTestRunnable('functest/vping_ssh', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/vping_userdata', functest_yaml)
self.assertTrue(test)
test = isTestRunnable('functest/tempest', functest_yaml)
@@ -82,7 +85,7 @@ class TestFunctestUtils(unittest.TestCase):
test = generateTestcaseList(functest_yaml)
- expected_list = "vping tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
+ expected_list = "vping_ssh vping_userdata tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
self.assertEqual(test, expected_list)
def tearDown(self):
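
The adjusted expectations can be verified by running the suite with the standard library runner; a minimal usage sketch, assuming the test module guards its own entry point and is invoked from the testcases/tests directory:

    # equivalent to: python -m unittest TestFunctestUtils
    import unittest
    from TestFunctestUtils import TestFunctestUtils

    if __name__ == "__main__":
        unittest.main()
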
diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py
index a8ac97f5c..c50334936 100644
--- a/testcases/vIMS/CI/vIMS.py
+++ b/testcases/vIMS/CI/vIMS.py
@@ -40,6 +40,9 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
""" logging configuration """
@@ -461,6 +464,8 @@ def main():
cfy.undeploy_manager()
############### GENERAL CLEANUP ################
+ if args.noclean:
+ exit(0)
ks_creds = functest_utils.get_credentials("keystone")
diff --git a/testcases/vPing/CI/libraries/vPing2.py b/testcases/vPing/CI/libraries/vPing_ssh.py
index 1ce6dc9e5..d8b50f7e9 100644
--- a/testcases/vPing/CI/libraries/vPing2.py
+++ b/testcases/vPing/CI/libraries/vPing_ssh.py
@@ -37,12 +37,15 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
""" logging configuration """
-logger = logging.getLogger('vPing')
+logger = logging.getLogger('vPing_ssh')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
@@ -198,6 +201,9 @@ def create_private_neutron_net(neutron):
def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2, secgroup_id):
+ if args.noclean:
+ logger.debug("The OpenStack resources are not deleted.")
+ return True
# delete both VMs
logger.info("Cleaning up...")
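
Both renamed vPing scripts share the same two changes: a script-specific logger name so interleaved CI logs stay distinguishable, and a cleanup() that returns early under --noclean. A minimal sketch combining the hunks above (handler wiring as in the script; args is the namespace parsed at the top of the file):

    import logging

    logger = logging.getLogger('vPing_ssh')  # 'vPing_userdata' in the twin script
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())

    def cleanup(nova, neutron, image_id, network_dic,
                port_id1, port_id2, secgroup_id):
        if args.noclean:
            logger.debug("The OpenStack resources are not deleted.")
            return True
        logger.info("Cleaning up...")
        # ... delete both VMs, ports, network, security group, image ...
        return True
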
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing_userdata.py
index 1368bbec1..c81a1fddb 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing_userdata.py
@@ -35,12 +35,15 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
""" logging configuration """
-logger = logging.getLogger('vPing')
+logger = logging.getLogger('vPing_userdata')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
@@ -192,6 +195,9 @@ def create_private_neutron_net(neutron):
def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2):
+ if args.noclean:
+ logger.debug("The OpenStack resources are not deleted.")
+ return True
# delete both VMs
logger.info("Cleaning up...")