Diffstat (limited to 'testcases/VIM/OpenStack/CI')
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py  | 133
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally.py       |  23
2 files changed, 86 insertions(+), 70 deletions(-)
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index 293ef18e5..e1870046c 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -79,7 +79,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - "
ch.setFormatter(formatter)
logger.addHandler(ch)
-REPO_PATH=os.environ['repos_dir']+'/functest/'
+REPO_PATH = os.environ['repos_dir']+'/functest/'
if not os.path.exists(REPO_PATH):
logger.error("Functest repository directory not found '%s'" % REPO_PATH)
exit(-1)
@@ -91,8 +91,8 @@ with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
f.close()
HOME = os.environ['HOME']+"/"
-####todo:
-#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
+### todo:
+# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
# get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
@@ -126,14 +126,15 @@ CINDER_VOLUME_TYPE_NAME = "volume_test"
SUMMARY = []
-def push_results_to_db(payload):
+
+def push_results_to_db(case, payload):
url = TEST_DB + "/results"
installer = functest_utils.get_installer_type(logger)
scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
# TODO pod_name hardcoded, info shall come from Jenkins
- params = {"project_name": "functest", "case_name": "Rally",
+ params = {"project_name": "functest", "case_name": case,
"pod_name": pod_name, "installer": installer,
"version": scenario, "details": payload}
@@ -207,15 +208,13 @@ def get_output(proc, test_name):
nb_tests = 0
overall_duration = 0.0
success = 0.0
+ nb_totals = 0
- if args.verbose:
- while proc.poll() is None:
- line = proc.stdout.readline()
- print line.replace('\n', '')
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if args.verbose:
result += line
- else:
- while proc.poll() is None:
- line = proc.stdout.readline()
+ else:
if "Load duration" in line or \
"started" in line or \
"finished" in line or \
@@ -223,26 +222,41 @@ def get_output(proc, test_name):
"+-" in line or \
"|" in line:
result += line
- if "| " in line and \
- "| action" not in line and \
- "| " not in line and \
- "| total" not in line:
- nb_tests += 1
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- success += float(percentage)
-
elif "test scenario" in line:
result += "\n" + line
elif "Full duration" in line:
result += line + "\n\n"
- overall_duration += float(line.split(': ')[1])
- logger.info("\n" + result)
+
+ # parse output for summary report
+ if "| " in line and \
+ "| action" not in line and \
+ "| Starting" not in line and \
+ "| Completed" not in line and \
+ "| ITER" not in line and \
+ "| " not in line and \
+ "| total" not in line:
+ nb_tests += 1
+ elif "| total" in line:
+ percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+ success += float(percentage)
+ nb_totals += 1
+ elif "Full duration" in line:
+ overall_duration += float(line.split(': ')[1])
+
overall_duration="{:10.2f}".format(overall_duration)
- success_avg = success / nb_tests
- scenario_summary = {'test_name': test_name, 'overall_duration':overall_duration, \
- 'nb_tests': nb_tests, 'success': success_avg}
+ if nb_totals == 0:
+ success_avg = 0
+ else:
+ success_avg = "{:0.2f}".format(success / nb_totals)
+ scenario_summary = {'test_name': test_name,
+ 'overall_duration': overall_duration,
+ 'nb_tests': nb_tests,
+ 'success': success_avg}
SUMMARY.append(scenario_summary)
+
+ logger.info("\n" + result)
+
return result
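This hunk collapses the two verbosity-specific loops into a single stdout read loop. A minimal, runnable sketch of that pattern, where the command and the filter conditions are illustrative stand-ins, not functest's exact ones:

import subprocess

verbose = False  # stands in for args.verbose
proc = subprocess.Popen("rally task start --task task.yaml",
                        stdout=subprocess.PIPE, shell=True,
                        universal_newlines=True)
result = ""
while proc.poll() is None:
    line = proc.stdout.readline()
    if verbose:
        result += line                       # verbose: keep every line
    elif "|" in line or "finished" in line:
        result += line                       # quiet: keep only summary lines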
@@ -252,6 +266,7 @@ def run_task(test_name):
# :param test_name: name for the rally test
# :return: void
#
+ global SUMMARY
logger.info('Starting test scenario "{}" ...'.format(test_name))
task_file = '{}task.yaml'.format(SCENARIOS_DIR)
@@ -259,7 +274,8 @@ def run_task(test_name):
logger.error("Task file '%s' does not exist." % task_file)
exit(-1)
- test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", test_name)
+ test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
+ test_name)
if not os.path.exists(test_file_name):
logger.error("The scenario '%s' does not exist." % test_file_name)
exit(-1)
@@ -271,18 +287,19 @@ def run_task(test_name):
"--task-args \"{}\" ".format(build_task_args(test_name))
logger.debug('running command line : {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=RALLY_STDERR, shell=True)
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+ stderr=RALLY_STDERR, shell=True)
output = get_output(p, test_name)
task_id = get_task_id(output)
logger.debug('task_id : {}'.format(task_id))
if task_id is None:
- logger.error("failed to retrieve task_id")
+ logger.error("Failed to retrieve task_id.")
exit(-1)
# check for result directory and create it otherwise
if not os.path.exists(RESULTS_DIR):
- logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+ logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
os.makedirs(RESULTS_DIR)
# write html report file
@@ -309,7 +326,7 @@ def run_task(test_name):
# Push results in payload of testcase
if args.report:
logger.debug("Push result into DB")
- push_results_to_db(json_data)
+ push_results_to_db("Rally_details", json_data)
""" parse JSON operation result """
if task_succeed(json_results):
@@ -317,10 +334,6 @@ def run_task(test_name):
else:
logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
-def push_results_to_db(payload):
- # TODO
- pass
-
def main():
global SUMMARY
@@ -331,17 +344,17 @@ def main():
SUMMARY = []
creds_nova = functest_utils.get_credentials("nova")
- nova_client = novaclient.Client('2',**creds_nova)
+ nova_client = novaclient.Client('2', **creds_nova)
creds_neutron = functest_utils.get_credentials("neutron")
neutron_client = neutronclient.Client(**creds_neutron)
creds_keystone = functest_utils.get_credentials("keystone")
keystone_client = keystoneclient.Client(**creds_keystone)
glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
- endpoint_type='publicURL')
+ endpoint_type='publicURL')
glance_client = glanceclient.Client(1, glance_endpoint,
token=keystone_client.auth_token)
creds_cinder = functest_utils.get_credentials("cinder")
- cinder_client = cinderclient.Client('2',creds_cinder['username'],
+ cinder_client = cinderclient.Client('2', creds_cinder['username'],
creds_cinder['api_key'],
creds_cinder['project_id'],
creds_cinder['auth_url'],
@@ -349,9 +362,10 @@ def main():
client_dict['neutron'] = neutron_client
- volume_types = functest_utils.list_volume_types(cinder_client, private=False)
+ volume_types = functest_utils.list_volume_types(cinder_client,
+ private=False)
if not volume_types:
- volume_type = functest_utils.create_volume_type(cinder_client, \
+ volume_type = functest_utils.create_volume_type(cinder_client,
CINDER_VOLUME_TYPE_NAME)
if not volume_type:
logger.error("Failed to create volume type...")
@@ -365,10 +379,11 @@ def main():
image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
if image_id == '':
- logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
GLANCE_IMAGE_PATH))
- image_id = functest_utils.create_glance_image(glance_client,\
- GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+ image_id = functest_utils.create_glance_image(glance_client,
+ GLANCE_IMAGE_NAME,
+ GLANCE_IMAGE_PATH)
if not image_id:
logger.error("Failed to create the Glance image...")
exit(-1)
@@ -377,7 +392,7 @@ def main():
% (GLANCE_IMAGE_NAME, image_id))
else:
logger.debug("Using existing image '%s' with ID '%s'..." \
- % (GLANCE_IMAGE_NAME,image_id))
+ % (GLANCE_IMAGE_NAME, image_id))
if args.test_name == "all":
for test_name in tests:
@@ -385,15 +400,16 @@ def main():
test_name == 'vm'):
run_task(test_name)
else:
- print(args.test_name)
+ logger.debug("Test name: " + args.test_name)
run_task(args.test_name)
- report="\n"\
- " \n"\
- " Rally Summary Report\n"\
- "+===================+============+===============+===========+\n"\
- "| Module | Duration | nb. Test Run | Success |\n"\
- "+===================+============+===============+===========+\n"
+ report = "\n"\
+ " \n"\
+ " Rally Summary Report\n"\
+ "+===================+============+===============+===========+\n"\
+ "| Module | Duration | nb. Test Run | Success |\n"\
+ "+===================+============+===============+===========+\n"
+ payload = []
#for each scenario we draw a row for the table
total_duration = 0.0
@@ -412,13 +428,13 @@ def main():
report += ""\
"| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
"+-------------------+------------+---------------+-----------+\n"
-
-
+ payload.append({'module': name, 'duration': duration,
+ 'nb tests': nb_tests, 'success': success})
total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
total_duration_str2 = "{0:<10}".format(total_duration_str)
total_nb_tests_str = "{0:<13}".format(total_nb_tests)
- total_success = total_success / len(SUMMARY)
+ total_success = "{:0.2f}".format(total_success / len(SUMMARY))
total_success_str = "{0:<10}".format(str(total_success)+'%')
report += "+===================+============+===============+===========+\n"
report += "| TOTAL: | " + total_duration_str2 + " | " + \
@@ -426,17 +442,18 @@ def main():
report += "+===================+============+===============+===========+\n"
logger.info("\n"+report)
-
+ payload.append({'summary': {'duration': total_duration_str2,
+ 'nb tests': total_nb_tests_str,
+ 'nb success': total_success_str}})
# Generate json results for DB
#json_results = {"timestart": time_start, "duration": total_duration,
# "tests": int(total_nb_tests), "success": int(total_success)}
#logger.info("Results: "+str(json_results))
- #if args.report:
- # logger.debug("Pushing result into DB...")
- # push_results_to_db(json_results)
-
+ if args.report:
+ logger.debug("Pushing Rally summary into DB...")
+ push_results_to_db("Rally", payload)
logger.debug("Deleting image '%s' with ID '%s'..." \
% (GLANCE_IMAGE_NAME, image_id))
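The new summary logic in get_output() averages the Success column of each scenario's "| total |" row instead of reading a percentage off every table row. A self-contained sketch of that parsing, applied to hypothetical Rally table rows (column 8, after splitting on '|', is the Success percentage):

# Hypothetical Rally "total" rows; column 8 (split on '|') is Success.
sample_output = [
    "| total | 0.8 | 1.0 | 1.2 | 1.3 | 1.5 | 1.1 | 100.0% | 10 |",
    "| total | 0.5 | 0.7 | 0.9 | 1.0 | 1.2 | 0.8 | 50.0%  | 10 |",
]

success = 0.0
nb_totals = 0
for line in sample_output:
    if "| total" in line:
        percentage = ((line.split('|')[8]).strip(' ')).strip('%')
        success += float(percentage)
        nb_totals += 1

# Average over the number of "total" rows, guarding against division
# by zero as the patch does.
success_avg = "{:0.2f}".format(success / nb_totals) if nb_totals else 0
print(success_avg)  # -> 75.00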
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 3c70e3880..18f60acc1 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -70,7 +70,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - "
ch.setFormatter(formatter)
logger.addHandler(ch)
-REPO_PATH=os.environ['repos_dir']+'/functest/'
+REPO_PATH = os.environ['repos_dir']+'/functest/'
if not os.path.exists(REPO_PATH):
logger.error("Functest repository directory not found '%s'" % REPO_PATH)
exit(-1)
@@ -97,14 +97,14 @@ GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
-def push_results_to_db(payload):
+def push_results_to_db(case, payload):
url = TEST_DB + "/results"
installer = functest_utils.get_installer_type(logger)
scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
# TODO pod_name hardcoded, info shall come from Jenkins
- params = {"project_name": "functest", "case_name": "Rally",
+ params = {"project_name": "functest", "case_name": case,
"pod_name": pod_name, "installer": installer,
"version": scenario, "details": payload}
@@ -213,7 +213,7 @@ def run_task(test_name):
# Push results in payload of testcase
if args.report:
logger.debug("Push result into DB")
- push_results_to_db(json_data)
+ push_results_to_db("Rally_details", json_data)
""" parse JSON operation result """
if task_succeed(json_results):
@@ -232,22 +232,22 @@ def main():
exit(-1)
creds_nova = functest_utils.get_credentials("nova")
- nova_client = novaclient.Client('2',**creds_nova)
+ nova_client = novaclient.Client('2', **creds_nova)
creds_keystone = functest_utils.get_credentials("keystone")
keystone_client = keystoneclient.Client(**creds_keystone)
glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
- endpoint_type='publicURL')
+ endpoint_type='publicURL')
glance_client = glanceclient.Client(1, glance_endpoint,
token=keystone_client.auth_token)
-
image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
if image_id == '':
- logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
GLANCE_IMAGE_PATH))
- image_id = functest_utils.create_glance_image(glance_client,\
- GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+ image_id = functest_utils.create_glance_image(glance_client,
+ GLANCE_IMAGE_NAME,
+ GLANCE_IMAGE_PATH)
if not image_id:
logger.error("Failed to create the Glance image...")
exit(-1)
@@ -256,8 +256,7 @@ def main():
% (GLANCE_IMAGE_NAME, image_id))
else:
logger.debug("Using existing image '%s' with ID '%s'..." \
- % (GLANCE_IMAGE_NAME,image_id))
-
+ % (GLANCE_IMAGE_NAME, image_id))
if args.test_name == "all":
for test_name in tests:
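Both files now thread a case name into push_results_to_db(), so per-scenario details ("Rally_details") and the overall summary ("Rally") land as separate records. A minimal sketch of what that function plausibly does with the params dict shown in the diff, assuming the results API accepts JSON over HTTP POST (which the url = TEST_DB + "/results" line suggests); the endpoint and the pod/installer/version values are placeholders, since the patched code fetches them via functest_utils helpers:

import json
import requests

TEST_DB = "http://testresults.example.com/api"  # placeholder endpoint

def push_results_to_db(case, payload):
    # One record per case: "Rally" for the summary table,
    # "Rally_details" for each scenario's raw JSON results.
    url = TEST_DB + "/results"
    params = {"project_name": "functest", "case_name": case,
              "pod_name": "example-pod",         # placeholder values; the
              "installer": "example-installer",  # patched code gets these
              "version": "example-scenario",     # from functest_utils
              "details": payload}
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    r.raise_for_status()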