Diffstat (limited to 'testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py')
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py  189
1 file changed, 148 insertions(+), 41 deletions(-)
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index 0fb6ce7d4..0d1992604 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -13,16 +13,17 @@
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
-
-import re
-import json
-import os
import argparse
+import json
import logging
-import yaml
+import os
+import re
import requests
import subprocess
import sys
+import time
+import yaml
+
from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
@@ -53,6 +54,9 @@ parser.add_argument("-s", "--smoke",
parser.add_argument("-v", "--verbose",
help="Print verbose info about the progress",
action="store_true")
+parser.add_argument("-n", "--noclean",
+ help="Don't clean the created resources for this test.",
+ action="store_true")
args = parser.parse_args()
@@ -78,7 +82,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - "
ch.setFormatter(formatter)
logger.addHandler(ch)
-REPO_PATH=os.environ['repos_dir']+'/functest/'
+REPO_PATH = os.environ['repos_dir']+'/functest/'
if not os.path.exists(REPO_PATH):
logger.error("Functest repository directory not found '%s'" % REPO_PATH)
exit(-1)
@@ -90,8 +94,8 @@ with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
f.close()
HOME = os.environ['HOME']+"/"
-####todo:
-#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
+### todo:
+# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
# get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
@@ -108,8 +112,6 @@ CONCURRENCY = 4
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
-FLOATING_NETWORK = functest_yaml.get("general"). \
- get("openstack").get("neutron_public_net_name")
PRIVATE_NETWORK = functest_yaml.get("general"). \
get("openstack").get("neutron_private_net_name")
@@ -125,14 +127,17 @@ GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
CINDER_VOLUME_TYPE_NAME = "volume_test"
-def push_results_to_db(payload):
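+# per-scenario results collected by get_output() and reported at the end of main()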
+SUMMARY = []
+
+
+def push_results_to_db(case, payload):
url = TEST_DB + "/results"
installer = functest_utils.get_installer_type(logger)
scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
# TODO pod_name hardcoded, info shall come from Jenkins
- params = {"project_name": "functest", "case_name": "Rally",
+ params = {"project_name": "functest", "case_name": case,
"pod_name": pod_name, "installer": installer,
"version": scenario, "details": payload}
@@ -180,9 +185,6 @@ def build_task_args(test_file_name):
task_args['image_name'] = GLANCE_IMAGE_NAME
task_args['flavor_name'] = FLAVOR_NAME
task_args['glance_image_location'] = GLANCE_IMAGE_PATH
- task_args['floating_network'] = FLOATING_NETWORK
- task_args['netid'] = functest_utils.get_network_id(client_dict['neutron'],
- PRIVATE_NETWORK).encode('ascii', 'ignore')
task_args['tmpl_dir'] = TEMPLATE_DIR
task_args['sup_dir'] = SUPPORT_DIR
task_args['users_amount'] = USERS_AMOUNT
@@ -190,19 +192,32 @@ def build_task_args(test_file_name):
task_args['iterations'] = ITERATIONS_AMOUNT
task_args['concurrency'] = CONCURRENCY
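+    # resolve the floating-IP (external) network; fall back to '' if none exists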
+ ext_net = functest_utils.get_external_net(client_dict['neutron'])
+ if ext_net:
+ task_args['floating_network'] = str(ext_net)
+ else:
+ task_args['floating_network'] = ''
+
+ net_id = functest_utils.get_network_id(client_dict['neutron'],
+ PRIVATE_NETWORK)
+ task_args['netid'] = str(net_id)
+
return task_args
-def get_output(proc):
+def get_output(proc, test_name):
+ global SUMMARY
result = ""
- if args.verbose:
- while proc.poll() is None:
- line = proc.stdout.readline()
- print line.replace('\n', '')
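+    # counters for the summary report, filled while parsing rally's output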
+ nb_tests = 0
+ overall_duration = 0.0
+ success = 0.0
+ nb_totals = 0
+
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if args.verbose:
result += line
- else:
- while proc.poll() is None:
- line = proc.stdout.readline()
+ else:
if "Load duration" in line or \
"started" in line or \
"finished" in line or \
@@ -214,7 +229,36 @@ def get_output(proc):
result += "\n" + line
elif "Full duration" in line:
result += line + "\n\n"
- logger.info("\n" + result)
+
+ # parse output for summary report
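+        # count result-table rows, skipping headers, progress markers and the total row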
+ if "| " in line and \
+ "| action" not in line and \
+ "| Starting" not in line and \
+ "| Completed" not in line and \
+ "| ITER" not in line and \
+           "|   " not in line and \
+ "| total" not in line:
+ nb_tests += 1
+ elif "| total" in line:
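+            # field 8 of the '|'-split "total" row is the success percentage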
+ percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+ success += float(percentage)
+ nb_totals += 1
+ elif "Full duration" in line:
+ overall_duration += float(line.split(': ')[1])
+
+    overall_duration = "{:10.2f}".format(overall_duration)
+ if nb_totals == 0:
+ success_avg = 0
+ else:
+ success_avg = "{:0.2f}".format(success / nb_totals)
+
+ scenario_summary = {'test_name': test_name,
+ 'overall_duration': overall_duration,
+ 'nb_tests': nb_tests,
+ 'success': success_avg}
+ SUMMARY.append(scenario_summary)
+
+ logger.info("\n" + result)
return result
@@ -225,7 +269,7 @@ def run_task(test_name):
# :param test_name: name for the rally test
# :return: void
#
-
+ global SUMMARY
logger.info('Starting test scenario "{}" ...'.format(test_name))
task_file = '{}task.yaml'.format(SCENARIOS_DIR)
@@ -233,7 +277,8 @@ def run_task(test_name):
logger.error("Task file '%s' does not exist." % task_file)
exit(-1)
- test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", test_name)
+ test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
+ test_name)
if not os.path.exists(test_file_name):
logger.error("The scenario '%s' does not exist." % test_file_name)
exit(-1)
@@ -245,18 +290,19 @@ def run_task(test_name):
"--task-args \"{}\" ".format(build_task_args(test_name))
logger.debug('running command line : {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=RALLY_STDERR, shell=True)
- output = get_output(p)
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+ stderr=RALLY_STDERR, shell=True)
+ output = get_output(p, test_name)
task_id = get_task_id(output)
logger.debug('task_id : {}'.format(task_id))
if task_id is None:
- logger.error("failed to retrieve task_id")
+ logger.error("Failed to retrieve task_id.")
exit(-1)
# check for result directory and create it otherwise
if not os.path.exists(RESULTS_DIR):
- logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
os.makedirs(RESULTS_DIR)
# write html report file
@@ -283,7 +329,7 @@ def run_task(test_name):
# Push results in payload of testcase
if args.report:
logger.debug("Push result into DB")
- push_results_to_db(json_data)
+ push_results_to_db("Rally_details", json_data)
""" parse JSON operation result """
if task_succeed(json_results):
@@ -293,23 +339,25 @@ def run_task(test_name):
def main():
+ global SUMMARY
# configure script
if not (args.test_name in tests):
logger.error('argument not valid')
exit(-1)
+ SUMMARY = []
creds_nova = functest_utils.get_credentials("nova")
- nova_client = novaclient.Client('2',**creds_nova)
+ nova_client = novaclient.Client('2', **creds_nova)
creds_neutron = functest_utils.get_credentials("neutron")
neutron_client = neutronclient.Client(**creds_neutron)
creds_keystone = functest_utils.get_credentials("keystone")
keystone_client = keystoneclient.Client(**creds_keystone)
glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
- endpoint_type='publicURL')
+ endpoint_type='publicURL')
glance_client = glanceclient.Client(1, glance_endpoint,
token=keystone_client.auth_token)
creds_cinder = functest_utils.get_credentials("cinder")
- cinder_client = cinderclient.Client('2',creds_cinder['username'],
+ cinder_client = cinderclient.Client('2', creds_cinder['username'],
creds_cinder['api_key'],
creds_cinder['project_id'],
creds_cinder['auth_url'],
@@ -317,9 +365,10 @@ def main():
client_dict['neutron'] = neutron_client
- volume_types = functest_utils.list_volume_types(cinder_client, private=False)
+ volume_types = functest_utils.list_volume_types(cinder_client,
+ private=False)
if not volume_types:
- volume_type = functest_utils.create_volume_type(cinder_client, \
+ volume_type = functest_utils.create_volume_type(cinder_client,
CINDER_VOLUME_TYPE_NAME)
if not volume_type:
logger.error("Failed to create volume type...")
@@ -333,10 +382,11 @@ def main():
image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
if image_id == '':
- logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
GLANCE_IMAGE_PATH))
- image_id = functest_utils.create_glance_image(glance_client,\
- GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+ image_id = functest_utils.create_glance_image(glance_client,
+ GLANCE_IMAGE_NAME,
+ GLANCE_IMAGE_PATH)
if not image_id:
logger.error("Failed to create the Glance image...")
exit(-1)
@@ -345,7 +395,7 @@ def main():
% (GLANCE_IMAGE_NAME, image_id))
else:
logger.debug("Using existing image '%s' with ID '%s'..." \
- % (GLANCE_IMAGE_NAME,image_id))
+ % (GLANCE_IMAGE_NAME, image_id))
if args.test_name == "all":
for test_name in tests:
@@ -353,9 +403,66 @@ def main():
test_name == 'vm'):
run_task(test_name)
else:
- print(args.test_name)
+ logger.debug("Test name: " + args.test_name)
run_task(args.test_name)
+ report = "\n"\
+ " \n"\
+ " Rally Summary Report\n"\
+ "+===================+============+===============+===========+\n"\
+ "| Module | Duration | nb. Test Run | Success |\n"\
+ "+===================+============+===============+===========+\n"
+ payload = []
+
+    # for each scenario we draw a row for the table
+ total_duration = 0.0
+ total_nb_tests = 0
+ total_success = 0.0
+ for s in SUMMARY:
+ name = "{0:<17}".format(s['test_name'])
+ duration = float(s['overall_duration'])
+ total_duration += duration
+ duration = time.strftime("%M:%S", time.gmtime(duration))
+ duration = "{0:<10}".format(duration)
+ nb_tests = "{0:<13}".format(s['nb_tests'])
+ total_nb_tests += int(s['nb_tests'])
+ success = "{0:<10}".format(str(s['success'])+'%')
+ total_success += float(s['success'])
+ report += ""\
+ "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
+ "+-------------------+------------+---------------+-----------+\n"
+ payload.append({'module': name,
+ 'details': {'duration': s['overall_duration'],
+ 'nb tests': s['nb_tests'],
+ 'success': s['success']}})
+
+ total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
+ total_duration_str2 = "{0:<10}".format(total_duration_str)
+ total_nb_tests_str = "{0:<13}".format(total_nb_tests)
+ total_success = "{:0.2f}".format(total_success / len(SUMMARY))
+ total_success_str = "{0:<10}".format(str(total_success)+'%')
+ report += "+===================+============+===============+===========+\n"
+ report += "| TOTAL: | " + total_duration_str2 + " | " + \
+ total_nb_tests_str + " | " + total_success_str + "|\n"
+ report += "+===================+============+===============+===========+\n"
+
+    logger.info("\n" + report)
+ payload.append({'summary': {'duration': total_duration,
+ 'nb tests': total_nb_tests,
+ 'nb success': total_success}})
+
+ # Generate json results for DB
+ #json_results = {"timestart": time_start, "duration": total_duration,
+ # "tests": int(total_nb_tests), "success": int(total_success)}
+ #logger.info("Results: "+str(json_results))
+
+ if args.report:
+ logger.debug("Pushing Rally summary into DB...")
+ push_results_to_db("Rally", payload)
+
+ if args.noclean:
+ exit(0)
+
logger.debug("Deleting image '%s' with ID '%s'..." \
% (GLANCE_IMAGE_NAME, image_id))
if not functest_utils.delete_glance_image(nova_client, image_id):