Diffstat (limited to 'testcases/VIM/OpenStack/CI/libraries')
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_rally.py | 77
1 file changed, 27 insertions(+), 50 deletions(-)
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 2bfb8127f..341281a2c 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -10,24 +10,24 @@
#
import re, json, os, urllib2, argparse, logging, yaml
-with open('../functest.yaml') as f:
- functest_yaml = yaml.safe_load(f)
-f.close()
-HOME = os.environ['HOME']+"/"
-SCENARIOS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_scn")
-RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res")
+
+""" get the date """
+cmd = os.popen("date '+%d%m%Y_%H%M'")
+test_date = cmd.read().rstrip()
+
""" tests configuration """
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'tempest', 'vm', 'all', 'smoke']
+tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'vm', 'tempest', 'all', 'smoke']
parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
parser.add_argument("test_name", help="The name of the test you want to perform with rally. "
"Possible values are : "
"[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | {d[5]} | {d[6]} "
- "| {d[7]} | {d[8]} | {d[9]} | {d[10]} | {d[11]} | {d[12]}]. The 'all' value performs all the tests scenarios "
+ "| {d[7]} | {d[8]} | {d[9]} | {d[10]} | {d[11]} | {d[12]}]. The 'all' value performs all the possible tests scenarios"
"except 'tempest'".format(d=tests))
-
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+
parser.add_argument("test_mode", help="Tempest test mode", nargs='?', default="smoke")
args = parser.parse_args()
test_mode=args.test_mode
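
Note on the hunk above: the script now takes the repository path as a first positional argument and keeps the optional Tempest mode as a trailing positional with a "smoke" default. A minimal standalone sketch of the resulting CLI; the invocation in the comment is hypothetical, not taken from the CI wrappers:

import argparse

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron',
         'nova', 'quotas', 'requests', 'vm', 'tempest', 'all', 'smoke']

parser = argparse.ArgumentParser()
parser.add_argument("repo_path", help="Path to the repository")
parser.add_argument("test_name", help="Rally test to run, one of: " + " | ".join(tests))
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("test_mode", help="Tempest test mode", nargs='?', default="smoke")

# Hypothetical invocation: run_rally.py /home/opnfv/repos/functest/ tempest full
args = parser.parse_args()
print(args)
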
@@ -50,6 +50,17 @@ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(messag
ch.setFormatter(formatter)
logger.addHandler(ch)
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_rally_scn")
+RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res") + test_date + "/"
+
+
+
def get_tempest_id(cmd_raw):
"""
@@ -95,7 +106,7 @@ def task_succeed(json_raw):
return False
for result in rally_report.get('result'):
- if len(result.get('errors')) > 0:
+ if len(result.get('error')) > 0:
return False
return True
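
The fix above reads the 'error' field of each entry in the report's 'result' list (the key was previously misspelled as 'errors'). A simplified, runnable sketch of that check; the sample reports are made up to illustrate both outcomes:

import json

def task_succeed(json_raw):
    """Simplified sketch: a task succeeds when no entry in the
    report's 'result' list carries a non-empty 'error' list."""
    rally_report = json.loads(json_raw)
    for result in rally_report.get('result'):
        if len(result.get('error')) > 0:
            return False
    return True

# Made-up reports illustrating both outcomes.
print(task_succeed(json.dumps({"result": [{"error": []}]})))          # True
print(task_succeed(json.dumps({"result": [{"error": ["timeout"]}]}))) # False
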
@@ -108,10 +119,6 @@ def run_tempest():
"""
logger.info('starting {} Tempest ...'.format(test_mode))
- """ get the date """
- cmd = os.popen("date '+%d%m%Y_%H%M'")
- test_date = cmd.read().rstrip()
-
cmd_line = "rally verify start {}".format(test_mode)
logger.debug('running command line : {}'.format(cmd_line))
cmd = os.popen(cmd_line)
@@ -128,7 +135,7 @@ def run_tempest():
os.makedirs(RESULTS_DIR)
""" write log report file """
- report_file_name = '{}opnfv-tempest-{}.log'.format(RESULTS_DIR, test_date)
+ report_file_name = '{}opnfv-tempest.log'.format(RESULTS_DIR)
cmd_line = "rally verify detailed {} > {} ".format(task_id, report_file_name)
logger.debug('running command line : {}'.format(cmd_line))
os.popen(cmd_line)
@@ -142,16 +149,12 @@ def run_task(test_name):
"""
logger.info('starting {} test ...'.format(test_name))
- """ get the date """
- cmd = os.popen("date '+%d%m%Y_%H%M'")
- test_date = cmd.read().rstrip()
-
""" check directory for scenarios test files or retrieve from git otherwise"""
proceed_test = True
test_file_name = '{}opnfv-{}.json'.format(SCENARIOS_DIR, test_name)
if not os.path.exists(test_file_name):
- logger.debug('{} does not exists'.format(test_file_name))
- proceed_test = retrieve_test_cases_file(test_name, SCENARIOS_DIR)
+ logger.error("The scenario '%s' does not exist." %test_file_name)
+ exit(-1)
""" we do the test only if we have a scenario test file """
if proceed_test:
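
With the git fallback removed in the hunk above, a missing scenario file is now fatal. A short sketch of the stricter check; the directory and test name are placeholders, and print/sys.exit stand in for the script's logger.error and exit:

import os, sys

SCENARIOS_DIR = "/home/opnfv/repos/functest/testcases/VIM/OpenStack/CI/suites/"  # placeholder
test_name = "nova"                                                               # placeholder

test_file_name = '{}opnfv-{}.json'.format(SCENARIOS_DIR, test_name)
if not os.path.exists(test_file_name):
    print("The scenario '%s' does not exist." % test_file_name)
    sys.exit(-1)
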
@@ -172,7 +175,7 @@ def run_task(test_name):
os.makedirs(RESULTS_DIR)
""" write html report file """
- report_file_name = '{}opnfv-{}-{}.html'.format(RESULTS_DIR, test_name, test_date)
+ report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
cmd_line = "rally task report %s --out %s" % (task_id, report_file_name)
logger.debug('running command line : {}'.format(cmd_line))
os.popen(cmd_line)
@@ -182,7 +185,7 @@ def run_task(test_name):
logger.debug('running command line : {}'.format(cmd_line))
cmd = os.popen(cmd_line)
json_results = cmd.read()
- with open('{}opnfv-{}-{}.json'.format(RESULTS_DIR, test_name, test_date), 'w') as f:
+ with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
logger.debug('saving json file')
f.write(json_results)
logger.debug('saving json file2')
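
Because the whole results directory now carries the run timestamp, the per-file date suffix is dropped from the HTML and JSON report names. A quick illustration of the resulting layout; all values are placeholders:

test_date = "21072015_1430"                                        # placeholder run date
RESULTS_DIR = "/root/functest/results/rally/" + test_date + "/"    # placeholder base path
test_name = "nova"

print('{}opnfv-{}.html'.format(RESULTS_DIR, test_name))
print('{}opnfv-{}.json'.format(RESULTS_DIR, test_name))
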
@@ -196,32 +199,6 @@ def run_task(test_name):
logger.error('{} test failed, unable to fetch a scenario test file'.format(test_name))
-def retrieve_test_cases_file(test_name, tests_path):
- """
- Retrieve from github the sample test files
- :return: Boolean that indicates the retrieval status
- """
-
- """ do not add the "/" at the end """
- url_base = "https://git.opnfv.org/cgit/functest/plain/testcases/VIM/OpenStack/CI/suites"
-
- test_file_name = 'opnfv-{}.json'.format(test_name)
- logger.info('fetching {}/{} ...'.format(url_base, test_file_name))
-
- try:
- response = urllib2.urlopen('{}/{}'.format(url_base, test_file_name))
- except (urllib2.HTTPError, urllib2.URLError):
- return False
- file_raw = response.read()
-
- """ check if the test path exist otherwise we create it """
- if not os.path.exists(tests_path):
- os.makedirs(tests_path)
-
- with open('{}/{}'.format(tests_path, test_file_name), 'w') as f:
- f.write(file_raw)
- return True
-
def main():
""" configure script """
@@ -231,7 +208,7 @@ def main():
if args.test_name == "all":
for test_name in tests:
- if not (test_name == 'all' or test_name == 'tempest'):
+ if not (test_name == 'all' or test_name == 'tempest' or test_name == 'heat' or test_name == 'smoke' or test_name == 'vm' ):
print(test_name)
run_task(test_name)
else:
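
The 'all' run now also skips 'heat', 'smoke' and 'vm' in addition to 'all' and 'tempest'. An equivalent sketch of that loop using a skip tuple, behaviourally the same as the chained comparisons in the hunk above:

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron',
         'nova', 'quotas', 'requests', 'vm', 'tempest', 'all', 'smoke']
skipped = ('all', 'tempest', 'heat', 'smoke', 'vm')

for test_name in tests:
    if test_name not in skipped:
        print(test_name)       # the real script calls run_task(test_name) here
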