author    MofassirArif <Mofassir_Arif@dellteam.com>    2016-01-12 03:12:23 -0800
committer MofassirArif <Mofassir_Arif@dellteam.com>    2016-01-12 03:15:02 -0800
commit    4c06a4ed3b3b22e3dbcddb33dda33ca773dfae11 (patch)
tree      e7fa95e0b05564d5f3ee2e14a516e8410083a9d6 /func
parent    90eb79018b459c1aa6606168f1ee592da535643c (diff)
Error handling for the files being input to the framework.

iperf bug fix for the bare-metal testing; renamed all the iperf config files; added more detail to the help.

Change-Id: I16cfb1c05599cd0b803e735e6a75083e3e6733ec
Signed-off-by: MofassirArif <Mofassir_Arif@dellteam.com>
Diffstat (limited to 'func')
-rw-r--r--   func/cli.py             49
-rw-r--r--   func/create_zones.py     3
-rw-r--r--   func/driver.py          10
-rw-r--r--   func/validate_yaml.py    2
4 files changed, 50 insertions, 14 deletions
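
The cli.py hunk below implements the error handling described in the commit message: the test list passed with -f must exist as a file under test_list/, and the lab passed with -l must exist as a directory under test_cases/, before any benchmark is driven. The following is a minimal, self-contained sketch of that validation pattern (it is not the committed code; the committed methods are _checkTestList and _checkLabName inside the cli class, and they rely on os and sys being importable in cli.py):

    import os
    import sys

    def check_test_list(filename):
        # The test list must be a regular file under test_list/.
        return os.path.isfile(os.path.join('test_list', filename))

    def check_lab_name(labname):
        # Each lab is a directory of edited test-case files under test_cases/.
        return os.path.isdir(os.path.join('test_cases', labname))

    if __name__ == '__main__':
        if not check_test_list('compute'):
            print('ERROR: test file does not exist in test_list/')
            sys.exit(1)
        if not check_lab_name('default'):
            print('ERROR: lab is not present in test_cases/; use -l default if unsure')
            sys.exit(1)

The sketch exits with a non-zero status on failure so that a CI job such as Jenkins can flag the run; the patch itself calls sys.exit(0).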
diff --git a/func/cli.py b/func/cli.py
index 8433e483..b8718407 100644
--- a/func/cli.py
+++ b/func/cli.py
@@ -18,24 +18,58 @@ import argparse
class cli():
def _getfile(self, filepath):
+
with open('test_list/'+filepath,'r') as finput:
_benchmarks=finput.readlines()
for items in range( len(_benchmarks)):
_benchmarks[items]=_benchmarks[items].rstrip()
return _benchmarks
+
def _getsuite(self, filepath):
-# for suites in range (len(filepath)):
- # xindex= filepath[suites].find('.')
- # filepath[suites]=filepath[suites][0:xindex]
+
return filepath
-
+
+ def _checkTestList(self, filename):
+
+ if os.path.isfile('test_list/'+filename):
+ return True
+ else:
+ return False
+
+ def _checkLabName(self, labname):
+
+ if os.path.isdir('test_cases/'+labname):
+ return True
+ else:
+ return False
+
def __init__(self):
+
suite=[]
parser = argparse.ArgumentParser()
- parser.add_argument('-l ', '--lab', help='Name of Lab on which being tested ')
- parser.add_argument('-f', '--file', help = 'File in test_list with the list ' \
- 'of tests')
+ parser.add_argument('-l ', '--lab', help='Name of the lab on which the tests are run. These can ' \
+ 'be found in the test_cases/ directory. Please ' \
+ 'ensure that you have edited the respective files ' \
+ 'before using them. For testing other than through Jenkins, ' \
+ 'the user should pass default after -l. All the fields in ' \
+ 'the files are necessary and should be filled.')
+ parser.add_argument('-f', '--file', help='File in test_list with the list of tests. There are three files:' \
+ '\n compute ' \
+ '\n storage ' \
+ '\n network ' \
+ '\nThey contain all the tests that will be run, listed by suite. ' \
+ 'Please ensure there are no empty lines.')
args = parser.parse_args()
+
+ if not self._checkTestList(args.file):
+ print '\n\n ERROR: the test file does not exist in test_list/. Please enter a correct file name.\n\n'
+ sys.exit(0)
+
+ if not self._checkLabName(args.lab):
+ print '\n\n ERROR: the specified lab is not present in test_cases/. Please enter a correct' \
+ ' lab name. If unsure how to proceed, use -l default.\n\n'
+ sys.exit(0)
+
benchmarks = self._getfile(args.file)
suite.append(args.file)
suite=self._getsuite(suite)
@@ -63,4 +97,3 @@ class cli():
else:
print (args.benchmark, ' is not a Template in the Directory - \
Enter a Valid file name. or use qtip.py -h for list')
-
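
Taken together with the existing error message that points at qtip.py -h, the expanded help above implies invocations along these lines (the suite names and the default lab are the values the help text itself lists; the exact entry-point wiring is not shown in this diff):

    # run the compute suite against the default lab definition in test_cases/
    python qtip.py -l default -f compute

    # the other suites named in the -f help
    python qtip.py -l default -f storage
    python qtip.py -l default -f network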
diff --git a/func/create_zones.py b/func/create_zones.py
index a21fb41f..44ba7568 100644
--- a/func/create_zones.py
+++ b/func/create_zones.py
@@ -107,6 +107,9 @@ class create_zones:
for x in range(len(zone_machine)):
compute_index = self.get_compute_num(D[x])
+ if compute_index > len(hyper_list):
+ print '\n The specified compute node does not exist. Using compute 1.'
+ compute_index = 1
if not self.check_aggregate(nova, hostnA[compute_index]):
agg_idA = nova.aggregates.create(hostnA[compute_index], D[x])
nova.aggregates.add_host(aggregate=agg_idA, host=hostnA[compute_index])
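
The create_zones.py hunk guards the user-supplied compute index: if it points past the number of hypervisors reported by nova, the code falls back to compute 1 instead of indexing out of range. A small sketch of the same guard, assuming the 1-based compute numbering used in the patch (hyper_list is the patch's name; the sample host list is hypothetical):

    def clamp_compute_index(compute_index, hyper_list):
        # Fall back to the first compute node when the requested index exceeds
        # the number of available hypervisors (1-based, as in the patch).
        if compute_index > len(hyper_list):
            print('The specified compute node does not exist. Using compute 1.')
            return 1
        return compute_index

    # e.g. clamp_compute_index(5, ['compute1', 'compute2']) returns 1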
diff --git a/func/driver.py b/func/driver.py
index 2cdddba5..0dc0a6e1 100644
--- a/func/driver.py
+++ b/func/driver.py
@@ -21,6 +21,7 @@ class Driver:
self.dic_json = defaultdict()
def drive_bench(self, benchmark, roles, benchmark_detail= None, pip_dict = None):
+
roles= sorted(roles)
pip_dict = sorted(pip_dict)
result_dir = 'results'
@@ -39,12 +40,11 @@ class Driver:
if k == '1-server':
print values, 'saving IP'
self.dic_json['ip'+str(index)]= str(values)
- self.dic_json['privateip'+str(index)] = pip_dict[0][1]
+ if pip_dict[0][1][0]:
+ self.dic_json['privateip'+str(index)] = pip_dict[0][1]
+ else:
+ self.dic_json['privateip'+str(index)] = 'NONE'
index= index+1
dic_json = json.dumps(dict(self.dic_json.items()))
- print dic_json
run_play = 'ansible-playbook -s ./benchmarks/playbooks/{0} --private-key=./data/QtipKey -i ./data/hosts --extra-vars \'{1}\' -v '.format(benchmark_name, dic_json)
-# run_play = 'ansible-playbook -s $PWD/benchmarks/playbooks/{0} --extra-vars "Dest_dir={1} role={2}" -vvv'.format(
-# benchmark_name, result_dir, k)
status = os.system(run_play)
-
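
The driver.py hunk stops assuming that every role has a private IP: the value is stored only when one was actually collected, and the literal string 'NONE' is stored otherwise, before the whole dictionary is handed to ansible-playbook as --extra-vars JSON. A sketch of that flow with hypothetical data (the playbook name and IP are placeholders; the command-line flags are the ones already present in run_play above):

    import json
    from collections import defaultdict

    dic_json = defaultdict()
    pip_dict = [('1-server', ['10.20.0.5'])]        # hypothetical (role, private-IP list) pair
    benchmark_name = 'example_playbook.yaml'        # hypothetical playbook file name

    # Mirror the fallback in the hunk: record the private IP only if one exists.
    if pip_dict[0][1] and pip_dict[0][1][0]:
        dic_json['privateip0'] = pip_dict[0][1]
    else:
        dic_json['privateip0'] = 'NONE'

    extra_vars = json.dumps(dict(dic_json.items()))
    run_play = ('ansible-playbook -s ./benchmarks/playbooks/{0} --private-key=./data/QtipKey '
                '-i ./data/hosts --extra-vars \'{1}\' -v').format(benchmark_name, extra_vars)
    print(run_play)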
diff --git a/func/validate_yaml.py b/func/validate_yaml.py
index c0df4d87..e4d3d5cf 100644
--- a/func/validate_yaml.py
+++ b/func/validate_yaml.py
@@ -24,7 +24,7 @@ class Validate_Yaml():
if not doc['Scenario']['benchmark']:
print '\nBenchmark field missing'
if not doc['Scenario']['pointless']:
- print '\nBabyeating anumal'
+ print ''
if not doc['Context']:
print '\nEntire Context is missing'
if not doc['Context']['Host_Machine']:
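
The validate_yaml.py checks shown in this hunk are simple required-field probes against the parsed test-case YAML. A compact sketch of the same idea, assuming PyYAML (yaml.safe_load) is used to load the document; the field names are the ones appearing in the hunk:

    import yaml

    REQUIRED = [('Scenario', 'benchmark'), ('Context',), ('Context', 'Host_Machine')]

    def missing_fields(path):
        # Return the dotted names of required fields that are absent or empty.
        with open(path) as f:
            doc = yaml.safe_load(f)
        missing = []
        for keys in REQUIRED:
            node = doc
            for k in keys:
                node = node.get(k) if isinstance(node, dict) else None
            if not node:
                missing.append('.'.join(keys))
        return missing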