diff options
49 files changed, 1977 insertions, 1271 deletions
diff --git a/benchmarks/playbooks/info_script/info_collect.py b/benchmarks/playbooks/info_script/info_collect.py index 7e5544c0..4daed318 100644 --- a/benchmarks/playbooks/info_script/info_collect.py +++ b/benchmarks/playbooks/info_script/info_collect.py @@ -1,94 +1,87 @@ import os import pickle -import time -import datetime import json import sys os.system('inxi -b -c0 -n > $PWD/est_2') -est_ob=open("est_2","r+") -est_ob2=open("est_1","w+") -in_string= est_ob.read().replace('\n',' ') -cpu_idle=float(os.popen("""top -bn1 | grep "Cpu(s)" | awk '{print $8}'""").read().rstrip()) -cpu_usage= 100-cpu_idle -est_ob2.write(in_string); +est_ob = open("est_2", "r+") +est_ob2 = open("est_1", "w+") +in_string = est_ob.read().replace('\n', ' ') +cpu_idle = float(os.popen("""top -bn1 | grep "Cpu(s)" | awk '{print $8}'""").read().rstrip()) +cpu_usage = 100 - cpu_idle +est_ob2.write(in_string) est_ob.close() est_ob2.close() -Info_dict={}; -inxi_host=os.popen("""cat $PWD/est_1 | grep -o -P '(?<=Host:).*(?=Kernel)' """).read().lstrip().rstrip() -inxi_mem=os.popen("""cat $PWD/est_1 | grep -o -P '(?<=Memory:).*(?=MB)' """).read().lstrip().rstrip()+"MB" -inxi_cpu=os.popen("""cat $PWD/est_1 | grep -o -P '(?<=CPU).*(?=speed)' | cut -f2 -d':'""").read().lstrip().rstrip() -inxi_distro=os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Distro:).*(?=Machine:)' """).read().rstrip().lstrip() -inxi_kernel=os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Kernel:).*(?=Console:)' """).read().rstrip().lstrip() -inxi_HD=os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=HDD Total Size:).*(?=Info:)' """).read().rstrip().lstrip() -inxi_product=os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=product:).*(?=Mobo:)' """).read().rstrip().lstrip() +Info_dict = {} +inxi_host = os.popen("""cat $PWD/est_1 | grep -o -P '(?<=Host:).*(?=Kernel)' """).read().lstrip().rstrip() +inxi_mem = os.popen("""cat $PWD/est_1 | grep -o -P '(?<=Memory:).*(?=MB)' """).read().lstrip().rstrip() + "MB" +inxi_cpu = os.popen("""cat $PWD/est_1 | 
grep -o -P '(?<=CPU).*(?=speed)' | cut -f2 -d':'""").read().lstrip().rstrip() +inxi_distro = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Distro:).*(?=Machine:)' """).read().rstrip().lstrip() +inxi_kernel = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Kernel:).*(?=Console:)' """).read().rstrip().lstrip() +inxi_HD = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=HDD Total Size:).*(?=Info:)' """).read().rstrip().lstrip() +inxi_product = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=product:).*(?=Mobo:)' """).read().rstrip().lstrip() - - -Info_dict['1_Hostname']=inxi_host -Info_dict['2_Product']=inxi_product -Info_dict['3_OS Distribution']=inxi_distro -Info_dict['4_Kernel']=inxi_kernel -Info_dict['5_CPU']=inxi_cpu -Info_dict['6_CPU_Usage']=str(round(cpu_usage,3))+'%' -Info_dict['7_Memory Usage']=inxi_mem -Info_dict['8_Disk usage']=inxi_HD -network_flag=str(sys.argv[1]).rstrip() +Info_dict['1_Hostname'] = inxi_host +Info_dict['2_Product'] = inxi_product +Info_dict['3_OS Distribution'] = inxi_distro +Info_dict['4_Kernel'] = inxi_kernel +Info_dict['5_CPU'] = inxi_cpu +Info_dict['6_CPU_Usage'] = str(round(cpu_usage, 3)) + '%' +Info_dict['7_Memory Usage'] = inxi_mem +Info_dict['8_Disk usage'] = inxi_HD +network_flag = str(sys.argv[1]).rstrip() if (network_flag == 'n'): - - Info_dict['9_Network_Interfaces']={}; - tem_2=""" cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'""" - print os.system(tem_2+' > Hello') - i=int(os.popen(tem_2+" | grep -o 'Card' | wc -l ").read()) - print i + Info_dict['9_Network_Interfaces'] = {} + tem_2 = """ cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'""" + print os.system(tem_2 + ' > Hello') + i = int(os.popen(tem_2 + " | grep -o 'Card' | wc -l ").read()) + print i - for x in range (1,i+1): - tem=""" cat $PWD/est_1 | grep -o -P '(?<=Card-"""+str(x)+""":).*(?=Card-"""+str(x+1)+""")'""" + for x in range(1, i + 1): + tem = """ cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Card-""" + str(x + 1) + """)'""" if i == 1: - 
tem=""" cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'""" - inxi_card_1=((os.popen(tem+" | grep -o -P '(?<=Card:).*(?=Drives:)'|sed 's/ *driver:.*//'").read().rstrip().lstrip())) + tem = """ cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'""" + inxi_card_1 = ((os.popen(tem + " | grep -o -P '(?<=Card:).*(?=Drives:)'|sed 's/ *driver:.*//'").read().rstrip().lstrip())) print inxi_card_1 - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]={}; - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]['1_Network_Card']=inxi_card_1 - inxi_card_2=((os.popen(tem+"| grep -o -P '(?<=Card:).*(?=Drives:)'|sed -e 's/^.*IF: //'").read())).rstrip().lstrip() - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]['2_Interface_info']=inxi_card_2 + Info_dict['9_Network_Interfaces']['Interface_' + str(x)] = {} + Info_dict['9_Network_Interfaces']['Interface_' + str(x)]['1_Network_Card'] = inxi_card_1 + inxi_card_2 = ((os.popen(tem + "| grep -o -P '(?<=Card:).*(?=Drives:)'|sed -e 's/^.*IF: //'").read())).rstrip().lstrip() + Info_dict['9_Network_Interfaces']['Interface_' + str(x)]['2_Interface_info'] = inxi_card_2 elif x < (i): print "two" - #inxi_Card_temp=((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-"""+str(x)+""":).*(?=Card-"""+str(x+1)+""")' """).read().rstrip().lstrip())) - inxi_card_1=((os.popen(tem+"| sed 's/ *driver:.*//'").read().rstrip().lstrip())) - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]={}; - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]['1_Network_Card']=inxi_card_1 - inxi_card_2=((os.popen(tem+"|sed -e 's/^.*IF: //'").read())).rstrip().lstrip() - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]['2_Interface_info']=inxi_card_2 + inxi_card_1 = ((os.popen(tem + "| sed 's/ *driver:.*//'").read().rstrip().lstrip())) + Info_dict['9_Network_Interfaces']['Interface_' + str(x)] = {} + Info_dict['9_Network_Interfaces']['Interface_' + str(x)]['1_Network_Card'] = inxi_card_1 + inxi_card_2 = ((os.popen(tem + "|sed -e 's/^.*IF: 
//'").read())).rstrip().lstrip() + Info_dict['9_Network_Interfaces']['Interface_' + str(x)]['2_Interface_info'] = inxi_card_2 elif x == i: print "Three" - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]={}; - inxi_card_1=((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-"""+str(x)+""":).*(?=Drives:)'| sed 's/ *driver:.*//' """).read().rstrip().lstrip())) - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]['1_Network_Card']=inxi_card_1 - inxi_card_2=((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-"""+str(x)+""":).*(?=Drives:)'| sed -e 's/^.*IF: //' """).read().rstrip().lstrip())) - Info_dict['9_Network_Interfaces']['Interface_'+str(x)]['2_Interface_info']=inxi_card_2 + Info_dict['9_Network_Interfaces']['Interface_' + str(x)] = {} + inxi_card_1 = ((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Drives:)'| sed 's/ *driver:.*//' """).read().rstrip().lstrip())) + Info_dict['9_Network_Interfaces']['Interface_' + str(x)]['1_Network_Card'] = inxi_card_1 + inxi_card_2 = ((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Drives:)'| sed -e 's/^.*IF: //' """).read().rstrip().lstrip())) + Info_dict['9_Network_Interfaces']['Interface_' + str(x)]['2_Interface_info'] = inxi_card_2 else: print "No network cards" os.system("bwm-ng -o plain -c 1 | grep -v '=' | grep -v 'iface' | grep -v '-' > bwm_dump") - n_interface=int(os.popen(" cat bwm_dump | grep -v 'total' | wc -l ").read().rstrip()) - interface={}; - for x in range (1,n_interface): - interface_name=os.popen(" cat bwm_dump | awk 'NR=="+str(x)+"' | awk '{print $1}' ").read().rstrip().replace(':','') - interface[str(interface_name)]={}; - interface[str(interface_name)]['Rx (KB/s)']=os.popen(" cat bwm_dump | awk 'NR=="+str(x)+"' | awk '{print $2}' ").read().rstrip() - interface[str(interface_name)]['Tx (KB/s)']=os.popen(" cat bwm_dump | awk 'NR=="+str(x)+"' | awk '{print $4}' ").read().rstrip() - interface[str(interface_name)]['Total (KB/s)']=os.popen(" 
cat bwm_dump | awk 'NR== "+str(x)+"' | awk '{print $6}' ").read().rstrip() - - Info_dict['10.Interface I/O']=interface + n_interface = int(os.popen(" cat bwm_dump | grep -v 'total' | wc -l ").read().rstrip()) + interface = {} + for x in range(1, n_interface): + interface_name = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $1}' ").read().rstrip().replace(':', '') + interface[str(interface_name)] = {} + interface[str(interface_name)]['Rx (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $2}' ").read().rstrip() + interface[str(interface_name)]['Tx (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $4}' ").read().rstrip() + interface[str(interface_name)]['Total (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR== " + str(x) + "' | awk '{print $6}' ").read().rstrip() + + Info_dict['10.Interface I/O'] = interface print Info_dict - -with open('./sys_info_temp','w+')as out_info: - pickle.dump(Info_dict,out_info) -with open('temp','w+') as result_json: - json.dump(Info_dict,result_json,indent=4,sort_keys=True) +with open('./sys_info_temp', 'w+')as out_info: + pickle.dump(Info_dict, out_info) +with open('temp', 'w+') as result_json: + json.dump(Info_dict, result_json, indent=4, sort_keys=True) diff --git a/benchmarks/playbooks/result_transform/dpi/dpi_transform.py b/benchmarks/playbooks/result_transform/dpi/dpi_transform.py index b95e0e23..622030cd 100644 --- a/benchmarks/playbooks/result_transform/dpi/dpi_transform.py +++ b/benchmarks/playbooks/result_transform/dpi/dpi_transform.py @@ -1,5 +1,4 @@ import os -import json import pickle import datetime @@ -46,4 +45,4 @@ result = {} result['DPI_benchmark(M pps)'] = round(dpi_result_pps, 3) result['DPI_benchmark(Gb/s)'] = round(dpi_result_bps, 3) with open('./result_temp', 'w+') as result_file: - pickle.dump(result, result_file)
\ No newline at end of file + pickle.dump(result, result_file) diff --git a/benchmarks/playbooks/result_transform/fio/fio_result_transform.py b/benchmarks/playbooks/result_transform/fio/fio_result_transform.py index f9410a62..9929aa18 100755 --- a/benchmarks/playbooks/result_transform/fio/fio_result_transform.py +++ b/benchmarks/playbooks/result_transform/fio/fio_result_transform.py @@ -4,22 +4,20 @@ import os import datetime with open("fio_result.json") as fio_raw: - fio_data=json.load(fio_raw) + fio_data = json.load(fio_raw) -r_iops=[]; -r_io_bytes=[]; -r_io_runtime=[]; -r_lat=[]; -w_iops=[]; -w_io_bytes=[]; -w_io_runtime=[]; -w_lat=[]; +r_iops = [] +r_io_bytes = [] +r_io_runtime = [] +r_lat = [] +w_iops = [] +w_io_bytes = [] +w_io_runtime = [] +w_lat = [] +total_jobs = len(fio_data["jobs"]) - -total_jobs=len(fio_data["jobs"]) - -for x in range (0,int(total_jobs)): +for x in range(0, int(total_jobs)): r_iops.append(fio_data["jobs"][x]["read"]["iops"]) r_io_bytes.append(fio_data["jobs"][x]["read"]["io_bytes"]) r_io_runtime.append(fio_data["jobs"][x]["read"]["runtime"]) @@ -29,29 +27,24 @@ for x in range (0,int(total_jobs)): w_io_runtime.append(fio_data["jobs"][x]["write"]["runtime"]) w_lat.append(fio_data["jobs"][x]["write"]["lat"]["mean"]) +FIO_result_dict = {} +for x in range(0, total_jobs): + FIO_result_dict['Job_' + str(x)] = {} + FIO_result_dict['Job_' + str(x)]['read'] = {} + FIO_result_dict['Job_' + str(x)]['read']['Total_IO_Bytes'] = r_io_bytes[x] + FIO_result_dict['Job_' + str(x)]['read']['IO/sec'] = r_iops[x] + FIO_result_dict['Job_' + str(x)]['read']['IO_runtime (millisec)'] = r_io_runtime[x] + FIO_result_dict['Job_' + str(x)]['read']['mean_IO_latenchy (microsec)'] = r_lat[x] -FIO_result_dict={}; - -for x in range (0,total_jobs): - FIO_result_dict['Job_'+str(x)]={}; - FIO_result_dict['Job_'+str(x)]['read']={}; - FIO_result_dict['Job_'+str(x)]['read']['Total_IO_Bytes']=r_io_bytes[x] - FIO_result_dict['Job_'+str(x)]['read']['IO/sec']=r_iops[x] - 
FIO_result_dict['Job_'+str(x)]['read']['IO_runtime (millisec)']=r_io_runtime[x] - FIO_result_dict['Job_'+str(x)]['read']['mean_IO_latenchy (microsec)']=r_lat[x] - - FIO_result_dict['Job_'+str(x)]['write']={}; - FIO_result_dict['Job_'+str(x)]['write']['Total_IO_Bytes']=w_io_bytes[x] - FIO_result_dict['Job_'+str(x)]['write']['IO/sec']=w_iops[x] - FIO_result_dict['Job_'+str(x)]['write']['IO_runtime (millisec)']=w_io_runtime[x] - FIO_result_dict['Job_'+str(x)]['write']['mean_IO_latenchy (microsec)']=w_lat[x] - - + FIO_result_dict['Job_' + str(x)]['write'] = {} + FIO_result_dict['Job_' + str(x)]['write']['Total_IO_Bytes'] = w_io_bytes[x] + FIO_result_dict['Job_' + str(x)]['write']['IO/sec'] = w_iops[x] + FIO_result_dict['Job_' + str(x)]['write']['IO_runtime (millisec)'] = w_io_runtime[x] + FIO_result_dict['Job_' + str(x)]['write']['mean_IO_latenchy (microsec)'] = w_lat[x] host_name = (os.popen("hostname").read().rstrip()) report_time = str(datetime.datetime.utcnow().isoformat()) -os.system("mv fio_result.json "+str(host_name)+"-"+report_time+".log") -with open('./result_temp','w+')as out_fio_result: - pickle.dump(FIO_result_dict,out_fio_result) - +os.system("mv fio_result.json " + str(host_name) + "-" + report_time + ".log") +with open('./result_temp', 'w+')as out_fio_result: + pickle.dump(FIO_result_dict, out_fio_result) diff --git a/benchmarks/playbooks/result_transform/iperf/iperf_transform.py b/benchmarks/playbooks/result_transform/iperf/iperf_transform.py index 39c5956c..8df5a79a 100644 --- a/benchmarks/playbooks/result_transform/iperf/iperf_transform.py +++ b/benchmarks/playbooks/result_transform/iperf/iperf_transform.py @@ -1,30 +1,29 @@ import json
import datetime
import pickle
-with open('iperf_raw.json','r') as ifile:
- raw_iperf_data=json.loads(ifile.read().rstrip())
-
-
-bits_sent= raw_iperf_data['end']['sum_sent']['bits_per_second']
-bits_received= raw_iperf_data['end']['sum_received']['bits_per_second']
-total_byte_sent=raw_iperf_data['end']['sum_sent']['bytes']
-total_byte_received=raw_iperf_data['end']['sum_received']['bytes']
-cpu_host_total_percent=raw_iperf_data['end']['cpu_utilization_percent']['host_total']
-cpu_remote_total_percent=raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
+with open('iperf_raw.json', 'r') as ifile:
+ raw_iperf_data = json.loads(ifile.read().rstrip())
-result={}
+bits_sent = raw_iperf_data['end']['sum_sent']['bits_per_second']
+bits_received = raw_iperf_data['end']['sum_received']['bits_per_second']
+total_byte_sent = raw_iperf_data['end']['sum_sent']['bytes']
+total_byte_received = raw_iperf_data['end']['sum_received']['bytes']
+cpu_host_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['host_total']
+cpu_remote_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
+
+result = {}
time_stamp = str(datetime.datetime.utcnow().isoformat())
-result['1. Version']=raw_iperf_data['start']['version']
-result['2. Bandwidth']={}
+result['1. Version'] = raw_iperf_data['start']['version']
+result['2. Bandwidth'] = {}
result['2. Bandwidth']['1. throughput Sender (b/s)'] = bits_sent
result['2. Bandwidth']['2. throughput Received (b/s)'] = bits_received
-result['3. CPU']={}
-result['3. CPU']['1. CPU host total (%)']=cpu_host_total_percent
-result['3. CPU']['2. CPU remote total (%)']=cpu_remote_total_percent
+result['3. CPU'] = {}
+result['3. CPU']['1. CPU host total (%)'] = cpu_host_total_percent
+result['3. CPU']['2. CPU remote total (%)'] = cpu_remote_total_percent
-with open('iperf_raw-'+time_stamp+'.log','w+') as ofile:
+with open('iperf_raw-' + time_stamp + '.log', 'w+') as ofile:
ofile.write(json.dumps(raw_iperf_data))
-
+
with open('./result_temp', 'w+') as result_file:
- pickle.dump(result,result_file)
\ No newline at end of file + pickle.dump(result, result_file)
diff --git a/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py b/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py index aed68acf..c3f03dd0 100644 --- a/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py +++ b/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py @@ -1,9 +1,7 @@ import os -import json import pickle import datetime - intmem_copy = os.popen("cat Intmem | grep 'BatchRun Copy' | awk '{print $4}'").read().rstrip() intmem_scale = os.popen("cat Intmem | grep 'BatchRun Scale' | awk '{print $4}'").read().rstrip() intmem_add = os.popen("cat Intmem | grep 'BatchRun Add' | awk '{print $4}'").read().rstrip() @@ -22,35 +20,27 @@ floatmem_average = os.popen("cat Floatmem | grep 'BatchRun AVERAGE' | awk '{pri print floatmem_copy print floatmem_average - hostname = os.popen("hostname").read().rstrip() time_stamp = str(datetime.datetime.utcnow().isoformat()) - os.system("mv Intmem " + hostname + "-" + time_stamp + ".log") os.system("cp Floatmem >> " + hostname + "-" + time_stamp + ".log") +result = {} -result = {}; - -result['1. INTmem bandwidth'] = {}; -result['1. INTmem bandwidth']['1. Copy (MB/s)']=intmem_copy -result['1. INTmem bandwidth']['2. Add (MB/s)']=intmem_add -result['1. INTmem bandwidth']['3. Scale (MB/s)']=intmem_scale -result['1. INTmem bandwidth']['4. Triad (MB/s)']=intmem_triad -result['1. INTmem bandwidth']['5. Average (MB/s)']=intmem_average - - -result['2. FLOATmem bandwidth'] = {}; -result['2. FLOATmem bandwidth']['1. Copy (MB/s)']=floatmem_copy -result['2. FLOATmem bandwidth']['2. Add (MB/s)']=floatmem_add -result['2. FLOATmem bandwidth']['3. Scale (MB/s)']=floatmem_scale -result['2. FLOATmem bandwidth']['4. Triad (MB/s)']=floatmem_triad -result['2. FLOATmem bandwidth']['5. Average (MB/s)']=floatmem_average - +result['1. INTmem bandwidth'] = {} +result['1. INTmem bandwidth']['1. Copy (MB/s)'] = intmem_copy +result['1. INTmem bandwidth']['2. Add (MB/s)'] = intmem_add +result['1. 
INTmem bandwidth']['3. Scale (MB/s)'] = intmem_scale +result['1. INTmem bandwidth']['4. Triad (MB/s)'] = intmem_triad +result['1. INTmem bandwidth']['5. Average (MB/s)'] = intmem_average +result['2. FLOATmem bandwidth'] = {} +result['2. FLOATmem bandwidth']['1. Copy (MB/s)'] = floatmem_copy +result['2. FLOATmem bandwidth']['2. Add (MB/s)'] = floatmem_add +result['2. FLOATmem bandwidth']['3. Scale (MB/s)'] = floatmem_scale +result['2. FLOATmem bandwidth']['4. Triad (MB/s)'] = floatmem_triad +result['2. FLOATmem bandwidth']['5. Average (MB/s)'] = floatmem_average with open('./result_temp', 'w+') as result_file: pickle.dump(result, result_file) - - diff --git a/benchmarks/playbooks/result_transform/ssl/ssl_transform.py b/benchmarks/playbooks/result_transform/ssl/ssl_transform.py index 6e632251..029135ac 100644 --- a/benchmarks/playbooks/result_transform/ssl/ssl_transform.py +++ b/benchmarks/playbooks/result_transform/ssl/ssl_transform.py @@ -1,10 +1,7 @@ import os -import json import pickle import datetime -#total_cpu=os.popen("cat $HOME/tempD/nDPI/example/result.txt | tail -1").read() - openssl_version = os.popen("cat RSA_dump | head -1").read().rstrip() rsa_512_sps = os.popen( "cat RSA_dump | grep '512 bits ' | awk '{print $6}' ").read().rstrip() @@ -23,7 +20,6 @@ rsa_4096_sps = os.popen( rsa_4096_vps = os.popen( "cat RSA_dump | grep '4096 bits ' | awk '{print $7}' ").read().rstrip() - aes_16B = os.popen( "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $2}' ").read().rstrip() aes_64B = os.popen( @@ -35,16 +31,12 @@ aes_1024B = os.popen( aes_8192B = os.popen( "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $6}' ").read().rstrip() - hostname = os.popen("hostname").read().rstrip() time_stamp = str(datetime.datetime.utcnow().isoformat()) - os.system("mv RSA_dump " + hostname + "-" + time_stamp + ".log") os.system("cat AES-128-CBC_dump >> " + hostname + "-" + time_stamp + ".log") - - result = {} result['1. 
Version'] = [openssl_version] @@ -64,4 +56,3 @@ result['3. AES-128-cbc throughput']['5. 8192 Bytes block (B/sec)'] = aes_8192B with open('./result_temp', 'w+') as result_file: pickle.dump(result, result_file) - diff --git a/benchmarks/playbooks/result_transform/ubench_transform.py b/benchmarks/playbooks/result_transform/ubench_transform.py index f15943d7..3c8ba1d8 100644 --- a/benchmarks/playbooks/result_transform/ubench_transform.py +++ b/benchmarks/playbooks/result_transform/ubench_transform.py @@ -1,7 +1,6 @@ import os import json import pickle -import datetime total_cpu = os.popen( "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $1;}' | awk 'NR==1'").read().rstrip() diff --git a/dashboard/pushtoDB.py b/dashboard/pushtoDB.py index 75c1d612..d5458b1d 100644 --- a/dashboard/pushtoDB.py +++ b/dashboard/pushtoDB.py @@ -2,21 +2,25 @@ import requests import json import datetime import os +import sys TEST_DB = 'http://testresults.opnfv.org/test/api/v1' -suite_list = [('compute_result.json', 'compute_test_suite'),('network_result.json', 'network_test_suite'),('storage_result.json', 'storage_test_suite')] -payload_list = { } +suite_list = [('compute_result.json', 'compute_test_suite'), + ('network_result.json', 'network_test_suite'), + ('storage_result.json', 'storage_test_suite')] +payload_list = {} -def push_results_to_db(db_url, case_name, payload,logger=None, pod_name="dell-pod1"): + +def push_results_to_db(db_url, case_name, payload, logger=None, pod_name="dell-pod1"): url = db_url + "/results" - creation_date= str(datetime.datetime.utcnow().isoformat()) + creation_date = str(datetime.datetime.utcnow().isoformat()) installer = os.environ['INSTALLER_TYPE'] pod_name = os.environ['NODE_NAME'] params = {"project_name": "qtip", "case_name": case_name, "pod_name": pod_name, "installer": installer, "start_date": creation_date, - "version": "test" , "details": payload} + "version": "test", "details": payload} headers = {'Content-Type': 
'application/json'} print pod_name @@ -31,13 +35,15 @@ def push_results_to_db(db_url, case_name, payload,logger=None, pod_name="dell-po print "Error:", sys.exc_info()[0] return False + def populate_payload(suite_list): global payload_list - for k,v in suite_list: + for k, v in suite_list: + + if os.path.isfile('results/' + str(k)): + payload_list[k] = v - if os.path.isfile('results/'+str(k)): - payload_list[k]=v def main(): @@ -45,10 +51,10 @@ def main(): populate_payload(suite_list) if payload_list: print payload_list - for suite,case in payload_list.items(): - with open('results/'+suite,'r') as result_file: - j=json.load(result_file) - push_results_to_db(TEST_DB, case , j) + for suite, case in payload_list.items(): + with open('results/' + suite, 'r') as result_file: + j = json.load(result_file) + push_results_to_db(TEST_DB, case, j) elif not payload_list: print 'Results not found' diff --git a/data/ref_results/compute_benchmarks_indices.py b/data/ref_results/compute_benchmarks_indices.py index 9aaff888..e46b8771 100644 --- a/data/ref_results/compute_benchmarks_indices.py +++ b/data/ref_results/compute_benchmarks_indices.py @@ -1,163 +1,161 @@ from index_calculation import generic_index as get_index -from index_calculation import get_reference +from index_calculation import get_reference from result_accum import result_concat as concat -def dpi_index (): - dpi_dict=concat('../../results/dpi/') - dpi_bm_ref = get_reference('compute','dpi_bm') - dpi_bm_index= get_index(dpi_dict,'dpi_bm',dpi_bm_ref,'4 DPI result', 'DPI_benchmark(Gb/s)') - - dpi_vm_ref = get_reference('compute','dpi_vm') - dpi_vm_index= get_index(dpi_dict,'dpi_vm',dpi_vm_ref,'4 DPI result', 'DPI_benchmark(Gb/s)') - dpi_index=(dpi_bm_index+dpi_vm_index)/2 - dpi_dict_i={}; - dpi_dict_i['1. Index']=dpi_index - dpi_dict_i['2. 
Results']=dpi_dict - return dpi_dict_i +def dpi_index(): + dpi_dict = concat('../../results/dpi/') + dpi_bm_ref = get_reference('compute', 'dpi_bm') + dpi_bm_index = get_index(dpi_dict, 'dpi_bm', dpi_bm_ref, '4 DPI result', 'DPI_benchmark(Gb/s)') + + dpi_vm_ref = get_reference('compute', 'dpi_vm') + dpi_vm_index = get_index(dpi_dict, 'dpi_vm', dpi_vm_ref, '4 DPI result', 'DPI_benchmark(Gb/s)') + + dpi_index = (dpi_bm_index + dpi_vm_index) / 2 + dpi_dict_i = {} + dpi_dict_i['1. Index'] = dpi_index + dpi_dict_i['2. Results'] = dpi_dict + return dpi_dict_i + def dhrystone_index(): - dhrystone_dict=concat('../../results/dhrystone/') - dhrystone_single_bm_ref = get_reference('compute','dhrystone_bm','single_cpu') - dhrystone_single_bm_index= get_index(dhrystone_dict,'dhrystone_bm',dhrystone_single_bm_ref,'4 Dhrystone result','2.Single CPU test','2.Index score') - - dhrystone_multi_bm_ref = get_reference('compute','dhrystone_bm','multi_cpu') - dhrystone_multi_bm_index= get_index(dhrystone_dict,'dhrystone_bm',dhrystone_multi_bm_ref,'4 Dhrystone result','3.Multi CPU test','2.Index score') - - dhrystone_bm_index=(dhrystone_single_bm_index+dhrystone_multi_bm_index)/2 - - dhrystone_single_vm_ref = get_reference('compute','dhrystone_vm','single_cpu') - dhrystone_single_vm_index= get_index(dhrystone_dict,'dhrystone_vm',dhrystone_single_vm_ref,'4 Dhrystone result','2.Single CPU test','2.Index score') - - dhrystone_multi_vm_ref = get_reference('compute','dhrystone_vm','multi_cpu') - dhrystone_multi_vm_index= get_index(dhrystone_dict,'dhrystone_vm',dhrystone_multi_vm_ref,'4 Dhrystone result','3.Multi CPU test','2.Index score') - - dhrystone_vm_index=(dhrystone_single_vm_index+dhrystone_multi_vm_index)/2 - - - dhrystone_index=(dhrystone_bm_index+dhrystone_vm_index)/2 - dhrystone_dict_i={}; - dhrystone_dict_i['1. Index']=dhrystone_index - dhrystone_dict_i['2. 
Results']=dhrystone_dict + dhrystone_dict = concat('../../results/dhrystone/') + dhrystone_single_bm_ref = get_reference('compute', 'dhrystone_bm', 'single_cpu') + dhrystone_single_bm_index = get_index(dhrystone_dict, 'dhrystone_bm', dhrystone_single_bm_ref, '4 Dhrystone result', '2.Single CPU test', '2.Index score') + + dhrystone_multi_bm_ref = get_reference('compute', 'dhrystone_bm', 'multi_cpu') + dhrystone_multi_bm_index = get_index(dhrystone_dict, 'dhrystone_bm', dhrystone_multi_bm_ref, '4 Dhrystone result', '3.Multi CPU test', '2.Index score') + + dhrystone_bm_index = (dhrystone_single_bm_index + dhrystone_multi_bm_index) / 2 + + dhrystone_single_vm_ref = get_reference('compute', 'dhrystone_vm', 'single_cpu') + dhrystone_single_vm_index = get_index(dhrystone_dict, 'dhrystone_vm', dhrystone_single_vm_ref, '4 Dhrystone result', '2.Single CPU test', '2.Index score') + + dhrystone_multi_vm_ref = get_reference('compute', 'dhrystone_vm', 'multi_cpu') + dhrystone_multi_vm_index = get_index(dhrystone_dict, 'dhrystone_vm', dhrystone_multi_vm_ref, '4 Dhrystone result', '3.Multi CPU test', '2.Index score') + + dhrystone_vm_index = (dhrystone_single_vm_index + dhrystone_multi_vm_index) / 2 + + dhrystone_index = (dhrystone_bm_index + dhrystone_vm_index) / 2 + dhrystone_dict_i = {} + dhrystone_dict_i['1. Index'] = dhrystone_index + dhrystone_dict_i['2. 
Results'] = dhrystone_dict return dhrystone_dict_i + def whetstone_index(): - whetstone_dict=concat('../../results/whetstone/') - whetstone_single_bm_ref = get_reference('compute','whetstone_bm','single_cpu') - whetstone_single_bm_index= get_index(whetstone_dict,'whetstone_bm',whetstone_single_bm_ref,'4 Whetstone result','2.Single CPU test','2.Index score') - - whetstone_multi_bm_ref = get_reference('compute','whetstone_bm','multi_cpu') - whetstone_multi_bm_index= get_index(whetstone_dict,'whetstone_bm',whetstone_multi_bm_ref,'4 Whetstone result','3.Multi CPU test','2.Index score') - - whetstone_bm_index=(whetstone_single_bm_index+whetstone_multi_bm_index)/2 - - whetstone_single_vm_ref = get_reference('compute','whetstone_vm','single_cpu') - whetstone_single_vm_index= get_index(whetstone_dict,'whetstone_vm',whetstone_single_vm_ref,'4 Whetstone result','2.Single CPU test','2.Index score') - - whetstone_multi_vm_ref = get_reference('compute','whetstone_vm','multi_cpu') - whetstone_multi_vm_index= get_index(whetstone_dict,'whetstone_vm',whetstone_multi_vm_ref,'4 Whetstone result','3.Multi CPU test','2.Index score') - - whetstone_vm_index=(whetstone_single_vm_index+whetstone_multi_vm_index)/2 - - whetstone_index=(whetstone_bm_index+whetstone_vm_index)/2 - whetstone_dict_i={}; - whetstone_dict_i['1. Index']=whetstone_index - whetstone_dict_i['2. 
Results']=whetstone_dict + whetstone_dict = concat('../../results/whetstone/') + whetstone_single_bm_ref = get_reference('compute', 'whetstone_bm', 'single_cpu') + whetstone_single_bm_index = get_index(whetstone_dict, 'whetstone_bm', whetstone_single_bm_ref, '4 Whetstone result', '2.Single CPU test', '2.Index score') + + whetstone_multi_bm_ref = get_reference('compute', 'whetstone_bm', 'multi_cpu') + whetstone_multi_bm_index = get_index(whetstone_dict, 'whetstone_bm', whetstone_multi_bm_ref, '4 Whetstone result', '3.Multi CPU test', '2.Index score') + + whetstone_bm_index = (whetstone_single_bm_index + whetstone_multi_bm_index) / 2 + + whetstone_single_vm_ref = get_reference('compute', 'whetstone_vm', 'single_cpu') + whetstone_single_vm_index = get_index(whetstone_dict, 'whetstone_vm', whetstone_single_vm_ref, '4 Whetstone result', '2.Single CPU test', '2.Index score') + + whetstone_multi_vm_ref = get_reference('compute', 'whetstone_vm', 'multi_cpu') + whetstone_multi_vm_index = get_index(whetstone_dict, 'whetstone_vm', whetstone_multi_vm_ref, '4 Whetstone result', '3.Multi CPU test', '2.Index score') + + whetstone_vm_index = (whetstone_single_vm_index + whetstone_multi_vm_index) / 2 + + whetstone_index = (whetstone_bm_index + whetstone_vm_index) / 2 + whetstone_dict_i = {} + whetstone_dict_i['1. Index'] = whetstone_index + whetstone_dict_i['2. Results'] = whetstone_dict return whetstone_dict_i -def ramspeed_index (): - - ramspeed_dict=concat('../../results/ramspeed/') - ramspeed_int_bm_ref=get_reference('compute','ramspeed_bm','INTmem','Average (MB/s)') - ramspeed_int_bm_index=get_index(ramspeed_dict, 'ramspeed_bm', ramspeed_int_bm_ref,'4 RamSpeed result','1. INTmem bandwidth','5. Average (MB/s)') - - ramspeed_float_bm_ref=get_reference('compute','ramspeed_bm','FLOATmem','Average (MB/s)') - ramspeed_float_bm_index=get_index(ramspeed_dict, 'ramspeed_bm', ramspeed_float_bm_ref,'4 RamSpeed result','2. FLOATmem bandwidth','5. 
Average (MB/s)') - - ramspeed_bm_index=(ramspeed_int_bm_index+ramspeed_float_bm_index)/2 - - ramspeed_int_vm_ref=get_reference('compute','ramspeed_vm','INTmem','Average (MB/s)') - ramspeed_int_vm_index=get_index(ramspeed_dict, 'ramspeed_vm', ramspeed_int_vm_ref,'4 RamSpeed result','1. INTmem bandwidth','5. Average (MB/s)') - - ramspeed_float_vm_ref=get_reference('compute','ramspeed_vm','FLOATmem','Average (MB/s)') - ramspeed_float_vm_index=get_index(ramspeed_dict, 'ramspeed_vm', ramspeed_float_vm_ref,'4 RamSpeed result','2. FLOATmem bandwidth','5. Average (MB/s)') - - ramspeed_vm_index=(ramspeed_int_vm_index+ramspeed_float_vm_index)/2 - - ramspeed_index=(ramspeed_vm_index+ramspeed_bm_index)/2 - - ramspeed_dict_i={}; - ramspeed_dict_i['1. Index']=ramspeed_index - ramspeed_dict_i['2. Results']=ramspeed_dict + +def ramspeed_index(): + + ramspeed_dict = concat('../../results/ramspeed/') + ramspeed_int_bm_ref = get_reference('compute', 'ramspeed_bm', 'INTmem', 'Average (MB/s)') + ramspeed_int_bm_index = get_index(ramspeed_dict, 'ramspeed_bm', ramspeed_int_bm_ref, '4 RamSpeed result', '1. INTmem bandwidth', '5. Average (MB/s)') + + ramspeed_float_bm_ref = get_reference('compute', 'ramspeed_bm', 'FLOATmem', 'Average (MB/s)') + ramspeed_float_bm_index = get_index(ramspeed_dict, 'ramspeed_bm', ramspeed_float_bm_ref, '4 RamSpeed result', '2. FLOATmem bandwidth', '5. Average (MB/s)') + + ramspeed_bm_index = (ramspeed_int_bm_index + ramspeed_float_bm_index) / 2 + + ramspeed_int_vm_ref = get_reference('compute', 'ramspeed_vm', 'INTmem', 'Average (MB/s)') + ramspeed_int_vm_index = get_index(ramspeed_dict, 'ramspeed_vm', ramspeed_int_vm_ref, '4 RamSpeed result', '1. INTmem bandwidth', '5. Average (MB/s)') + + ramspeed_float_vm_ref = get_reference('compute', 'ramspeed_vm', 'FLOATmem', 'Average (MB/s)') + ramspeed_float_vm_index = get_index(ramspeed_dict, 'ramspeed_vm', ramspeed_float_vm_ref, '4 RamSpeed result', '2. FLOATmem bandwidth', '5. 
Average (MB/s)') + + ramspeed_vm_index = (ramspeed_int_vm_index + ramspeed_float_vm_index) / 2 + + ramspeed_index = (ramspeed_vm_index + ramspeed_bm_index) / 2 + + ramspeed_dict_i = {} + ramspeed_dict_i['1. Index'] = ramspeed_index + ramspeed_dict_i['2. Results'] = ramspeed_dict return ramspeed_dict_i -def ssl_index (): - - ssl_dict=concat('../../results/ssl/') - - ssl_RSA512b_bm_ref=get_reference('compute','ssl_bm','RSA','512b') - ssl_RSA1024b_bm_ref=get_reference('compute','ssl_bm','RSA','1024b') - ssl_RSA2048b_bm_ref=get_reference('compute','ssl_bm','RSA','2048b') - ssl_RSA4096b_bm_ref=get_reference('compute','ssl_bm','RSA','4096b') - - - ssl_AES16B_bm_ref=get_reference('compute','ssl_bm','AES','16B') - ssl_AES64B_bm_ref=get_reference('compute','ssl_bm','AES','64B') - ssl_AES256B_bm_ref=get_reference('compute','ssl_bm','AES','256B') - ssl_AES1024B_bm_ref=get_reference('compute','ssl_bm','AES','1024B') - ssl_AES8192B_bm_ref=get_reference('compute','ssl_bm','AES','8192B') - - ssl_RSA512b_bm_index=get_index(ssl_dict, "ssl_bm", ssl_RSA512b_bm_ref,'4 SSL result','2. RSA signatures','1. 512 bits (sign/s)') - ssl_RSA1024b_bm_index=get_index(ssl_dict, "ssl_bm", ssl_RSA1024b_bm_ref,'4 SSL result','2. RSA signatures','2. 1024 bits (sign/s)') - ssl_RSA2048b_bm_index=get_index(ssl_dict, "ssl_bm", ssl_RSA2048b_bm_ref,'4 SSL result','2. RSA signatures','3. 2048 bits (sign/s)') - ssl_RSA4096b_bm_index=get_index(ssl_dict, "ssl_bm", ssl_RSA4096b_bm_ref,'4 SSL result','2. RSA signatures','4. 4096 bits (sign/s)') - ssl_RSA_bm_index=(ssl_RSA512b_bm_index+ssl_RSA1024b_bm_index+ssl_RSA2048b_bm_index+ssl_RSA4096b_bm_index)/4 - - - ssl_AES16B_bm_index=get_index(ssl_dict, "ssl_bm", ssl_AES16B_bm_ref,'4 SSL result','3. AES-128-cbc throughput','1. 16 Bytes block (B/sec)') - ssl_AES64B_bm_index=get_index(ssl_dict, "ssl_bm", ssl_AES64B_bm_ref,'4 SSL result','3. AES-128-cbc throughput','2. 
64 Bytes block (B/sec)') - ssl_AES256B_bm_index=get_index(ssl_dict, "ssl_bm", ssl_AES256B_bm_ref,'4 SSL result','3. AES-128-cbc throughput','3. 256 Bytes block (B/sec)') - ssl_AES1024B_bm_index=get_index(ssl_dict, "ssl_bm", ssl_AES1024B_bm_ref,'4 SSL result','3. AES-128-cbc throughput','4. 1024 Bytes block (B/sec)') - ssl_AES8192B_bm_index=get_index(ssl_dict, "ssl_bm", ssl_AES8192B_bm_ref,'4 SSL result','3. AES-128-cbc throughput','5. 8192 Bytes block (B/sec)') - ssl_AES_bm_index=(ssl_AES16B_bm_index+ssl_AES64B_bm_index+ssl_AES256B_bm_index+ssl_AES1024B_bm_index+ssl_AES8192B_bm_index)/5 - - ssl_bm_index=(ssl_RSA_bm_index+ssl_AES_bm_index)/2 - - ssl_RSA512b_vm_ref=get_reference('compute','ssl_vm','RSA','512b') - ssl_RSA1024b_vm_ref=get_reference('compute','ssl_vm','RSA','1024b') - ssl_RSA2048b_vm_ref=get_reference('compute','ssl_vm','RSA','2048b') - ssl_RSA4096b_vm_ref=get_reference('compute','ssl_vm','RSA','4096b') - - - ssl_AES16B_vm_ref=get_reference('compute','ssl_vm','AES','16B') - ssl_AES64B_vm_ref=get_reference('compute','ssl_vm','AES','64B') - ssl_AES256B_vm_ref=get_reference('compute','ssl_vm','AES','256B') - ssl_AES1024B_vm_ref=get_reference('compute','ssl_vm','AES','1024B') - ssl_AES8192B_vm_ref=get_reference('compute','ssl_vm','AES','8192B') - - ssl_RSA512b_vm_index=get_index(ssl_dict, "ssl_vm", ssl_RSA512b_vm_ref,'4 SSL result','2. RSA signatures','1. 512 bits (sign/s)') - ssl_RSA1024b_vm_index=get_index(ssl_dict, "ssl_vm", ssl_RSA1024b_vm_ref,'4 SSL result','2. RSA signatures','2. 1024 bits (sign/s)') - ssl_RSA2048b_vm_index=get_index(ssl_dict, "ssl_vm", ssl_RSA2048b_vm_ref,'4 SSL result','2. RSA signatures','3. 2048 bits (sign/s)') - ssl_RSA4096b_vm_index=get_index(ssl_dict, "ssl_vm", ssl_RSA4096b_vm_ref,'4 SSL result','2. RSA signatures','4. 
4096 bits (sign/s)') - ssl_RSA_vm_index=(ssl_RSA512b_vm_index+ssl_RSA1024b_vm_index+ssl_RSA2048b_vm_index+ssl_RSA4096b_vm_index)/4 - - - ssl_AES16B_vm_index=get_index(ssl_dict, "ssl_vm", ssl_AES16B_vm_ref,'4 SSL result','3. AES-128-cbc throughput','1. 16 Bytes block (B/sec)') - ssl_AES64B_vm_index=get_index(ssl_dict, "ssl_vm", ssl_AES64B_vm_ref,'4 SSL result','3. AES-128-cbc throughput','2. 64 Bytes block (B/sec)') - ssl_AES256B_vm_index=get_index(ssl_dict, "ssl_vm", ssl_AES256B_vm_ref,'4 SSL result','3. AES-128-cbc throughput','3. 256 Bytes block (B/sec)') - ssl_AES1024B_vm_index=get_index(ssl_dict, "ssl_vm", ssl_AES1024B_vm_ref,'4 SSL result','3. AES-128-cbc throughput','4. 1024 Bytes block (B/sec)') - ssl_AES8192B_vm_index=get_index(ssl_dict, "ssl_vm", ssl_AES8192B_vm_ref,'4 SSL result','3. AES-128-cbc throughput','5. 8192 Bytes block (B/sec)') - ssl_AES_vm_index=(ssl_AES16B_vm_index+ssl_AES64B_vm_index+ssl_AES256B_vm_index+ssl_AES1024B_vm_index+ssl_AES8192B_vm_index)/5 - - ssl_vm_index=(ssl_RSA_vm_index+ssl_AES_vm_index)/2 - - ssl_index=(ssl_bm_index+ssl_vm_index)/2 - - - ssl_dict_i={}; - ssl_dict_i['1. Index']=ssl_index - ssl_dict_i['2. 
Results']=ssl_dict +def ssl_index(): + + ssl_dict = concat('../../results/ssl/') + + ssl_RSA512b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '512b') + ssl_RSA1024b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '1024b') + ssl_RSA2048b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '2048b') + ssl_RSA4096b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '4096b') + + ssl_AES16B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '16B') + ssl_AES64B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '64B') + ssl_AES256B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '256B') + ssl_AES1024B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '1024B') + ssl_AES8192B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '8192B') + + ssl_RSA512b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA512b_bm_ref, '4 SSL result', '2. RSA signatures', '1. 512 bits (sign/s)') + ssl_RSA1024b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA1024b_bm_ref, '4 SSL result', '2. RSA signatures', '2. 1024 bits (sign/s)') + ssl_RSA2048b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA2048b_bm_ref, '4 SSL result', '2. RSA signatures', '3. 2048 bits (sign/s)') + ssl_RSA4096b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA4096b_bm_ref, '4 SSL result', '2. RSA signatures', '4. 4096 bits (sign/s)') + ssl_RSA_bm_index = (ssl_RSA512b_bm_index + ssl_RSA1024b_bm_index + ssl_RSA2048b_bm_index + ssl_RSA4096b_bm_index) / 4 + + ssl_AES16B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES16B_bm_ref, '4 SSL result', '3. AES-128-cbc throughput', '1. 16 Bytes block (B/sec)') + ssl_AES64B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES64B_bm_ref, '4 SSL result', '3. AES-128-cbc throughput', '2. 64 Bytes block (B/sec)') + ssl_AES256B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES256B_bm_ref, '4 SSL result', '3. AES-128-cbc throughput', '3. 256 Bytes block (B/sec)') + ssl_AES1024B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES1024B_bm_ref, '4 SSL result', '3. AES-128-cbc throughput', '4. 
1024 Bytes block (B/sec)') + ssl_AES8192B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES8192B_bm_ref, '4 SSL result', '3. AES-128-cbc throughput', '5. 8192 Bytes block (B/sec)') + ssl_AES_bm_index = (ssl_AES16B_bm_index + ssl_AES64B_bm_index + ssl_AES256B_bm_index + ssl_AES1024B_bm_index + ssl_AES8192B_bm_index) / 5 + + ssl_bm_index = (ssl_RSA_bm_index + ssl_AES_bm_index) / 2 + + ssl_RSA512b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '512b') + ssl_RSA1024b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '1024b') + ssl_RSA2048b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '2048b') + ssl_RSA4096b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '4096b') + + ssl_AES16B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '16B') + ssl_AES64B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '64B') + ssl_AES256B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '256B') + ssl_AES1024B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '1024B') + ssl_AES8192B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '8192B') + + ssl_RSA512b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA512b_vm_ref, '4 SSL result', '2. RSA signatures', '1. 512 bits (sign/s)') + ssl_RSA1024b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA1024b_vm_ref, '4 SSL result', '2. RSA signatures', '2. 1024 bits (sign/s)') + ssl_RSA2048b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA2048b_vm_ref, '4 SSL result', '2. RSA signatures', '3. 2048 bits (sign/s)') + ssl_RSA4096b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA4096b_vm_ref, '4 SSL result', '2. RSA signatures', '4. 4096 bits (sign/s)') + ssl_RSA_vm_index = (ssl_RSA512b_vm_index + ssl_RSA1024b_vm_index + ssl_RSA2048b_vm_index + ssl_RSA4096b_vm_index) / 4 + + ssl_AES16B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES16B_vm_ref, '4 SSL result', '3. AES-128-cbc throughput', '1. 16 Bytes block (B/sec)') + ssl_AES64B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES64B_vm_ref, '4 SSL result', '3. AES-128-cbc throughput', '2. 
64 Bytes block (B/sec)') + ssl_AES256B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES256B_vm_ref, '4 SSL result', '3. AES-128-cbc throughput', '3. 256 Bytes block (B/sec)') + ssl_AES1024B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES1024B_vm_ref, '4 SSL result', '3. AES-128-cbc throughput', '4. 1024 Bytes block (B/sec)') + ssl_AES8192B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES8192B_vm_ref, '4 SSL result', '3. AES-128-cbc throughput', '5. 8192 Bytes block (B/sec)') + ssl_AES_vm_index = (ssl_AES16B_vm_index + ssl_AES64B_vm_index + ssl_AES256B_vm_index + ssl_AES1024B_vm_index + ssl_AES8192B_vm_index) / 5 + + ssl_vm_index = (ssl_RSA_vm_index + ssl_AES_vm_index) / 2 + + ssl_index = (ssl_bm_index + ssl_vm_index) / 2 + + ssl_dict_i = {} + ssl_dict_i['1. Index'] = ssl_index + ssl_dict_i['2. Results'] = ssl_dict return ssl_dict_i diff --git a/data/ref_results/compute_suite.py b/data/ref_results/compute_suite.py index 86e8a877..bcaf83c8 100644 --- a/data/ref_results/compute_suite.py +++ b/data/ref_results/compute_suite.py @@ -2,56 +2,49 @@ import json import compute_benchmarks_indices as benchmark_indices -compute_dict={}; - +compute_dict = {} try: - compute_dict['DPI']=benchmark_indices.dpi_index() -except OSError: + compute_dict['DPI'] = benchmark_indices.dpi_index() +except OSError: pass - - try: - compute_dict['Dhrystone']=benchmark_indices.dhrystone_index() + compute_dict['Dhrystone'] = benchmark_indices.dhrystone_index() except OSError: pass - - try: - compute_dict['Whetstone']=benchmark_indices.whetstone_index() + compute_dict['Whetstone'] = benchmark_indices.whetstone_index() except OSError: pass try: - compute_dict['SSL']=benchmark_indices.ssl_index() + compute_dict['SSL'] = benchmark_indices.ssl_index() except OSError: pass try: - compute_dict['RamSpeed']=benchmark_indices.ramspeed_index() + compute_dict['RamSpeed'] = benchmark_indices.ramspeed_index() except OSError: pass +compute_bench_list = ['DPI', 'Dhrystone', 'Whetstone', 'SSL', 'RamSpeed'] +l = 
len(compute_bench_list) -compute_bench_list=['DPI','Dhrystone','Whetstone','SSL','RamSpeed'] -l=len(compute_bench_list) - -temp=0 +temp = 0 for benchmark in compute_bench_list: try: - temp=temp+float(compute_dict[benchmark]['1. Index']) + temp = temp + float(compute_dict[benchmark]['1. Index']) except KeyError: - l=l-1 + l = l - 1 pass if l == 0: print "No compute suite results found" else: - compute_suite_index=temp/l - compute_dict_f={}; - compute_dict_f['index']=compute_suite_index - compute_dict_f['suite results']=compute_dict + compute_suite_index = temp / l + compute_dict_f = {} + compute_dict_f['index'] = compute_suite_index + compute_dict_f['suite results'] = compute_dict with open('../../results/compute_result.json', 'w+') as result_json: json.dump(compute_dict_f, result_json, indent=4, sort_keys=True) - diff --git a/data/ref_results/generator_ref_json.py b/data/ref_results/generator_ref_json.py index f5b8e12a..6b2d813c 100644 --- a/data/ref_results/generator_ref_json.py +++ b/data/ref_results/generator_ref_json.py @@ -1,85 +1,81 @@ -import os import json -dict_ref={}; -dict_ref['compute']={}; -dict_ref['compute']['dpi_bm']=8.12 -dict_ref['compute']['dpi_vm']=22.12 +dict_ref = {} +dict_ref['compute'] = {} +dict_ref['compute']['dpi_bm'] = 8.12 +dict_ref['compute']['dpi_vm'] = 22.12 -dict_ref['compute']['whetstone_bm']={}; -dict_ref['compute']['whetstone_vm']={}; -dict_ref['compute']['whetstone_bm']['single_cpu']=806.1 -dict_ref['compute']['whetstone_bm']['multi_cpu']=41483.3 -dict_ref['compute']['whetstone_vm']['single_cpu']=789.0 -dict_ref['compute']['whetstone_vm']['multi_cpu']=2950.6 +dict_ref['compute']['whetstone_bm'] = {} +dict_ref['compute']['whetstone_vm'] = {} +dict_ref['compute']['whetstone_bm']['single_cpu'] = 806.1 +dict_ref['compute']['whetstone_bm']['multi_cpu'] = 41483.3 +dict_ref['compute']['whetstone_vm']['single_cpu'] = 789.0 +dict_ref['compute']['whetstone_vm']['multi_cpu'] = 2950.6 -dict_ref['compute']['dhrystone_bm']={}; 
-dict_ref['compute']['dhrystone_vm']={}; -dict_ref['compute']['dhrystone_bm']['single_cpu']=3231.7 -dict_ref['compute']['dhrystone_bm']['multi_cpu']=103362.1 -dict_ref['compute']['dhrystone_vm']['single_cpu']=2953.6 -dict_ref['compute']['dhrystone_vm']['multi_cpu']=10585.8 +dict_ref['compute']['dhrystone_bm'] = {} +dict_ref['compute']['dhrystone_vm'] = {} +dict_ref['compute']['dhrystone_bm']['single_cpu'] = 3231.7 +dict_ref['compute']['dhrystone_bm']['multi_cpu'] = 103362.1 +dict_ref['compute']['dhrystone_vm']['single_cpu'] = 2953.6 +dict_ref['compute']['dhrystone_vm']['multi_cpu'] = 10585.8 -dict_ref['compute']['ssl_bm']={}; -dict_ref['compute']['ssl_bm']['RSA']={}; -dict_ref['compute']['ssl_bm']['AES']={}; -dict_ref['compute']['ssl_bm']['RSA']['512b']=22148.9 -dict_ref['compute']['ssl_bm']['RSA']['1024b']=7931.44 -dict_ref['compute']['ssl_bm']['RSA']['2048b']=1544.3 -dict_ref['compute']['ssl_bm']['RSA']['4096b']=161.92 -dict_ref['compute']['ssl_bm']['AES']['16B']=735490250 -dict_ref['compute']['ssl_bm']['AES']['64B']=788429210 -dict_ref['compute']['ssl_bm']['AES']['256B']=803323650 -dict_ref['compute']['ssl_bm']['AES']['1024B']=808861020 -dict_ref['compute']['ssl_bm']['AES']['8192B']=807701160 +dict_ref['compute']['ssl_bm'] = {} +dict_ref['compute']['ssl_bm']['RSA'] = {} +dict_ref['compute']['ssl_bm']['AES'] = {} +dict_ref['compute']['ssl_bm']['RSA']['512b'] = 22148.9 +dict_ref['compute']['ssl_bm']['RSA']['1024b'] = 7931.44 +dict_ref['compute']['ssl_bm']['RSA']['2048b'] = 1544.3 +dict_ref['compute']['ssl_bm']['RSA']['4096b'] = 161.92 +dict_ref['compute']['ssl_bm']['AES']['16B'] = 735490250 +dict_ref['compute']['ssl_bm']['AES']['64B'] = 788429210 +dict_ref['compute']['ssl_bm']['AES']['256B'] = 803323650 +dict_ref['compute']['ssl_bm']['AES']['1024B'] = 808861020 +dict_ref['compute']['ssl_bm']['AES']['8192B'] = 807701160 -dict_ref['compute']['ssl_vm']={}; -dict_ref['compute']['ssl_vm']['RSA']={}; -dict_ref['compute']['ssl_vm']['AES']={}; 
-dict_ref['compute']['ssl_vm']['RSA']['512b']=22148.9 -dict_ref['compute']['ssl_vm']['RSA']['1024b']=7931.44 -dict_ref['compute']['ssl_vm']['RSA']['2048b']=1544.3 -dict_ref['compute']['ssl_vm']['RSA']['4096b']=161.92 -dict_ref['compute']['ssl_vm']['AES']['16B']=735490250 -dict_ref['compute']['ssl_vm']['AES']['64B']=788429210 -dict_ref['compute']['ssl_vm']['AES']['256B']=803323650 -dict_ref['compute']['ssl_vm']['AES']['1024B']=808861020 -dict_ref['compute']['ssl_vm']['AES']['8192B']=807701160 +dict_ref['compute']['ssl_vm'] = {} +dict_ref['compute']['ssl_vm']['RSA'] = {} +dict_ref['compute']['ssl_vm']['AES'] = {} +dict_ref['compute']['ssl_vm']['RSA']['512b'] = 22148.9 +dict_ref['compute']['ssl_vm']['RSA']['1024b'] = 7931.44 +dict_ref['compute']['ssl_vm']['RSA']['2048b'] = 1544.3 +dict_ref['compute']['ssl_vm']['RSA']['4096b'] = 161.92 +dict_ref['compute']['ssl_vm']['AES']['16B'] = 735490250 +dict_ref['compute']['ssl_vm']['AES']['64B'] = 788429210 +dict_ref['compute']['ssl_vm']['AES']['256B'] = 803323650 +dict_ref['compute']['ssl_vm']['AES']['1024B'] = 808861020 +dict_ref['compute']['ssl_vm']['AES']['8192B'] = 807701160 +dict_ref['compute']['ramspeed_bm'] = {} +dict_ref['compute']['ramspeed_bm']['INTmem'] = {} +dict_ref['compute']['ramspeed_bm']['FLOATmem'] = {} +dict_ref['compute']['ramspeed_bm']['INTmem']['Average (MB/s)'] = 12268.38 +dict_ref['compute']['ramspeed_bm']['FLOATmem']['Average (MB/s)'] = 9758.79 -dict_ref['compute']['ramspeed_bm']={}; -dict_ref['compute']['ramspeed_bm']['INTmem']={}; -dict_ref['compute']['ramspeed_bm']['FLOATmem']={}; -dict_ref['compute']['ramspeed_bm']['INTmem']['Average (MB/s)']=12268.38 -dict_ref['compute']['ramspeed_bm']['FLOATmem']['Average (MB/s)']=9758.79 +dict_ref['compute']['ramspeed_vm'] = {} +dict_ref['compute']['ramspeed_vm']['INTmem'] = {} +dict_ref['compute']['ramspeed_vm']['FLOATmem'] = {} +dict_ref['compute']['ramspeed_vm']['INTmem']['Average (MB/s)'] = 12147.59 +dict_ref['compute']['ramspeed_vm']['FLOATmem']['Average 
(MB/s)'] = 9064.09 -dict_ref['compute']['ramspeed_vm']={}; -dict_ref['compute']['ramspeed_vm']['INTmem']={}; -dict_ref['compute']['ramspeed_vm']['FLOATmem']={}; -dict_ref['compute']['ramspeed_vm']['INTmem']['Average (MB/s)']=12147.59 -dict_ref['compute']['ramspeed_vm']['FLOATmem']['Average (MB/s)']=9064.09 +dict_ref['storage'] = {} +dict_ref['storage']['fio_bm'] = {} +dict_ref['storage']['fio_bm']['read'] = {} +dict_ref['storage']['fio_bm']['write'] = {} +dict_ref['storage']['fio_bm']['read']['IOPS'] = 6693 +dict_ref['storage']['fio_bm']['write']['IOPS'] = 6688 +dict_ref['storage']['fio_vm'] = {} +dict_ref['storage']['fio_vm']['read'] = {} +dict_ref['storage']['fio_vm']['write'] = {} +dict_ref['storage']['fio_vm']['read']['IOPS'] = 2239 +dict_ref['storage']['fio_vm']['write']['IOPS'] = 2237 -dict_ref['storage']={}; -dict_ref['storage']['fio_bm']={}; -dict_ref['storage']['fio_bm']['read']={}; -dict_ref['storage']['fio_bm']['write']={}; -dict_ref['storage']['fio_bm']['read']['IOPS']=6693 -dict_ref['storage']['fio_bm']['write']['IOPS']=6688 - -dict_ref['storage']['fio_vm']={}; -dict_ref['storage']['fio_vm']['read']={}; -dict_ref['storage']['fio_vm']['write']={}; -dict_ref['storage']['fio_vm']['read']['IOPS']=2239 -dict_ref['storage']['fio_vm']['write']['IOPS']=2237 - -dict_ref['network']={}; -dict_ref['network']['iperf_bm']={}; -dict_ref['network']['iperf_vm']={}; -dict_ref['network']['iperf_vm_2']={}; -dict_ref['network']['iperf_bm']['throughput received(b/s)']=944473000.0 -dict_ref['network']['iperf_vm']['throughput received(b/s)']=14416700000.0 -dict_ref['network']['iperf_vm_2']['throughput received(b/s)']=2461530000.0 +dict_ref['network'] = {} +dict_ref['network']['iperf_bm'] = {} +dict_ref['network']['iperf_vm'] = {} +dict_ref['network']['iperf_vm_2'] = {} +dict_ref['network']['iperf_bm']['throughput received(b/s)'] = 944473000.0 +dict_ref['network']['iperf_vm']['throughput received(b/s)'] = 14416700000.0 +dict_ref['network']['iperf_vm_2']['throughput 
received(b/s)'] = 2461530000.0 with open('reference.json', 'w+') as result_json: json.dump(dict_ref, result_json, indent=4, sort_keys=True) - diff --git a/data/ref_results/index_calculation.py b/data/ref_results/index_calculation.py index ed597fb8..e3c75350 100644 --- a/data/ref_results/index_calculation.py +++ b/data/ref_results/index_calculation.py @@ -1,44 +1,41 @@ import json -from cinderclient.utils import arg -from result_accum import result_concat as concat -def compute_index(total_measured,ref_result,count): + +def compute_index(total_measured, ref_result, count): try: - average=float(total_measured/count) - + average = float(total_measured / count) + except ZeroDivisionError: - average=0 - - index=average/ref_result + average = 0 + index = average / ref_result return index -def get_reference (*args): - - with open ('./reference.json') as reference_file: - reference_djson=json.load(reference_file) - temp=list(args) - for arg in args: - ref_n=reference_djson.get(str(arg)) - reference_djson=reference_djson.get(str(arg)) - +def get_reference(*args): + + with open('./reference.json') as reference_file: + reference_djson = json.load(reference_file) + for arg in args: + ref_n = reference_djson.get(str(arg)) + reference_djson = reference_djson.get(str(arg)) return ref_n -def generic_index(dict_gen,testcase,reference_num,*args): - c=len(args) - count=0 - total=0 - result=0 - for k,v in dict_gen.iteritems(): - dict_temp=dict_gen[k] + +def generic_index(dict_gen, testcase, reference_num, *args): + c = len(args) + count = 0 + total = 0 + result = 0 + for k, v in dict_gen.iteritems(): + dict_temp = dict_gen[k] if dict_gen[k]['1 Testcase Name'] == str(testcase): - count=count+1 + count = count + 1 for arg in args: - if arg == args[c-1]: + if arg == args[c - 1]: try: - result=float(dict_temp.get(str(arg))) + result = float(dict_temp.get(str(arg))) except ValueError: - result=float(dict_temp.get(str(arg))[:-1])*1000 - dict_temp=dict_temp.get(str(arg)) - total=total+result 
- return compute_index(total, reference_num, count) + result = float(dict_temp.get(str(arg))[:-1]) * 1000 + dict_temp = dict_temp.get(str(arg)) + total = total + result + return compute_index(total, reference_num, count) diff --git a/data/ref_results/network_benchmarks_indices.py b/data/ref_results/network_benchmarks_indices.py index b98269b6..96ccd4fc 100644 --- a/data/ref_results/network_benchmarks_indices.py +++ b/data/ref_results/network_benchmarks_indices.py @@ -1,27 +1,20 @@ from index_calculation import generic_index as get_index -from index_calculation import get_reference +from index_calculation import get_reference from result_accum import result_concat as concat -def iperf_index (): - iperf_dict=concat('../../results/iperf/') - #print iperf_dict - iperf_bm_ref = get_reference('network','iperf_bm','throughput received(b/s)') - - iperf_bm_index= get_index(iperf_dict,'iperf_bm',iperf_bm_ref,'4 IPERF result', '2. Bandwidth','2. throughput Received (b/s)') - - iperf_vm_ref = get_reference('network','iperf_vm','throughput received(b/s)') - iperf_vm_index= get_index(iperf_dict,'iperf_vm',iperf_vm_ref,'4 IPERF result', '2. Bandwidth','2. throughput Received (b/s)') - - iperf_vm_2_ref = get_reference('network','iperf_vm_2','throughput received(b/s)') - iperf_vm_2_index= get_index(iperf_dict,'iperf_vm_2',iperf_vm_2_ref,'4 IPERF result', '2. Bandwidth','2. throughput Received (b/s)') - +def iperf_index(): + iperf_dict = concat('../../results/iperf/') + iperf_bm_ref = get_reference('network', 'iperf_bm', 'throughput received(b/s)') + iperf_bm_index = get_index(iperf_dict, 'iperf_bm', iperf_bm_ref, '4 IPERF result', '2. Bandwidth', '2. throughput Received (b/s)') + iperf_vm_ref = get_reference('network', 'iperf_vm', 'throughput received(b/s)') + iperf_vm_index = get_index(iperf_dict, 'iperf_vm', iperf_vm_ref, '4 IPERF result', '2. Bandwidth', '2. 
throughput Received (b/s)') - - iperf_index= float(iperf_bm_index+iperf_vm_index+iperf_vm_2_index)/3 + iperf_vm_2_ref = get_reference('network', 'iperf_vm_2', 'throughput received(b/s)') + iperf_vm_2_index = get_index(iperf_dict, 'iperf_vm_2', iperf_vm_2_ref, '4 IPERF result', '2. Bandwidth', '2. throughput Received (b/s)') + iperf_index = float(iperf_bm_index + iperf_vm_index + iperf_vm_2_index) / 3 print iperf_index - iperf_dict_i={}; - iperf_dict_i['1. Index']=iperf_index - iperf_dict_i['2. Results']=iperf_dict + iperf_dict_i = {} + iperf_dict_i['1. Index'] = iperf_index + iperf_dict_i['2. Results'] = iperf_dict return iperf_dict_i - diff --git a/data/ref_results/network_suite.py b/data/ref_results/network_suite.py index fd756aa0..37dcb093 100644 --- a/data/ref_results/network_suite.py +++ b/data/ref_results/network_suite.py @@ -2,33 +2,29 @@ import json import network_benchmarks_indices as benchmark_indices -network_dict={}; +network_dict = {} try: - network_dict['IPERF']=benchmark_indices.iperf_index() + network_dict['IPERF'] = benchmark_indices.iperf_index() except: pass - - - -network_bench_list=['IPERF'] -temp=0 -l=len(network_bench_list) +network_bench_list = ['IPERF'] +temp = 0 +l = len(network_bench_list) for benchmark in network_bench_list: try: - temp=temp+float(network_dict[benchmark]['1. Index']) + temp = temp + float(network_dict[benchmark]['1. 
Index']) except: - l=l-1 + l = l - 1 pass if l == 0: - print "No network results found" + print "No network results found" else: - network_suite_index=temp/len(network_bench_list) - network_dict_f={}; - network_dict_f['index']=network_suite_index - network_dict_f['suite results']=network_dict + network_suite_index = temp / len(network_bench_list) + network_dict_f = {} + network_dict_f['index'] = network_suite_index + network_dict_f['suite results'] = network_dict with open('../../results/network_result.json', 'w+') as result_json: json.dump(network_dict_f, result_json, indent=4, sort_keys=True) - diff --git a/data/ref_results/result_accum.py b/data/ref_results/result_accum.py index 4fffb6b1..6cd55886 100644 --- a/data/ref_results/result_accum.py +++ b/data/ref_results/result_accum.py @@ -1,11 +1,12 @@ import os import json + def result_concat(targ_dir): - list_vm=[]; - list_bm=[]; - diction={}; - + list_vm = [] + list_bm = [] + diction = {} + for file in os.listdir(targ_dir): if file.endswith(".json"): if file.startswith("instance"): @@ -13,21 +14,18 @@ def result_concat(targ_dir): list_vm.append(file) else: list_bm.append(file) - l=len(list_bm) - k=len(list_vm) + l = len(list_bm) + k = len(list_vm) + + for x in range(0, l): + file_t = list_bm[x] + with open(targ_dir + file_t) as result_file: + result_djson = json.load(result_file) + diction['Baremetal' + str(int(x + 1))] = result_djson - for x in range (0,l): - file_t=list_bm[x] - with open (targ_dir+file_t) as result_file: - result_djson=json.load(result_file) - diction['Baremetal'+str(int(x+1))]=result_djson - - for x in range (0,k): - file_t=list_vm[x] - with open (targ_dir+file_t) as result_file: - result_djson=json.load(result_file) - diction['Virtual Machine '+str(x+1)]=result_djson + for x in range(0, k): + file_t = list_vm[x] + with open(targ_dir + file_t) as result_file: + result_djson = json.load(result_file) + diction['Virtual Machine ' + str(x + 1)] = result_djson return diction - - - diff --git 
a/data/ref_results/storage_benchmarks_indices.py b/data/ref_results/storage_benchmarks_indices.py index db3890a0..f51b3d6e 100644 --- a/data/ref_results/storage_benchmarks_indices.py +++ b/data/ref_results/storage_benchmarks_indices.py @@ -1,34 +1,30 @@ from index_calculation import generic_index as get_index -from index_calculation import get_reference +from index_calculation import get_reference from result_accum import result_concat as concat -def fio_index (): - fio_dict=concat('../../results/fio/') - #print _perf_dict - fio_r_bm_ref = get_reference('storage','fio_bm','read','IOPS') - fio_r_bm_index= get_index(fio_dict,'fio_bm',fio_r_bm_ref,'4 FIO result', 'Job_0','read','IO/sec') - - fio_w_bm_ref = get_reference('storage','fio_bm','write','IOPS') - fio_w_bm_index= get_index(fio_dict,'fio_bm',fio_w_bm_ref,'4 FIO result', 'Job_0','write','IO/sec') - - fio_bm_index= (fio_r_bm_index+fio_w_bm_index)/2 - - - - fio_r_vm_ref = get_reference('storage','fio_vm','read','IOPS') - fio_r_vm_index= get_index(fio_dict,'fio_vm',fio_r_vm_ref,'4 FIO result', 'Job_0','read','IO/sec') - - fio_w_vm_ref = get_reference('storage','fio_vm','write','IOPS') - fio_w_vm_index= get_index(fio_dict,'fio_vm',fio_w_vm_ref,'4 FIO result', 'Job_0','write','IO/sec') - - fio_vm_index= (fio_r_vm_index+fio_w_vm_index)/2 - - fio_index=(fio_bm_index+fio_vm_index)/2 +def fio_index(): + fio_dict = concat('../../results/fio/') + fio_r_bm_ref = get_reference('storage', 'fio_bm', 'read', 'IOPS') + fio_r_bm_index = get_index(fio_dict, 'fio_bm', fio_r_bm_ref, '4 FIO result', 'Job_0', 'read', 'IO/sec') + + fio_w_bm_ref = get_reference('storage', 'fio_bm', 'write', 'IOPS') + fio_w_bm_index = get_index(fio_dict, 'fio_bm', fio_w_bm_ref, '4 FIO result', 'Job_0', 'write', 'IO/sec') + + fio_bm_index = (fio_r_bm_index + fio_w_bm_index) / 2 + + fio_r_vm_ref = get_reference('storage', 'fio_vm', 'read', 'IOPS') + fio_r_vm_index = get_index(fio_dict, 'fio_vm', fio_r_vm_ref, '4 FIO result', 'Job_0', 'read', 'IO/sec') + + 
fio_w_vm_ref = get_reference('storage', 'fio_vm', 'write', 'IOPS') + fio_w_vm_index = get_index(fio_dict, 'fio_vm', fio_w_vm_ref, '4 FIO result', 'Job_0', 'write', 'IO/sec') + + fio_vm_index = (fio_r_vm_index + fio_w_vm_index) / 2 + + fio_index = (fio_bm_index + fio_vm_index) / 2 print fio_index - - fio_dict_i={}; - fio_dict_i['1. Index']=fio_index - fio_dict_i['2. Results']=fio_dict + + fio_dict_i = {} + fio_dict_i['1. Index'] = fio_index + fio_dict_i['2. Results'] = fio_dict return fio_dict_i - diff --git a/data/ref_results/storage_suite.py b/data/ref_results/storage_suite.py index 5c1a89a2..52d6c8c6 100644 --- a/data/ref_results/storage_suite.py +++ b/data/ref_results/storage_suite.py @@ -2,30 +2,26 @@ import json import storage_benchmarks_indices as benchmark_indices -storage_dict={}; +storage_dict = {} try: - storage_dict['FIO']=benchmark_indices.fio_index() + storage_dict['FIO'] = benchmark_indices.fio_index() except OSError: pass - -storage_bench_list=['FIO'] -l=len(storage_bench_list) -temp=0 +storage_bench_list = ['FIO'] +l = len(storage_bench_list) +temp = 0 for benchmark in storage_bench_list: try: - temp=temp+float(storage_dict[benchmark]['1. Index']) + temp = temp + float(storage_dict[benchmark]['1. 
Index']) except KeyError: - l-=1 - - + l -= 1 if l == 0: print "No Storage results found" else: - storage_suite_index=temp/l - storage_dict_f={}; - storage_dict_f['index']=storage_suite_index - storage_dict_f['storage suite']=storage_dict + storage_suite_index = temp / l + storage_dict_f = {} + storage_dict_f['index'] = storage_suite_index + storage_dict_f['storage suite'] = storage_dict with open('../../results/storage_result.json', 'w+') as result_json: json.dump(storage_dict_f, result_json, indent=4, sort_keys=True) - diff --git a/data/report/Qtip_Report.py b/data/report/Qtip_Report.py index 9f2226c4..cd20d57c 100644 --- a/data/report/Qtip_Report.py +++ b/data/report/Qtip_Report.py @@ -1,113 +1,108 @@ -from reportlab.pdfgen import canvas from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image -from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle +from reportlab.lib.styles import getSampleStyleSheet from reportlab.lib.units import inch from reportlab.lib.pagesizes import letter -from reportlab.platypus import ListFlowable, ListItem -import qtip_graph as graph +import qtip_graph as graph import get_indices as results from get_results import report_concat from get_results import generate_result -def dump_result(Stor,directory, testcase): + +def dump_result(Stor, directory, testcase): try: - lower_s=testcase.lower() - Stor.append(Paragraph(testcase,Style['h3'])) - l1=report_concat(directory,lower_s) - l=1 + lower_s = testcase.lower() + Stor.append(Paragraph(testcase, Style['h3'])) + l1 = report_concat(directory, lower_s) + l = 1 for a in l1: - Stor.append(Paragraph(testcase+" result_"+str(l),Style['h5'])) - raw_string=generate_result(a,0) - replaced_string=raw_string.replace('\n', '<br/> ').replace(' ',' ') - Stor.append(Paragraph(replaced_string,Style['BodyText'])) - l=l+1 + Stor.append(Paragraph(testcase + " result_" + str(l), Style['h5'])) + raw_string = generate_result(a, 0) + replaced_string = raw_string.replace('\n', '<br/> 
').replace(' ', ' ') + Stor.append(Paragraph(replaced_string, Style['BodyText'])) + l = l + 1 except OSError: print "Results for {0} not found".format(testcase) -doc = SimpleDocTemplate("../../results/QTIP_results.pdf",pagesize=letter, - rightMargin=72,leftMargin=72, - topMargin=72,bottomMargin=18) -Stor=[] -Style=getSampleStyleSheet() -Title="QTIP Benchmark Suite" -Stor.append(Paragraph(Title,Style['Title'])) -H1="Results" -Stor.append(Spacer(0,36)) +doc = SimpleDocTemplate("../../results/QTIP_results.pdf", pagesize=letter, + rightMargin=72, leftMargin=72, + topMargin=72, bottomMargin=18) +Stor = [] +Style = getSampleStyleSheet() +Title = "QTIP Benchmark Suite" +Stor.append(Paragraph(Title, Style['Title'])) +H1 = "Results" +Stor.append(Spacer(0, 36)) Stor.append(Paragraph(H1, Style['h2'])) -compute=0 -storage=0 -network=0 +compute = 0 +storage = 0 +network = 0 try: - compute=results.get_index('compute_result') + compute = results.get_index('compute_result') except IOError: pass try: - storage=results.get_index('storage_result') + storage = results.get_index('storage_result') except IOError: pass try: - network=results.get_index('network_result') + network = results.get_index('network_result') except IOError: pass -Stor.append(Paragraph("Compute Suite: %f" %compute, Style['h5'])) -Stor.append(Paragraph("Storage Suite: %f" %storage, Style['h5'])) -Stor.append(Paragraph("Network Suite: %f" %network, Style['h5'])) -graph.plot_indices(compute,storage,network) -qtip_graph=('qtip_graph.jpeg') -im=Image(qtip_graph, 5*inch,4*inch) +Stor.append(Paragraph("Compute Suite: %f" % compute, Style['h5'])) +Stor.append(Paragraph("Storage Suite: %f" % storage, Style['h5'])) +Stor.append(Paragraph("Network Suite: %f" % network, Style['h5'])) +graph.plot_indices(compute, storage, network) +qtip_graph = ('qtip_graph.jpeg') +im = Image(qtip_graph, 5 * inch, 4 * inch) Stor.append(im) Stor.append(Spacer(0, 12)) Stor.append(Paragraph("Reference POD", Style['h5'])) -ptext="The Dell OPNFV 
Lab POD3 has been taken as the reference POD against which the reference results have been collected. The POD consists of 6 identical servers. The details of such a server are:" -Stor.append(Paragraph(ptext,Style['Normal'])) -ptext="<bullet>•</bullet>Server Type: Dell PowerEdge R630 Server" -Stor.append(Paragraph(ptext,Style['Bullet'])) -ptext="<bullet>•</bullet>CPU: Intel Xeon E5-2698 @ 2300 MHz" +ptext = "The Dell OPNFV Lab POD3 has been taken as the reference POD against which the reference results have been collected. The POD consists of 6 identical servers. The details of such a server are:" +Stor.append(Paragraph(ptext, Style['Normal'])) +ptext = "<bullet>•</bullet>Server Type: Dell PowerEdge R630 Server" +Stor.append(Paragraph(ptext, Style['Bullet'])) +ptext = "<bullet>•</bullet>CPU: Intel Xeon E5-2698 @ 2300 MHz" Stor.append(Paragraph(ptext, Style["Bullet"])) -ptext="<bullet>•</bullet>RAM: 128GB" +ptext = "<bullet>•</bullet>RAM: 128GB" Stor.append(Paragraph(ptext, Style["Bullet"])) -ptext="<bullet>•</bullet>Storage SSD: 420GB" +ptext = "<bullet>•</bullet>Storage SSD: 420GB" Stor.append(Paragraph(ptext, Style["Bullet"])) -ptext="<bullet>•</bullet>Network Card: Intel 2P X520/2P I350 rNDC" +ptext = "<bullet>•</bullet>Network Card: Intel 2P X520/2P I350 rNDC" Stor.append(Paragraph(ptext, Style["Bullet"])) -ptext="Servers interconnected through a DELL S4810 switch using a 10Gbps physical link" +ptext = "Servers interconnected through a DELL S4810 switch using a 10Gbps physical link" Stor.append(Paragraph(ptext, Style["Bullet"])) Stor.append(Spacer(0, 12)) -ptext="For Further Details of the Reference POD hardware, please visit: https://wiki.opnfv.org/reference_pod_hardware_details" -Stor.append(Paragraph(ptext,Style['Normal'])) +ptext = "For Further Details of the Reference POD hardware, please visit: https://wiki.opnfv.org/reference_pod_hardware_details" +Stor.append(Paragraph(ptext, Style['Normal'])) Stor.append(Spacer(0, 12)) -ptext="For Details of the 
Reference POD Results, please visit: https://wiki.opnfv.org/reference_pod_qtip_results" +ptext = "For Details of the Reference POD Results, please visit: https://wiki.opnfv.org/reference_pod_qtip_results" Stor.append(Spacer(0, 12)) -Stor.append(Paragraph(ptext,Style['Normal'])) +Stor.append(Paragraph(ptext, Style['Normal'])) Stor.append(Paragraph("RAW Results", Style['h1'])) Stor.append(Paragraph("Compute Results", Style['h2'])) -dump_result(Stor,"../../results/dhrystone/","Dhrystone_bm") -dump_result(Stor,"../../results/dhrystone/","Dhrystone_vm") - -dump_result(Stor,"../../results/whetstone/","Whetstone_bm") -dump_result(Stor,"../../results/whetstone/","Whetstone_vm") +dump_result(Stor, "../../results/dhrystone/", "Dhrystone_bm") +dump_result(Stor, "../../results/dhrystone/", "Dhrystone_vm") -dump_result(Stor,"../../results/ramspeed/","Ramspeed_bm") -dump_result(Stor,"../../results/ramspeed/","Ramspeed_vm") +dump_result(Stor, "../../results/whetstone/", "Whetstone_bm") +dump_result(Stor, "../../results/whetstone/", "Whetstone_vm") -dump_result(Stor,"../../results/ssl/","SSL_bm") -dump_result(Stor,"../../results/ssl/","SSL_vm") +dump_result(Stor, "../../results/ramspeed/", "Ramspeed_bm") +dump_result(Stor, "../../results/ramspeed/", "Ramspeed_vm") -#dump_result(Stor,"../../results/dpi/","DPI_bm") -#dump_result(Stor,"../../results/dpi/","DPI_vm") +dump_result(Stor, "../../results/ssl/", "SSL_bm") +dump_result(Stor, "../../results/ssl/", "SSL_vm") Stor.append(Paragraph("Network Results", Style['h2'])) -dump_result(Stor,"../../results/iperf/","IPERF_bm") -dump_result(Stor,"../../results/iperf/","IPERF_vm") -dump_result(Stor,"../../results/iperf/","IPERF_vm_2") +dump_result(Stor, "../../results/iperf/", "IPERF_bm") +dump_result(Stor, "../../results/iperf/", "IPERF_vm") +dump_result(Stor, "../../results/iperf/", "IPERF_vm_2") Stor.append(Paragraph("Storage Results", Style['h2'])) -dump_result(Stor,"../../results/fio/","fio_bm") 
-dump_result(Stor,"../../results/fio/","fio_vm") +dump_result(Stor, "../../results/fio/", "fio_bm") +dump_result(Stor, "../../results/fio/", "fio_vm") doc.build(Stor) -#canvas.save() diff --git a/data/report/get_indices.py b/data/report/get_indices.py index e23fdb89..91219c0b 100644 --- a/data/report/get_indices.py +++ b/data/report/get_indices.py @@ -1,8 +1,8 @@ import json + def get_index(suite): - with open ('../../results/'+suite+'.json') as result_file: - result_djson=json.load(result_file) - index=result_djson['index'] - + with open('../../results/' + suite + '.json') as result_file: + result_djson = json.load(result_file) + index = result_djson['index'] return index diff --git a/data/report/get_results.py b/data/report/get_results.py index 01fb8080..23fd5383 100644 --- a/data/report/get_results.py +++ b/data/report/get_results.py @@ -2,48 +2,49 @@ import os import json -def report_concat (targ_dir, testcase): - machine_temp=[]; - machines=[]; - diction={}; +def report_concat(targ_dir, testcase): + machine_temp = [] + machines = [] for file in os.listdir(targ_dir): if file.endswith(".json"): machine_temp.append(file) - l=len(machine_temp) + l = len(machine_temp) - for x in range (0,l): - file_t=machine_temp[x] - with open (targ_dir+file_t) as result_file: - result_djson=json.load(result_file) + for x in range(0, l): + file_t = machine_temp[x] + with open(targ_dir + file_t) as result_file: + result_djson = json.load(result_file) if result_djson['1 Testcase Name'] == str(testcase): machines.append(result_djson) return machines + def space_count(l): - spc='' + spc = '' for x in range(l): - spc=spc+' ' + spc = spc + ' ' return spc -def custom_dict(list1,list2,k): - string_1='' - for num_1 in range (0,len(list1)): - string_1=string_1+space_count(k)+str(list1[num_1][0])+"="+str(list2[num_1])+"\n" +def custom_dict(list1, list2, k): + string_1 = '' + for num_1 in range(0, len(list1)): + string_1 = string_1 + space_count(k) + str(list1[num_1][0]) + "=" + 
str(list2[num_1]) + "\n" return string_1 -def generate_result(dict_a,k): - list_1=[] - list_2=[] - count=0 - for i,j in sorted(dict_a.iteritems()): + +def generate_result(dict_a, k): + list_1 = [] + list_2 = [] + count = 0 + for i, j in sorted(dict_a.iteritems()): list_1.append([]) list_1[count].append(i) if (str(type(dict_a.get(i)))) == "<type 'dict'>": - list_2.append(str("\n"+generate_result(dict_a.get(i),int(k+1)))) + list_2.append(str("\n" + generate_result(dict_a.get(i), int(k + 1)))) else: list_2.append(dict_a.get(i)) - count=count+1 - return custom_dict(list_1,list_2,k) + count = count + 1 + return custom_dict(list_1, list_2, k) diff --git a/data/report/qtip_graph.py b/data/report/qtip_graph.py index d7e64140..acbda40c 100644 --- a/data/report/qtip_graph.py +++ b/data/report/qtip_graph.py @@ -1,29 +1,30 @@ import matplotlib -matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np -def plot_indices(a,b,c): - N=3 - ind= np.arange(N) - y_axis = (a,b,c ) - width=0.35 - f=plt.figure() - ax=f.gca() +matplotlib.use('Agg') + + +def plot_indices(a, b, c): + N = 3 + ind = np.arange(N) + y_axis = (a, b, c) + width = 0.35 + f = plt.figure() + ax = f.gca() ax.set_autoscale_on(True) - my_bars=ax.bar(ind,y_axis,width, color='b') + my_bars = ax.bar(ind, y_axis, width, color='b') ax.set_ylabel('Index Score*') ax.set_xlabel('Suite') ax.set_title(' QTIP benchmark scores') ax.axis('on') - my_bars=ax.bar(ind,y_axis,width) - ax.set_xticks(ind+width/2) - ax.set_xticklabels(['Compute','Storage','Network']) - ax.axis([0,3,0,1.25]) - f.text(0.7,0.01,'* With Comparison to Refernece POD', fontsize=9) + my_bars = ax.bar(ind, y_axis, width) + ax.set_xticks(ind + width / 2) + ax.set_xticklabels(['Compute', 'Storage', 'Network']) + ax.axis([0, 3, 0, 1.25]) + f.text(0.7, 0.01, '* With Comparison to Refernece POD', fontsize=9) for rect in my_bars: height = rect.get_height() - ax.text(rect.get_x() + rect.get_width()/2., 1.05*height, height , ha='center', va='bottom') - + 
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, height, ha='center', va='bottom') f.savefig('qtip_graph.jpeg') diff --git a/docs/etc/conf.py b/docs/etc/conf.py index 00660351..65e85db9 100644 --- a/docs/etc/conf.py +++ b/docs/etc/conf.py @@ -1,6 +1,5 @@ import datetime -import sys -import os + try: __import__('imp').find_module('sphinx.ext.numfig') @@ -20,9 +19,7 @@ html_use_index = False pdf_documents = [('index', u'OPNFV', u'OPNFV Project', u'OPNFV')] pdf_fit_mode = "shrink" -pdf_stylesheets = ['sphinx','kerning','a4'] -#latex_domain_indices = False -#latex_use_modindex = False +pdf_stylesheets = ['sphinx', 'kerning', 'a4'] latex_elements = { 'printindex': '', diff --git a/docs/roadmap.rst b/docs/roadmap.rst new file mode 100644 index 00000000..42caec92 --- /dev/null +++ b/docs/roadmap.rst @@ -0,0 +1,100 @@ +.. two dots create a comment. please leave this logo at the top of each of your rst files. +.. image:: ../etc/opnfv-logo.png + :height: 40 + :width: 200 + :alt: OPNFV + :align: left +.. these two pipes are to seperate the logo from the first title +| +| + +Roadmap for Release D +===================== + +The development of QTIP has been paused after Brahmaputra release due the +shortage on resource. We will skip Colorado release and target for Release D. + +The project will stick to the original scope as a benchmark platform and +continue to develop on existing framework. + +QTIP will focus on + +- integrating more benchmark tools +- supporting new technology applied in OPNFV +- improve the result dashboard for better visualization + +Besides the technical parts, QTIP is also aiming to attract more contributors by + +- providing more comprehensive documents +- refactoring source code for better maintenanability +- improving development management for better collaboration + +Framework +--------- + +Error Handling +^^^^^^^^^^^^^^ + +The QTIP will be used against different environment. It is not possible to run +without any error all at once. 
+ +We will not be able to get rid of errors, but we may handle them gracefully. + +Comprehensive error messages will help to locate the issue quickly and help user +to resolve them. + +Declarative Playbook +^^^^^^^^^^^^^^^^^^^^ + +QTIP uses ansible for setting up the environment. It is nice and powerful tool +we will keep for Release D. + +However, existing playbooks is full of hardcoded shell scripts which sometimes +will fail in specific OS distribution. + +Although most system administrators will be familiar with shell script, it is +not easy to tell the purpose of a long command line at a glance. + +Ansible's solution for these issues is to provide modules as an abstract layer +to handle the devergence, and it will also be more compact and easier to +understand. This is something we should leverage. + +Scenario Configuration +^^^^^^^^^^^^^^^^^^^^^^ + +Currently the scenario configuration is hard coded and not able to be run under +different environment. The variables should be separated from the configuration +template. + +Features +-------- + +Benchmarks +^^^^^^^^^^ + +1. vswitch perf +2. Cyclictest +3. Stress +4. Lmbench +5. Sar + +Technology +^^^^^^^^^^ + +Some new technology is introduced into OPNFV and it would be good if we can +support them at the first time. + +- SR-IOV: a key technology to improve network performance in VM and the VM can + achieve nearly physical NIC's performance. +- DPDK: a key technology to improve the NIC's performance through poll mode + which dismisses physical interrupt as less as possible. The byproduct of DPDK + is nearly 100% CPU usage. It can also be used in VM. + +Development Management +---------------------- + +We will make improvement on development management + +1. Continuous Integration +2. Documentation +3. 
Issue Tracking diff --git a/func/cli.py b/func/cli.py index 129ab96c..5e8f02cf 100644 --- a/func/cli.py +++ b/func/cli.py @@ -15,94 +15,95 @@ from func.spawn_vm import SpawnVM import argparse -class cli(): - - def _getfile(self, filepath): - - with open('test_list/'+filepath,'r') as finput: - _benchmarks=finput.readlines() - for items in range( len(_benchmarks)): - _benchmarks[items]=_benchmarks[items].rstrip() +class cli: + + @staticmethod + def _getfile(file_path): + with open('test_list/' + file_path, 'r') as fin_put: + _benchmarks = fin_put.readlines() + for items in range(len(_benchmarks)): + _benchmarks[items] = _benchmarks[items].rstrip() return _benchmarks - def _getsuite(self, filepath): + @staticmethod + def _getsuite(file_path): - return filepath + return file_path - def _checkTestList(self, filename): + @staticmethod + def _check_test_list(filename): - if os.path.isfile('test_list/'+filename): + if os.path.isfile('test_list/' + filename): return True else: return False - def _checkLabName(self, labname): + @staticmethod + def _check_lab_name(lab_name): - if os.path.isdir('test_cases/'+labname): + if os.path.isdir('test_cases/' + lab_name): return True else: return False - def _get_fname(self,file_name): + @staticmethod + def _get_f_name(file_name): return file_name[0: file_name.find('.')] - def __init__(self): - - suite=[] + @staticmethod + def _parse_args(args): parser = argparse.ArgumentParser() - parser.add_argument('-l ', '--lab', help='Name of Lab on which being tested, These can' \ - 'be found in the test_cases/ directory. Please ' \ - 'ensure that you have edited the respective files '\ - 'before using them. For testing other than through Jenkins'\ - ' The user should list default after -l . all the fields in'\ - ' the files are necessary and should be filled') - parser.add_argument('-f', '--file', help = 'File in test_list with the list of tests. 
there are three files' \ - '\n compute '\ - '\n storage '\ - '\n network '\ - 'They contain all the tests that will be run. They are listed by suite.' \ - 'Please ensure there are no empty lines') - args = parser.parse_args() - - if not self._checkTestList(args.file): + parser.add_argument('-l ', '--lab', help='Name of Lab on which being tested, These can' + 'be found in the test_cases/ directory. Please ' + 'ensure that you have edited the respective files ' + 'before using them. For testing other than through Jenkins' + ' The user should list default after -l . all the fields in' + ' the files are necessary and should be filled') + parser.add_argument('-f', '--file', help='File in test_list with the list of tests. there are three files' + '\n compute ' + '\n storage ' + '\n network ' + 'They contain all the tests that will be run. They are listed by suite.' + 'Please ensure there are no empty lines') + return parser.parse_args(args) + + def __init__(self, args=sys.argv[1:]): + + suite = [] + args = self._parse_args(args) + + if not self._check_test_list(args.file): print '\n\n ERROR: Test File Does not exist in test_list/ please enter correct file \n\n' sys.exit(0) - if not self._checkLabName(args.lab): - print '\n\n You have specified a lab that is not present in test_cases/ please enter correct'\ - ' file. If unsure how to proceed, use -l default.\n\n' + if not self._check_lab_name(args.lab): + print '\n\n You have specified a lab that is not present in test_cases/ please enter correct \ + file. 
If unsure how to proceed, use -l default.\n\n' sys.exit(0) benchmarks = self._getfile(args.file) suite.append(args.file) - suite=self._getsuite(suite) - for items in range (len(benchmarks)): - if (suite and benchmarks): - - roles='' - vm_info='' - benchmark_details='' - pip='' - obj='' + suite = self._getsuite(suite) + for items in range(len(benchmarks)): + if suite and benchmarks: obj = Env_setup() - if os.path.isfile('./test_cases/'+args.lab.lower()+'/'+suite[0]+'/' +benchmarks[items]): - [benchmark, roles, vm_info, benchmark_details, pip, proxy_info] = obj.parse('./test_cases/' - +args.lab.lower()+'/'+suite[0]+'/'+benchmarks[items]) + if os.path.isfile('./test_cases/' + args.lab.lower() + '/' + suite[0] + '/' + benchmarks[items]): + [benchmark, vm_info, benchmark_details, proxy_info] = \ + obj.parse('./test_cases/' + args.lab.lower() + '/' + suite[0] + '/' + benchmarks[items]) if len(vm_info) != 0: - vmObj ='' - vmObj = SpawnVM(vm_info) - if obj.callpingtest(): - obj.callsshtest() - obj.updateAnsible() - dvr = Driver() - dvr.drive_bench(benchmark, - obj.roles_dict.items(), - self._get_fname(benchmarks[items]), - benchmark_details, - obj.ip_pw_dict.items(), - proxy_info) + SpawnVM(vm_info) + obj.call_ping_test() + obj.call_ssh_test() + obj.update_ansible() + dvr = Driver() + dvr.drive_bench(benchmark, + obj.roles_dict.items(), + self._get_f_name(benchmarks[items]), + benchmark_details, + obj.ip_pw_dict.items(), + proxy_info) else: print (benchmarks[items], ' is not a Template in the Directory - \ Enter a Valid file name. or use qtip.py -h for list') diff --git a/func/create_zones.py b/func/create_zones.py index 44ba7568..e715dfd4 100644 --- a/func/create_zones.py +++ b/func/create_zones.py @@ -1,127 +1,130 @@ -############################################################################## -# Copyright (c) 2015 Dell Inc and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - - -from keystoneclient.auth.identity import v2 -from keystoneclient import session -from novaclient import client -import os -import re -from collections import defaultdict - - -class create_zones: - - def __init__(self): - print 'Creating Zones' - self._keystone_client = None - self._nova_client = None - - def _get_keystone_client(self): - '''returns a keystone client instance''' - - if self._keystone_client is None: - ''' - self._keystone_client = keystoneclient.v2_0.client.Client( - auth_url=os.environ.get('OS_AUTH_URL'), - username=os.environ.get('OS_USERNAME'), - password=os.environ.get('OS_PASSWORD'), - tenant_name=os.environ.get('OS_TENANT_NAME')) - ''' - auth = v2.Password(auth_url=os.environ.get('OS_AUTH_URL'), - username=os.environ.get('OS_USERNAME'), - password=os.environ.get('OS_PASSWORD'), - tenant_name=os.environ.get('OS_TENANT_NAME')) - - sess = session.Session(auth=auth) - - return sess - - def _get_nova_client(self): - if self._nova_client is None: - keystone = self._get_keystone_client() - self._nova_client = client.Client('2', session=keystone) - return self._nova_client - - def check_aggregate(self, nova, agg_name): - list1 = nova.aggregates.list() - - agg_name_exist = False - for x in list1: - - if x.name == agg_name: - agg_name_exist = True - return agg_name_exist - - def get_aggregate_id(self, nova, agg_name): - list1 = nova.aggregates.list() - agg_id = 0 - agg_name_exist = False - for x in list1: - if x.name == agg_name: - agg_id = x.id - return agg_id - - def check_host_added_to_aggregate(self, nova, agg_id, hostname): - host_added = False - list1 = nova.aggregates.get_details(agg_id) - - nme = str(list1.hosts) - if(hostname in nme): - 
host_added = True - return host_added - - def del_agg(self, nova, id, host): - - nova.aggregates.remove_host(id, host) - nova.aggregates.delete(id) - - def get_compute_num(self, computeName): - - num = re.findall(r'\d+',computeName) - return (int(num[0])-1) - - def create_agg(self, D): - nova = self._get_nova_client() - hyper_list = nova.hypervisors.list() - hostnA = [] - zone_machine = defaultdict(list) - - x = 0 - for x in range(len(hyper_list)): - - hostnA.append(hyper_list[x].service['host']) - hostnA[x] = str(hostnA[x]) - - hostnA.sort() - for k in D: - - zone_machine[k].append(' ') - - for x in range(len(zone_machine)): - compute_index = self.get_compute_num(D[x]) - if compute_index > len(hyper_list): - print '\n The specified compute node doesnt exist. using compute 1' - compute_index = 1 - if not self.check_aggregate(nova, hostnA[compute_index]): - agg_idA = nova.aggregates.create(hostnA[compute_index], D[x]) - nova.aggregates.add_host(aggregate=agg_idA, host=hostnA[compute_index]) - - else: - - id1 = self.get_aggregate_id(nova, hostnA[compute_index]) - self.del_agg(nova, id1, hostnA[compute_index]) - agg_idA = nova.aggregates.create(hostnA[compute_index], D[x]) - id1 = self.get_aggregate_id(nova, hostnA[compute_index]) - - if not self.check_host_added_to_aggregate( - nova, id1, hostnA[compute_index]): - - nova.aggregates.add_host(aggregate=id1, host=hostnA[compute_index]) +##############################################################################
+# Copyright (c) 2015 Dell Inc and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from keystoneclient.auth.identity import v2
+from keystoneclient import session
+from novaclient import client
+import os
+import re
+from collections import defaultdict
+
+
+class create_zones:
+
+ def __init__(self):
+ print 'Creating Zones'
+ self._keystone_client = None
+ self._nova_client = None
+
+ def _get_keystone_client(self):
+ """Return an authenticated keystone session (session.Session), not a client instance."""
+
+ if self._keystone_client is None:
+ '''
+ self._keystone_client = keystoneclient.v2_0.client.Client(
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+ '''
+ auth = v2.Password(auth_url=os.environ.get('OS_AUTH_URL'),
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ tenant_name=os.environ.get('OS_TENANT_NAME'))
+
+ sess = session.Session(auth=auth)
+ else:
+ return self._keystone_client
+
+ return sess
+
+ def _get_nova_client(self):
+ if self._nova_client is None:
+ keystone = self._get_keystone_client()
+ self._nova_client = client.Client('2', session=keystone)
+ return self._nova_client
+
+ @staticmethod
+ def check_aggregate(nova, agg_name):
+ list1 = nova.aggregates.list()
+ agg_name_exist = False
+ for x in list1:
+ if x.name == agg_name:
+ agg_name_exist = True
+ return agg_name_exist
+
+ @staticmethod
+ def get_aggregate_id(nova, agg_name):
+ list1 = nova.aggregates.list()
+ for x in list1:
+ if x.name == agg_name:
+ agg_id = x.id
+ return agg_id
+
+ @staticmethod
+ def check_host_added_to_aggregate(nova, agg_id, hostname):
+ host_added = False
+ list1 = nova.aggregates.get_details(agg_id)
+
+ nme = str(list1.hosts)
+ if hostname in nme:
+ host_added = True
+ return host_added
+
+ @staticmethod
+ def del_agg(nova, id, host):
+
+ nova.aggregates.remove_host(id, host)
+ nova.aggregates.delete(id)
+
+ @staticmethod
+ def get_compute_num(compute_name):
+
+ num = re.findall(r'\d+', compute_name)
+ return int(num[0]) - 1
+
+ def test(self):
+ nova = self._get_nova_client()
+ hyper_list = nova.hypervisors.list()
+ return hyper_list
+
+ def create_agg(self, d):
+ nova = self._get_nova_client()
+ hyper_list = nova.hypervisors.list()
+ host_a = []
+ zone_machine = defaultdict(list)
+
+ for x in range(len(hyper_list)):
+
+ host_a.append(hyper_list[x].service['host'])
+ host_a[x] = str(host_a[x])
+
+ host_a.sort()
+ for k in d:
+
+ zone_machine[k].append(' ')
+
+ for x in range(len(zone_machine)):
+ compute_index = self.get_compute_num(d[x])
+ if compute_index > len(hyper_list):
+ print '\n The specified compute node doesnt exist. using compute 1'
+ compute_index = 1
+ if not self.check_aggregate(nova, host_a[compute_index]):
+ agg_id_a = nova.aggregates.create(host_a[compute_index], d[x])
+ nova.aggregates.add_host(aggregate=agg_id_a, host=host_a[compute_index])
+
+ else:
+ id1 = self.get_aggregate_id(nova, host_a[compute_index])
+ self.del_agg(nova, id1, host_a[compute_index])
+ nova.aggregates.create(host_a[compute_index], d[x])
+ id1 = self.get_aggregate_id(nova, host_a[compute_index])
+
+ if not self.check_host_added_to_aggregate(
+ nova, id1, host_a[compute_index]):
+
+ nova.aggregates.add_host(aggregate=id1, host=host_a[compute_index])
diff --git a/func/driver.py b/func/driver.py index 48c09c5d..33dbe320 100644 --- a/func/driver.py +++ b/func/driver.py @@ -6,13 +6,11 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - - - import os import json from collections import defaultdict + class Driver: def __init__(self): @@ -21,8 +19,8 @@ class Driver: print os.environ['PWD'] self.dic_json = defaultdict() - def drive_bench(self, benchmark, roles, benchmark_fname, benchmark_detail = None, pip_dict = None, proxy_info = None): - roles= sorted(roles) + def drive_bench(self, benchmark, roles, benchmark_fname, benchmark_detail=None, pip_dict=None, proxy_info=None): + roles = sorted(roles) pip_dict = sorted(pip_dict) result_dir = 'results' benchmark_name = benchmark + '.yaml' @@ -33,30 +31,30 @@ class Driver: self.dic_json['workingdir'] = str(os.environ['PWD']) self.dic_json['fname'] = str(benchmark_fname) self.dic_json['username'] = str('root') - + for key in proxy_info.keys(): self.dic_json[key] = proxy_info[key] - + if os.environ['INSTALLER_TYPE'] == str('joid'): - self.dic_json['username']=str('ubuntu') + self.dic_json['username'] = str('ubuntu') if os.environ['INSTALLER_TYPE'] == str('apex'): - self.dic_json['username']=str('heat-admin') - for k,v in benchmark_detail: - self.dic_json[k]=v + self.dic_json['username'] = str('heat-admin') + for k, v in benchmark_detail: + self.dic_json[k] = v for k, v in roles: - self.dic_json['role']=k - index=1 - if benchmark_detail != None: + self.dic_json['role'] = k + index = 1 + if benchmark_detail is not None: for values in v: - if k == '1-server': + if k == '1-server': print values, 'saving IP' - self.dic_json['ip'+str(index)]= str(values) + self.dic_json['ip' + str(index)] = str(values) if pip_dict[0][1][0]: - self.dic_json['privateip'+str(index)] = pip_dict[0][1] + self.dic_json['privateip' + str(index)] = pip_dict[0][1] if not 
pip_dict[0][1][0]: - self.dic_json['privateip'+str(index)] = 'NONE' - index= index+1 + self.dic_json['privateip' + str(index)] = 'NONE' + index = index + 1 dic_json = json.dumps(dict(self.dic_json.items())) print dic_json run_play = 'ansible-playbook ./benchmarks/playbooks/{0} --private-key=./data/QtipKey -i ./data/hosts --extra-vars \'{1}\''.format(benchmark_name, dic_json) - status = os.system(run_play) + os.system(run_play) diff --git a/func/env_setup.py b/func/env_setup.py index c1e2a003..9c0dadb3 100644 --- a/func/env_setup.py +++ b/func/env_setup.py @@ -13,19 +13,22 @@ from collections import defaultdict import yaml import time import paramiko -class Env_setup(): +import socket + + +class Env_setup: roles_ip_list = [] # ROLE and its corresponding IP address list ip_pw_list = [] # IP and password, this will be used to ssh roles_dict = defaultdict(list) ip_pw_dict = defaultdict(list) ip_pip_list = [] vm_parameters = defaultdict(list) - benchmark_details= defaultdict() + benchmark_details = defaultdict() benchmark = '' def __init__(self): print '\nParsing class initiated\n' - self.roles_ip_list[:]=[] + self.roles_ip_list[:] = [] self.ip_pw_list[:] = [] self.roles_dict.clear() self.ip_pw_dict.clear() @@ -35,41 +38,44 @@ class Env_setup(): self.benchmark_details.clear() self.benchmark = '' - def writeTofile(self, role): - fname2 = open('./data/hosts', 'w') + @staticmethod + def write_to_file(role): + f_name_2 = open('./data/hosts', 'w') print role.items() for k in role: - fname2.write('[' + k + ']\n') + f_name_2.write('[' + k + ']\n') num = len(role[k]) for x in range(num): - fname2.write(role[k][x] + '\n') - fname2.close + f_name_2.write(role[k][x] + '\n') + f_name_2.close() - def sshtest(self, lister): - print 'list: ',lister + @staticmethod + def ssh_test(lister): + print 'list: ', lister for k, v in lister: - ipvar = k - pwvar = v + ip_var = k print '\nBeginning SSH Test!\n' if v != '': - print ('\nSSH->>>>> {0} {1}\n'.format(k,v)) + print ('\nSSH->>>>> {0} 
{1}\n'.format(k, v)) time.sleep(2) ssh_c = 'ssh-keyscan {0} >> ~/.ssh/known_hosts'.format(k) os.system(ssh_c) - ssh_cmd = './data/qtip_creds.sh {0}'.format(ipvar) + ssh_cmd = './data/qtip_creds.sh {0}'.format(ip_var) print ssh_cmd - res = os.system(ssh_cmd) + os.system(ssh_cmd) for infinity in range(100): - try : + try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.connect(k , key_filename= './data/QtipKey') + ssh.connect(k, key_filename='./data/QtipKey') stdin, stdout, stderr = ssh.exec_command('ls') print('SSH successful') + for line in stdout: + print '... ' + line.strip('\n') break - except: - print 'Retrying aSSH' + except socket.error: + print 'Retrying aSSH %s' % infinity time.sleep(1) if v == '': print ('SSH->>>>>', k) @@ -79,99 +85,91 @@ class Env_setup(): os.system(ssh_c) for infinity in range(10): - try : + try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.connect(k, key_filename= './data/QtipKey') + ssh.connect(k, key_filename='./data/QtipKey') stdin, stdout, stderr = ssh.exec_command('ls') + print('SSH successful') + for line in stdout: + print '... 
' + line.strip('\n') break - except: - print 'Retrying SSH' + except socket.error: + print 'Retrying SSH %s' % infinity + + @staticmethod + def ping_test(lister): - def pingtest(self, lister): - pingFlag = 0 - result = True for k, v in lister.iteritems(): time.sleep(10) for val in v: ipvar = val ping_cmd = 'ping -D -c1 {0}'.format(ipvar) - while (os.system(ping_cmd) != 0) &(pingFlag <=20): + while os.system(ping_cmd) != 0: print '\nWaiting for machine\n' time.sleep(10) - pingFlag = pingFlag+1 - if pingFlag <= 2: - print ('\n\n %s is UP \n\n ' % ipvar) - else: - result = False - return result - + print ('\n\n %s is UP \n\n ' % ipvar) - def GetHostMachineinfo(self, Hosttag): + def get_host_machine_info(self, host_tag): - num = len(Hosttag) + num = len(host_tag) offset = len(self.roles_ip_list) for x in range(num): hostlabel = 'machine_' + str(x + 1) self.roles_ip_list.insert( - offset, (Hosttag[hostlabel]['role'], Hosttag[hostlabel]['ip'])) + offset, (host_tag[hostlabel]['role'], host_tag[hostlabel]['ip'])) self.ip_pw_list.insert( - offset, (Hosttag[hostlabel]['ip'], Hosttag[hostlabel]['pw'])) + offset, (host_tag[hostlabel]['ip'], host_tag[hostlabel]['pw'])) - def GetVirtualMachineinfo(self, Virtualtag): + def get_virtual_machine_info(self, virtual_tag): - num = len(Virtualtag) + num = len(virtual_tag) for x in range(num): - hostlabel = 'virtualmachine_' + str(x + 1) - for k, v in Virtualtag[hostlabel].iteritems(): + host_label = 'virtualmachine_' + str(x + 1) + for k, v in virtual_tag[host_label].iteritems(): self.vm_parameters[k].append(v) - def GetBenchmarkDetails(self, detail_dic): + def get_bench_mark_details(self, detail_dic): print detail_dic - for k,v in detail_dic.items(): - self.benchmark_details[k]= v + for k, v in detail_dic.items(): + self.benchmark_details[k] = v - def parse(self, configfilepath): + def parse(self, config_file_path): try: - fname = open(configfilepath, 'r+') - doc = yaml.load(fname) -# valid_file = validate_yaml.Validate_Yaml(doc) - 
fname.close() - for scenario in doc: + f_name = open(config_file_path, 'r+') + doc = yaml.load(f_name) + f_name.close() + if doc['Scenario']['benchmark']: self.benchmark = doc['Scenario']['benchmark'] if doc['Context']['Virtual_Machines']: - self.GetVirtualMachineinfo(doc['Context']['Virtual_Machines']) + self.get_virtual_machine_info(doc['Context']['Virtual_Machines']) if doc['Context']['Host_Machines']: - self.GetHostMachineinfo(doc['Context']['Host_Machines']) - if doc.get('Scenario',{}).get('benchmark_details',{}): - self.GetBenchmarkDetails(doc.get('Scenario',{}).get('benchmark_details',{})) - if 'Proxy_Environment' in doc['Context'].keys(): + self.get_host_machine_info(doc['Context']['Host_Machines']) + if doc.get('Scenario', {}).get('benchmark_details', {}): + self.get_bench_mark_details(doc.get('Scenario', {}).get('benchmark_details', {})) + if 'Proxy_Environment' in doc['Context'].keys(): self.proxy_info['http_proxy'] = doc['Context']['Proxy_Environment']['http_proxy'] self.proxy_info['https_proxy'] = doc['Context']['Proxy_Environment']['https_proxy'] - self.proxy_info['no_proxy'] = doc['Context']['Proxy_Environment']['no_proxy'] + self.proxy_info['no_proxy'] = doc['Context']['Proxy_Environment']['no_proxy'] for k, v in self.roles_ip_list: self.roles_dict[k].append(v) for k, v in self.ip_pw_list: self.ip_pw_dict[k].append(v) return ( self.benchmark, - self.roles_dict.items(), self.vm_parameters, self.benchmark_details.items(), - self.ip_pw_dict.items(), self.proxy_info) - except KeyboardInterrupt: - fname.close() print 'ConfigFile Closed: exiting!' 
sys.exit(0) - def updateAnsible(self): - self.writeTofile(self.roles_dict) + def update_ansible(self): + self.write_to_file(self.roles_dict) - def callpingtest(self): - self.pingtest(self.roles_dict) + def call_ping_test(self): + self.ping_test(self.roles_dict) - def callsshtest(self): - self.sshtest(self.ip_pw_list) + def call_ssh_test(self): + self.ssh_test(self.ip_pw_list) diff --git a/func/fetchimg.py b/func/fetchimg.py index 1c621a3b..1ed3def6 100644 --- a/func/fetchimg.py +++ b/func/fetchimg.py @@ -1,30 +1,35 @@ -############################################################################## -# Copyright (c) 2015 Dell Inc and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - - -import os -import time - - -class FetchImg: - - def __init__(self): - print 'Fetching Image!' - print 'Fetching QTIP_VM Image' - - def download(self): - time.sleep(2) - os.system( - 'mkdir -p Temp_Img && wget http://artifacts.opnfv.org/qtip/QTIP_CentOS.qcow2 -P Temp_Img') - - filepath = './Temp_Img/QTIP_CentOS.qcow2' - while not os.path.isfile(filepath): - time.sleep(10) - print 'Download Completed!' +##############################################################################
+# Copyright (c) 2015 Dell Inc and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import time
+
+IMGSTORE = "/home/opnfv/imgstore"
+
+
class FetchImg:
    """Obtain the QTIP_CentOS guest image, preferring a local image store.

    The image is placed (or symlinked) at ./Temp_Img/QTIP_CentOS.qcow2 so
    that later glance upload code can open it from a fixed relative path.
    """

    def __init__(self):
        print 'Fetching Image!'

    @staticmethod
    def download():
        # Brief pause before touching the filesystem/network.
        # NOTE(review): purpose of this sleep is not evident from the code —
        # presumably a settle delay; confirm whether it is still needed.
        time.sleep(2)
        os.system('mkdir -p Temp_Img')
        filepath = './Temp_Img/QTIP_CentOS.qcow2'
        imgstorepath = IMGSTORE + "/QTIP_CentOS.qcow2"
        if os.path.isfile(imgstorepath):
            # Image already cached in the shared store: symlink instead of
            # re-downloading ~GB of data.
            os.system("ln -s %s %s" % (imgstorepath, filepath))
            print "QTIP_CentOS.qcow2 exists locally. Skipping the download and using the file from IMG store"
        else:
            print 'Fetching QTIP_CentOS.qcow2'
            os.system('wget http://artifacts.opnfv.org/qtip/QTIP_CentOS.qcow2 -P Temp_Img')

        # Poll until the file exists. wget writes into Temp_Img directly, so
        # this normally returns immediately after the command above finishes.
        # NOTE(review): if the download fails this loop never terminates —
        # there is no retry limit or error check on the os.system() calls.
        while not os.path.isfile(filepath):
            time.sleep(10)
        print 'Download Completed!'
diff --git a/func/spawn_vm.py b/func/spawn_vm.py index 94aa424a..ec63230c 100644 --- a/func/spawn_vm.py +++ b/func/spawn_vm.py @@ -1,282 +1,279 @@ -############################################################################## -# Copyright (c) 2015 Dell Inc and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -import os -import sys -from collections import defaultdict -from func.env_setup import Env_setup -from func.fetchimg import FetchImg -import yaml -import heatclient.client -import keystoneclient -import glanceclient -from novaclient import client -import time -import json -from func.create_zones import create_zones - -class SpawnVM(Env_setup): - vm_role_ip_dict = defaultdict(list) - installer = '' - - def __init__(self, vm_info): - print 'SpawnVM Class initiated' - vm_role_ip_dict = vm_info.copy() - print 'Generating Heat Template\n' - self._keystone_client = None - self._heat_client = None - self._glance_client = None - self._nova_client = None - nova =self. 
_get_nova_client() - azoneobj = create_zones() - azoneobj.create_agg(vm_info['availability_zone']) - installer= self.get_installer_type() - self.Heat_template1 = self.HeatTemplate_vm(vm_info,installer) - self.create_stack(vm_role_ip_dict, self.Heat_template1) - - def get_installer_type(self): - print 'Getting Installer Name' - return os.environ['INSTALLER_TYPE'] - - def get_public_network(self,installer_detected): - - ''' - TODO: GET THE NAMES OF THE PUBLIC NETWORKS for OTHER PROJECTS - ''' - print 'Getting Public Network' - if installer_detected.lower() == 'fuel': - return 'admin_floating_net' - if installer_detected.lower() == 'apex': - return 'external' - if installer_detected.lower() == 'compass': - return 'ext-net' - if installer_detected.lower() == 'joid': - return 'ext-net' - - def HeatTemplate_vm(self, vm_params, installer): - try: - Heat_Dic='' - with open('./heat/SampleHeat.yaml', 'r+') as H_temp: - Heat_Dic = yaml.load(H_temp) - except yaml.YAMLError as exc: - if hasattr(exc, 'problem_mark'): - mark = exc.problem_mark - print 'Error in qtip/heat/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1, mark.column + 1) - print 'EXITING PROGRAM. 
Correct File and restart' - sys.exit(0) - fopen = open('./data/QtipKey.pub', 'r') - fopenstr = fopen.read() - fopenstr = fopenstr.rstrip() - scriptcmd = '#!/bin/bash \n echo {0} >> foo.txt \n echo {1} >> /root/.ssh/authorized_keys'.format( - fopenstr, fopenstr) - - netName = self.get_public_network(installer) - print netName - Heat_Dic['heat_template_version'] = '2014-10-16' - Heat_Dic['resources']['KeyPairSavePrivate'] = { - 'type': 'OS::Nova::KeyPair', - 'properties': { - 'save_private_key': 'true', - 'name': 'my_key' - } - } - Heat_Dic['parameters']['public_network'] = { - 'type': 'string', - #'default': vm_params['public_network'][0] - 'default': netName - } - for x in range(1, len(vm_params['availability_zone']) + 1): - avail_zone = vm_params['availability_zone'][x - 1] - img = vm_params['OS_image'][x - 1] - flavor = vm_params['flavor'][x - 1] - - Heat_Dic['parameters']['availability_zone_' +str(x)] = { - 'description': 'Availability Zone of the instance', - 'default': avail_zone, - 'type': 'string' - - } - - Heat_Dic['resources']['public_port_' +str(x)] = { - 'type': 'OS::Neutron::Port', - 'properties': { - 'network': {'get_resource': 'private_network'}, - 'security_groups': [{ 'get_resource': 'demo1_security_Group'}], - 'fixed_ips': [ - {'subnet_id': {'get_resource': 'private_subnet'}}]}} - - Heat_Dic['resources']['floating_ip_' + str(x)] = { - 'type': 'OS::Neutron::FloatingIP', - 'properties': { - 'floating_network': {'get_param': 'public_network'}}} - - Heat_Dic['resources']['floating_ip_assoc_' + str(x)] = { - 'type': 'OS::Neutron::FloatingIPAssociation', - 'properties': { - 'floatingip_id': {'get_resource': 'floating_ip_' + str(x)}, - 'port_id': {'get_resource': 'public_port_' + str(x)}}} - - Heat_Dic['resources']['my_instance_' + str(x)] = { - 'type': 'OS::Nova::Server', - 'properties': { - 'image': img, - 'networks':[ - {'port': {'get_resource': 'public_port_' + str(x)}}], - 'flavor': flavor, - 'availability_zone': avail_zone, - 'name': 'instance' + 
str(x), - 'key_name': {'get_resource': 'KeyPairSavePrivate'}, - 'user_data_format': 'RAW', - 'user_data': scriptcmd}} - - Heat_Dic['resources']['demo1_security_Group'] = { - 'type': 'OS::Neutron::SecurityGroup', - 'properties': { - 'name': 'demo1_security_Group', - 'rules': [{ - 'protocol': 'tcp', - 'port_range_min': 22, - 'port_range_max': 5201}, - {'protocol': 'udp', - 'port_range_min': 22, - 'port_range_max': 5201}, - {'protocol': 'icmp'}]}} - - Heat_Dic['outputs']['instance_PIP_' +str(x)] = { - 'description': 'IP address of the instance', - 'value': {'get_attr': ['my_instance_' + str(x), 'first_address']}} - Heat_Dic['outputs']['instance_ip_' +str(x)] = { - 'description': 'IP address of the instance', - 'value': {'get_attr': ['floating_ip_' + str(x), 'floating_ip_address']}} - - Heat_Dic['outputs']['availability_instance_' + str(x)] = { - 'description': 'Availability Zone of the Instance', - 'value': { 'get_param': 'availability_zone_'+str(x)}} - - - Heat_Dic['outputs']['KeyPair_PublicKey'] = { - 'description': 'Private Key', - 'value': {'get_attr': ['KeyPairSavePrivate', 'private_key']} - } - del Heat_Dic['outputs']['description'] - return Heat_Dic - - def _get_keystone_client(self): - '''returns a keystone client instance''' - - if self._keystone_client is None: - self._keystone_client = keystoneclient.v2_0.client.Client( - auth_url=os.environ.get('OS_AUTH_URL'), - username=os.environ.get('OS_USERNAME'), - password=os.environ.get('OS_PASSWORD'), - tenant_name=os.environ.get('OS_TENANT_NAME')) - return self._keystone_client - - def _get_nova_client(self): - if self._nova_client is None: - keystone = self._get_keystone_client() - self._nova_client = client.Client('2', token=keystone.auth_token) - return self._nova_client - - def _get_heat_client(self): - '''returns a heat client instance''' - if self._heat_client is None: - keystone = self._get_keystone_client() - heat_endpoint = keystone.service_catalog.url_for( - service_type='orchestration') - 
self._heat_client = heatclient.client.Client( - '1', endpoint=heat_endpoint, token=keystone.auth_token) - return self._heat_client - - def _get_glance_client(self): - if self._glance_client is None: - keystone = self._get_keystone_client() - glance_endpoint = keystone.service_catalog.url_for( - service_type='image') - self._glance_client = glanceclient.Client( - '2', glance_endpoint, token=keystone.auth_token) - return self._glance_client - - def create_stack(self, vm_role_ip_dict, Heat_template): - - stackname = 'QTIP' - heat = self._get_heat_client() - glance = self._get_glance_client() - - available_images = [] - for image_list in glance.images.list(): - - available_images.append(image_list.name) - - if 'QTIP_CentOS' in available_images: - print 'Image Present' - - elif 'QTIP_CentOS' not in available_images: - fetchImage = FetchImg() - fetchImage.download() - print 'Uploading Image to Glance. Please wait' - qtip_image = glance.images.create( - name='QTIP_CentOS', - visibility='public', - disk_format='qcow2', - container_format='bare') - qtip_image = glance.images.upload( - qtip_image.id, open('./Temp_Img/QTIP_CentOS.qcow2')) - json_temp = json.dumps(Heat_template) - - for checks in range(3): - for prev_stacks in heat.stacks.list(): - - if prev_stacks.stack_name == 'QTIP': - print 'QTIP Stacks exists.\nDeleting Existing Stack' - heat.stacks.delete('QTIP') - time.sleep(10) - - print '\nStack Creating Started\n' - - try: - heat.stacks.create(stack_name=stackname, template=Heat_template) - except: - print 'Create Failed :( ' - - cluster_detail = heat.stacks.get(stackname) - while(cluster_detail.status != 'COMPLETE'): - if cluster_detail.status == 'IN_PROGRESS': - print 'Stack Creation in Progress' - cluster_detail = heat.stacks.get(stackname) - time.sleep(10) - print 'Stack Created' - print 'Getting Public IP(s)' - zone = [] - s=0 - for vm in range(len(vm_role_ip_dict['OS_image'])): - - for I in cluster_detail.outputs: - availabilityKey = 
'availability_instance_'+str(vm+1) - - if I['output_key'] == availabilityKey: - zone.insert(s,str(I['output_value'])) - s=s+1 - for i in cluster_detail.outputs: - instanceKey = "instance_ip_" + str(vm + 1) - privateIPkey = 'instance_PIP_' + str(vm +1) - if i['output_key'] == instanceKey: - Env_setup.roles_dict[vm_role_ip_dict['role'][vm]].append( - str(i['output_value'])) - Env_setup.ip_pw_list.append((str(i['output_value']),'')) - - if i['output_key'] == privateIPkey: - Env_setup.ip_pw_dict[vm_role_ip_dict['role'][vm]]=str(i['output_value']) - if i['output_key'] == 'KeyPair_PublicKey': - sshkey = str(i['output_value']) - - with open('./data/my_key.pem', 'w') as fopen: - fopen.write(sshkey) - fopen.close() - print Env_setup.ip_pw_list +##############################################################################
+# Copyright (c) 2015 Dell Inc and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import sys
+from collections import defaultdict
+from func.env_setup import Env_setup
+from func.fetchimg import FetchImg
+import yaml
+import heatclient.client
+import keystoneclient
+import glanceclient
+from novaclient import client
+import time
+from func.create_zones import create_zones
+
+
class SpawnVM(Env_setup):
    """Spawn benchmark VMs on an OpenStack cloud via a generated Heat stack.

    Builds a Heat template from ./heat/SampleHeat.yaml plus per-VM
    parameters, (re)creates a stack named 'QTIP', and records the resulting
    floating/private IPs into the Env_setup class-level dictionaries so the
    rest of the framework can reach the instances.
    """

    # Class-level defaults; note that __init__ below assigns same-named
    # *locals*, so these class attributes are effectively unused after init.
    vm_role_ip_dict = defaultdict(list)
    installer = ''

    def __init__(self, vm_info):
        """vm_info: dict with per-VM lists, at least 'availability_zone',
        'OS_image', 'flavor' and 'role' (indexed in parallel)."""
        Env_setup.__init__(self)
        print 'SpawnVM Class initiated'
        # NOTE(review): this is a local that shadows the class attribute
        # vm_role_ip_dict — presumably intentional, but worth confirming.
        vm_role_ip_dict = vm_info.copy()
        print 'Generating Heat Template\n'
        self._keystone_client = None
        self._heat_client = None
        self._glance_client = None
        self._nova_client = None
        # Called for its side effect of initialising the cached nova client;
        # the return value is discarded.
        self._get_nova_client()
        azoneobj = create_zones()
        azoneobj.create_agg(vm_info['availability_zone'])
        # Local 'installer' shadows the class attribute of the same name.
        installer = self.get_installer_type()
        self.Heat_template1 = self.heat_template_vm(vm_info, installer)
        self.create_stack(vm_role_ip_dict, self.Heat_template1)

    @staticmethod
    def get_installer_type():
        """Return the deployed installer's name from the environment.

        Raises KeyError if INSTALLER_TYPE is not set.
        """
        print 'Getting Installer Name'
        return os.environ['INSTALLER_TYPE']

    @staticmethod
    def get_public_network(installer_detected):

        """
        TODO: GET THE NAMES OF THE PUBLIC NETWORKS for OTHER PROJECTS
        """
        # Map each known installer to the external/floating network name it
        # creates by default. Returns None (implicitly) for unknown
        # installers — callers get a None network name in that case.
        print 'Getting Public Network'
        if installer_detected.lower() == 'fuel':
            return 'admin_floating_net'
        if installer_detected.lower() == 'apex':
            return 'external'
        if installer_detected.lower() == 'compass':
            return 'ext-net'
        if installer_detected.lower() == 'joid':
            return 'ext-net'

    def heat_template_vm(self, vm_params, installer):
        """Build and return the Heat template dict for the requested VMs.

        Starts from ./heat/SampleHeat.yaml and adds, per VM x: a Neutron
        port, a floating IP plus association, a Nova server wired to the
        generated keypair, and outputs for its public/private IPs and
        availability zone. Exits the program on a malformed base YAML file.
        """
        Heat_Dic = {}
        try:
            with open('./heat/SampleHeat.yaml', 'r+') as H_temp:
                Heat_Dic = yaml.load(H_temp)
        except yaml.YAMLError as exc:
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                print 'Error in qtip/heat/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1, mark.column + 1)
                print 'EXITING PROGRAM. Correct File and restart'
            sys.exit(0)
        # Inject the framework's public key into each VM so the controller
        # can SSH in as root (written twice: once to foo.txt as a marker,
        # once to authorized_keys).
        fopen = open('./data/QtipKey.pub', 'r')
        fopenstr = fopen.read()
        fopenstr = fopenstr.rstrip()
        scriptcmd = '#!/bin/bash \n echo {0} >> foo.txt \n echo {1} >> /root/.ssh/authorized_keys'.format(
            fopenstr, fopenstr)

        netName = self.get_public_network(installer)
        print netName
        Heat_Dic['heat_template_version'] = '2014-10-16'
        Heat_Dic['resources']['KeyPairSavePrivate'] = {
            'type': 'OS::Nova::KeyPair',
            'properties': {
                'save_private_key': 'true',
                'name': 'my_key'
            }
        }
        Heat_Dic['parameters']['public_network'] = {
            'type': 'string',
            'default': netName
        }
        # One set of resources per requested VM (1-based suffix).
        for x in range(1, len(vm_params['availability_zone']) + 1):
            avail_zone = vm_params['availability_zone'][x - 1]
            img = vm_params['OS_image'][x - 1]
            flavor = vm_params['flavor'][x - 1]

            Heat_Dic['parameters']['availability_zone_' + str(x)] = \
                {'description': 'Availability Zone of the instance',
                 'default': avail_zone,
                 'type': 'string'}

            # Port on the private network; assumes 'private_network' and
            # 'private_subnet' resources exist in SampleHeat.yaml.
            Heat_Dic['resources']['public_port_' + str(x)] = \
                {'type': 'OS::Neutron::Port',
                 'properties': {'network': {'get_resource': 'private_network'},
                                'security_groups': [{'get_resource': 'demo1_security_Group'}],
                                'fixed_ips': [{'subnet_id':
                                               {'get_resource': 'private_subnet'}}]}}

            Heat_Dic['resources']['floating_ip_' + str(x)] = {
                'type': 'OS::Neutron::FloatingIP',
                'properties': {
                    'floating_network': {'get_param': 'public_network'}}}

            Heat_Dic['resources']['floating_ip_assoc_' + str(x)] = {
                'type': 'OS::Neutron::FloatingIPAssociation',
                'properties': {
                    'floatingip_id': {'get_resource': 'floating_ip_' + str(x)},
                    'port_id': {'get_resource': 'public_port_' + str(x)}}}

            Heat_Dic['resources']['my_instance_' + str(x)] = \
                {'type': 'OS::Nova::Server',
                 'properties': {'image': img,
                                'networks':
                                    [{'port': {'get_resource': 'public_port_' + str(x)}}],
                                'flavor': flavor,
                                'availability_zone': avail_zone,
                                'name': 'instance' + str(x),
                                'key_name': {'get_resource': 'KeyPairSavePrivate'},
                                'user_data_format': 'RAW',
                                'user_data': scriptcmd}}

            # Rewritten on every loop iteration; the security group is
            # shared by all instances. Opens TCP/UDP 22-5201 (SSH + iperf
            # range) and ICMP.
            Heat_Dic['resources']['demo1_security_Group'] = {
                'type': 'OS::Neutron::SecurityGroup',
                'properties': {
                    'name': 'demo1_security_Group',
                    'rules': [{
                        'protocol': 'tcp',
                        'port_range_min': 22,
                        'port_range_max': 5201},
                        {'protocol': 'udp',
                         'port_range_min': 22,
                         'port_range_max': 5201},
                        {'protocol': 'icmp'}]}}

            # PIP = private (fixed) IP, ip = floating IP.
            Heat_Dic['outputs']['instance_PIP_' + str(x)] = {
                'description': 'IP address of the instance',
                'value': {'get_attr': ['my_instance_' + str(x), 'first_address']}}
            Heat_Dic['outputs']['instance_ip_' + str(x)] = {
                'description': 'IP address of the instance',
                'value': {'get_attr': ['floating_ip_' + str(x), 'floating_ip_address']}}

            Heat_Dic['outputs']['availability_instance_' + str(x)] = {
                'description': 'Availability Zone of the Instance',
                'value': {'get_param': 'availability_zone_' + str(x)}}

        # NOTE(review): despite the output name 'KeyPair_PublicKey', this
        # exposes the generated *private* key (see create_stack, which
        # writes it to ./data/my_key.pem).
        Heat_Dic['outputs']['KeyPair_PublicKey'] = {
            'description': 'Private Key',
            'value': {'get_attr': ['KeyPairSavePrivate', 'private_key']}
        }
        del Heat_Dic['outputs']['description']
        print Heat_Dic
        return Heat_Dic

    def _get_keystone_client(self):
        """returns a keystone client instance"""

        # Lazily created and cached; credentials come from the standard
        # OS_* environment variables (keystone v2).
        if self._keystone_client is None:
            self._keystone_client = keystoneclient.v2_0.client.Client(
                auth_url=os.environ.get('OS_AUTH_URL'),
                username=os.environ.get('OS_USERNAME'),
                password=os.environ.get('OS_PASSWORD'),
                tenant_name=os.environ.get('OS_TENANT_NAME'))
        return self._keystone_client

    def _get_nova_client(self):
        """returns a cached nova client authenticated by keystone token"""
        if self._nova_client is None:
            keystone = self._get_keystone_client()
            self._nova_client = client.Client('2', token=keystone.auth_token)
        return self._nova_client

    def _get_heat_client(self):
        """returns a heat client instance"""
        if self._heat_client is None:
            keystone = self._get_keystone_client()
            heat_endpoint = keystone.service_catalog.url_for(
                service_type='orchestration')
            self._heat_client = heatclient.client.Client(
                '1', endpoint=heat_endpoint, token=keystone.auth_token)
        return self._heat_client

    def _get_glance_client(self):
        """returns a cached glance (image service) client instance"""
        if self._glance_client is None:
            keystone = self._get_keystone_client()
            glance_endpoint = keystone.service_catalog.url_for(
                service_type='image')
            self._glance_client = glanceclient.Client(
                '2', glance_endpoint, token=keystone.auth_token)
        return self._glance_client

    def create_stack(self, vm_role_ip_dict, heat_template):
        """Create the 'QTIP' Heat stack and harvest its outputs.

        Uploads the QTIP_CentOS image to glance if absent, deletes any
        pre-existing 'QTIP' stack, creates a new one from heat_template,
        waits for completion, then stores each VM's floating/private IP in
        the Env_setup class dictionaries and writes the generated private
        key to ./data/my_key.pem.
        """

        # NOTE(review): sshkey is only assigned inside the outputs loop; if
        # the 'KeyPair_PublicKey' output is missing, the write below raises
        # NameError on the (module-)global name.
        global sshkey
        stackname = 'QTIP'
        heat = self._get_heat_client()
        glance = self._get_glance_client()

        available_images = []
        for image_list in glance.images.list():

            available_images.append(image_list.name)

        if 'QTIP_CentOS' in available_images:
            print 'Image Present'

        elif 'QTIP_CentOS' not in available_images:
            # Download (or symlink from the image store) then upload.
            fetchImage = FetchImg()
            fetchImage.download()
            print 'Uploading Image to Glance. Please wait'
            qtip_image = glance.images.create(
                name='QTIP_CentOS',
                visibility='public',
                disk_format='qcow2',
                container_format='bare')
            glance.images.upload(
                qtip_image.id, open('./Temp_Img/QTIP_CentOS.qcow2'))
        # Retry the delete of any stale 'QTIP' stack up to 3 times; heat
        # deletion is asynchronous, hence the re-list + sleep each pass.
        for checks in range(3):
            print "Try to delete heats %s" % checks
            for prev_stacks in heat.stacks.list():
                if prev_stacks.stack_name == 'QTIP':
                    print 'QTIP Stacks exists.\nDeleting Existing Stack'
                    heat.stacks.delete('QTIP')
                    time.sleep(10)

        print '\nStack Creating Started\n'

        try:
            heat.stacks.create(stack_name=stackname, template=heat_template)
        except Exception:
            # Failure is only logged; the polling below will then loop on a
            # missing/incomplete stack. NOTE(review): consider re-raising.
            print 'Create Failed :( '

        # Poll until the stack reports COMPLETE (10 s interval).
        cluster_detail = heat.stacks.get(stackname)
        while cluster_detail.status != 'COMPLETE':
            if cluster_detail.status == 'IN_PROGRESS':
                print 'Stack Creation in Progress'
            cluster_detail = heat.stacks.get(stackname)
            time.sleep(10)
        print 'Stack Created'
        print 'Getting Public IP(s)'
        zone = []
        s = 0
        for vm in range(len(vm_role_ip_dict['OS_image'])):

            # First pass: collect the availability zone reported for VM vm+1.
            for I in cluster_detail.outputs:
                availabilityKey = 'availability_instance_' + str(vm + 1)

                if I['output_key'] == availabilityKey:
                    zone.insert(s, str(I['output_value']))
                    s = s + 1
            # Second pass: floating IP, private IP and the keypair output.
            for i in cluster_detail.outputs:
                instanceKey = "instance_ip_" + str(vm + 1)
                privateIPkey = 'instance_PIP_' + str(vm + 1)
                if i['output_key'] == instanceKey:
                    # Floating IP goes into the role->IPs map; password is
                    # empty because access is key-based.
                    Env_setup.roles_dict[vm_role_ip_dict['role'][vm]] \
                        .append(str(i['output_value']))
                    Env_setup.ip_pw_list.append((str(i['output_value']), ''))

                if i['output_key'] == privateIPkey:
                    Env_setup.ip_pw_dict[vm_role_ip_dict['role'][vm]] = str(i['output_value'])
                if i['output_key'] == 'KeyPair_PublicKey':
                    # Despite the key name, this is the generated *private*
                    # key (see heat_template_vm).
                    sshkey = str(i['output_value'])

        with open('./data/my_key.pem', 'w') as fopen:
            fopen.write(sshkey)
        fopen.close()
        print Env_setup.ip_pw_list
diff --git a/func/validate_yaml.py b/func/validate_yaml.py deleted file mode 100644 index fcf32a21..00000000 --- a/func/validate_yaml.py +++ /dev/null @@ -1,28 +0,0 @@ -##############################################################################
-# Copyright (c) 2015 Dell Inc and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-class Validate_Yaml(object):
-
- def __init__(self, doc):
-
- print('Validating YAML CONFIG FILE')
-
- if not doc['Scenario']:
- print('\nScenario Field missing\na')
- if not doc['Scenario']['benchmark']:
- print('\nBenchmark field missing')
- if not doc['Scenario']['pointless']:
- print('')
- if not doc['Context']:
- print('\nEntire Context is missing')
- if not doc['Context']['Host_Machine']:
- print('\nNo Host Machine')
- if not doc['Context']['Host_Machine']['machine_1']:
- print('\nNo Host Machine')
diff --git a/requirements.txt b/requirements.txt index cc048389..358b7d0d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ pyyaml==3.10 +paramiko==1.16.0 python-neutronclient==2.6.0 python-novaclient==2.28.1 python-glanceclient==1.1.0 diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..52874dd3 --- /dev/null +++ b/setup.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python + +from distutils.core import setup + + +setup(name='qtip', + py_modules=['qtip'], + version='1.0', + author='opnfv', + packages=['func', 'data.ref_results', 'data.report']) diff --git a/test-requirements.txt b/test-requirements.txt index e4a0274b..31581503 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,3 +3,5 @@ # process, which may cause wedges in the gate later. pytest +pykwalify +mock diff --git a/tests/cli_test.py b/tests/cli_test.py new file mode 100644 index 00000000..f12e8fed --- /dev/null +++ b/tests/cli_test.py @@ -0,0 +1,20 @@ +import pytest +from func.cli import cli + + +class TestClass: + @pytest.mark.parametrize("test_input, expected", [ + (['-l', + 'zte', + '-f', + 'compute'], "You have specified a lab that is not present in test_cases"), + (['-l', + 'zte-pod1', + '-f', + 'test'], "Test File Does not exist in test_list") + ]) + def test_cli_error(self, capfd, test_input, expected): + with pytest.raises(SystemExit): + cli(test_input) + resout, reserr = capfd.readouterr() + assert expected in resout diff --git a/tests/create_zones_test.py b/tests/create_zones_test.py new file mode 100644 index 00000000..e431a907 --- /dev/null +++ b/tests/create_zones_test.py @@ -0,0 +1,89 @@ +import pytest +import mock +from mock import Mock, MagicMock +import os +from func.create_zones import create_zones + +return_list = [] + + +def get_agg_mock(host): + agg = Mock() + agg.name = host + agg.id = host + return agg + + +class HyperMock(MagicMock): + def list(self): + mock_hypervisor = [Mock(service={'host': '10.20.0.4'}), Mock(service={'host': 
'10.20.0.5'})] + return mock_hypervisor + + +class AggMock(MagicMock): + def get_details(self, agg_id): + print "get_detail:{0}".format(agg_id) + return Mock(hosts=[]) + + def create(self, host, agg): + print "create:{0}:{1}".format(host, agg) + return agg + + def list(self): + return return_list + + def delete(self, agg_id): + print "delete:{0}".format(agg_id) + pass + + def add_host(self, aggregate, host): + print "add_host:{0}:{1}".format(aggregate, host) + pass + + def remove_host(self, agg_id, host): + pass + + +class NovaMock(MagicMock): + hypervisors = HyperMock() + aggregates = AggMock() + + +class TestClass: + @pytest.mark.parametrize("test_input, expected", [ + ([[], ['compute1', 'compute2']], + ['create:10.20.0.4:compute1', + 'add_host:compute1:10.20.0.4', + 'create:10.20.0.5:compute2', + 'add_host:compute2:10.20.0.5']), + ([[get_agg_mock('10.20.0.4'), get_agg_mock('10.20.0.5')], ['compute1', 'compute2']], + ['delete:10.20.0.4', + 'create:10.20.0.4:compute1', + 'get_detail:10.20.0.4', + 'add_host:10.20.0.4:10.20.0.4', + 'delete:10.20.0.5', + 'create:10.20.0.5:compute2', + 'get_detail:10.20.0.5', + 'add_host:10.20.0.5:10.20.0.5']), + ([[], ['compute1', 'compute5']], + ['The specified compute node doesnt exist. 
using compute 1']) + ]) + @mock.patch('func.create_zones.client', autospec=True) + @mock.patch('func.create_zones.v2', autospec=True) + @mock.patch('func.create_zones.session') + def test_create_zones_success(self, mock_keystone_session, mock_keystone_v2, mock_nova_client, test_input, expected, capfd): + global return_list + return_list = test_input[0] + nova_obj = NovaMock() + mock_nova_client.Client.return_value = nova_obj() + k = mock.patch.dict(os.environ, {'OS_AUTH_URL': 'http://172.10.0.5:5000', + 'OS_USERNAME': 'admin', + 'OS_PASSWORD': 'admin', + 'OS_TENANT_NAME': 'admin'}) + k.start() + create = create_zones() + create.create_agg(test_input[1]) + k.stop() + resout, reserr = capfd.readouterr() + for x in expected: + assert x in resout diff --git a/tests/driver_test.py b/tests/driver_test.py new file mode 100644 index 00000000..39adc939 --- /dev/null +++ b/tests/driver_test.py @@ -0,0 +1,65 @@ +import pytest +import mock +import os +import json +from func.driver import Driver + + +class TestClass: + @pytest.mark.parametrize("test_input, expected", [ + (["iperf", + [('host', ['10.20.0.13', '10.20.0.15'])], + "iperf_bm.yaml", + [('duration', 20), ('protocol', 'tcp'), ('bandwidthGbps', 0)], + [("10.20.0.13", [None]), ("10.20.0.15", [None])], + {'http_proxy': 'http://10.20.0.1:8118', + 'https_proxy': 'http://10.20.0.1:8118', + 'no_proxy': 'localhost,127.0.0.1,10.20.*,192.168.*'}, + 'fuel'], + {'Dest_dir': 'results', + 'ip1': '', + 'ip2': '', + 'installer': 'fuel', + 'workingdir': '/home', + 'fname': 'iperf_bm.yaml', + 'username': 'root', + 'http_proxy': 'http://10.20.0.1:8118', + 'https_proxy': 'http://10.20.0.1:8118', + 'no_proxy': 'localhost,127.0.0.1,10.20.*,192.168.*', + 'duration': 20, + 'protocol': 'tcp', + 'bandwidthGbps': 0, + "role": "host"}), + (["iperf", + [('1-server', ['10.20.0.13']), ('2-host', ['10.20.0.15'])], + "iperf_vm.yaml", + [('duration', 20), ('protocol', 'tcp'), ('bandwidthGbps', 0)], + [("10.20.0.13", [None]), ("10.20.0.15", [None])], + 
{}, + 'joid'], + {'Dest_dir': 'results', + 'ip1': '10.20.0.13', + 'ip2': '', + 'installer': 'joid', + "privateip1": "NONE", + 'workingdir': '/home', + 'fname': 'iperf_vm.yaml', + 'username': 'ubuntu', + 'duration': 20, + 'protocol': 'tcp', + 'bandwidthGbps': 0, + "role": "2-host"}) + ]) + @mock.patch('func.driver.os.system') + def test_driver_success(self, mock_system, test_input, expected): + mock_system.return_value = True + k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': test_input[6], 'PWD': '/home'}) + k.start() + dri = Driver() + dri.drive_bench(test_input[0], test_input[1], test_input[2], test_input[3], test_input[4], test_input[5]) + call = mock_system.call_args + k.stop() + call_args, call_kwargs = call + real_call = call_args[0].split('extra-vars \'')[1] + real_call = real_call[0: len(real_call) - 1] + assert json.loads(real_call) == json.loads(json.dumps(expected)) diff --git a/tests/env_setup_test.py b/tests/env_setup_test.py new file mode 100644 index 00000000..9112ff94 --- /dev/null +++ b/tests/env_setup_test.py @@ -0,0 +1,59 @@ +import pytest +import filecmp +from func.env_setup import Env_setup + + +class TestClass: + + @pytest.mark.parametrize("test_input, expected", [ + ("tests/test_case/bm_with_proxy.yaml", ["dhrystone", + {}, + [], + {'http_proxy': 'http://10.20.0.1:8118', + 'https_proxy': 'http://10.20.0.1:8118', + 'no_proxy': 'localhost,127.0.0.1,10.20.*,192.168.*'}]), + ("tests/test_case/bm_without_proxy.yaml", ["dhrystone", + {}, + [], + {}]), + ("tests/test_case/vm.yaml", ["iperf", + {'availability_zone': ['compute1', 'compute1'], + 'OS_image': ['QTIP_CentOS', 'QTIP_CentOS'], + 'public_network': ['admin-floating_net', 'admin-floating_net'], + 'flavor': ['m1.large', 'm1.large'], + 'role': ['1-server', '2-host']}, + [('duration', 20), ('protocol', 'tcp'), ('bandwidthGbps', 0)], + {'http_proxy': 'http://10.20.0.1:8118', + 'https_proxy': 'http://10.20.0.1:8118', + 'no_proxy': 'localhost,127.0.0.1,10.20.*,192.168.*'}]) + ]) + def 
test_parse_success(self, test_input, expected): + print (test_input) + print (expected) + test_class = Env_setup() + benchmark, vm_para, details, proxy = \ + test_class.parse(test_input) + assert benchmark == expected[0] + assert vm_para == expected[1] + assert sorted(details) == sorted(expected[2]) + assert proxy == expected[3] + + def test_parse_vm_error(self): + test_class = Env_setup() + with pytest.raises(KeyError) as excinfo: + test_class.parse("tests/test_case/vm_error.yaml") + assert "benchmark" in str(excinfo.value) + + def test_update_ansible(self): + test_class = Env_setup() + test_class.parse("tests/test_case/bm_without_proxy.yaml") + test_class.update_ansible() + result = filecmp.cmp('tests/output/hosts', 'data/hosts') + assert result + + def test_ping(self, capfd): + test_class = Env_setup() + test_class.parse("tests/test_case/bm_ping.yaml") + test_class.call_ping_test() + resout, reserr = capfd.readouterr() + assert '127.0.0.1 is UP' in resout diff --git a/tests/fetchimg_test.py b/tests/fetchimg_test.py new file mode 100644 index 00000000..683c9701 --- /dev/null +++ b/tests/fetchimg_test.py @@ -0,0 +1,22 @@ +import mock +from func.fetchimg import FetchImg + + +class TestClass: + @mock.patch('func.fetchimg.os') + @mock.patch('func.fetchimg.os.path') + def test_fetch_img_success(self, mock_path, mock_os): + mock_os.system.return_value = True + mock_path.isfile.return_value = True + img = FetchImg() + img.download() + + @mock.patch('func.fetchimg.time') + @mock.patch('func.fetchimg.os.system') + @mock.patch('func.fetchimg.os.path') + def test_fetch_img_fail(self, mock_path, mock_system, mock_time): + img = FetchImg() + mock_system.return_value = True + mock_path.isfile.side_effect = [False, True] + img.download() + assert mock_time.sleep.call_count == 2 diff --git a/tests/output/hosts b/tests/output/hosts new file mode 100644 index 00000000..9b47df0e --- /dev/null +++ b/tests/output/hosts @@ -0,0 +1,3 @@ +[host] +10.20.0.29 +10.20.0.28 diff --git 
a/tests/schema/test_bm_schema.yaml b/tests/schema/test_bm_schema.yaml new file mode 100644 index 00000000..740725f5 --- /dev/null +++ b/tests/schema/test_bm_schema.yaml @@ -0,0 +1,75 @@ +type: map +mapping: + Scenario: + type: map + mapping: + benchmark: + type: str + required: True + host: + type: str + server: + type: str + allowempty: True + client: + type: str + allowempty: True + topology: + type: str + allowempty: True + benchmark_details: + type: map + mapping: + duration: + type: int + protocol: + type: str + bandwidthGbps: + type: int + description: + type: str + 1Run: + type: str + + Context: + type: map + mapping: + Host_Machines: + type: map + required: True + mapping: + regex;(^machine): + type: map + mapping: + role: + type: str + ip: + type: str + pw: + type: str + allowempty: True + Virtual_Machines: + type: map + allowempty: True + Proxy_Environment: + type: map + mapping: + http_proxy: + type: str + https_proxy: + type: str + no_proxy: + type: str + + Test_Description: + type: map + mapping: + Test_category: + type: str + allowempty: True + Benchmark: + type: str + allowempty: True + Overview: + type: str + allowempty: True diff --git a/tests/schema/test_vm_schema.yaml b/tests/schema/test_vm_schema.yaml new file mode 100644 index 00000000..524f8fe4 --- /dev/null +++ b/tests/schema/test_vm_schema.yaml @@ -0,0 +1,80 @@ +type: map +mapping: + Scenario: + type: map + mapping: + benchmark: + type: str + required: True + host: + type: str + server: + type: str + allowempty: True + 1Run: + type: str + client: + type: str + allowempty: True + topology: + type: str + allowempty: True + benchmark_details: + type: map + mapping: + duration: + type: int + protocol: + type: str + bandwidthGbps: + type: int + teststream: + type: str + description: + type: str + + Context: + type: map + mapping: + Host_Machines: + type: map + allowempty: True + Virtual_Machines: + type: map + required: True + mapping: + regex;(^virtualmachine): + type: map + mapping: + 
availability_zone: + type: str + OS_image: + type: str + public_network: + type: str + role: + type: str + flavor: + type: str + Proxy_Environment: + type: map + mapping: + http_proxy: + type: str + https_proxy: + type: str + no_proxy: + type: str + + Test_Description: + type: map + mapping: + Test_category: + type: str + allowempty: True + Benchmark: + type: str + allowempty: True + Overview: + type: str + allowempty: True diff --git a/tests/spawn_vm_test.py b/tests/spawn_vm_test.py new file mode 100644 index 00000000..eb843ad9 --- /dev/null +++ b/tests/spawn_vm_test.py @@ -0,0 +1,71 @@ +import pytest +import mock +from mock import Mock, MagicMock +import os +from func.spawn_vm import SpawnVM + + +class KeystoneMock(MagicMock): + auth_token = Mock() + v2_0 = Mock() + + +class ImageMock(MagicMock): + name = 'QTIP_CentOS' + + +class ImagesMock(MagicMock): + def list(self): + return [ImageMock()] + + +class StackMock(MagicMock): + status = 'COMPLETE' + outputs = [{'output_key': 'availability_instance_1', + 'output_value': 'output_value_1'}, + {'output_key': 'instance_ip_1', + "output_value": "172.10.0.154"}, + {"output_key": "instance_PIP_1", + "output_value": "10.10.17.5"}, + {'output_key': 'KeyPair_PublicKey', + "output_value": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpwIBAAKCAQEAqCiHcrLBXtxG0LhnKndU7VIVpYxORmv0d4tvujkWOkYuagiW\nU/MTRk0zhRvFQDVPEs0Jrj/BIecqm6fjjT6dZ/H7JLYGaqJitRkoupKgBsMSIqUz\nrR0ekOlfXZ6N+Ud8k6s+qjc7BO4b1ezz78jHisC5o0GCkUV0ECx64Re1fO+oKs1c\nfL9aaexahJUYN3J48pazQz+imc2x/G9nuqHX3cqEszmxnT4jwv//In1GjHy2AyXw\n1oA5F6wZoQCSrXc2BditU+1tlVhEkPFt5JgiHUpY8T8mYbroT7JH6xjcGSKUN+HG\nN8PXNUTD1VAQfwHpkfsGMfDyzjytCXsoTEOqnwIDAQABAoIBAAEL/4vfQQTuKiKy\ngzHofEbd8/SL4xDdKzBzVca7BEBon3FZjFYJdV1CrcduXNQBgPSFAkJrczBa2BEQ\nAoKmmSREhWO9Hl0blbG67l36+7QPEtXUYXX6cG5Ghal3izq6DzR8JG+62Es3kETM\nrNgZT+S1PnKdvcpZvFc9b6ZnF2InuTbrmNVBZKrhdWOJ5tCwRGKKUl6BHoJH3yu0\nT5hUW277e1LYHx+hZtoZ98ToC+LGe6/M8a8y6VLYpcQlX2AtVXeGDalomunF+p3f\nuY6din6s4lq1gSJz03PTpUbwiuhYCTe8Xkseu74Y+XYYJXPHopFju0Ewd6p0Db9Q\nJzzxCoECggCBAM2ox9zyrDc/Vlc0bb9SciFGUd/nEJF89+UHy98bAkpo22zNZIDg\nfacSgkg/6faZD+KrOU0I5W7m2B5t6w2fNHHik6NYGSLQ1JhgbXELGV7X/qECDL02\nctPaf+8o+dYoZja2LdJNASq2nmEmPI3LSHhzAt4dWY4W+geXiHt4iWVHAoIAgQDR\nUdN09xv4U+stWqNcSfgjtx6boEUE8Ky7pyj+LrZKG0L61Jy9cSDP0x0rCtkW9vVR\n6RjidWM/DHQ5cl6aq+7pPy20/OqtqttFYT4R+C3AoAnRSaNzPD9a80C2gjv7WEz0\nPPFstWkI1gsN71KKRx7e6NIa9CNn5x9iE+SGfjgb6QKCAIBXylzG7LCnRNpOj4rp\nyP//RE1fDvv7nyUTF6jnrFfl+6zvXR4yBaKd10DWJrJxGhW15PGo+Ms39EL9el6E\nihmRI+9yIwFX411dToxpXRuPaRTBFmbpvnx2Ayfpp8w+pzA62rnktApzeVFSl0fy\nH3zoLfBjcJPyG8zPwNf6HRJJsQKCAIAE2S5asTaWo+r4m/bYtmXm/eDZnfa7TI/T\nsOWELbTPNp5wjOgsgyhNaAhu7MtmesXn5cxLwohP94vhoMKMNptMD8iRPqJ471Iw\n4zW62NLGeW6AyIHes3CMPMIs+AtHoR33MkotSG5sY/jRk8+HoGoYo6/qK+l+CJ5z\neR579wR5sQKCAIAvPWq+bvcPTDKUU1Fe/Y/GyWoUA+uSqmCdORBkK38lALFGphxj\nfDz9dXskimqW+A9hOPOS8dm8YcVvi/TLXVE5Vsx9VkOg6z6AZBQpgNXGfOgpju4W\nbjER7bQaASatuWQyCxbA9oNlAUdSeOhGTxeFLkLj7hNMd6tLjfd8w7A/hA==\n-----END RSA PRIVATE KEY-----\n"}] + + +class HeatMock(MagicMock): + def list(self): + return [] + + def get(self, stackname): + return StackMock() + + def create(self, stack_name, template): + pass + + +class TestClass: + @pytest.mark.parametrize("test_input, expected", [ + ({'availability_zone': ['compute1', 'compute1'], + 'OS_image': 
['QTIP_CentOS', 'QTIP_CentOS'], + 'public_network': ['admin-floating_net', 'admin-floating_net'], + 'flavor': ['m1.large', 'm1.large'], + 'role': ['1-server', '2-host']}, + [('172.10.0.154', '')]), + ]) + @mock.patch('func.spawn_vm.Env_setup') + @mock.patch('func.spawn_vm.FetchImg') + @mock.patch('func.spawn_vm.create_zones') + @mock.patch('func.spawn_vm.client', autospec=True) + @mock.patch('func.spawn_vm.glanceclient', autospec=True) + @mock.patch('func.spawn_vm.keystoneclient.v2_0', autospec=True) + @mock.patch('func.spawn_vm.heatclient.client', autospec=True) + def test_create_zones_success(self, mock_heat, mock_keystone, mock_glance, + mock_nova_client, mock_zone, mock_fetch, + mock_setup, test_input, expected): + mock_glance.Client.return_value = Mock(images=ImagesMock()) + mock_nova_client.Client.return_value = Mock() + mock_heat.Client.return_value = Mock(stacks=HeatMock()) + k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel'}) + k.start() + SpawnVM(test_input) + k.stop() + mock_setup.ip_pw_list.append.assert_called_with(expected[0]) diff --git a/tests/test_case/bm_ping.yaml b/tests/test_case/bm_ping.yaml new file mode 100644 index 00000000..41d696e2 --- /dev/null +++ b/tests/test_case/bm_ping.yaml @@ -0,0 +1,29 @@ +
+Scenario:
+ benchmark: dhrystone
+ host: machine_1
+ server:
+
+Context:
+ Host_Machines:
+ machine_1:
+ ip: 127.0.0.1
+ pw:
+ role: host
+
+ Virtual_Machines:
+
+
+Test_Description:
+ Test_category: "Compute"
+ Benchmark: "dhrystone"
+ Overview: >
+ ''' This test will run the dhrystone benchmark on machine_1.\n
+ if you wish to add a virtual machine add the following information under the Virtual_Machine tag
+
+ virtualmachine_1:
+ availability_zone:
+ public_network:
+ OS_image:
+ flavor:
+ role: '''
diff --git a/tests/test_case/bm_with_proxy.yaml b/tests/test_case/bm_with_proxy.yaml new file mode 100644 index 00000000..1d73300b --- /dev/null +++ b/tests/test_case/bm_with_proxy.yaml @@ -0,0 +1,39 @@ +
+Scenario:
+ benchmark: dhrystone
+ host: machine_1, machine_2
+ server:
+
+Context:
+ Host_Machines:
+ machine_1:
+ ip: 10.20.0.28
+ pw:
+ role: host
+ machine_2:
+ ip: 10.20.0.29
+ pw:
+ role: host
+
+ Virtual_Machines:
+
+ Proxy_Environment:
+ http_proxy: http://10.20.0.1:8118
+ https_proxy: http://10.20.0.1:8118
+ no_proxy: localhost,127.0.0.1,10.20.*,192.168.*
+
+
+
+Test_Description:
+ Test_category: "Compute"
+ Benchmark: "dhrystone"
+ Overview: >
+ ''' This test will run the dhrystone benchmark in parallel on machine_1 and machine_2.\n
+ if you wish to add a virtual machine add the following information under the Virtual_Machine tag
+
+ virtualmachine_1:
+ availability_zone:
+ public_network:
+ OS_image:
+ flavor:
+ role: '''
diff --git a/tests/test_case/bm_without_proxy.yaml b/tests/test_case/bm_without_proxy.yaml new file mode 100644 index 00000000..a9ae3b71 --- /dev/null +++ b/tests/test_case/bm_without_proxy.yaml @@ -0,0 +1,33 @@ +
+Scenario:
+ benchmark: dhrystone
+ host: machine_1, machine_2
+ server:
+
+Context:
+ Host_Machines:
+ machine_1:
+ ip: 10.20.0.28
+ pw:
+ role: host
+ machine_2:
+ ip: 10.20.0.29
+ pw:
+ role: host
+
+ Virtual_Machines:
+
+
+Test_Description:
+ Test_category: "Compute"
+ Benchmark: "dhrystone"
+ Overview: >
+ ''' This test will run the dhrystone benchmark in parallel on machine_1 and machine_2.\n
+ if you wish to add a virtual machine add the following information under the Virtual_Machine tag
+
+ virtualmachine_1:
+ availability_zone:
+ public_network:
+ OS_image:
+ flavor:
+ role: '''
diff --git a/tests/test_case/vm.yaml b/tests/test_case/vm.yaml new file mode 100644 index 00000000..4c8453ca --- /dev/null +++ b/tests/test_case/vm.yaml @@ -0,0 +1,48 @@ +Scenario:
+ benchmark: iperf
+ topology: Client and Server on ONE compute
+ server : virtualmachine_1
+ client: virtualmachine_2
+ description: 'Leave the bandwidth as 0 to throttle maximum traffic'
+ benchmark_details:
+ duration: 20
+ protocol: tcp
+ bandwidthGbps: 0
+
+Context:
+ Host_Machines:
+
+ Virtual_Machines:
+ virtualmachine_1:
+ availability_zone: compute1
+ OS_image: QTIP_CentOS
+ public_network: 'admin-floating_net'
+ role: 1-server
+ flavor: m1.large
+
+ virtualmachine_2:
+ availability_zone: compute1
+ OS_image: QTIP_CentOS
+ public_network: 'admin-floating_net'
+ role: 2-host
+ flavor: m1.large
+
+ Proxy_Environment:
+ http_proxy: http://10.20.0.1:8118
+ https_proxy: http://10.20.0.1:8118
+ no_proxy: localhost,127.0.0.1,10.20.*,192.168.*
+
+Test_Description:
+ Test_category: "network"
+ Benchmark: "iperf"
+ Overview: >
+ '''This test will run the IPERF benchmark on virtualmachine_1 and virtualmachine_2. On the\n
+ same compute node
+ if you wish to add a host machine add the following information under the Host_Machine tag
+
+ machine_1:
+ ip:
+ pw:
+ role:
+ '''
+
diff --git a/tests/test_case/vm_error.yaml b/tests/test_case/vm_error.yaml new file mode 100644 index 00000000..f13d3a00 --- /dev/null +++ b/tests/test_case/vm_error.yaml @@ -0,0 +1,42 @@ +Scenario:
+ topology: Client and Server on ONE compute
+ server : virtualmachine_1
+ client: virtualmachine_2
+ description: 'Leave the bandwidth as 0 to throttle maximum traffic'
+ benchmark_details:
+ duration: 20
+ protocol: tcp
+ bandwidthGbps: 0
+
+Context:
+ Host_Machines:
+
+ Virtual_Machines:
+ virtualmachine_1:
+ availability_zone: compute1
+ OS_image: QTIP_CentOS
+ public_network: 'admin-floating_net'
+ role: 1-server
+ flavor: m1.large
+
+ virtualmachine_2:
+ availability_zone: compute1
+ OS_image: QTIP_CentOS
+ public_network: 'admin-floating_net'
+ role: 2-host
+ flavor: m1.large
+
+Test_Description:
+ Test_category: "network"
+ Benchmark: "iperf"
+ Overview: >
+ '''This test will run the IPERF benchmark on virtualmachine_1 and virtualmachine_2. On the\n
+ same compute node
+ if you wish to add a host machine add the following information under the Host_Machine tag
+
+ machine_1:
+ ip:
+ pw:
+ role:
+ '''
+
diff --git a/tests/yaml_schema_test.py b/tests/yaml_schema_test.py new file mode 100644 index 00000000..a975dca6 --- /dev/null +++ b/tests/yaml_schema_test.py @@ -0,0 +1,16 @@ +import os +import os.path +from pykwalify.core import Core + + +class TestClass: + def test_schema_success(self): + for root, dirs, files in os.walk("test_cases"): + for name in files: + print root + "/" + name + if "_bm" in name: + schema = "tests/schema/test_bm_schema.yaml" + if "_vm" in name: + schema = "tests/schema/test_vm_schema.yaml" + c = Core(source_file=root + "/" + name, schema_files=[schema]) + c.validate(raise_exception=True) @@ -8,24 +8,25 @@ envlist = py27 skipsdist = True [testenv] -changedir=tests -deps = +usedevelop = True +install_command = pip install -U {opts} {packages} +deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands= py.test \ - --basetemp={envtmpdir} \ # py.test tempdir setting - {posargs} # substitute with tox' positional arguments + --basetemp={envtmpdir} \ + {posargs} tests [testenv:pep8] deps = flake8 -commands = flake8 {toxinidir} +commands = flake8 {toxinidir} [flake8] # H803 skipped on purpose per list discussion. # E123, E125 skipped as they are invalid PEP-8. show-source = True -ignore = E123,E125,H803 +ignore = E123,E125,H803,E501 builtins = _ exclude=.venv,.git,.tox,dist,doc,build |