author     zhifeng.jiang <jiang.zhifeng@zte.com.cn>    2016-07-12 22:36:43 +0800
committer  zhifeng jiang <jiang.zhifeng@zte.com.cn>    2016-07-13 01:51:02 +0000
commit     f385a6d107b3c5c479583e74e18ef3c5fa55b304 (patch)
tree       d28028bc5fd404ae560ee6cb21e0dc365ac7c550 /benchmarks
parent     01c843df1684678072988283b3789e11a34b7499 (diff)
Fix pep8 errors for python files in benchmarks,data,dashboard
JIRA: QTIP-89
Change-Id: I3465221f0bdc9a8eb7c4e26069f7367fb1add729
Signed-off-by: zhifeng.jiang <jiang.zhifeng@zte.com.cn>
Diffstat (limited to 'benchmarks')
-rw-r--r--   benchmarks/playbooks/result_transform/dpi/dpi_transform.py          3
-rwxr-xr-x   benchmarks/playbooks/result_transform/fio/fio_result_transform.py  61
-rw-r--r--   benchmarks/playbooks/result_transform/iperf/iperf_transform.py     37
-rw-r--r--   benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py   36
-rw-r--r--   benchmarks/playbooks/result_transform/ssl/ssl_transform.py          9
-rw-r--r--   benchmarks/playbooks/result_transform/ubench_transform.py           1
6 files changed, 59 insertions(+), 88 deletions(-)
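
To confirm that no PEP 8 violations remain after a cleanup like this, the touched directory can be re-checked with a style checker before pushing. The sketch below is illustrative only and not part of this change; it assumes the pycodestyle package (the renamed pep8 tool) is installed and that it is run from the repository root.

    import pycodestyle  # assumed dependency: the renamed 'pep8' checker

    # Re-check the transformed playbook scripts for remaining PEP 8 violations.
    style = pycodestyle.StyleGuide()
    report = style.check_files(['benchmarks/playbooks/result_transform'])
    print('remaining PEP 8 violations: %d' % report.total_errors)

Running pep8 (or flake8) from the command line over the same path gives an equivalent report.
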
diff --git a/benchmarks/playbooks/result_transform/dpi/dpi_transform.py b/benchmarks/playbooks/result_transform/dpi/dpi_transform.py
index b95e0e23..622030cd 100644
--- a/benchmarks/playbooks/result_transform/dpi/dpi_transform.py
+++ b/benchmarks/playbooks/result_transform/dpi/dpi_transform.py
@@ -1,5 +1,4 @@
import os
-import json
import pickle
import datetime
@@ -46,4 +45,4 @@ result = {}
result['DPI_benchmark(M pps)'] = round(dpi_result_pps, 3)
result['DPI_benchmark(Gb/s)'] = round(dpi_result_bps, 3)
with open('./result_temp', 'w+') as result_file:
- pickle.dump(result, result_file) \ No newline at end of file
+ pickle.dump(result, result_file)
diff --git a/benchmarks/playbooks/result_transform/fio/fio_result_transform.py b/benchmarks/playbooks/result_transform/fio/fio_result_transform.py
index f9410a62..9929aa18 100755
--- a/benchmarks/playbooks/result_transform/fio/fio_result_transform.py
+++ b/benchmarks/playbooks/result_transform/fio/fio_result_transform.py
@@ -4,22 +4,20 @@ import os
import datetime
with open("fio_result.json") as fio_raw:
- fio_data=json.load(fio_raw)
+ fio_data = json.load(fio_raw)
-r_iops=[];
-r_io_bytes=[];
-r_io_runtime=[];
-r_lat=[];
-w_iops=[];
-w_io_bytes=[];
-w_io_runtime=[];
-w_lat=[];
+r_iops = []
+r_io_bytes = []
+r_io_runtime = []
+r_lat = []
+w_iops = []
+w_io_bytes = []
+w_io_runtime = []
+w_lat = []
+total_jobs = len(fio_data["jobs"])
-
-total_jobs=len(fio_data["jobs"])
-
-for x in range (0,int(total_jobs)):
+for x in range(0, int(total_jobs)):
r_iops.append(fio_data["jobs"][x]["read"]["iops"])
r_io_bytes.append(fio_data["jobs"][x]["read"]["io_bytes"])
r_io_runtime.append(fio_data["jobs"][x]["read"]["runtime"])
@@ -29,29 +27,24 @@ for x in range (0,int(total_jobs)):
w_io_runtime.append(fio_data["jobs"][x]["write"]["runtime"])
w_lat.append(fio_data["jobs"][x]["write"]["lat"]["mean"])
+FIO_result_dict = {}
+for x in range(0, total_jobs):
+ FIO_result_dict['Job_' + str(x)] = {}
+ FIO_result_dict['Job_' + str(x)]['read'] = {}
+ FIO_result_dict['Job_' + str(x)]['read']['Total_IO_Bytes'] = r_io_bytes[x]
+ FIO_result_dict['Job_' + str(x)]['read']['IO/sec'] = r_iops[x]
+ FIO_result_dict['Job_' + str(x)]['read']['IO_runtime (millisec)'] = r_io_runtime[x]
+ FIO_result_dict['Job_' + str(x)]['read']['mean_IO_latenchy (microsec)'] = r_lat[x]
-FIO_result_dict={};
-
-for x in range (0,total_jobs):
- FIO_result_dict['Job_'+str(x)]={};
- FIO_result_dict['Job_'+str(x)]['read']={};
- FIO_result_dict['Job_'+str(x)]['read']['Total_IO_Bytes']=r_io_bytes[x]
- FIO_result_dict['Job_'+str(x)]['read']['IO/sec']=r_iops[x]
- FIO_result_dict['Job_'+str(x)]['read']['IO_runtime (millisec)']=r_io_runtime[x]
- FIO_result_dict['Job_'+str(x)]['read']['mean_IO_latenchy (microsec)']=r_lat[x]
-
- FIO_result_dict['Job_'+str(x)]['write']={};
- FIO_result_dict['Job_'+str(x)]['write']['Total_IO_Bytes']=w_io_bytes[x]
- FIO_result_dict['Job_'+str(x)]['write']['IO/sec']=w_iops[x]
- FIO_result_dict['Job_'+str(x)]['write']['IO_runtime (millisec)']=w_io_runtime[x]
- FIO_result_dict['Job_'+str(x)]['write']['mean_IO_latenchy (microsec)']=w_lat[x]
-
-
+ FIO_result_dict['Job_' + str(x)]['write'] = {}
+ FIO_result_dict['Job_' + str(x)]['write']['Total_IO_Bytes'] = w_io_bytes[x]
+ FIO_result_dict['Job_' + str(x)]['write']['IO/sec'] = w_iops[x]
+ FIO_result_dict['Job_' + str(x)]['write']['IO_runtime (millisec)'] = w_io_runtime[x]
+ FIO_result_dict['Job_' + str(x)]['write']['mean_IO_latenchy (microsec)'] = w_lat[x]
host_name = (os.popen("hostname").read().rstrip())
report_time = str(datetime.datetime.utcnow().isoformat())
-os.system("mv fio_result.json "+str(host_name)+"-"+report_time+".log")
-with open('./result_temp','w+')as out_fio_result:
- pickle.dump(FIO_result_dict,out_fio_result)
-
+os.system("mv fio_result.json " + str(host_name) + "-" + report_time + ".log")
+with open('./result_temp', 'w+') as out_fio_result:
+ pickle.dump(FIO_result_dict, out_fio_result)
diff --git a/benchmarks/playbooks/result_transform/iperf/iperf_transform.py b/benchmarks/playbooks/result_transform/iperf/iperf_transform.py
index 39c5956c..8df5a79a 100644
--- a/benchmarks/playbooks/result_transform/iperf/iperf_transform.py
+++ b/benchmarks/playbooks/result_transform/iperf/iperf_transform.py
@@ -1,30 +1,29 @@
import json
import datetime
import pickle
-with open('iperf_raw.json','r') as ifile:
- raw_iperf_data=json.loads(ifile.read().rstrip())
-
-
-bits_sent= raw_iperf_data['end']['sum_sent']['bits_per_second']
-bits_received= raw_iperf_data['end']['sum_received']['bits_per_second']
-total_byte_sent=raw_iperf_data['end']['sum_sent']['bytes']
-total_byte_received=raw_iperf_data['end']['sum_received']['bytes']
-cpu_host_total_percent=raw_iperf_data['end']['cpu_utilization_percent']['host_total']
-cpu_remote_total_percent=raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
+with open('iperf_raw.json', 'r') as ifile:
+ raw_iperf_data = json.loads(ifile.read().rstrip())
-result={}
+bits_sent = raw_iperf_data['end']['sum_sent']['bits_per_second']
+bits_received = raw_iperf_data['end']['sum_received']['bits_per_second']
+total_byte_sent = raw_iperf_data['end']['sum_sent']['bytes']
+total_byte_received = raw_iperf_data['end']['sum_received']['bytes']
+cpu_host_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['host_total']
+cpu_remote_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
+
+result = {}
time_stamp = str(datetime.datetime.utcnow().isoformat())
-result['1. Version']=raw_iperf_data['start']['version']
-result['2. Bandwidth']={}
+result['1. Version'] = raw_iperf_data['start']['version']
+result['2. Bandwidth'] = {}
result['2. Bandwidth']['1. throughput Sender (b/s)'] = bits_sent
result['2. Bandwidth']['2. throughput Received (b/s)'] = bits_received
-result['3. CPU']={}
-result['3. CPU']['1. CPU host total (%)']=cpu_host_total_percent
-result['3. CPU']['2. CPU remote total (%)']=cpu_remote_total_percent
+result['3. CPU'] = {}
+result['3. CPU']['1. CPU host total (%)'] = cpu_host_total_percent
+result['3. CPU']['2. CPU remote total (%)'] = cpu_remote_total_percent
-with open('iperf_raw-'+time_stamp+'.log','w+') as ofile:
+with open('iperf_raw-' + time_stamp + '.log', 'w+') as ofile:
ofile.write(json.dumps(raw_iperf_data))
-
+
with open('./result_temp', 'w+') as result_file:
- pickle.dump(result,result_file) \ No newline at end of file
+ pickle.dump(result, result_file)
diff --git a/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py b/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py
index aed68acf..c3f03dd0 100644
--- a/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py
+++ b/benchmarks/playbooks/result_transform/ramspd/ramspd_transform.py
@@ -1,9 +1,7 @@
import os
-import json
import pickle
import datetime
-
intmem_copy = os.popen("cat Intmem | grep 'BatchRun Copy' | awk '{print $4}'").read().rstrip()
intmem_scale = os.popen("cat Intmem | grep 'BatchRun Scale' | awk '{print $4}'").read().rstrip()
intmem_add = os.popen("cat Intmem | grep 'BatchRun Add' | awk '{print $4}'").read().rstrip()
@@ -22,35 +20,27 @@ floatmem_average = os.popen("cat Floatmem | grep 'BatchRun AVERAGE' | awk '{pri
print floatmem_copy
print floatmem_average
-
hostname = os.popen("hostname").read().rstrip()
time_stamp = str(datetime.datetime.utcnow().isoformat())
-
os.system("mv Intmem " + hostname + "-" + time_stamp + ".log")
os.system("cp Floatmem >> " + hostname + "-" + time_stamp + ".log")
+result = {}
-result = {};
-
-result['1. INTmem bandwidth'] = {};
-result['1. INTmem bandwidth']['1. Copy (MB/s)']=intmem_copy
-result['1. INTmem bandwidth']['2. Add (MB/s)']=intmem_add
-result['1. INTmem bandwidth']['3. Scale (MB/s)']=intmem_scale
-result['1. INTmem bandwidth']['4. Triad (MB/s)']=intmem_triad
-result['1. INTmem bandwidth']['5. Average (MB/s)']=intmem_average
-
-
-result['2. FLOATmem bandwidth'] = {};
-result['2. FLOATmem bandwidth']['1. Copy (MB/s)']=floatmem_copy
-result['2. FLOATmem bandwidth']['2. Add (MB/s)']=floatmem_add
-result['2. FLOATmem bandwidth']['3. Scale (MB/s)']=floatmem_scale
-result['2. FLOATmem bandwidth']['4. Triad (MB/s)']=floatmem_triad
-result['2. FLOATmem bandwidth']['5. Average (MB/s)']=floatmem_average
-
+result['1. INTmem bandwidth'] = {}
+result['1. INTmem bandwidth']['1. Copy (MB/s)'] = intmem_copy
+result['1. INTmem bandwidth']['2. Add (MB/s)'] = intmem_add
+result['1. INTmem bandwidth']['3. Scale (MB/s)'] = intmem_scale
+result['1. INTmem bandwidth']['4. Triad (MB/s)'] = intmem_triad
+result['1. INTmem bandwidth']['5. Average (MB/s)'] = intmem_average
+result['2. FLOATmem bandwidth'] = {}
+result['2. FLOATmem bandwidth']['1. Copy (MB/s)'] = floatmem_copy
+result['2. FLOATmem bandwidth']['2. Add (MB/s)'] = floatmem_add
+result['2. FLOATmem bandwidth']['3. Scale (MB/s)'] = floatmem_scale
+result['2. FLOATmem bandwidth']['4. Triad (MB/s)'] = floatmem_triad
+result['2. FLOATmem bandwidth']['5. Average (MB/s)'] = floatmem_average
with open('./result_temp', 'w+') as result_file:
pickle.dump(result, result_file)
-
-
diff --git a/benchmarks/playbooks/result_transform/ssl/ssl_transform.py b/benchmarks/playbooks/result_transform/ssl/ssl_transform.py
index 6e632251..029135ac 100644
--- a/benchmarks/playbooks/result_transform/ssl/ssl_transform.py
+++ b/benchmarks/playbooks/result_transform/ssl/ssl_transform.py
@@ -1,10 +1,7 @@
import os
-import json
import pickle
import datetime
-#total_cpu=os.popen("cat $HOME/tempD/nDPI/example/result.txt | tail -1").read()
-
openssl_version = os.popen("cat RSA_dump | head -1").read().rstrip()
rsa_512_sps = os.popen(
"cat RSA_dump | grep '512 bits ' | awk '{print $6}' ").read().rstrip()
@@ -23,7 +20,6 @@ rsa_4096_sps = os.popen(
rsa_4096_vps = os.popen(
"cat RSA_dump | grep '4096 bits ' | awk '{print $7}' ").read().rstrip()
-
aes_16B = os.popen(
"cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $2}' ").read().rstrip()
aes_64B = os.popen(
@@ -35,16 +31,12 @@ aes_1024B = os.popen(
aes_8192B = os.popen(
"cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $6}' ").read().rstrip()
-
hostname = os.popen("hostname").read().rstrip()
time_stamp = str(datetime.datetime.utcnow().isoformat())
-
os.system("mv RSA_dump " + hostname + "-" + time_stamp + ".log")
os.system("cat AES-128-CBC_dump >> " + hostname + "-" + time_stamp + ".log")
-
-
result = {}
result['1. Version'] = [openssl_version]
@@ -64,4 +56,3 @@ result['3. AES-128-cbc throughput']['5. 8192 Bytes block (B/sec)'] = aes_8192B
with open('./result_temp', 'w+') as result_file:
pickle.dump(result, result_file)
-
diff --git a/benchmarks/playbooks/result_transform/ubench_transform.py b/benchmarks/playbooks/result_transform/ubench_transform.py
index f15943d7..3c8ba1d8 100644
--- a/benchmarks/playbooks/result_transform/ubench_transform.py
+++ b/benchmarks/playbooks/result_transform/ubench_transform.py
@@ -1,7 +1,6 @@
import os
import json
import pickle
-import datetime
total_cpu = os.popen(
"cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $1;}' | awk 'NR==1'").read().rstrip()