author     Nauman_Ahad <Nauman_Ahad@dell.com>    2016-01-05 21:01:17 +0500
committer  Nauman_Ahad <Nauman_Ahad@dell.com>    2016-01-05 21:01:17 +0500
commit     57922dcb8e6497f02fdee9306494e932e25dcace (patch)
tree       03243a0673de88e39cc682d4d64e54be2ad1c898 /data
parent     8812f4746015c669dc8dcab23069f5244ff8acb6 (diff)
QTIP Index calculation along with a script to run qtip from docker
Calculation of Result Indices for:

1. Compute suite
2. Storage suite
3. Network suite

A script to run qtip from inside the docker container (for Jenkins jobs).

Resolved bugs with ssl_trasform.py, sys info collection on Ubuntu, and the
system information collection script.

Change-Id: Ic5abb3dbd0012cd2257b588b1b3067a6677b1f5d
Signed-off-by: Nauman_Ahad <Nauman_Ahad@dell.com>
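For context on the Result Index calculation introduced below: each benchmark index is the averaged measured value divided by its baseline in data/ref_results/reference.json, and a suite index is the plain mean of its benchmark indices. A minimal sketch of that arithmetic, using reference values from this patch but hypothetical measured samples and illustrative names (not code from the patch):

# Sketch of the index arithmetic; the sample numbers below are made up.
reference = {"dhrystone": 3146.66, "whetstone": 859.1, "dpi": 8.12}

# Hypothetical per-node results, as collected under results/<benchmark>/
measured = {
    "dhrystone": [3200.0, 3100.0],
    "whetstone": [870.0, 850.0],
    "dpi": [8.3, 8.0],
}

def benchmark_index(samples, ref_value):
    # Average the measured samples, then normalize against the reference.
    return (sum(samples) / len(samples)) / ref_value

indices = {name: benchmark_index(vals, reference[name])
           for name, vals in measured.items()}

# A suite index is the mean of its benchmark indices
# (compute_suite.py does the same over DPI, Dhrystone, Whetstone and SSL).
suite_index = sum(indices.values()) / len(indices)
print(indices)
print("compute suite index: %.3f" % suite_index)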
Diffstat (limited to 'data')
-rwxr-xr-x  data/fetch_os_creds.sh                            188
-rwxr-xr-x  data/get_env_info.sh                                1
-rwxr-xr-x  data/qtip_creds.sh                                  4
-rw-r--r--  data/ref_results/.reference.json.swp              bin 0 -> 12288 bytes
-rw-r--r--  data/ref_results/compute_benchmarks_indices.py    142
-rw-r--r--  data/ref_results/compute_suite.py                  22
-rw-r--r--  data/ref_results/generator_ref_json.py             36
-rw-r--r--  data/ref_results/network_benchmarks_indices.py     28
-rw-r--r--  data/ref_results/network_suite.py                  22
-rw-r--r--  data/ref_results/reference.json                    35
-rw-r--r--  data/ref_results/result_accum.py                   31
-rw-r--r--  data/ref_results/storage_benchmarks_indices.py     35
-rw-r--r--  data/ref_results/storage_suite.py                  22
13 files changed, 564 insertions, 2 deletions
diff --git a/data/fetch_os_creds.sh b/data/fetch_os_creds.sh
new file mode 100755
index 00000000..3b493e14
--- /dev/null
+++ b/data/fetch_os_creds.sh
@@ -0,0 +1,188 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# jose.lausuch@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+usage() {
+    echo "usage: $0 -d <destination> -i <installer_type> -a <installer_ip>" >&2
+}
+
+info () {
+    logger -s -t "fetch_os_creds.info" "$*"
+}
+
+
+error () {
+    logger -s -t "fetch_os_creds.error" "$*"
+    exit 1
+}
+
+
+verify_connectivity() {
+    local ip=$1
+    info "Verifying connectivity to $ip..."
+    for i in $(seq 0 10); do
+        if ping -c 1 -W 1 $ip > /dev/null; then
+            info "$ip is reachable!"
+            return 0
+        fi
+        sleep 1
+    done
+    error "Can not talk to $ip."
+}
+
+
+
+#Get options
+while getopts ":d:i:a:h:" optchar; do
+    case "${optchar}" in
+        d) dest_path=${OPTARG} ;;
+        i) installer_type=${OPTARG} ;;
+        a) installer_ip=${OPTARG} ;;
+        *) echo "Non-option argument: '-${OPTARG}'" >&2
+           usage
+           exit 2
+           ;;
+    esac
+done
+
+# set vars from env if not provided by user as options
+dest_path=${dest_path:-$HOME/opnfv-openrc.sh}
+installer_type=${installer_type:-$INSTALLER_TYPE}
+installer_ip=${installer_ip:-$INSTALLER_IP}
+
+if [ -z $dest_path ] || [ -z $installer_type ] || [ -z $installer_ip ]; then
+    usage
+    exit 2
+fi
+
+# Checking if destination path is valid
+if [ -d $dest_path ]; then
+    error "Please provide the full destination path for the credentials file including the filename"
+else
+    # Check if we can create the file (e.g. path is correct)
+    touch $dest_path || error "Cannot create the file specified. Check that the path is correct and run the script again."
+fi
+
+
+ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+
+# Start fetching the files
+if [ "$installer_type" == "fuel" ]; then
+    #ip_fuel="10.20.0.2"
+    verify_connectivity $installer_ip
+
+    # Check if controller is alive (online='True')
+    controller_ip=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
+        'fuel node | grep controller | grep True | awk "{print \$10}" | tail -1') &> /dev/null
+
+    if [ -z $controller_ip ]; then
+        error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
+    fi
+
+    info "Fetching rc file from controller $controller_ip..."
+    sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
+        "scp $ssh_options ${controller_ip}:/root/openrc ." &> /dev/null
+    sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:~/openrc $dest_path &> /dev/null
+
+    #This file contains the mgmt keystone API, we need the public one for our rc file
+    admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
+    public_ip=$(sshpass -p r00tme ssh $ssh_options root@${installer_ip} \
+        "ssh ${controller_ip} 'source openrc; keystone endpoint-list'" \
+        | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
+    #| grep http | head -1 | cut -d '|' -f 4 | sed 's/v1\/.*/v1\//' | sed 's/ //g') &> /dev/null
+    #NOTE: this is super ugly sed 's/v1\/.*/v1\//'OS_AUTH_URL
+    # but sometimes the output of endpoint-list is like this: http://172.30.9.70:8004/v1/%(tenant_id)s
+
+elif [ "$installer_type" == "apex" ]; then
+    verify_connectivity $installer_ip
+
+    # The credentials file is located in the Instack VM (192.0.2.1)
+    # NOTE: This might change for bare metal deployments
+    info "Fetching rc file from Instack VM $installer_ip..."
+    if [ -f /root/.ssh/id_rsa ]; then
+        chmod 600 /root/.ssh/id_rsa
+    fi
+    sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc $dest_path
+
+elif [ "$installer_type" == "compass" ]; then
+    verify_connectivity $installer_ip
+    controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
+        'mysql -ucompass -pcompass -Dcompass -e"select * from cluster;"' \
+        | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"host1\"/) {print $(i+1);break;}}' \
+        | grep -oP "\d+.\d+.\d+.\d+")
+
+    if [ -z $controller_ip ]; then
+        error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
+    fi
+
+    info "Fetching rc file from controller $controller_ip..."
+    sshpass -p root ssh 2>/dev/null $ssh_options root@${installer_ip} \
+        "scp $ssh_options ${controller_ip}:/opt/admin-openrc.sh ." &> /dev/null
+    sshpass -p root scp 2>/dev/null $ssh_options root@${installer_ip}:~/admin-openrc.sh $dest_path &> /dev/null
+    echo 'export OS_REGION_NAME=regionOne' >> $dest_path
+
+    info "This file contains the mgmt keystone API, we need the public one for our rc file"
+    admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
+    info "admin_ip: $admin_ip"
+    public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \
+        "ssh ${controller_ip} 'source /opt/admin-openrc.sh; keystone endpoint-list'" \
+        | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1)
+    info "public_ip: $public_ip"
+
+
+elif [ "$installer_type" == "joid" ]; then
+    # do nothing...for the moment
+    # we can either do a scp from the jumphost or use the -v option to transmit the param to the docker file
+    echo "Do nothing, creds will be provided through volume option at docker creation for joid"
+
+elif [ "$installer_type" == "foreman" ]; then
+    #ip_foreman="172.30.10.73"
+    controller="oscontroller1.opnfv.com"
+    verify_connectivity $installer_ip
+
+    # Check if controller is alive (here is more difficult to get the ip from a command like "fuel node")
+    sshpass -p vagrant ssh $ssh_options root@${installer_ip} \
+        "sshpass -p Op3nStack ssh $ssh_options root@${controller} 'ls'" &> /dev/null
+    if [ $? -ne 0 ]; then
+        error "The controller ${controller} is not up. Please check that the POD is correctly deployed."
+    fi
+
+    info "Fetching openrc from a Foreman Controller '${controller}'..."
+    sshpass -p vagrant ssh $ssh_options root@${installer_ip} \
+        "sshpass -p Op3nStack scp $ssh_options root@${controller}:~/keystonerc_admin ." &> /dev/null
+    sshpass -p vagrant scp $ssh_options root@${installer_ip}:~/keystonerc_admin $dest_path &> /dev/null
+
+    #This file contains the mgmt keystone API, we need the public one for our rc file
+    admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
+    public_ip=$(sshpass -p vagrant ssh $ssh_options root@${installer_ip} \
+        "sshpass -p Op3nStack ssh $ssh_options root@${controller} \
+        'source keystonerc_admin;keystone endpoint-list'" \
+        | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
+
+else
+    error "Installer $installer_type is not supported by this script"
+fi
+
+
+if [ ! -f $dest_path ]; then
+    error "There has been an error retrieving the credentials"
+fi
+
+if [ "$public_ip" != "" ]; then
+    info "Exchanging keystone public IP in rc file to $public_ip"
+    sed -i "/OS_AUTH_URL/c\export OS_AUTH_URL=\'$public_ip'" $dest_path
+fi
+
+
+
+echo "-------- Credentials: --------"
+cat $dest_path
+
+exit 0
diff --git a/data/get_env_info.sh b/data/get_env_info.sh
index 98ca68a6..98ad6dd6 100755
--- a/data/get_env_info.sh
+++ b/data/get_env_info.sh
@@ -19,3 +19,4 @@ while getopts ":n:i:" optchar; do
            usage ;;
    esac
done
+./fetch_os_creds.sh -d opnfv-creds.sh
diff --git a/data/qtip_creds.sh b/data/qtip_creds.sh
index e2619d64..085e3a60 100755
--- a/data/qtip_creds.sh
+++ b/data/qtip_creds.sh
@@ -16,8 +16,8 @@ case "$INSTALLER_TYPE" in
    *)
        echo "Unkown installer $INSTALLER_TYPE specified";;
esac
-
+echo $PWD
sshoptions="-o StrictHostKeyChecking=no"
-sshpass -p $PSWD scp $sshoptions QtipKey.pub root@$INSTALLER_IP:/root
+sshpass -p $PSWD scp $sshoptions ./data/QtipKey.pub root@$INSTALLER_IP:/root
sshpass -p $PSWD ssh $sshoptions root@$INSTALLER_IP "ssh-copy-id -i /root/QtipKey.pub root@$DEST_IP && rm -rf /root/QtipKey.pub"
diff --git a/data/ref_results/.reference.json.swp b/data/ref_results/.reference.json.swp
new file mode 100644
index 00000000..45891cdb
--- /dev/null
+++ b/data/ref_results/.reference.json.swp
Binary files differ
diff --git a/data/ref_results/compute_benchmarks_indices.py b/data/ref_results/compute_benchmarks_indices.py
new file mode 100644
index 00000000..305b6b02
--- /dev/null
+++ b/data/ref_results/compute_benchmarks_indices.py
@@ -0,0 +1,142 @@
+import os
+import json
+from result_accum import result_concat as concat
+
+
+def dpi_index ():
+    total=0
+    dpi_dict=concat('../../results/dpi/')
+    for k,v in dpi_dict.iteritems():
+        for i,j in dpi_dict[k].iteritems():
+            if i=="3 DPI result":
+                raw_num=int(dpi_dict[k][i]["DPI_benchmark(Gb/s)"])
+                total=total+raw_num
+
+    l=len(dpi_dict)
+    with open ('./reference.json') as reference_file:
+        reference_djson=json.load(reference_file)
+        dpi_ref=reference_djson['compute']['dpi']
+    dpi_index= float((total/l)/dpi_ref)
+    dpi_dict_i={};
+    dpi_dict_i['1. Index']=dpi_index
+    dpi_dict_i['2. Results']=dpi_dict
+    return dpi_dict_i
+
+def dwstone_index (file_dir,benchmark):
+    total=0
+    dwstone_dict=concat('../../results/'+file_dir+'/')
+    for k,v in dwstone_dict.iteritems():
+        for i,j in dwstone_dict[k].iteritems():
+            if i=="3 "+benchmark+" result":
+                for a,b in dwstone_dict[k][i].iteritems():
+                    if a=="2.Single CPU test":
+                        raw_num=float(dwstone_dict[k][i][a]["2.Index score"])
+                        total=total+raw_num
+
+    l= len(dwstone_dict)
+
+    with open ('./reference.json') as reference_file:
+        reference_djson=json.load(reference_file)
+        dwstone_ref=reference_djson['compute'][file_dir]
+
+    dwstone_index=float((total/l)/dwstone_ref)
+    dwstone_dict_i={};
+    dwstone_dict_i['1. Index']=dwstone_index
+    dwstone_dict_i['2. Results']=dwstone_dict
+    return dwstone_dict_i
+
+def ssl_index ():
+    total_512rsa=0
+    total_1024rsa=0
+    total_2048rsa=0
+    total_4096rsa=0
+
+    total_16aes=0
+    total_64aes=0
+    total_256aes=0
+    total_1024aes=0
+    total_8192aes=0
+
+    ssl_dict=concat('../../results/ssl/')
+    for k,v in ssl_dict.iteritems():
+        for i,j in ssl_dict[k].iteritems():
+            if i=="3 SSL result":
+                for a,b in ssl_dict[k][i].iteritems():
+                    if a=="2. RSA signatures":
+                        raw_num_512rsa=float(ssl_dict[k][i][a]["1. 512 bits (sign/s)"])
+                        raw_num_1024rsa=float(ssl_dict[k][i][a]["2. 1024 bits (sign/s)"])
+                        raw_num_2048rsa=float(ssl_dict[k][i][a]["3. 2048 bits (sign/s)"])
+                        raw_num_4096rsa=float(ssl_dict[k][i][a]["4. 4096 bits (sign/s)"])
+                        total_512rsa=total_512rsa+raw_num_512rsa
+                        total_1024rsa=total_1024rsa+raw_num_1024rsa
+                        total_2048rsa=total_2048rsa+raw_num_2048rsa
+                        total_4096rsa=total_4096rsa+raw_num_4096rsa
+                    elif a=="3. AES-128-cbc throughput":
+                        raw_num_16aes=float(ssl_dict[k][i][a]["1. 16 Bytes block (B/sec)"][:-1])*1000
+                        raw_num_64aes=float(ssl_dict[k][i][a]["2. 64 Bytes block (B/sec)"][:-1])*1000
+                        raw_num_256aes=float(ssl_dict[k][i][a]["3. 256 Bytes block (B/sec)"][:-1])*1000
+                        raw_num_1024aes=float(ssl_dict[k][i][a]["4. 1024 Bytes block (B/sec)"][:-1])*1000
+                        raw_num_8192aes=float(ssl_dict[k][i][a]["5. 8192 Bytes block (B/sec)"][:-1])*1000
+                        total_16aes=raw_num_16aes+total_16aes
+                        total_64aes=raw_num_64aes+total_64aes
+                        total_256aes=raw_num_256aes+total_256aes
+                        total_1024aes=raw_num_1024aes+total_1024aes
+                        total_8192aes=raw_num_8192aes+total_8192aes
+
+    with open ('./reference.json') as reference_file:
+        reference_djson=json.load(reference_file)
+        ssl_ref512rsa=reference_djson['compute']['ssl']['RSA']['512b']
+        ssl_ref1024rsa=reference_djson['compute']['ssl']['RSA']['1024b']
+        ssl_ref2048rsa=reference_djson['compute']['ssl']['RSA']['2048b']
+        ssl_ref4096rsa=reference_djson['compute']['ssl']['RSA']['4096b']
+
+
+        ssl_ref16aes=reference_djson['compute']['ssl']['AES']['16B']
+        ssl_ref64aes=reference_djson['compute']['ssl']['AES']['64B']
+        ssl_ref256aes=reference_djson['compute']['ssl']['AES']['256B']
+        ssl_ref1024aes=reference_djson['compute']['ssl']['AES']['1024B']
+        ssl_ref8192aes=reference_djson['compute']['ssl']['AES']['8192B']
+
+
+    l=len(ssl_dict)
+    index_512rsa=float((total_512rsa/l)/ssl_ref512rsa)
+    index_1024rsa= float((total_1024rsa/l)/ssl_ref1024rsa)
+    index_2048= float((total_2048rsa/l)/ssl_ref2048rsa)
+    index_4096= float((total_4096rsa/l)/ssl_ref4096rsa)
+
+    index_16aes=float((total_16aes/l)/ssl_ref16aes)
+    index_64aes=float((total_64aes/l)/ssl_ref64aes)
+    index_256aes=float((total_256aes/l)/ssl_ref256aes)
+    index_1024aes=float((total_1024aes/l)/ssl_ref1024aes)
+    index_8192aes=float((total_8192aes/l)/ssl_ref8192aes)
+
+    index_sum= (index_512rsa+index_1024rsa+index_2048+index_4096+index_16aes+index_64aes+index_256aes+index_1024aes+index_8192aes)
+    ssl_index=float(index_sum/9)
+    ssl_dict_i={};
+    ssl_dict_i['1. Index']=ssl_index
+    ssl_dict_i['2. Results']=ssl_dict
+    return ssl_dict_i
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/data/ref_results/compute_suite.py b/data/ref_results/compute_suite.py
new file mode 100644
index 00000000..a57a4b04
--- /dev/null
+++ b/data/ref_results/compute_suite.py
@@ -0,0 +1,22 @@
+import json
+import compute_benchmarks_indices as benchmark_indices
+
+
+compute_dict={};
+compute_dict['DPI']=benchmark_indices.dpi_index()
+compute_dict['Dhrystone']=benchmark_indices.dwstone_index('dhrystone','Dhrystone')
+compute_dict['Whetstone']=benchmark_indices.dwstone_index('whetstone','Whetstone')
+compute_dict['SSL']=benchmark_indices.ssl_index()
+
+compute_bench_list=['DPI','Dhrystone','Whetstone','SSL']
+temp=0
+for benchmark in compute_bench_list:
+    temp=temp+float(compute_dict[benchmark]['1. Index'])
+compute_suite_index=temp/len(compute_bench_list)
+
+compute_dict_f={};
+compute_dict_f['1. Compute Index']=compute_suite_index
+compute_dict_f['2. Compute suite results']=compute_dict
+with open('../../results/compute_result.json', 'w+') as result_json:
+    json.dump(compute_dict_f, result_json, indent=4, sort_keys=True)
+
diff --git a/data/ref_results/generator_ref_json.py b/data/ref_results/generator_ref_json.py
new file mode 100644
index 00000000..986ac61c
--- /dev/null
+++ b/data/ref_results/generator_ref_json.py
@@ -0,0 +1,36 @@
+import os
+import json
+
+dict_ref={};
+dict_ref['compute']={};
+dict_ref['compute']['dpi']=8.12
+dict_ref['compute']['whetstone']=859.1
+dict_ref['compute']['dhrystone']=3146.66
+dict_ref['compute']['ssl']={};
+dict_ref['compute']['ssl']['RSA']={};
+dict_ref['compute']['ssl']['AES']={};
+dict_ref['compute']['ssl']['RSA']['512b']=22148.9
+dict_ref['compute']['ssl']['RSA']['1024b']=7931.44
+dict_ref['compute']['ssl']['RSA']['2048b']=1544.3
+dict_ref['compute']['ssl']['RSA']['4096b']=161.92
+
+dict_ref['compute']['ssl']['AES']['16B']=735490250
+dict_ref['compute']['ssl']['AES']['64B']=788429210
+dict_ref['compute']['ssl']['AES']['256B']=803323650
+dict_ref['compute']['ssl']['AES']['1024B']=808861020
+dict_ref['compute']['ssl']['AES']['8192B']=807701160
+
+
+dict_ref['storage']={};
+dict_ref['storage']['read']={};
+dict_ref['storage']['write']={};
+dict_ref['storage']['read']['IOPS']= 6995
+dict_ref['storage']['write']['IOPS']= 6990
+
+dict_ref['network']={};
+dict_ref['network']['iperf']={};
+dict_ref['network']['iperf']['throughput received(b/s)']=9973180000.0
+
+with open('reference.json', 'w+') as result_json:
+    json.dump(dict_ref, result_json, indent=4, sort_keys=True)
+
diff --git a/data/ref_results/network_benchmarks_indices.py b/data/ref_results/network_benchmarks_indices.py
new file mode 100644
index 00000000..f841a65f
--- /dev/null
+++ b/data/ref_results/network_benchmarks_indices.py
@@ -0,0 +1,28 @@
+import os
+import json
+from result_accum import result_concat as concat
+
+def iperf_index ():
+    total_r=0
+    iperf_dict=concat('../../results/iperf/')
+    for k,v in iperf_dict.iteritems():
+        for i,j in iperf_dict[k].iteritems():
+            if i=="3 IPERF result":
+                for a,b in iperf_dict[k][i].iteritems():
+                    if a=="2. Bandwidth":
+                        raw_num=float(iperf_dict[k][i][a]['2. throughput Received (b/s)'])
+                        total_r=total_r+raw_num
+
+    l= len(iperf_dict)
+
+    with open ('./reference.json') as reference_file:
+        reference_djson=json.load(reference_file)
+        iperf_ref_r=reference_djson['network']['iperf']['throughput received(b/s)']
+
+
+    iperf_index_r=float((total_r/l)/iperf_ref_r)
+    iperf_dict_i={};
+    iperf_dict_i['1. Index']=iperf_index_r
+    iperf_dict_i['2. Results']=iperf_dict
+    return iperf_dict_i
+
diff --git a/data/ref_results/network_suite.py b/data/ref_results/network_suite.py
new file mode 100644
index 00000000..df80b8b5
--- /dev/null
+++ b/data/ref_results/network_suite.py
@@ -0,0 +1,22 @@
+import json
+import network_benchmarks_indices as benchmark_indices
+
+
+network_dict={};
+network_dict['IPERF']=benchmark_indices.iperf_index()
+
+
+
+
+network_bench_list=['IPERF']
+temp=0
+for benchmark in network_bench_list:
+    temp=temp+float(network_dict[benchmark]['1. Index'])
+network_suite_index=temp/len(network_bench_list)
+
+network_dict_f={};
+network_dict_f['1. Network Index']=network_suite_index
+network_dict_f['2. Network suite results']=network_dict
+with open('../../results/network_result.json', 'w+') as result_json:
+    json.dump(network_dict_f, result_json, indent=4, sort_keys=True)
+
diff --git a/data/ref_results/reference.json b/data/ref_results/reference.json
new file mode 100644
index 00000000..7ded5532
--- /dev/null
+++ b/data/ref_results/reference.json
@@ -0,0 +1,35 @@
+{
+    "compute": {
+        "dhrystone": 3146.66,
+        "dpi": 8.12,
+        "ssl": {
+            "AES": {
+                "1024B": 808861020,
+                "16B": 735490250,
+                "256B": 803323650,
+                "64B": 788429210,
+                "8192B": 807701160
+            },
+            "RSA": {
+                "1024b": 7931.44,
+                "2048b": 1544.3,
+                "4096b": 161.92,
+                "512b": 22148.9
+            }
+        },
+        "whetstone": 859.1
+    },
+    "network": {
+        "iperf": {
+            "throughput received(b/s)": 9973180000.0
+        }
+    },
+    "storage": {
+        "read": {
+            "IOPS": 6995
+        },
+        "write": {
+            "IOPS": 6990
+        }
+    }
+}
\ No newline at end of file
diff --git a/data/ref_results/result_accum.py b/data/ref_results/result_accum.py
new file mode 100644
index 00000000..4c600048
--- /dev/null
+++ b/data/ref_results/result_accum.py
@@ -0,0 +1,31 @@
+import os
+import json
+
+def result_concat(targ_dir):
+    list_vm=[];
+    list_bm=[];
+    diction={};
+    for file in os.listdir(targ_dir):
+        if file.endswith(".json"):
+            if file.startswith("instance"):
+                print str(file)
+                list_vm.append(file)
+                #echo "I am here"
+            else:
+                list_bm.append(file)
+    l=len(list_bm)
+    k=len(list_vm)
+
+    for x in range (0,l):
+        file_t=list_bm[x]
+        with open (targ_dir+file_t) as result_file:
+            result_djson=json.load(result_file)
+            diction['Baremetal'+str(int(x+1))]=result_djson
+
+    for x in range (0,k):
+        file_t=list_vm[x]
+        with open (targ_dir+file_t) as result_file:
+            result_djson=json.load(result_file)
+            diction['Virtual Machine '+str(x+1)]=result_djson
+    return diction
+
diff --git a/data/ref_results/storage_benchmarks_indices.py b/data/ref_results/storage_benchmarks_indices.py
new file mode 100644
index 00000000..6114ad6b
--- /dev/null
+++ b/data/ref_results/storage_benchmarks_indices.py
@@ -0,0 +1,35 @@
+import os
+import json
+from result_accum import result_concat as concat
+
+def fio_index ():
+    total_r=0
+    total_w=0
+    fio_dict=concat('../../results/fio/')
+    for k,v in fio_dict.iteritems():
+        for i,j in fio_dict[k].iteritems():
+            if i=="3 FIO result":
+                for a,b in fio_dict[k][i].iteritems():
+                    for c,d in fio_dict[k][i][a].iteritems():
+                        if c=='read':
+                            raw_num=float(fio_dict[k][i][a][c]["IO/sec"])
+                            total_r=total_r+raw_num
+                        elif c=='write':
+                            raw_num=float(fio_dict[k][i][a][c]["IO/sec"])
+                            total_w=total_w+raw_num
+
+    l= len(fio_dict)
+
+    with open ('./reference.json') as reference_file:
+        reference_djson=json.load(reference_file)
+        fio_ref_r=reference_djson['storage']['read']['IOPS']
+        fio_ref_w=reference_djson['storage']['write']['IOPS']
+
+    fio_index_r=float((total_r/l)/fio_ref_r)
+    fio_index_w=float((total_w/l)/fio_ref_w)
+    fio_index=float((fio_index_r+fio_index_w)/2)
+    fio_dict_i={};
+    fio_dict_i['1. Index']=fio_index
+    fio_dict_i['2. Results']=fio_dict
+    return fio_dict_i
+
diff --git a/data/ref_results/storage_suite.py b/data/ref_results/storage_suite.py
new file mode 100644
index 00000000..fe4e940c
--- /dev/null
+++ b/data/ref_results/storage_suite.py
@@ -0,0 +1,22 @@
+import json
+import storage_benchmarks_indices as benchmark_indices
+
+
+storage_dict={};
+storage_dict['FIO']=benchmark_indices.fio_index()
+
+
+
+
+storage_bench_list=['FIO']
+temp=0
+for benchmark in storage_bench_list:
+    temp=temp+float(storage_dict[benchmark]['1. Index'])
+storage_suite_index=temp/len(storage_bench_list)
+
+storage_dict_f={};
+storage_dict_f['1. Storage Index']=storage_suite_index
+storage_dict_f['2. Storage suite results']=storage_dict
+with open('../../results/storage_result.json', 'w+') as result_json:
+    json.dump(storage_dict_f, result_json, indent=4, sort_keys=True)
+