56 files changed, 1676 insertions, 509 deletions
diff --git a/benchmarks/perftest/common/git_proxy_pbook.yaml b/benchmarks/perftest/common/git_proxy_pbook.yaml
new file mode 100644
index 00000000..5cb6f450
--- /dev/null
+++ b/benchmarks/perftest/common/git_proxy_pbook.yaml
@@ -0,0 +1,11 @@
+#git
+- name: set git proxy(http)
+  shell: "git config --global http.proxy {{ http_proxy }}"
+  when: http_proxy is defined
+  ignore_errors: yes
+
+- name: set git proxy(https)
+  shell: "git config --global https.proxy {{ https_proxy }}"
+  when: https_proxy is defined
+  ignore_errors: yes
+
diff --git a/benchmarks/perftest/common/sys_info_pbook.yaml b/benchmarks/perftest/common/sys_info_pbook.yaml
new file mode 100644
index 00000000..5c2d8f79
--- /dev/null
+++ b/benchmarks/perftest/common/sys_info_pbook.yaml
@@ -0,0 +1,42 @@
+ - name: Epel Release install when CentOS
+   shell: sudo yum install epel-release -y
+   when: ansible_os_family == "RedHat"
+
+ - name: Inxi install when CentOS
+   shell: sudo yum install inxi -y
+   when: ansible_os_family == "RedHat"
+
+ - name: Software Properties Common
+   shell: sudo apt-get install software-properties-common -y
+   when: ansible_os_family == "Debian"
+
+ - name: adding trusty-backport main repo
+   shell: sudo apt-add-repository "deb http://archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse"
+   when: ansible_os_family == "Debian"
+
+ - name: adding trusty main repo
+   shell: sudo apt-add-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main restricted universe multiverse"
+   when: ansible_os_family == "Debian"
+
+ - name: system info collection tool install when Ubuntu
+   shell: sudo apt-get update && apt-get install inxi -y
+   when: ansible_os_family == "Debian"
+
+ - name: Install ansible copy dependencies if remote host has selinux enabled
+   shell: sudo yum install libselinux-python -y
+   when: ansible_os_family == "RedHat"
+
+ - name: Install ansible copy dependencies if remote host has selinux enabled
+   shell: sudo apt-get install python-selinux -y
+   when: ansible_os_family == "Debian"
+
+ - name: system_info script copy
+   copy: src=../etc/info_collect.py dest={{home_dir.stdout}}/qtip_result/
+
+ - name: collecting system information for non-network test cases
+   shell: cd $HOME/qtip_result && sudo python info_collect.py c
+   when: not network
+
+ - name: collecting system information for network test cases
+   shell: cd $HOME/qtip_result && sudo python info_collect.py n
+   when: network
diff --git a/benchmarks/perftest/common/sys_proxy_pbook.yaml b/benchmarks/perftest/common/sys_proxy_pbook.yaml
new file mode 100644
index 00000000..bf4a8ccb
--- /dev/null
+++ b/benchmarks/perftest/common/sys_proxy_pbook.yaml
@@ -0,0 +1,53 @@
+#env
+- name: insert shell proxy http
+  lineinfile: dest=/etc/profile.d/proxy.sh state=present create=yes owner=root group=root mode=0644 regexp="export http_proxy={{ http_proxy }}"
+              insertafter=EOF line="export http_proxy={{ http_proxy }}"
+  when: http_proxy is defined
+  ignore_errors: yes
+
+- name: insert shell proxy https
+  lineinfile: dest=/etc/profile.d/proxy.sh state=present create=yes owner=root group=root mode=0644 regexp="export https_proxy={{ https_proxy }}"
+              insertafter=EOF line="export https_proxy={{ https_proxy }}"
+  when: https_proxy is defined
+  ignore_errors: yes
+
+- name: insert no proxy
+  lineinfile: dest=/etc/profile.d/proxy.sh state=present create=yes owner=root group=root mode=0644 regexp="{{ no_proxy }}"
+              insertafter=EOF line="export no_proxy={{ no_proxy }}"
+  when: no_proxy is defined
+  ignore_errors: yes
+
+#wget
+- name: insert wget proxy(http)
+  lineinfile: dest=/etc/wgetrc state=present regexp="http_proxy={{ http_proxy }}"
+              insertafter="^#http_proxy" line="http_proxy={{ http_proxy }}"
+  when: http_proxy is defined
+  ignore_errors: yes
+
+- name: insert wget proxy(https)
+  lineinfile: dest=/etc/wgetrc state=present regexp="https_proxy={{ https_proxy }}"
+              insertafter="^#https_proxy" line="https_proxy={{ https_proxy }}"
+  when: https_proxy is defined
+  ignore_errors: yes
+
+#yum
+- name: insert yum proxy(http)
+  lineinfile: dest=/etc/yum.conf state=present regexp="proxy={{ http_proxy }}"
+              insertafter=EOF line="proxy={{ http_proxy }}"
+  when: ansible_os_family == "RedHat" and http_proxy is defined
+  ignore_errors: yes
+
+#apt
+
+- name: insert apt proxy(http)
+  lineinfile: dest=/etc/apt/apt.conf state=present create=yes regexp="Acquire::http::Proxy \"{{ http_proxy }}\";"
+              insertafter=EOF line="Acquire::http::Proxy \"{{ http_proxy }}\";"
+  when: ansible_os_family == "Debian" and http_proxy is defined
+  ignore_errors: yes
+
+- name: insert apt proxy(https)
+  lineinfile: dest=/etc/apt/apt.conf state=present create=yes regexp="Acquire::https::Proxy \"{{ https_proxy }}\";"
+              insertafter=EOF line="Acquire::https::Proxy \"{{ https_proxy }}\";"
+  when: ansible_os_family == "Debian" and https_proxy is defined
+  ignore_errors: yes
+
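These three common playbooks are meant to be pulled into each benchmark playbook with include:. A minimal sketch of a consuming play, assuming the same relative layout and the network variable that sys_info_pbook.yaml branches on (the host group name here is hypothetical):
::

    - hosts: compute_nodes            # hypothetical inventory group
      become: yes
      tasks:
      - include: ./common/sys_proxy_pbook.yaml   # runs only when *_proxy vars are defined
      - include: ./common/sys_info_pbook.yaml
        vars:
          network: false              # true additionally collects NIC details via inxi
      - include: ./common/git_proxy_pbook.yaml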
diff --git a/benchmarks/perftest/dhrystone.yaml b/benchmarks/perftest/dhrystone.yaml
new file mode 100644
index 00000000..7899bbd7
--- /dev/null
+++ b/benchmarks/perftest/dhrystone.yaml
@@ -0,0 +1,111 @@
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: making dhrystone directory
+     file: path={{workingdir}}/{{Dest_dir}}/dhrystone state=directory
+
+   - name: making temporary dhrystone directory
+     file: path={{workingdir}}/{{Dest_dir}}/dhrystone/dhrystone_temp state=directory
+
+ - hosts: "{{role}}"
+   become: yes
+   remote_user: "{{username}}"
+
+   tasks:
+   - name: checking home directory
+     shell: echo $HOME
+     register: home_dir
+
+   - name: cleaning tempT
+     file: path=$HOME/tempT state=absent
+
+   - name: cleaning qtip_result
+     file: path=$HOME/qtip_result state=absent
+
+   - name: make directory
+     file: path=$HOME/qtip_result state=directory
+
+   - include: ./common/sys_proxy_pbook.yaml
+
+   - include: ./common/sys_info_pbook.yaml
+     vars:
+       network: false
+
+   - name: Installing UnixBench dependencies if CentOS
+     shell: yum install git gcc patch perl-Time-HiRes -y
+     when: ansible_os_family == "RedHat"
+
+   - name: Installing UnixBench dependencies if Ubuntu
+     shell: apt-get install git gcc patch perl -y
+     when: ansible_os_family == "Debian"
+
+   - include: ./common/git_proxy_pbook.yaml
+
+   - name: Clone unixbench
+     git: repo=https://github.com/kdlucas/byte-unixbench.git
+          dest=$HOME/tempT
+
+   - name: make
+     shell: sudo make --directory $HOME/tempT/UnixBench/
+
+   - name: Run dhrystone
+     shell: cd $HOME/tempT/UnixBench/ && sudo ./Run -v dhrystone
+
+   - name: collecting and transforming result script copy
+     copy: src={{workingdir}}/utils/transform/ubench_transform.py dest={{home_dir.stdout}}/qtip_result/
+
+   - name: transforming result
+     shell: cd $HOME/qtip_result/ && sudo python ubench_transform.py
+
+   - name: copying consolidated report script
+     copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result/
+
+   - name: making consolidated report
+     shell: cd $HOME/qtip_result && sudo python final_report.py Dhrystone {{fname}}
+
+   - name: making directory
+     file: path={{home_dir.stdout}}/qtip_result/log state=directory
+
+   - name: copying result to temp directory
+     shell: sudo cp -r $HOME/tempT/UnixBench/results/* $HOME/qtip_result/log/
+
+   - name: registering files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
+     register: files_to_copy
+
+   - name: copy results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/dhrystone/dhrystone_temp
+     with_items: "{{files_to_copy.stdout_lines}}"
+
+   - name: registering log files
+     shell: (cd $HOME/qtip_result/log/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
+     register: copy_log_results
+
+   - name: copying log results
+     fetch: src={{home_dir.stdout}}/qtip_result/log/{{item}} dest={{workingdir}}/{{Dest_dir}}/dhrystone/dhrystone_temp
+     with_items: "{{copy_log_results.stdout_lines}}"
+
+   - name: cleaning tempT
+     file: path=$HOME/tempT state=absent
+
+   - name: cleaning_qtip_result
+     file: path=$HOME/qtip_result state=absent
+
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: extracting_json
+     shell: ( find {{workingdir}}/{{Dest_dir}}/dhrystone/dhrystone_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/dhrystone/)
+
+   - name: making_logs_folder
+     file: path={{workingdir}}/{{Dest_dir}}/dhrystone/logs state=directory
+
+   - name: extracting_log
+     shell: ( find {{workingdir}}/{{Dest_dir}}/dhrystone/dhrystone_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/dhrystone/logs)
+
+   - name: removing dhrystone_temp
+     file: path={{workingdir}}/{{Dest_dir}}/dhrystone/dhrystone_temp state=absent
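dhrystone.yaml references workingdir, Dest_dir, role, username and fname without defining them, so they must be supplied at invocation time. A sketch of a matching extra-vars file; the variable names are taken from the template expressions above and the values are purely illustrative:
::

    workingdir: /home/opnfv/qtip    # illustrative
    Dest_dir: results               # illustrative
    role: 2-host                    # inventory group to benchmark
    username: root                  # illustrative
    fname: dhrystone_bm             # report name passed to final_report.py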
diff --git a/benchmarks/perftest/dpi.yaml b/benchmarks/perftest/dpi.yaml
new file mode 100644
index 00000000..5ce5d09b
--- /dev/null
+++ b/benchmarks/perftest/dpi.yaml
@@ -0,0 +1,126 @@
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: making dpi directory
+     file: path={{workingdir}}/{{Dest_dir}}/dpi state=directory
+
+   - name: making temporary dpi directory
+     file: path={{workingdir}}/{{Dest_dir}}/dpi/dpi_temp state=directory
+
+ - hosts: "{{role}}"
+   become: yes
+   remote_user: "{{username}}"
+
+   tasks:
+   - name: echo
+     shell: echo $USER
+
+   - name: checking home directory
+     shell: echo $HOME
+     register: home_dir
+
+   - name: cleaning
+     file: path=$HOME/tempD state=absent
+
+   - name: cleaning previous results
+     file: path=$HOME/qtip_result state=absent
+
+   - name: make qtip_result
+     file: path=$HOME/qtip_result state=directory
+
+   - include: ./common/sys_proxy_pbook.yaml
+
+   - include: ./common/sys_info_pbook.yaml
+     vars:
+       network: false
+
+   - name: Installing nDPI dependencies if CentOS
+     shell: sudo yum install git gcc patch perl-Time-HiRes autoconf automake libpcap-devel libtool -y
+     when: ansible_os_family == "RedHat"
+
+   - name: Installing nDPI dependencies if Ubuntu
+     shell: sudo apt-get install git gcc patch autoconf automake libpcap-dev libtool -y
+     when: ansible_os_family == "Debian"
+
+   - name: making nDPI temporary directory
+     file: path=$HOME/tempD state=directory
+
+   - include: ./common/git_proxy_pbook.yaml
+
+   - name: Clone nDPI
+     git: repo=https://github.com/ntop/nDPI.git
+          dest=$HOME/tempD/nDPI
+
+   - name: autogen
+     shell: cd $HOME/tempD/nDPI && sudo ./autogen.sh
+
+   - name: configure
+     shell: cd $HOME/tempD/nDPI && sudo ./configure
+
+   - name: make
+     shell: cd $HOME/tempD/nDPI && sudo make
+
+   - name: Fetching Test_pcap file
+     shell: cd $HOME/tempD/nDPI/example && wget http://build.opnfv.org/artifacts.opnfv.org/qtip/utilities/test.pcap
+
+   - name: fetch Averaging script
+     copy: src=./etc/dpi_average.sh dest={{home_dir.stdout}}/tempD/nDPI/example mode=777
+
+   - name: Run nDPI benchmark
+     shell: cd $HOME/tempD/nDPI/example && sudo ./dpi_average.sh
+
+   - name: copy result to temp directory
+     shell: sudo cp $HOME/tempD/nDPI/example/dpi_dump.txt $HOME/qtip_result
+
+   - name: fetch dpi result transform script
+     copy: src={{workingdir}}/utils/transform/dpi_transform.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: Transforming results
+     shell: cd $HOME/qtip_result && sudo python dpi_transform.py
+
+   - name: copy report formation script
+     copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: consolidating report
+     shell: cd $HOME/qtip_result && sudo python final_report.py DPI {{fname}}
+
+   - name: registering files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
+     register: files_to_copy
+
+   - name: copy results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/dpi/dpi_temp
+     with_items: "{{files_to_copy.stdout_lines}}"
+
+   - name: registering log files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
+     register: copy_log_results
+
+   - name: copying log results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/dpi/dpi_temp
+     with_items: "{{copy_log_results.stdout_lines}}"
+
+   - name: cleaning tempD
+     file: path=$HOME/tempD state=absent
+
+   - name: cleaning_qtip_result
+     file: path=$HOME/qtip_result state=absent
+
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: extracting_json
+     shell: ( find {{workingdir}}/{{Dest_dir}}/dpi/dpi_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/dpi/)
+
+   - name: making_logs_folder
+     file: path={{workingdir}}/{{Dest_dir}}/dpi/logs state=directory
+
+   - name: extracting_log
+     shell: ( find {{workingdir}}/{{Dest_dir}}/dpi/dpi_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/dpi/logs)
+
+   - name: removing dpi_temp
+     file: path={{workingdir}}/{{Dest_dir}}/dpi/dpi_temp state=absent
diff --git a/benchmarks/perftest/etc/dpi_average.sh b/benchmarks/perftest/etc/dpi_average.sh
new file mode 100644
index 00000000..405d3ff6
--- /dev/null
+++ b/benchmarks/perftest/etc/dpi_average.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+COUNTER=0
+WDIR=$PWD
+while [ $COUNTER -lt 10 ]; do
+
+  echo $WDIR
+  $( ./ndpiReader -i test.pcap >> $WDIR/dpi_dump.txt )
+  let COUNTER=COUNTER+1
+  echo "Run number: $COUNTER"
+
+done
+
+
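The pcap download in dpi.yaml above shells out to wget, which hides the transfer from Ansible's change tracking. For comparison, a sketch of the same fetch through the stock get_url module (same URL and destination as the task it would replace; a design alternative, not part of this change):
::

    - name: Fetching Test_pcap file
      get_url:
        url: http://build.opnfv.org/artifacts.opnfv.org/qtip/utilities/test.pcap
        dest: "{{ home_dir.stdout }}/tempD/nDPI/example/test.pcap"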
""").read().rstrip().lstrip() +inxi_kernel = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Kernel:).*(?=Console:)' """).read().rstrip().lstrip() +inxi_HD = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=HDD Total Size:).*(?=Info:)' """).read().rstrip().lstrip() +inxi_product = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=product:).*(?=Mobo:)' """).read().rstrip().lstrip() + +info_dict = {'hostname': inxi_host, + 'product': inxi_product, + 'os': inxi_distro, + 'kernel': inxi_kernel, + 'cpu': inxi_cpu, + 'cpu_usage': '{0}%'.format(str(round(cpu_usage, 3))), + 'memory_usage': inxi_mem, + 'disk_usage': inxi_HD} +network_flag = str(sys.argv[1]).rstrip() + +if (network_flag == 'n'): + + info_dict['network_interfaces'] = {} + tem_2 = """ cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'""" + print os.system(tem_2 + ' > Hello') + i = int(os.popen(tem_2 + " | grep -o 'Card' | wc -l ").read()) + print i + + for x in range(1, i + 1): + tem = """ cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Card-""" + str(x + 1) + """)'""" + if i == 1: + tem = """ cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'""" + inxi_card_1 = ((os.popen(tem + " | grep -o -P '(?<=Card:).*(?=Drives:)'|sed 's/ *driver:.*//'").read().rstrip().lstrip())) + print inxi_card_1 + info_dict['network_interfaces']['interface_' + str(x)] = {} + info_dict['network_interfaces']['interface_' + str(x)]['network_card'] = inxi_card_1 + inxi_card_2 = ((os.popen(tem + "| grep -o -P '(?<=Card:).*(?=Drives:)'|sed -e 's/^.*IF: //'").read())).rstrip().lstrip() + info_dict['network_interfaces']['interface_' + str(x)]['interface_info'] = inxi_card_2 + elif x < (i): + print "two" + inxi_card_1 = ((os.popen(tem + "| sed 's/ *driver:.*//'").read().rstrip().lstrip())) + info_dict['network_interfaces']['interface_' + str(x)] = {} + info_dict['network_interfaces']['interface_' + str(x)]['network_Card'] = inxi_card_1 + inxi_card_2 = ((os.popen(tem + "|sed -e 's/^.*IF: //'").read())).rstrip().lstrip() + info_dict['network_interfaces']['interface_' + str(x)]['interface_info'] = inxi_card_2 + elif x == i: + print "Three" + info_dict['network_interfaces']['interface_' + str(x)] = {} + inxi_card_1 = ((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Drives:)'| sed 's/ *driver:.*//' """).read().rstrip().lstrip())) + info_dict['network_interfaces']['interface_' + str(x)]['network_Card'] = inxi_card_1 + inxi_card_2 = ((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Drives:)'| sed -e 's/^.*IF: //' """).read().rstrip().lstrip())) + info_dict['network_interfaces']['interface_' + str(x)]['interface_info'] = inxi_card_2 + else: + print "No network cards" + os.system("bwm-ng -o plain -c 1 | grep -v '=' | grep -v 'iface' | grep -v '-' > bwm_dump") + n_interface = int(os.popen(" cat bwm_dump | grep -v 'total' | wc -l ").read().rstrip()) + interface = {} + for x in range(1, n_interface): + interface_name = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $1}' ").read().rstrip().replace(':', '') + interface[str(interface_name)] = {} + interface[str(interface_name)]['Rx (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $2}' ").read().rstrip() + interface[str(interface_name)]['Tx (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $4}' ").read().rstrip() + interface[str(interface_name)]['Total (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR== " + str(x) + "' | awk '{print $6}' ").read().rstrip() + + info_dict['interface_io'] = interface 
+ +print info_dict + +with open('./sys_info_temp', 'w+')as out_info: + pickle.dump(info_dict, out_info) + +with open('temp', 'w+') as result_json: + json.dump(info_dict, result_json, indent=4, sort_keys=True) diff --git a/benchmarks/perftest/etc/test_job b/benchmarks/perftest/etc/test_job new file mode 100644 index 00000000..6817abca --- /dev/null +++ b/benchmarks/perftest/etc/test_job @@ -0,0 +1,13 @@ +[global] + +runtime= 600 +ioengine=libaio +iodepth=2 +direct=1 +bs=4k +rw=randrw + +[job1] +size=5G + + diff --git a/benchmarks/perftest/fio.yaml b/benchmarks/perftest/fio.yaml new file mode 100644 index 00000000..94a4c80d --- /dev/null +++ b/benchmarks/perftest/fio.yaml @@ -0,0 +1,112 @@ + - hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: making fio directory + file: path={{workingdir}}/{{Dest_dir}}/fio state=directory + + - name: making temporary fio directory + file: path={{workingdir}}/{{Dest_dir}}/fio/fio_temp state=directory + + + - hosts: "{{role}}" + become: yes + remote_user: "{{username}}" + + tasks: + - name: checking home directory + shell: echo $HOME + register: home_dir + + - name: cleaning fio directory + file: path=$HOME/fio state=absent + + - name: cleaning previous results + file: path=$HOME/qtip_result state=absent + + - name: making fio temporary directory + file: path=$HOME/fio state=directory + + - name: making results temporary directory + file: path=$HOME/qtip_result state=directory + + - include: ./common/sys_proxy_pbook.yaml + + - include: ./common/sys_info_pbook.yaml + vars: + network: false + + - name: Installing fio dependencies when CentOS + shell: sudo yum install wget gcc libaio-devel -y + when: ansible_os_family == "RedHat" + + - name: Installing fio dependencies when Ubuntu + shell: sudo apt-get install wget gcc libaio-dev -y + when: ansible_os_family == "Debian" + + - name: Fetching fio + shell: cd $HOME/fio/ && wget http://freecode.com/urls/3aa21b8c106cab742bf1f20d60629e3f -O fio.tar.gz + + - name: Untar fio + shell: cd $HOME/fio/ && sudo tar -zxvf fio.tar.gz + + - name: configure + shell: cd $HOME/fio/fio-2.1.10 && sudo ./configure && sudo make + + - name: Fetching fio job + copy: src=./etc/fio_test_job dest={{home_dir.stdout}}/fio/fio-2.1.10/ + + - name: Benchmarking block storage through fio + shell: cd $HOME/fio/fio-2.1.10 && sudo ./fio --output-format=json --output=$HOME/qtip_result/fio_result.json fio_test_job + + - name: Fetching result transformation script + copy: src={{workingdir}}/utils/fio_transform.py dest={{home_dir.stdout}}/qtip_result + + - name: Transforming result + shell: cd $HOME/qtip_result && sudo python fio_transform.py + + - name: copy report formation script + copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result + + - name: consolidating report + shell: cd $HOME/qtip_result && sudo python final_report.py FIO {{fname}} + + - name: registering files + shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2 + register: files_to_copy + + - name: copy results + fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/fio/fio_temp + with_items: "{{files_to_copy.stdout_lines}}" + + - name: registering log files + shell: (cd $HOME/qtip_result/; find . 
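sys_info_pbook.yaml above first copies info_collect.py to the node and then runs it from a shell task. Ansible's script module could push and execute it in one step; the split used in the playbook keeps control of the working directory, which matters because the script writes est_1, est_2, sys_info_temp and temp relative to $PWD. A sketch of the one-step variant, with the trade-off noted in the comment:
::

    - name: collect system information (non-network case)
      script: ../etc/info_collect.py c
      # caveat: output files land in the script's working directory,
      # not necessarily in ~/qtip_result as the copy+shell version guarantees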
diff --git a/benchmarks/perftest/fio.yaml b/benchmarks/perftest/fio.yaml
new file mode 100644
index 00000000..94a4c80d
--- /dev/null
+++ b/benchmarks/perftest/fio.yaml
@@ -0,0 +1,112 @@
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: making fio directory
+     file: path={{workingdir}}/{{Dest_dir}}/fio state=directory
+
+   - name: making temporary fio directory
+     file: path={{workingdir}}/{{Dest_dir}}/fio/fio_temp state=directory
+
+
+ - hosts: "{{role}}"
+   become: yes
+   remote_user: "{{username}}"
+
+   tasks:
+   - name: checking home directory
+     shell: echo $HOME
+     register: home_dir
+
+   - name: cleaning fio directory
+     file: path=$HOME/fio state=absent
+
+   - name: cleaning previous results
+     file: path=$HOME/qtip_result state=absent
+
+   - name: making fio temporary directory
+     file: path=$HOME/fio state=directory
+
+   - name: making results temporary directory
+     file: path=$HOME/qtip_result state=directory
+
+   - include: ./common/sys_proxy_pbook.yaml
+
+   - include: ./common/sys_info_pbook.yaml
+     vars:
+       network: false
+
+   - name: Installing fio dependencies when CentOS
+     shell: sudo yum install wget gcc libaio-devel -y
+     when: ansible_os_family == "RedHat"
+
+   - name: Installing fio dependencies when Ubuntu
+     shell: sudo apt-get install wget gcc libaio-dev -y
+     when: ansible_os_family == "Debian"
+
+   - name: Fetching fio
+     shell: cd $HOME/fio/ && wget http://freecode.com/urls/3aa21b8c106cab742bf1f20d60629e3f -O fio.tar.gz
+
+   - name: Untar fio
+     shell: cd $HOME/fio/ && sudo tar -zxvf fio.tar.gz
+
+   - name: configure
+     shell: cd $HOME/fio/fio-2.1.10 && sudo ./configure && sudo make
+
+   - name: Fetching fio job
+     copy: src=./etc/fio_test_job dest={{home_dir.stdout}}/fio/fio-2.1.10/
+
+   - name: Benchmarking block storage through fio
+     shell: cd $HOME/fio/fio-2.1.10 && sudo ./fio --output-format=json --output=$HOME/qtip_result/fio_result.json fio_test_job
+
+   - name: Fetching result transformation script
+     copy: src={{workingdir}}/utils/fio_transform.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: Transforming result
+     shell: cd $HOME/qtip_result && sudo python fio_transform.py
+
+   - name: copy report formation script
+     copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: consolidating report
+     shell: cd $HOME/qtip_result && sudo python final_report.py FIO {{fname}}
+
+   - name: registering files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
+     register: files_to_copy
+
+   - name: copy results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/fio/fio_temp
+     with_items: "{{files_to_copy.stdout_lines}}"
+
+   - name: registering log files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
+     register: copy_log_results
+
+   - name: copying log results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/fio/fio_temp
+     with_items: "{{copy_log_results.stdout_lines}}"
+
+   - name: cleaning fio
+     file: path=$HOME/fio state=absent
+
+   - name: cleaning_qtip_result
+     file: path=$HOME/qtip_result state=absent
+
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: extracting_json
+     shell: ( find {{workingdir}}/{{Dest_dir}}/fio/fio_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/fio/)
+
+   - name: making_logs_folder
+     file: path={{workingdir}}/{{Dest_dir}}/fio/logs state=directory
+
+   - name: extracting_log
+     shell: ( find {{workingdir}}/{{Dest_dir}}/fio/fio_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/fio/logs)
+
+   - name: removing fio_temp
+     file: path={{workingdir}}/{{Dest_dir}}/fio/fio_temp state=absent
"{{privateip1}}" != "NONE" + + - name: Fetching result transformation script + copy: src={{workingdir}}/utils/transform/iperf_transform.py dest={{home_dir.stdout}}/qtip_result + - name: Transforming result + + shell: cd $HOME/qtip_result && sudo python iperf_transform.py + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: copy report formation script + copy: src={{workdingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: consolidating report + shell: cd $HOME/qtip_result && sudo python final_report.py IPERF {{fname}} + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: Files to Copy + shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2 + register: files_to_copy + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: copy results + fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/iperf/iperf_temp + with_items: "{{files_to_copy.stdout_lines}}" + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: registering log files + shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2 + register: copy_log_results + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: copying log results + fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/iperf/iperf_temp + with_items: "{{copy_log_results.stdout_lines}}" + when: rolename =="2-host" and "{{ip2}}" == '' + + - name: cleaning iperf directory + file: path=$HOME/iperf state=absent + + - name: cleaning previous results + file: path=$HOME/qtip_result state=absent + + - hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: Rolename + set_fact: + rolename: "{{role}}" + when: role is defined + + - name: extracting_json + shell: ( find {{workingdir}}/{{Dest_dir}}/iperf/iperf_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/iperf/) + when: rolename == "2-host" + + - name: making_logs_folder + file: path={{workingdir}}/{{Dest_dir}}/iperf/logs state=directory + + - name: extracting_log + shell: ( find {{workingdir}}/{{Dest_dir}}/iperf/iperf_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/iperf/logs) + when: rolename == "2-host" + + - name: removing iperf_raw file + file: path={{workingdir}}/{{Dest_dir}}/iperf/iperf_raw.json state=absent + when: rolename == "2-host" + + - name: removing iperf_temp + file: path={{workingdir}}/{{Dest_dir}}/iperf/iperf_temp state=absent diff --git a/benchmarks/perftest/ramspeed.yaml b/benchmarks/perftest/ramspeed.yaml new file mode 100644 index 00000000..fb624c85 --- /dev/null +++ b/benchmarks/perftest/ramspeed.yaml @@ -0,0 +1,115 @@ + - hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: making ramspeed directory + file: path={{workingdir}}/{{Dest_dir}}/ramspeed state=directory + + - name: making temporary ramspeed directory + file: path={{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp state=directory + + + - hosts: "{{role}}" + become: yes + remote_user: "{{username}}" + + tasks: + - name: checking home directory + shell: echo $HOME + register: home_dir + + - name: cleaning ramspeed directory + file: path=$HOME/ramspeed state=absent + + - name: cleaning previous results + file: path=$HOME/qtip_result state=absent + + - name: making ramspeed temporary directory + file: path=$HOME/ramspeed state=directory + + - name: making results temporary directory + file: path=$HOME/qtip_result state=directory + + - include: 
diff --git a/benchmarks/perftest/ramspeed.yaml b/benchmarks/perftest/ramspeed.yaml
new file mode 100644
index 00000000..fb624c85
--- /dev/null
+++ b/benchmarks/perftest/ramspeed.yaml
@@ -0,0 +1,115 @@
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: making ramspeed directory
+     file: path={{workingdir}}/{{Dest_dir}}/ramspeed state=directory
+
+   - name: making temporary ramspeed directory
+     file: path={{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp state=directory
+
+
+ - hosts: "{{role}}"
+   become: yes
+   remote_user: "{{username}}"
+
+   tasks:
+   - name: checking home directory
+     shell: echo $HOME
+     register: home_dir
+
+   - name: cleaning ramspeed directory
+     file: path=$HOME/ramspeed state=absent
+
+   - name: cleaning previous results
+     file: path=$HOME/qtip_result state=absent
+
+   - name: making ramspeed temporary directory
+     file: path=$HOME/ramspeed state=directory
+
+   - name: making results temporary directory
+     file: path=$HOME/qtip_result state=directory
+
+   - include: ./common/sys_proxy_pbook.yaml
+
+   - include: ./common/sys_info_pbook.yaml
+     vars:
+       network: false
+
+   - name: Installing RAM_Speed dependencies when CentOS
+     shell: sudo yum install wget gcc -y
+     when: ansible_os_family == "RedHat"
+
+   - name: Installing RAM_Speed dependencies when Ubuntu
+     shell: sudo apt-get install wget gcc -y
+     when: ansible_os_family == "Debian"
+
+   - name: make dummy file
+     shell: sudo touch $HOME/ramspeed/ramspeed.tar.gz
+
+   - name: Fetching RAM_Speed
+     shell: cd $HOME/ramspeed/ && sudo wget -O ramspeed.tar.gz https://docs.google.com/uc?id=0B92Bp5LZTM7gRFctalZLMktTNDQ
+
+   - name: Untar RAM_Speed
+     shell: cd $HOME/ramspeed/ && sudo tar -zxvf ramspeed.tar.gz
+
+   - name: configure
+     shell: cd $HOME/ramspeed/ramsmp-3.5.0 && ./build.sh
+
+   - name: Benchmarking IntMem Bandwidth
+     shell: cd $HOME/ramspeed/ramsmp-3.5.0 && ./ramsmp -b 3 -l 5 -p 1 >> $HOME/qtip_result/Intmem
+
+   - name: Benchmarking FloatMem Bandwidth
+     shell: cd $HOME/ramspeed/ramsmp-3.5.0 && ./ramsmp -b 6 -l 5 -p 1 >> $HOME/qtip_result/Floatmem
+
+   - name: Fetching result transformation script
+     copy: src={{workingdir}}/utils/transform/ramspeed_transform.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: Transforming result
+     shell: cd $HOME/qtip_result && sudo python ramspeed_transform.py
+
+   - name: copy report formation script
+     copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: consolidating report
+     shell: cd $HOME/qtip_result && sudo python final_report.py RamSpeed {{fname}}
+
+   - name: registering files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
+     register: files_to_copy
+
+   - name: copy results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp
+     with_items: "{{files_to_copy.stdout_lines}}"
+
+   - name: registering log files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
+     register: copy_log_results
+
+   - name: copying log results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp
+     with_items: "{{copy_log_results.stdout_lines}}"
+
+   - name: cleaning ramspeed directory
+     file: path=$HOME/ramspeed state=absent
+
+   - name: cleaning previous results
+     file: path=$HOME/qtip_result state=absent
+
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: extracting_json
+     shell: ( find {{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/ramspeed/)
+
+   - name: making_logs_folder
+     file: path={{workingdir}}/{{Dest_dir}}/ramspeed/logs state=directory
+
+   - name: extracting_log
+     shell: ( find {{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/ramspeed/logs)
+
+   - name: removing ramspeed_temp
+     file: path={{workingdir}}/{{Dest_dir}}/ramspeed/ramspeed_temp state=absent
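Like the other playbooks, ramspeed.yaml re-downloads and re-unpacks its workload on every run; that is safe only because the directory is wiped first. If the cleanup tasks were ever removed, a creates guard would keep the shell steps idempotent, sketched here for the untar task above (the path is inferred from the configure step):
::

    - name: Untar RAM_Speed
      shell: cd $HOME/ramspeed/ && sudo tar -zxvf ramspeed.tar.gz
      args:
        creates: $HOME/ramspeed/ramsmp-3.5.0   # skip when already unpacked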
diff --git a/benchmarks/perftest/ssl.yaml b/benchmarks/perftest/ssl.yaml
new file mode 100644
index 00000000..ef36265e
--- /dev/null
+++ b/benchmarks/perftest/ssl.yaml
@@ -0,0 +1,119 @@
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: making ssl directory
+     file: path={{workingdir}}/{{Dest_dir}}/ssl state=directory
+
+   - name: making temporary ssl directory
+     file: path={{workingdir}}/{{Dest_dir}}/ssl/ssl_temp state=directory
+
+ - hosts: "{{role}}"
+   become: yes
+   remote_user: "{{username}}"
+
+   tasks:
+   - name: checking home directory
+     shell: sudo echo $HOME
+     register: home_dir
+
+   - name: cleaning Open_SSL directory
+     file: path=$HOME/Open_SSL state=absent
+
+   - name: cleaning_qtip_result
+     file: path=$HOME/qtip_result state=absent
+
+   - name: making OpenSSL temporary directory
+     file: path=$HOME/Open_SSL state=directory
+
+   - name: making results temporary directory
+     file: path=$HOME/qtip_result state=directory
+
+   - include: ./common/sys_proxy_pbook.yaml
+
+   - include: ./common/sys_info_pbook.yaml
+     vars:
+       network: false
+
+   - name: Installing OpenSSL dependencies when CentOS
+     shell: sudo yum install git wget gcc patch perl-Time-HiRes autoconf automake libpcap-devel libtool -y
+     when: ansible_os_family == "RedHat"
+
+   - name: Installing OpenSSL dependencies when Ubuntu
+     shell: sudo apt-get install git gcc wget perl autoconf automake libpcap-dev libtool -y
+     when: ansible_os_family == "Debian"
+
+   - name: Fetching OpenSSL
+     shell: cd $HOME/Open_SSL/ && sudo wget http://artifacts.opnfv.org/qtip/utilities/openssl-1.0.2f.tar.gz
+
+   - name: Untar OpenSSL
+     shell: cd $HOME/Open_SSL/ && sudo tar -zxvf openssl-1.0.2f.tar.gz
+
+   - name: configure
+     shell: cd $HOME/Open_SSL/openssl-1.0.2f && sudo ./config
+
+   - name: make
+     shell: cd $HOME/Open_SSL/openssl-1.0.2f && sudo make
+
+   - name: make install
+     shell: cd $HOME/Open_SSL/openssl-1.0.2f && sudo make install
+
+   - name: Benchmarking RSA signatures
+     shell: cd $HOME/Open_SSL/openssl-1.0.2f/apps && sudo ./openssl speed rsa >> $HOME/qtip_result/RSA_dump
+
+   - name: Benchmarking AES-128-cbc cipher encryption throughput
+     shell: cd $HOME/Open_SSL/openssl-1.0.2f/apps && sudo ./openssl speed -evp aes-128-cbc >> $HOME/qtip_result/AES-128-CBC_dump
+
+   - name: Fetching result transformation script
+     copy: src={{workingdir}}/utils/transform/ssl_transform.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: Transforming result
+     shell: cd $HOME/qtip_result && python ssl_transform.py
+
+   - name: copy report formation script
+     copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result
+
+   - name: consolidating report
+     shell: cd $HOME/qtip_result && python final_report.py SSL {{fname}}
+
+   - name: registering files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
+     register: files_to_copy
+
+   - name: copy results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/ssl/ssl_temp
+     with_items: "{{files_to_copy.stdout_lines}}"
+
+   - name: registering log files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
+     register: copy_log_results
+
+   - name: copying log results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/ssl/ssl_temp
+     with_items: "{{copy_log_results.stdout_lines}}"
+
+   - name: cleaning Open_SSL directory
+     file: path=$HOME/Open_SSL state=absent
+
+   - name: cleaning_qtip_result
+     file: path=$HOME/qtip_result state=absent
+
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: echo
+     shell: echo $PWD
+
+   - name: extracting_json
+     shell: ( find {{workingdir}}/{{Dest_dir}}/ssl/ssl_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/ssl/)
+
+   - name: making_logs_folder
+     file: path={{workingdir}}/{{Dest_dir}}/ssl/logs state=directory
+
+   - name: extracting_log
+     shell: ( find {{workingdir}}/{{Dest_dir}}/ssl/ssl_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/ssl/logs)
+
+   - name: removing ssl_temp
+     file: path={{workingdir}}/{{Dest_dir}}/ssl/ssl_temp state=absent
diff --git a/benchmarks/perftest/whetstone.yaml b/benchmarks/perftest/whetstone.yaml
new file mode 100644
index 00000000..4dcddb99
--- /dev/null
+++ b/benchmarks/perftest/whetstone.yaml
@@ -0,0 +1,111 @@
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: making whetstone directory
+     file: path={{workingdir}}/{{Dest_dir}}/whetstone state=directory
+
+   - name: making temporary whetstone directory
+     file: path={{workingdir}}/{{Dest_dir}}/whetstone/whetstone_temp state=directory
+
+ - hosts: "{{role}}"
+   become: yes
+   remote_user: "{{username}}"
+
+   tasks:
+   - name: storing_home
+     shell: echo $HOME
+     register: home_dir
+
+   - name: cleaning tempT directory
+     file: path=$HOME/tempT state=absent
+
+   - name: cleaning qtip result directory
+     file: path=$HOME/qtip_result state=absent
+
+   - name: making qtip_result directory
+     file: path=$HOME/qtip_result state=directory
+
+   - include: ./common/sys_proxy_pbook.yaml
+
+   - include: ./common/sys_info_pbook.yaml
+     vars:
+       network: false
+
+   - name: Installing UnixBench dependencies if CentOS
+     shell: sudo yum install git gcc patch perl-Time-HiRes -y
+     when: ansible_os_family == "RedHat"
+
+   - name: Installing UnixBench dependencies if Ubuntu
+     shell: sudo apt-get install git gcc patch perl -y
+     when: ansible_os_family == "Debian"
+
+   - include: ./common/git_proxy_pbook.yaml
+
+   - name: Clone unixbench
+     git: repo=https://github.com/kdlucas/byte-unixbench.git
+          dest=$HOME/tempT
+
+   - name: make
+     shell: sudo make --directory $HOME/tempT/UnixBench/
+
+   - name: Run Whetstone
+     shell: cd $HOME/tempT/UnixBench/ && ./Run -v whetstone
+
+   - name: collecting and transforming result script copy
+     copy: src={{workingdir}}/utils/transform/ubench_transform.py dest={{home_dir.stdout}}/qtip_result/
+
+   - name: transforming result
+     shell: cd $HOME/qtip_result && sudo python ubench_transform.py
+
+   - name: copying consolidated report script
+     copy: src={{workingdir}}/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result/
+
+   - name: making consolidated report
+     shell: cd $HOME/qtip_result && sudo python final_report.py Whetstone {{fname}}
+
+   - name: making directory
+     file: path={{home_dir.stdout}}/qtip_result/log state=directory
+
+   - name: copying result to temp directory
+     shell: sudo cp -r $HOME/tempT/UnixBench/results/* $HOME/qtip_result/log
+
+   - name: registering files
+     shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
+     register: files_to_copy
+
+   - name: copy results
+     fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{workingdir}}/{{Dest_dir}}/whetstone/whetstone_temp
+     with_items: "{{files_to_copy.stdout_lines}}"
+
+   - name: registering log files
+     shell: (cd $HOME/qtip_result/log/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
+     register: copy_log_results
+
+   - name: copying log results
+     fetch: src={{home_dir.stdout}}/qtip_result/log/{{item}} dest={{workingdir}}/{{Dest_dir}}/whetstone/whetstone_temp
+     with_items: "{{copy_log_results.stdout_lines}}"
+
+   - name: cleaning tempT directory
+     file: path=$HOME/tempT state=absent
+
+   - name: cleaning qtip result directory
+     file: path=$HOME/qtip_result state=absent
+
+ - hosts: localhost
+   connection: local
+   gather_facts: no
+
+   tasks:
+   - name: extracting_json
+     shell: ( find {{workingdir}}/{{Dest_dir}}/whetstone/whetstone_temp/ -name "*.json" | xargs cp -t {{workingdir}}/{{Dest_dir}}/whetstone/)
+
+   - name: making_logs_folder
+     file: path={{workingdir}}/{{Dest_dir}}/whetstone/logs state=directory
+
+   - name: extracting_log
+     shell: ( find {{workingdir}}/{{Dest_dir}}/whetstone/whetstone_temp/ -name "*.log" | xargs cp -t {{workingdir}}/{{Dest_dir}}/whetstone/logs)
+
+   - name: removing whetstone_temp
+     file: path={{workingdir}}/{{Dest_dir}}/whetstone/whetstone_temp state=absent
diff --git a/test_list/compute b/benchmarks/suite/compute
index 3bf1b184..3bf1b184 100644
--- a/test_list/compute
+++ b/benchmarks/suite/compute
diff --git a/test_list/network b/benchmarks/suite/network
index 58ce5cb9..58ce5cb9 100644
--- a/test_list/network
+++ b/benchmarks/suite/network
diff --git a/test_list/storage b/benchmarks/suite/storage
index f3068dd5..f3068dd5 100644
--- a/test_list/storage
+++ b/benchmarks/suite/storage
diff --git a/config/QtipKey b/config/QtipKey
deleted file mode 100644
index 3f520775..00000000
--- a/config/QtipKey
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAxVpAC+Q8LTyftATCgVHIKvDkXYzHI/8CZeKlVWHlHUA0/6Eu
-qSrhzbpOwSaQi2C3x4Eszvh7/CfuVMql11yShsXwFD7aV6x0YG7q8IUScHILUFal
-m0Fx52No7IpB3llluUV+kh8ay68V9OGvMZrQ+wWw4ieh2alUnpvFwteXmb8NQtXW
-6pm2algUVixc4R3//YKLnGkj93pGFlQlDz5Q0vg+69jHNgodGewIrxCWeZk2pnn0
-vNAdWTQUTm7z/1uYz6AIeR/Wx66msmchFRmmDpP7aHXSSQEBySF7v/GEsZ1JwtBW
-07WebNiTv9wYoOfyKVuZuTbBwjqlT4x5CpIJEwIDAQABAoIBAFHcDZThJtTcwKG7
-F7LsaUrmgNMNAc08iZIZYNr5sD9h0pn2EZS55M+g5+nWRT6K77AhNKTlDQiax5EE
-PaYHEAA3Ok4rhAW50svtNiZuDCf4Jhk815R+oPCJm4wCDTBdhIRE/ys9G7BA/6qD
-slexD94Pjj9AkTHnuuHPW0hmhMuQaSg96EO6QUWCG/HeMFDGcorBlM0s+NeR46Jm
-dI8tOvt+rSa68oDtKEwhUbP5cATNdAITzo9+4We5EnYhW9/nRsaF/um2BPih6JnU
-zG9udvZwj+YARmEfxhXzeRDpi30qYil/+CUF+qdyd8eoPBvhsW6rr1TA6XYGmPDN
-SnlPLAECgYEA6RtgGqL34orxiqT3tDkA2Lb4aSq9Zntr09VAqwft4I3550XihHsa
-lqJoy2macX36f60oRDZEL3v4cH22zyjojav4MXe2fLlCiApy45xCzsGkWizxTe5D
-184jUIcRxb/sGbOulbXBdu8lmtNiyslvkAoj75bvL2MXhytbYgikhrMCgYEA2Lv1
-9Os+VXT9py/67dO/GY14NUpT1sFq2zxPYWpKxJD+j/NVZsflpPd/V5cGANZGovA0
-c8WNupJmCO1P20uldRX5dJ3EhhER2kn0yKhZuBBxmrELQZdnXGc9T8ub0xJVbo2u
-K3Km3C1Dx7Us4BwzGOO6K2kYbT1ij/vAbZWmpCECgYBdkhqStqYwbrukfrHbyyH2
-3AN9G6XpdFOFNc3+mXE4OWV+G4Rgz5WNr+XG+T4AnqQmChjmwK3ALdA9P4lZQL+Q
-1t4K5VYAXNFDEIarrPb4Tayucenu8VyUTO/KDF3q9i5M5t6Gw+3D0x1SN9YpNpCs
-zhU8wGaErA8uuA23nWaFlwKBgQDXSf6MB5GnucqtZI/R5uCRNWIPLYISdZb7p/EC
-R1912sHDpGdU7YREVkV8cFxaQH9yI0E3LyoWBo6sl28X2xDEOcvN91ncAuDFAWnS
-WMimek8e5nbT9N2LgFH7Dbn/9NpAMySrq/vsAlqt6l5lUB3Bv5SSwpatDKj3dZA0
-Ss95gQKBgQCvwxW4PSd+hxdofwGd6KBwhA404GPdcVLLmASGVYRNcVNyfhUmMCn+
-UF1WsBCOcnWjJ/pT864S9Rp1bbqzpQS8Pz1vJwhkaHEJD0a5l+KhD4llsWDjUNfI
-KfVffcmydNC3uRD59WPZEAgu+gOS8kRw7lybFTNuMS7B3dC5v9UtWg==
------END RSA PRIVATE KEY-----
diff --git a/config/QtipKey.pub b/config/QtipKey.pub
deleted file mode 100644
index 7a40f91c..00000000
--- a/config/QtipKey.pub
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFWkAL5DwtPJ+0BMKBUcgq8ORdjMcj/wJl4qVVYeUdQDT/oS6pKuHNuk7BJpCLYLfHgSzO+Hv8J+5UyqXXXJKGxfAUPtpXrHRgburwhRJwcgtQVqWbQXHnY2jsikHeWWW5RX6SHxrLrxX04a8xmtD7BbDiJ6HZqVSem8XC15eZvw1C1dbqmbZqWBRWLFzhHf/9goucaSP3ekYWVCUPPlDS+D7r2Mc2Ch0Z7AivEJZ5mTamefS80B1ZNBRObvP/W5jPoAh5H9bHrqayZyEVGaYOk/toddJJAQHJIXu/8YSxnUnC0FbTtZ5s2JO/3Big5/IpW5m5NsHCOqVPjHkKkgkT root@foreman-jump.opnfv.com
diff --git a/config/SampleHeat.yaml b/config/SampleHeat.yaml
index ae9f5667..a42cdb79 100644
--- a/config/SampleHeat.yaml
+++ b/config/SampleHeat.yaml
@@ -1,36 +1,66 @@
-heat_template_version: 2014-10-16
-parameters:
+heat_template_version: 2015-04-30
+
+description: >
+  Used to run VMs for Qtip
 
-  private_net_name:
+parameters:
+  image:
     type: string
-    default: 'private_network'
-
-  availability_zone:
+    description: Name of the image
+    default: QTIP_CentOS
+
+  external_net_name:
     type: string
-    description: The AvailZone.
-    default: compute1
-
+    description: Name of the external network which management network will connect to
+    default: admin_floating_net
+
 resources:
+  flavor:
+    type: OS::Nova::Flavor
+    properties:
+      ram: 8192
+      vcpus: 8
+      disk: 80
 
-  private_network:
-    type: OS::Neutron::Net
-  private_subnet:
+  network:
+    type: OS::Neutron::Net
+    properties:
+      name: qtip_net
+
+  subnet:
     type: OS::Neutron::Subnet
     properties:
-      network_id: { get_resource: private_network }
-      cidr: '10.10.17.0/24'
-      dns_nameservers: [ '8.8.8.8' ]
-      gateway_ip: '10.10.17.1'
-      allocation_pools: [ {"start":'10.10.17.2', "end": '10.10.17.200'} ]
-  router_1:
+      name: qtip_subnet
+      ip_version: 4
+      cidr: 192.168.0.0/24
+      network: { get_resource: network }
+      dns_nameservers: [8.8.8.8]
+
+  management_router:
     type: OS::Neutron::Router
     properties:
+      name: qtip_router
       external_gateway_info:
-        network: { get_param: public_network }
-  router_interface:
+        network: { get_param: external_net_name }
+
+  management_router_interface:
     type: OS::Neutron::RouterInterface
     properties:
-      router_id: { get_resource: router_1 }
-      subnet: { get_resource: private_subnet }
+      router: { get_resource: management_router }
+      subnet: { get_resource: subnet }
+
+  security_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name: qtip_security_group
+      rules:
+        - port_range_min: 22
+          port_range_max: 5201
+          protocol: tcp
+        - port_range_min: 22
+          port_range_max: 5201
+          protocol: udp
+        - protocol: icmp
+
 outputs:
   description: 'none'
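The reworked SampleHeat.yaml declares a flavor, network, router and security group, but no OS::Nova::Server appears in this hunk, so the instances are presumably defined elsewhere in the template. For orientation, a sketch of a server resource that would consume these pieces (the qtip_vm resource itself is illustrative; the get_param/get_resource targets match the template above):
::

    qtip_vm:                                   # illustrative resource, not part of this diff
      type: OS::Nova::Server
      properties:
        image: { get_param: image }
        flavor: { get_resource: flavor }
        networks:
          - network: { get_resource: network }
        security_groups:
          - { get_resource: security_group }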
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 10537ecc..8d951fc5 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -4,7 +4,7 @@
 FROM ubuntu:14.04
 
-MAINTAINER Nauman Ahad <Nauman_Ahad@dell.com>
+MAINTAINER Yujun Zhang <zhang.yujunz@zte.com.cn>
 
 LABEL version="0.1" description="OPNFV QTIP Docker container"
 
@@ -34,6 +34,8 @@ libjpeg62 \
 libjpeg62-dev \
 zlib1g-dev \
 python-tk \
+curl \
+supervisor \
 --no-install-recommends
 
 RUN apt-add-repository ppa:ansible/ansible -y
@@ -59,4 +61,11 @@ RUN git clone https://gerrit.opnfv.org/gerrit/releng $REPOS_DIR/releng
 
 RUN pip install -r $REPOS_DIR/qtip/requirements.txt
 
-CMD cd $REPOS_DIR/qtip && python restful_server/qtip_server.py>$HOME/qtip/logs/run.log
+#Config supervisor
+
+RUN mkdir -p /var/log/supervisor
+RUN locale-gen en_US en_US.UTF-8
+COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+
+CMD ["/usr/bin/supervisord"]
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 00000000..35ac0935
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,11 @@
+# QTIP The Indices for Performance
+
+[QTIP] is an [OPNFV] project.
+
+It aims to build a platform for creating and sharing indices of [NFVI] performance.
+
+See the [project vision](https://wiki.opnfv.org/display/qtip/Vision) for more details.
+
+[QTIP]: https://wiki.opnfv.org/display/qtip
+[OPNFV]: https://www.opnfv.org
+[NFVI]: https://en.wikipedia.org/wiki/Network_function_virtualization
diff --git a/docker/run_qtip.sh b/docker/run_qtip.sh
index 62f97c88..a7a20501 100755
--- a/docker/run_qtip.sh
+++ b/docker/run_qtip.sh
@@ -1,4 +1,5 @@
 #! /bin/bash
+
 run_test_suite()
 {
     if [ "$TEST_CASE" == "compute" ]; then
@@ -21,8 +22,16 @@ run_test_suite()
     fi
 }
 
+rm -f ${QTIP_DIR}/config/QtipKey*
+
+echo "Generating ssh keypair"
+ssh-keygen -t rsa -N "" -f ${QTIP_DIR}/config/QtipKey -q
+
 source ${QTIP_DIR}/docker/prepare_qtip_image.sh
 
 run_test_suite
 
 source ${QTIP_DIR}/docker/cleanup_qtip_image.sh
+
+echo "Remove ssh keypair"
+rm -f ${QTIP_DIR}/config/QtipKey*
diff --git a/docker/supervisord.conf b/docker/supervisord.conf
new file mode 100644
index 00000000..35d16c7e
--- /dev/null
+++ b/docker/supervisord.conf
@@ -0,0 +1,13 @@
+[supervisord]
+nodaemon=true
+
+[program:qtip_server]
+command=bash -c "cd $REPOS_DIR/qtip&&python restful_server/qtip_server.py"
+numprocs=1
+autostart=true
+autorestart=true
+user=root
+environment=INSTALLER_TYPE="%(ENV_INSTALLER_TYPE)s",INSTALLER_IP="%(ENV_INSTALLER_IP)s",NODE_NAME="%(ENV_NODE_NAME)s"
+stdout_logfile=/var/log/supervisor/%(program_name)s.log
+stderr_logfile=/var/log/supervisor/%(program_name)s.log
+
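supervisord now keeps qtip_server running and expects INSTALLER_TYPE, INSTALLER_IP and NODE_NAME in the container environment, which is why the docs below pass them via docker run -e. The same wiring, sketched as a docker-compose service for readers who prefer it (compose is not part of this change; the values are examples):
::

    version: '2'
    services:
      qtip:
        image: opnfv/qtip
        environment:
          - INSTALLER_TYPE=fuel        # example value
          - INSTALLER_IP=10.20.0.2     # example value
          - NODE_NAME=zte-pod1         # example value
        volumes:
          - $HOME/imgstore:/home/opnfv/imgstore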
diff --git a/docs/configguide/configuration.rst b/docs/configguide/configuration.rst
index e5228541..d6d2fd5d 100644
--- a/docs/configguide/configuration.rst
+++ b/docs/configguide/configuration.rst
@@ -7,39 +7,14 @@
 Configuration
 *************
 
-QTIP currently supports by using a Docker image or by pulling the repo from
-the upstream repository found at https://git.opnfv.org/qtip. Detailed steps
-about setting up QTIP using both of these options can be found below.
+QTIP is currently supported by using a Docker image. Detailed steps
+about setting up QTIP can be found below.
 
 To use QTIP you should have access to an OpenStack environment, with at
 least Nova, Neutron, Glance, Keystone and Heat installed.
 
 Add a brief introduction to configure OPNFV with this specific installer
 
-Pre-configuration activities
-----------------------------
-
-
-Setting QTIP framework on Ubuntu 14.04
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Install dependencies:
-::
-
-    sudo apt-get install python-dev
-    sudo apt-get install python-pip
-    sudo apt-get install build-essential
-    sudo apt-get install git wget
-    sudo pip install python-heatclient python-glanceclient python-neutronclient
-
-
-Download source code and install python dependencies:
-::
-
-    git clone https://git.opnfv.org/qtip
-    cd qtip
-
-
 Installing QTIP using Docker
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -47,18 +22,25 @@
 QTIP has a Docker image on the docker hub. Pulling opnfv/qtip docker image
 from docker hub:
 ::
 
-    sudo docker pull opnfv/qtip
+    docker pull opnfv/qtip
 
 Verify that opnfv/qtip has been downloaded. It should be listed as an image by
 running the following command.
 ::
 
-    sudo docker images
+    docker images
 
-Run the Docker instance:
+Make a dir to store the QTIP image which will be used to create VMs in the cloud.
 ::
 
-    docker run -it opnfv/qtip /bin/bash
+    mkdir $HOME/imgstore
+
+Run and enter the Docker instance:
+::
+
+    envs="INSTALLER_TYPE={INSTALLER_TYPE} -e INSTALLER_IP={INSTALLER_IP}
+    -e NODE_NAME={NODE_NAME}"
+    docker run --name qtip -id -e $envs -v "$HOME/imgstore:/home/opnfv/imgstore" opnfv/qtip
+    docker exec -i -t qtip /bin/bash
 
 Now you are in the container and QTIP can be found in the /repos/qtip
 and can be navigated to using the following command.
@@ -79,22 +61,22 @@
 from the OpenStack *openrc* file. This can be done by running the following
 command.
 ::
 
-    source get_env_info.sh -n {INSTALLER_TYPE} -i {INSTALLER_IP}
+    source scripts/get_env_info.sh -n {INSTALLER_TYPE} -i {INSTALLER_IP}
     source opnfv-creds.sh
 
 This provides a ``opnfv-creds.sh`` file which can be sourced to get the
-environment variables. For running QTIP manually, it is also necessary to
-export the installer type.
-::
-
-    export INSTALLER_TYPE="{installer-type}"
+environment variables.
 
 QTIP default key pair
 """""""""""""""""""""
-QTIP uses a SSH key pair to connect to the guest image. This key pair can
-be found in the ``config/`` directory.
+QTIP uses an SSH key pair to connect to the guest image. You should generate the
+key pair before running a QTIP test, and put it in the ``config/`` directory.
+::
+
+    ssh-keygen -t rsa -N "" -f config/QtipKey -q
+
 
 Hardware configuration
diff --git a/docs/userguide/_01-compute.rst b/docs/userguide/_01-compute.rst
index 7cd4c2ce..56be5488 100644
--- a/docs/userguide/_01-compute.rst
+++ b/docs/userguide/_01-compute.rst
@@ -58,7 +58,7 @@
 OpenSSL Speed can be used to benchmark compute performance of a machine. In
 QTIP, two OpenSSL Speed benchmarks are incorporated:
 1. RSA signatures/sec signed by a machine
-2. AES 128-bit encnyption throught for a machine for cipher block sizes
+2. AES 128-bit encryption throughput for a machine for cipher block sizes
 
 References:
 
https://www.openssl.org/docs/manmaster/apps/speed.html
@@ -67,7 +67,7 @@
 RAMSpeed
 ^^^^^^^^
 
-RAMSpeed is used to measune a machine's memory perfomace.
+RAMSpeed is used to measure a machine's memory performance.
 The problem(array)size is large enough to ensure Cache Misses so that the
 main machine memory is used.
 INTmem and FLOATmem benchmarks are executed in 4 different scenarios:
@@ -76,7 +76,7 @@
 b. Add: a(i)=b(i)+c(i)
 c. Scale: a(i)=b(i)*d
 d. Triad: a(i)=b(i)+c(i)*d
 
-INTmem uses integens in these four benchmarks whereas FLOATmem uses floating
+INTmem uses integers in these four benchmarks whereas FLOATmem uses floating
 points for these benchmarks.
 
 References:
diff --git a/docs/userguide/annex.rst b/docs/userguide/annex.rst
index 406d5132..e8bf5555 100644
--- a/docs/userguide/annex.rst
+++ b/docs/userguide/annex.rst
@@ -7,13 +7,12 @@
 Annex
 *****
 
-Templates
-=========
-
-.. include:: _testcase_description.rst
+.. toctree::
+   :maxdepth: 2
 
+   _testcase_description.rst
 
 Downloads
 =========
 
-- :download:`Sample configuration <../download/sample_config.yaml>`
\ No newline at end of file
+- :download:`Sample configuration <../download/sample_config.yaml>`
diff --git a/docs/userguide/benchmark-suites.rst b/docs/userguide/benchmark-suites.rst
index 9a3929cc..84d1c647 100644
--- a/docs/userguide/benchmark-suites.rst
+++ b/docs/userguide/benchmark-suites.rst
@@ -7,6 +7,9 @@
 Benchmark Suites
 ****************
 
-.. include:: _01-compute.rst
-.. include:: _02-network.rst
-.. include:: _03-storage.rst
+.. toctree::
+   :maxdepth: 2
+
+   _01-compute.rst
+   _02-network.rst
+   _03-storage.rst
diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst
index 3032f357..5ae4f345 100644
--- a/docs/userguide/index.rst
+++ b/docs/userguide/index.rst
@@ -11,6 +11,6 @@
 .. toctree::
    :maxdepth: 2
 
-   ./introduction.rst
-   ./benchmark-suites.rst
-   ./annex.rst
+   introduction.rst
+   benchmark-suites.rst
+   annex.rst
diff --git a/docs/userguide/introduction.rst b/docs/userguide/introduction.rst
index 2655347e..4876d0e2 100644
--- a/docs/userguide/introduction.rst
+++ b/docs/userguide/introduction.rst
@@ -9,16 +9,16 @@
 Introduction
 ************
 
 This guide will serve as a first step to familiarize the user with how to
-run QTIP the first time when the user clones QTIP on to their host machine.
-In order to clone QTIP please follow the instructions in the
-installation.rst located in docs/userguide/installation.rst.
+run QTIP the first time when the user pulls the QTIP image on to their host machine.
+In order to install and configure QTIP please follow the instructions in the
+configuration.rst located in docs/configguide/configuration.rst.
 
 QTIP Directory structure:
 -------------------------
 
 The QTIP directory has been sectioned off into multiple folders to facilitate
  segmenting information into relevant categories. The folders that concern
- the end user are `test_cases/` and `test_list/`.
+ the end user are `test_cases/` and `benchmarks/suite/`.
 
 **test_cases/:**
 
@@ -58,29 +58,29 @@
 distinguishes between a test to be run on the Virtual Machine or the compute
 node itself, respectively.
 
-**test_list/:**
+**benchmarks/suite/:**
 
 This folder contains three files, namely `compute`, `network` and `storage`.
 These files list the benchmarks to be run by the QTIP framework. Sample
 compute test file is shown below
 ::
 
-{
+  {
     "bm": [
-        "dhrystone_bm.yaml",
-        "whetstone_bm.yaml",
-        "ramspeed_bm.yaml",
-        "dpi_bm.yaml",
-        "ssl_bm.yaml"
+      "dhrystone_bm.yaml",
+      "whetstone_bm.yaml",
+      "ramspeed_bm.yaml",
+      "dpi_bm.yaml",
+      "ssl_bm.yaml"
     ],
     "vm": [
-        "dhrystone_vm.yaml",
-        "whetstone_vm.yaml",
-        "ramspeed_vm.yaml",
-        "dpi_vm.yaml",
-        "ssl_vm.yaml"
+      "dhrystone_vm.yaml",
+      "whetstone_vm.yaml",
+      "ramspeed_vm.yaml",
+      "dpi_vm.yaml",
+      "ssl_vm.yaml"
     ]
-}
+  }
 
 The compute file will now run all the benchmarks listed above one after
 another on the environment.
@@ -132,11 +132,13 @@
 The `Context` tag helps the user list the number of compute nodes they want
  to run dhrystone on. The user can list all the compute nodes under the
  `Host_Machines` tag. All the machines under test must be listed under the
  `Host_Machines` and naming it incrementally higher. The `ip:` tag is used
- to specify the IP of the particular compute node. The `pw:` tag can be left
- blank because QTIP uses its own key for ssh. In order to run dhrystone on
- one compute node at a time the user needs to edit the `role:` tag. `role:
- host` for machine_1 and `role: server` for machine_2 will allow for
- dhrystone to be run on machine_1 and then run on machine_2.
+ to specify the IP of the particular compute node.The `ip:` tag can be left + blank when installer type is 'fuel',because QTIP will get ip + from installer. The `pw:` tag can be left blank because QTIP uses its own + key for ssh. In order to run dhrystone on one compute node at a time the user + needs to edit the `role:` tag. `role: host` for machine_1 and `role: server` + for machine_2 will allow for dhrystone to be run on machine_1 and then run + on machine_2. :: @@ -312,71 +314,67 @@ Sample dhrystone_vm.yaml file: Commands to run the Framework: ------------------------------ -In order to start QTIP on the default lab please use the following commands (asssuming you have prepared the config files in the test_cases/default/ directory and listed the intended suite in the test_list/<RELEVANT-SUITE-FILE>): +In order to start QTIP on the default lab please use the following commands (asssuming your installer +is 'fuel' or 'compass', you use the config files in the test_cases/default/ directory and listed the +intended suite in the benchmarks/suite/<RELEVANT-SUITE-FILE>): -First step is to export the necessary information to the environment. -:: - - source get_env_info.sh -n <INSTALLER_TYPE> -i <INSTALLER_IP> - -for running qtip on an openstack deployed using FUEL with the Installer IP 10.20.0.2 -:: - - source get_env_info.sh -n fuel -i 10.20.0.2 +First step is to export the necessary information to the environment and generate QTIP key pair. +Please follow the instructions in the configuration.rst. -This will generate the `opnfv-creds.sh` file needed to use the python clients for keystone, glance, nova, and neutron. +Secondary step download the QTIP image and upload it to the Cloud.QTIP will use this image +to create VM when test VM performance. :: - source opnfv-creds.sh + source docker/prepare_qtip_image.sh -Running QTIP on the using `default` as the pod name and for the `compute` suite by cli +Running QTIP on the using `default` as the pod name and for the `compute` suite by cli. :: python qtip.py -l default -f compute -Running QTIP on the using 'default' as the pod name and for the 'compute' suite 'bm' type by restful api +Running QTIP on the using 'default' as the pod name and for the 'compute' suite 'bm' type by restful api. :: - curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"compute", "type": "BM"}' -H "Content-Type: application/json" http://qtip_server_ip:5000/api/v1.0/jobs + curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"compute", "type": "BM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs -Running QTIP on the using 'default' as the pod name and for the 'compute' suite 'vm' type by restful api +Running QTIP on the using 'default' as the pod name and for the 'compute' suite 'vm' type by restful api. :: - curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"compute", "type": "VM"}' -H "Content-Type: application/json" http://qtip_server_ip:5000/api/v1.0/jobs + curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"compute", "type": "VM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs -Running QTIP on the using `default` as the pod name and for the `network` suite by cli +Running QTIP on the using `default` as the pod name and for the `network` suite by cli. 
:: python qtip.py -l default -f network -Running QTIP on the using 'default' as the pod name and for the 'network' suite 'bm' type by restful api +Running QTIP using 'default' as the pod name and for the 'network' suite 'bm' type by RESTful API. :: - curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"network", "type": "BM"}' -H "Content-Type: application/json" http://qtip_server_ip:5000/api/v1.0/jobs + curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"network", "type": "BM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs -Running QTIP on the using `default` as the pod name and for the `storage` suite by cli +Running QTIP using `default` as the pod name and for the `storage` suite by CLI. :: python qtip.py -l default -f storage -Running QTIP on the using 'default' as the pod name and for the 'storage' suite 'bm' type by restful api +Running QTIP using 'default' as the pod name and for the 'storage' suite 'bm' type by RESTful API. :: - curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"storage", "type": "BM"}' -H "Content-Type: application/json" http://qtip_server_ip:5000/api/v1.0/jobs + curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"storage", "type": "BM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs Get running QTIP job status by RESTful API :: - curl --trace-ascii debug.txt -X GET http://qtip_server_ip:5000/api/v1.0/jobs/job-id + curl --trace-ascii debug.txt -X GET http://127.0.0.1:5000/api/v1.0/jobs/job-id For example: - curl --trace-ascii debug.txt -X GET http://172.37.0.3:5000/api/v1.0/jobs/5b71f035-3fd6-425c-9cc7-86acd3a04214 + curl --trace-ascii debug.txt -X GET http://127.0.0.1:5000/api/v1.0/jobs/5b71f035-3fd6-425c-9cc7-86acd3a04214 Stop running QTIP job by RESTful API. The job will finish the current benchmark test and stop. 
:: - curl --trace-ascii debug.txt -X DELTET http://qtip_server_ip:5000/api/v1.0/jobs/job-id + curl --trace-ascii debug.txt -X DELETE http://127.0.0.1:5000/api/v1.0/jobs/job-id For example: - curl --trace-ascii debug.txt -X DELETE http://172.37.0.3:5000/api/v1.0/jobs/5b71f035-3fd6-425c-9cc7-86acd3a04214q + curl --trace-ascii debug.txt -X DELETE http://127.0.0.1:5000/api/v1.0/jobs/5b71f035-3fd6-425c-9cc7-86acd3a04214 Results: -------- diff --git a/func/args_handler.py b/func/args_handler.py index 59712800..624f90c4 100644 --- a/func/args_handler.py +++ b/func/args_handler.py @@ -14,8 +14,8 @@ from func.spawn_vm import SpawnVM from func.driver import Driver -def get_files_in_test_list(suite_name, case_type='all'): - benchmark_list = json.load(file('test_list/{0}'.format(suite_name))) +def get_files_in_suite(suite_name, case_type='all'): + benchmark_list = json.load(file('benchmarks/suite/{0}'.format(suite_name))) return reduce(add, benchmark_list.values()) \ if case_type == 'all' else benchmark_list[case_type] @@ -30,8 +30,8 @@ return './test_cases/{0}/{1}/{2}'.format(lab, suit, benchmark) -def check_suite_in_test_list(suite_name): - return True if os.path.isfile('test_list/' + suite_name) else False +def check_suite(suite_name): - return True if os.path.isfile('benchmarks/suite/' + suite_name) else False def check_lab_name(lab_name): @@ -59,10 +59,12 @@ def prepare_ansible_env(benchmark_test_case): def run_benchmark(installer_type, pwd, benchmark, benchmark_details, proxy_info, env_setup, benchmark_test_case): driver = Driver() - return driver.drive_bench(installer_type, pwd, benchmark, - env_setup.roles_dict.items(), - _get_f_name(benchmark_test_case), - benchmark_details, env_setup.ip_pw_dict.items(), proxy_info) + result = driver.drive_bench(installer_type, pwd, benchmark, + env_setup.roles_dict.items(), + _get_f_name(benchmark_test_case), + benchmark_details, env_setup.ip_pw_dict.items(), proxy_info) + env_setup.cleanup_authorized_keys() + return result def prepare_and_run_benchmark(installer_type, pwd, benchmark_test_case): diff --git a/func/cli.py b/func/cli.py index d914a2de..c5f5d2b7 100644 --- a/func/cli.py +++ b/func/cli.py @@ -11,6 +11,9 @@ import sys import os import args_handler import argparse +from utils import logger_utils + +logger = logger_utils.QtipLogger('cli').get class Cli: @@ -26,40 +29,41 @@ class Cli: ' The user should list default after -l . all the fields in' ' the files are necessary and should be filled') parser.add_argument('-f', '--file', required=True, help='File in ' - 'test_list with the list of tests. there are three files' + 'benchmarks/suite/ with the list of tests. there are three files' '\n compute ' '\n storage ' '\n network ' 'They contain all the tests that will be run. They are listed by suite.' 'Please ensure there are no empty lines') parser.add_argument('-b', '--benchmark', help='Name of the benchmark.' - 'Can be found in test_lists/file_name') + 'Can be found in benchmarks/suite/file_name') return parser.parse_args(args) def __init__(self, args=sys.argv[1:]): args = self._parse_args(args) - if not args_handler.check_suite_in_test_list(args.file): - print('\n\n ERROR: Test File Does not exist in test_list/ please enter correct file \n\n') + if not args_handler.check_suite(args.file): + logger.error("ERROR: This suite file doesn't exist under benchmarks/suite/.\ + Please enter correct file: %s" 
% str(args.file)) sys.exit(1) if not args_handler.check_lab_name(args.lab): - print('\n\n You have specified a lab that is not present in test_cases/ please enter \ - correct file. If unsure how to proceed, use -l default.\n\n') + logger.error("You have specified a lab that is not present under test_cases/.\ + Please enter correct file. If unsure how to proceed, use -l default.") sys.exit(1) suite = args.file - benchmarks = args_handler.get_files_in_test_list(suite) + benchmarks = args_handler.get_files_in_suite(suite) test_cases = args_handler.get_files_in_test_case(args.lab, suite) benchmarks_list = filter(lambda x: x in test_cases, benchmarks) if args.benchmark: if not args_handler.check_benchmark_name(args.lab, args.file, args.benchmark): - print('\n\n You have specified an incorrect benchmark. Please' - 'enter the correct one.\n\n') + logger.error("You have specified an incorrect benchmark.\ + Please enter the correct one.") sys.exit(1) else: - print("Starting with " + args.benchmark) + logger.info("Starting with " + args.benchmark) args_handler.prepare_and_run_benchmark( os.environ['INSTALLER_TYPE'], os.environ['PWD'], args_handler.get_benchmark_path(args.lab.lower(), args.file, args.benchmark)) @@ -68,5 +72,5 @@ class Cli: os.environ['INSTALLER_TYPE'], os.environ['PWD'], args_handler.get_benchmark_path(args.lab.lower(), suite, x)), benchmarks_list) - print('{0} is not a Template in the Directory Enter a Valid file name.' - 'or use qtip.py -h for list'.format(filter(lambda x: x not in test_cases, benchmarks))) + logger.info("{0} is not a template in the directory. Enter a valid file name\ + or use qtip.py -h for the list".format(filter(lambda x: x not in test_cases, benchmarks))) diff --git a/func/env_setup.py b/func/env_setup.py index 9e21a5b6..6027f904 100644 --- a/func/env_setup.py +++ b/func/env_setup.py @@ -208,3 +208,9 @@ class Env_setup: def call_ssh_test(self): self.ssh_test(self.ip_pw_list) + + def cleanup_authorized_keys(self): + for ip, pw in self.ip_pw_list: + cmd = './scripts/cleanup_creds.sh %s' % ip + logger.info("cleanup authorized_keys: %s " % cmd) + os.system(cmd) diff --git a/func/spawn_vm.py b/func/spawn_vm.py index 3a16e02d..0a24d7a4 100644 --- a/func/spawn_vm.py +++ b/func/spawn_vm.py @@ -1,169 +1,138 @@ ##############################################################################
-# Copyright (c) 2015 Dell Inc and others.
+# Copyright (c) 2016 Dell Inc, ZTE and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
import os
import sys
-from collections import defaultdict
-from func.env_setup import Env_setup
import yaml
import heatclient.client
import keystoneclient
-from novaclient import client
import time
+from func.env_setup import Env_setup
from func.create_zones import AvailabilityZone
+from utils import logger_utils
+
+logger = logger_utils.QtipLogger('spawn_vm').get
class SpawnVM(Env_setup):
- vm_role_ip_dict = defaultdict(list)
- installer = ''
def __init__(self, vm_info):
- print 'SpawnVM Class initiated'
- print 'vm_info: %s' % vm_info
+ logger.info('vm_info: %s' % vm_info)
vm_role_ip_dict = vm_info.copy()
- print 'Generating Heat Template\n'
self._keystone_client = None
self._heat_client = None
self._glance_client = None
self._nova_client = None
- self. _get_nova_client()
self.azone = AvailabilityZone()
# TODO: it should clean up aggregates and stack after test case finished.
self.azone.clean_all_aggregates()
self.azone.create_aggs(vm_info['availability_zone'])
- installer = self.get_installer_type()
- self.Heat_template1 = self.heat_template_vm(vm_info, installer)
- self.create_stack(vm_role_ip_dict, self.Heat_template1)
+ self.heat_template = self.generate_heat_template(vm_info)
+ self.create_stack(vm_role_ip_dict)
@staticmethod
- def get_installer_type():
- print 'Getting Installer Name'
- return os.environ['INSTALLER_TYPE']
-
- @staticmethod
- def get_public_network(installer_detected):
+ def get_public_network():
"""
TODO: GET THE NAMES OF THE PUBLIC NETWORKS for OTHER PROJECTS
"""
- print 'Getting Public Network'
- if installer_detected.lower() == 'fuel':
+ installer = os.environ['INSTALLER_TYPE']
+
+ if installer.lower() == 'fuel':
return 'admin_floating_net'
- if installer_detected.lower() == 'apex':
+ if installer.lower() == 'apex':
return 'external'
- if installer_detected.lower() == 'compass':
+ if installer.lower() == 'compass':
return 'ext-net'
- if installer_detected.lower() == 'joid':
+ if installer.lower() == 'joid':
return 'ext-net'
- def heat_template_vm(self, vm_params, installer):
- Heat_Dic = {}
+ def generate_heat_template(self, vm_params):
+ logger.info('Generating Heat Template')
+ heat_dict = {}
try:
with open('./config/SampleHeat.yaml', 'r+') as H_temp:
- Heat_Dic = yaml.safe_load(H_temp)
+ heat_dict = yaml.safe_load(H_temp)
except yaml.YAMLError as exc:
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
- print 'Error in qtip/config/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1, mark.column + 1)
- print 'EXITING PROGRAM. Correct File and restart'
+ logger.error(
+ 'Error in qtip/config/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1,
+ mark.column + 1))
+ logger.error('EXITING PROGRAM. Correct File and restart')
sys.exit(1)
+
fopen = open('./config/QtipKey.pub', 'r')
fopenstr = fopen.read()
fopenstr = fopenstr.rstrip()
scriptcmd = '#!/bin/bash \n echo {0} >> foo.txt \n echo {1} >> /root/.ssh/authorized_keys'.format(
fopenstr, fopenstr)
- netName = self.get_public_network(installer)
- print netName
- Heat_Dic['heat_template_version'] = '2014-10-16'
- Heat_Dic['resources']['KeyPairSavePrivate'] = {
- 'type': 'OS::Nova::KeyPair',
- 'properties': {
- 'save_private_key': 'true',
- 'name': 'my_key'
- }
- }
- Heat_Dic['parameters']['public_network'] = {
+ netName = self.get_public_network()
+ heat_dict['heat_template_version'] = '2015-04-30'
+
+ heat_dict['parameters']['public_network'] = {
'type': 'string',
'default': netName
}
+
for x in range(1, len(vm_params['availability_zone']) + 1):
avail_zone = vm_params['availability_zone'][x - 1]
- img = vm_params['OS_image'][x - 1]
- flavor = vm_params['flavor'][x - 1]
- Heat_Dic['parameters']['availability_zone_' + str(x)] = \
+ heat_dict['parameters']['availability_zone_' + str(x)] = \
{'description': 'Availability Zone of the instance',
'default': avail_zone,
'type': 'string'}
- Heat_Dic['resources']['public_port_' + str(x)] = \
+ heat_dict['resources']['public_port_' + str(x)] = \
{'type': 'OS::Neutron::Port',
- 'properties': {'network': {'get_resource': 'private_network'},
- 'security_groups': [{'get_resource': 'demo1_security_Group'}],
- 'fixed_ips': [{'subnet_id':
- {'get_resource': 'private_subnet'}}]}}
+ 'properties': {'network': {'get_resource': 'network'},
+ 'security_groups': [{'get_resource': 'security_group'}],
+ 'fixed_ips': [{'subnet_id': {'get_resource': 'subnet'}}]}}
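+        # NOTE: 'external_net_name' is assumed to be declared as a parameter in
+        # config/SampleHeat.yaml (loaded above); the 'public_network' parameter
+        # defined earlier carries the auto-detected external network name.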
- Heat_Dic['resources']['floating_ip_' + str(x)] = {
+ heat_dict['resources']['floating_ip_' + str(x)] = {
'type': 'OS::Neutron::FloatingIP',
- 'properties': {
- 'floating_network': {'get_param': 'public_network'}}}
+ 'properties': {'floating_network': {'get_param': 'external_net_name'}}}
- Heat_Dic['resources']['floating_ip_assoc_' + str(x)] = {
+ heat_dict['resources']['floating_ip_assoc_' + str(x)] = {
'type': 'OS::Neutron::FloatingIPAssociation',
'properties': {
'floatingip_id': {'get_resource': 'floating_ip_' + str(x)},
'port_id': {'get_resource': 'public_port_' + str(x)}}}
- Heat_Dic['resources']['my_instance_' + str(x)] = \
+ heat_dict['resources']['my_instance_' + str(x)] = \
{'type': 'OS::Nova::Server',
- 'properties': {'image': img,
+ 'properties': {'image': {'get_param': 'image'},
'networks':
[{'port': {'get_resource': 'public_port_' + str(x)}}],
- 'flavor': flavor,
+ 'flavor': {'get_resource': 'flavor'},
'availability_zone': avail_zone,
+ 'security_groups': [{'get_resource': 'security_group'}],
'name': 'instance' + str(x),
- 'key_name': {'get_resource': 'KeyPairSavePrivate'},
'user_data_format': 'RAW',
'user_data': scriptcmd}}
- Heat_Dic['resources']['demo1_security_Group'] = {
- 'type': 'OS::Neutron::SecurityGroup',
- 'properties': {
- 'name': 'demo1_security_Group',
- 'rules': [{
- 'protocol': 'tcp',
- 'port_range_min': 22,
- 'port_range_max': 5201},
- {'protocol': 'udp',
- 'port_range_min': 22,
- 'port_range_max': 5201},
- {'protocol': 'icmp'}]}}
-
- Heat_Dic['outputs']['instance_PIP_' + str(x)] = {
+ heat_dict['outputs']['instance_PIP_' + str(x)] = {
'description': 'IP address of the instance',
'value': {'get_attr': ['my_instance_' + str(x), 'first_address']}}
- Heat_Dic['outputs']['instance_ip_' + str(x)] = {
+
+ heat_dict['outputs']['instance_ip_' + str(x)] = {
'description': 'IP address of the instance',
'value': {'get_attr': ['floating_ip_' + str(x), 'floating_ip_address']}}
- Heat_Dic['outputs']['availability_instance_' + str(x)] = {
+ heat_dict['outputs']['availability_instance_' + str(x)] = {
'description': 'Availability Zone of the Instance',
'value': {'get_param': 'availability_zone_' + str(x)}}
- Heat_Dic['outputs']['KeyPair_PublicKey'] = {
- 'description': 'Private Key',
- 'value': {'get_attr': ['KeyPairSavePrivate', 'private_key']}
- }
- del Heat_Dic['outputs']['description']
- print Heat_Dic
- return Heat_Dic
+ del heat_dict['outputs']['description']
+ logger.info(heat_dict)
+
+ return heat_dict
def _get_keystone_client(self):
"""returns a keystone client instance"""
@@ -176,12 +145,6 @@ class SpawnVM(Env_setup): tenant_name=os.environ.get('OS_TENANT_NAME'))
return self._keystone_client
- def _get_nova_client(self):
- if self._nova_client is None:
- keystone = self._get_keystone_client()
- self._nova_client = client.Client('2', token=keystone.auth_token)
- return self._nova_client
-
def _get_heat_client(self):
"""returns a heat client instance"""
if self._heat_client is None:
@@ -192,45 +155,29 @@ class SpawnVM(Env_setup): '1', endpoint=heat_endpoint, token=keystone.auth_token)
return self._heat_client
- def create_stack(self, vm_role_ip_dict, heat_template):
-
- global sshkey
+ def create_stack(self, vm_role_ip_dict):
stackname = 'QTIP'
heat = self._get_heat_client()
- for checks in range(3):
- print "Try to delete heats %s" % checks
- for prev_stacks in heat.stacks.list():
- if prev_stacks.stack_name == 'QTIP':
- print 'QTIP Stacks exists.\nDeleting Existing Stack'
- heat.stacks.delete('QTIP')
- time.sleep(10)
- print '\nStack Creating Started\n'
+ self.delete_stack(stackname)
- try:
- heat.stacks.create(stack_name=stackname, template=heat_template)
- except Exception:
- print 'Create Failed :( '
-
- cluster_detail = heat.stacks.get(stackname)
- while cluster_detail.status != 'COMPLETE':
- if cluster_detail.status == 'IN_PROGRESS':
- print 'Stack Creation in Progress'
- cluster_detail = heat.stacks.get(stackname)
- time.sleep(10)
- print 'Stack Created'
- print 'Getting Public IP(s)'
- zone = []
- s = 0
- for vm in range(len(vm_role_ip_dict['OS_image'])):
+ logger.info('Start to create stack %s' % stackname)
+ heat.stacks.create(stack_name=stackname, template=self.heat_template)
+
+ stack_status = "IN_PROGRESS"
+ while stack_status != 'COMPLETE':
+ if stack_status == 'IN_PROGRESS':
+ logger.debug('Create in Progress')
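+            # python-heatclient's Stack.status holds only the part after the
+            # action prefix, so a failed create shows up here as 'FAILED'
+            # (from 'CREATE_FAILED').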
+            if stack_status == 'FAILED':
+                raise RuntimeError("Stack %s creation failed!" % stackname)
+ stack_status = heat.stacks.get(stackname).status
+ time.sleep(15)
+ logger.info('Stack %s Created Complete!' % stackname)
- for I in cluster_detail.outputs:
- availabilityKey = 'availability_instance_' + str(vm + 1)
+ stack_outputs = heat.stacks.get(stackname).outputs
- if I['output_key'] == availabilityKey:
- zone.insert(s, str(I['output_value']))
- s = s + 1
- for i in cluster_detail.outputs:
+ for vm in range(len(vm_role_ip_dict['OS_image'])):
+ for i in stack_outputs:
instanceKey = "instance_ip_" + str(vm + 1)
privateIPkey = 'instance_PIP_' + str(vm + 1)
if i['output_key'] == instanceKey:
@@ -240,10 +187,20 @@ class SpawnVM(Env_setup): if i['output_key'] == privateIPkey:
Env_setup.ip_pw_dict[vm_role_ip_dict['role'][vm]] = str(i['output_value'])
- if i['output_key'] == 'KeyPair_PublicKey':
- sshkey = str(i['output_value'])
- with open('./config/my_key.pem', 'w') as fopen:
- fopen.write(sshkey)
- fopen.close()
- print Env_setup.ip_pw_list
+ logger.info('Getting Public IP(s): %s' % Env_setup.ip_pw_list)
+
+ def delete_stack(self, stack_name):
+ heat = self._get_heat_client()
+
+ stacks = heat.stacks.list()
+ exists = map(lambda x: x.stack_name, stacks)
+ if stack_name in exists:
+ logger.info("Delete stack %s" % stack_name)
+ heat.stacks.delete(stack_name)
+ while stack_name in exists:
+ time.sleep(10)
+ stacks = heat.stacks.list()
+ exists = map(lambda x: x.stack_name, stacks)
+ logger.debug("exists_stacks: %s" % exists)
+ logger.info("%s doesn't exist" % stack_name)
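A minimal standalone sketch of the create-and-poll pattern implemented by the new `create_stack` and `delete_stack` above, for reviewers who want to try it in isolation (assumes `heat` is an authenticated python-heatclient v1 client as returned by `_get_heat_client`; `wait_for_stack` itself is illustrative and not part of this patch)
::

    import time

    def wait_for_stack(heat, stack_name, interval=15):
        # Stack.status strips the action prefix: 'CREATE_COMPLETE' -> 'COMPLETE'.
        while True:
            status = heat.stacks.get(stack_name).status
            if status == 'COMPLETE':
                return
            if status == 'FAILED':
                raise RuntimeError("Stack %s creation failed!" % stack_name)
            time.sleep(interval)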
@@ -7,11 +7,9 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## from func.cli import Cli -import os def main(): - os.system('./scripts/file_permission.sh') Cli() diff --git a/restful_server/qtip_server.py b/restful_server/qtip_server.py index f2676595..7b55020a 100644 --- a/restful_server/qtip_server.py +++ b/restful_server/qtip_server.py @@ -13,6 +13,7 @@ import threading from copy import copy import db import func.args_handler as args_handler +import restful_server.result_handler as result_handler app = Flask(__name__) @@ -145,8 +146,8 @@ default is all benchmarks in suite with specified type, help='testdb_url should be test db http url,for example http://testresults.opnfv.org/test/api/v1') parser.add_argument('node_name', type=str, required=False, default=None, help='node_name should be string') args = parser.parse_args() - if not args_handler.check_suite_in_test_list(args["suite_name"]): - return abort(404, 'message:Test suite {0} does not exist in test_list'.format(args["suite_name"])) + if not args_handler.check_suite(args["suite_name"]): + return abort(404, 'message:Test suite {0} does not exist under benchmarks/suite'.format(args["suite_name"])) if not args_handler.check_lab_name(args["pod_name"]): return abort(404, 'message: You have specified a lab {0}\ that is not present in test_cases'.format(args['pod_name'])) @@ -155,8 +156,8 @@ default is all benchmarks in suite with specified type, if not job_id: return abort(409, 'message:It already has one job running now!') - benchmarks = args_handler.get_files_in_test_list(args["suite_name"], - args["type"].lower()) + benchmarks = args_handler.get_files_in_suite(args["suite_name"], + args["type"].lower()) test_cases = args_handler.get_files_in_test_case(args["pod_name"], args["suite_name"], args["type"].lower()) @@ -174,11 +175,14 @@ default is all benchmarks in suite with specified type, args["pod_name"], args["suite_name"], job_id, + args["testdb_url"], + args["node_name"], thread_stop)) db.start_thread(job_id, post_thread, thread_stop) return {'job_id': str(job_id)} - def thread_post(self, installer_type, benchmarks_list, pod_name, suite_name, job_id, stop_event): + def thread_post(self, installer_type, benchmarks_list, pod_name, suite_name, + job_id, testdb_url, node_name, stop_event): for benchmark in benchmarks_list: if db.is_job_timeout(job_id) or stop_event.is_set(): break @@ -190,6 +194,8 @@ default is all benchmarks in suite with specified type, benchmark)) db.update_job_result_detail(job_id, benchmark, copy(result)) db.update_benchmark_state(job_id, benchmark, 'finished') + if (result_handler.dump_suite_result(suite_name) and testdb_url): + result_handler.push_suite_result_to_db(suite_name, testdb_url, installer_type, node_name) db.finish_job(job_id) diff --git a/restful_server/result_handler.py b/restful_server/result_handler.py index 059a2510..200330cb 100644 --- a/restful_server/result_handler.py +++ b/restful_server/result_handler.py @@ -7,7 +7,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import json -import data.ref_results.suite_result as suite_result +import scripts.ref_results.suite_result as suite_result import dashboard.pushtoDB as push_to_db diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/__init__.py diff --git a/scripts/cleanup_creds.sh b/scripts/cleanup_creds.sh new file mode 
100644 index 00000000..9bf44305 --- /dev/null +++ b/scripts/cleanup_creds.sh @@ -0,0 +1,14 @@ +#! /bin/bash + +DEST_IP=$1 +HOSTNAME=$(hostname) +sshoptions="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + +case "$INSTALLER_TYPE" in + fuel) + ssh $sshoptions -i ./config/QtipKey root@$DEST_IP "sed -i '/root@$HOSTNAME/d' /root/.ssh/authorized_keys" + ;; +esac + + + diff --git a/scripts/fetch_os_creds.sh b/scripts/fetch_os_creds.sh deleted file mode 100755 index 3b493e14..00000000 --- a/scripts/fetch_os_creds.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/bin/bash -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# jose.lausuch@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - -usage() { - echo "usage: $0 -d <destination> -i <installer_type> -a <installer_ip>" >&2 -} - -info () { - logger -s -t "fetch_os_creds.info" "$*" -} - - -error () { - logger -s -t "fetch_os_creds.error" "$*" - exit 1 -} - - -verify_connectivity() { - local ip=$1 - info "Verifying connectivity to $ip..." - for i in $(seq 0 10); do - if ping -c 1 -W 1 $ip > /dev/null; then - info "$ip is reachable!" - return 0 - fi - sleep 1 - done - error "Can not talk to $ip." -} - - - -#Get options -while getopts ":d:i:a:h:" optchar; do - case "${optchar}" in - d) dest_path=${OPTARG} ;; - i) installer_type=${OPTARG} ;; - a) installer_ip=${OPTARG} ;; - *) echo "Non-option argument: '-${OPTARG}'" >&2 - usage - exit 2 - ;; - esac -done - -# set vars from env if not provided by user as options -dest_path=${dest_path:-$HOME/opnfv-openrc.sh} -installer_type=${installer_type:-$INSTALLER_TYPE} -installer_ip=${installer_ip:-$INSTALLER_IP} - -if [ -z $dest_path ] || [ -z $installer_type ] || [ -z $installer_ip ]; then - usage - exit 2 -fi - -# Checking if destination path is valid -if [ -d $dest_path ]; then - error "Please provide the full destination path for the credentials file including the filename" -else - # Check if we can create the file (e.g. path is correct) - touch $dest_path || error "Cannot create the file specified. Check that the path is correct and run the script again." -fi - - -ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - -# Start fetching the files -if [ "$installer_type" == "fuel" ]; then - #ip_fuel="10.20.0.2" - verify_connectivity $installer_ip - - # Check if controller is alive (online='True') - controller_ip=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \ - 'fuel node | grep controller | grep True | awk "{print \$10}" | tail -1') &> /dev/null - - if [ -z $controller_ip ]; then - error "The controller $controller_ip is not up. Please check that the POD is correctly deployed." - fi - - info "Fetching rc file from controller $controller_ip..." - sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \ - "scp $ssh_options ${controller_ip}:/root/openrc ." 
&> /dev/null - sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:~/openrc $dest_path &> /dev/null - - #This file contains the mgmt keystone API, we need the public one for our rc file - admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//') - public_ip=$(sshpass -p r00tme ssh $ssh_options root@${installer_ip} \ - "ssh ${controller_ip} 'source openrc; keystone endpoint-list'" \ - | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null - #| grep http | head -1 | cut -d '|' -f 4 | sed 's/v1\/.*/v1\//' | sed 's/ //g') &> /dev/null - #NOTE: this is super ugly sed 's/v1\/.*/v1\//'OS_AUTH_URL - # but sometimes the output of endpoint-list is like this: http://172.30.9.70:8004/v1/%(tenant_id)s - -elif [ "$installer_type" == "apex" ]; then - verify_connectivity $installer_ip - - # The credentials file is located in the Instack VM (192.0.2.1) - # NOTE: This might change for bare metal deployments - info "Fetching rc file from Instack VM $installer_ip..." - if [ -f /root/.ssh/id_rsa ]; then - chmod 600 /root/.ssh/id_rsa - fi - sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc $dest_path - -elif [ "$installer_type" == "compass" ]; then - verify_connectivity $installer_ip - controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \ - 'mysql -ucompass -pcompass -Dcompass -e"select * from cluster;"' \ - | awk -F"," '{for(i=1;i<NF;i++)if($i~/\"host1\"/) {print $(i+1);break;}}' \ - | grep -oP "\d+.\d+.\d+.\d+") - - if [ -z $controller_ip ]; then - error "The controller $controller_ip is not up. Please check that the POD is correctly deployed." - fi - - info "Fetching rc file from controller $controller_ip..." - sshpass -p root ssh 2>/dev/null $ssh_options root@${installer_ip} \ - "scp $ssh_options ${controller_ip}:/opt/admin-openrc.sh ." &> /dev/null - sshpass -p root scp 2>/dev/null $ssh_options root@${installer_ip}:~/admin-openrc.sh $dest_path &> /dev/null - echo 'export OS_REGION_NAME=regionOne' >> $dest_path - - info "This file contains the mgmt keystone API, we need the public one for our rc file" - admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//') - info "admin_ip: $admin_ip" - public_ip=$(sshpass -p root ssh $ssh_options root@${installer_ip} \ - "ssh ${controller_ip} 'source /opt/admin-openrc.sh; keystone endpoint-list'" \ - | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) - info "public_ip: $public_ip" - - -elif [ "$installer_type" == "joid" ]; then - # do nothing...for the moment - # we can either do a scp from the jumphost or use the -v option to transmit the param to the docker file - echo "Do nothing, creds will be provided through volume option at docker creation for joid" - -elif [ "$installer_type" == "foreman" ]; then - #ip_foreman="172.30.10.73" - controller="oscontroller1.opnfv.com" - verify_connectivity $installer_ip - - # Check if controller is alive (here is more difficult to get the ip from a command like "fuel node") - sshpass -p vagrant ssh $ssh_options root@${installer_ip} \ - "sshpass -p Op3nStack ssh $ssh_options root@${controller} 'ls'" &> /dev/null - if [ $? -ne 0 ]; then - error "The controller ${controller} is not up. Please check that the POD is correctly deployed." - fi - - info "Fetching openrc from a Foreman Controller '${controller}'..." 
- sshpass -p vagrant ssh $ssh_options root@${installer_ip} \ - "sshpass -p Op3nStack scp $ssh_options root@${controller}:~/keystonerc_admin ." &> /dev/null - sshpass -p vagrant scp $ssh_options root@${installer_ip}:~/keystonerc_admin $dest_path &> /dev/null - - #This file contains the mgmt keystone API, we need the public one for our rc file - admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//') - public_ip=$(sshpass -p vagrant ssh $ssh_options root@${installer_ip} \ - "sshpass -p Op3nStack ssh $ssh_options root@${controller} \ - 'source keystonerc_admin;keystone endpoint-list'" \ - | grep $admin_ip | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null - -else - error "Installer $installer is not supported by this script" -fi - - -if [ ! -f $dest_path ]; then - error "There has been an error retrieving the credentials" -fi - -if [ "$public_ip" != "" ]; then - info "Exchanging keystone public IP in rc file to $public_ip" - sed -i "/OS_AUTH_URL/c\export OS_AUTH_URL=\'$public_ip'" $dest_path -fi - - - -echo "-------- Credentials: --------" -cat $dest_path - -exit 0 diff --git a/scripts/file_permission.sh b/scripts/file_permission.sh deleted file mode 100755 index a8af957e..00000000 --- a/scripts/file_permission.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash -chmod 0600 config/QtipKey -chmod 0600 config/QtipKey.pub diff --git a/scripts/get_env_info.sh b/scripts/get_env_info.sh index 4b362fac..cd49ac87 100755 --- a/scripts/get_env_info.sh +++ b/scripts/get_env_info.sh @@ -34,4 +34,4 @@ if [ $INSTALLER_TYPE == "apex" ] fi -./scripts/fetch_os_creds.sh -d ./opnfv-creds.sh +${REPOS_DIR}/releng/utils/fetch_os_creds.sh -d ${QTIP_DIR}/opnfv-creds.sh diff --git a/scripts/qtip_creds.sh b/scripts/qtip_creds.sh index 94d9133c..af051ac5 100755 --- a/scripts/qtip_creds.sh +++ b/scripts/qtip_creds.sh @@ -1,4 +1,4 @@ -! /bin/bash +#! 
/bin/bash DEST_IP=$1 echo $INSTALLER_TYPE diff --git a/scripts/ref_results/suite_result.py b/scripts/ref_results/suite_result.py index d0b4647f..4d9eae08 100644 --- a/scripts/ref_results/suite_result.py +++ b/scripts/ref_results/suite_result.py @@ -15,7 +15,8 @@ logger = logger_utils.QtipLogger('suite_result').get def get_benchmark_result(benchmark_name, suite_name): - benchmark_indices = importlib.import_module('{0}_benchmarks_indices'.format(suite_name)) + benchmark_indices = importlib.import_module('scripts.ref_results' + '.{0}_benchmarks_indices'.format(suite_name)) methodToCall = getattr(benchmark_indices, '{0}_index'.format(benchmark_name)) return methodToCall() diff --git a/supporting/servers/roles/elk/tasks/main.yml b/supporting/servers/roles/elk/tasks/main.yml index 7628f378..cc544be5 100644 --- a/supporting/servers/roles/elk/tasks/main.yml +++ b/supporting/servers/roles/elk/tasks/main.yml @@ -26,6 +26,18 @@ become: true apt: name=docker-engine +- name: install pip + apt: + pkg: "{{ item }}" + state: installed + with_items: + - python-dev + - python-pip + +- name: install docker-py + pip: + name: docker-py + - name: pulling elasticsearch and kibana become: true docker_image: name={{ item }} state=present diff --git a/supporting/servers/roles/ssh/defaults/main.yml b/supporting/servers/roles/ssh/defaults/main.yml index 59dfd086..41ee9853 100644 --- a/supporting/servers/roles/ssh/defaults/main.yml +++ b/supporting/servers/roles/ssh/defaults/main.yml @@ -3,3 +3,4 @@ users: - { name: yujunz, comment: "Yujun Zhang <zhang.yujunz@zte.com.cn>" } - { name: taseer, comment: "Taseer Ahmed <taseer94@gmail.com>" } - { name: serena, comment: "Serena Feng <feng.xiaowei@zte.com.cn>" } + - { name: zhifeng, comment: "Zhifeng Jiang <jiang.zhifeng@zte.com.cn>" } diff --git a/supporting/servers/roles/ssh/files/zhifeng.authorized_keys b/supporting/servers/roles/ssh/files/zhifeng.authorized_keys new file mode 100644 index 00000000..195cfdca --- /dev/null +++ b/supporting/servers/roles/ssh/files/zhifeng.authorized_keys @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuck9a5uUXLtzlTaCYhwcLDffFH8o5ldhU4iKr0D4KaXlFfUsKD7VyHN+Zck3HBWTB4U7X9FEeFINtp2v2aoY8n74TS4LUGT8yqRYLyvsh2LgrhE4ouRvYgWlrZGice2x6ZZrcGM4uoGTC/lUHEvMDGDkDxUCfhxlFWcrplCUMcgd1V/5U14s0ufDgLGyEhXWWXFW4pNoqKBEGZNChBSvnq+NvOD7I4jgStUm9REooOp/VPpubH/6mSvDSTokCvrgWSCaNpcDqLCUjhwpoT/D1oFzEd4jBfPSV3jva+eAKPE2r/dnudQR5NR8T/eUz25YRGeJfrjDP6cMgXJoG43IXQ== root@fuel.domain.tld diff --git a/tests/cli_test.py b/tests/cli_test.py index 43a11089..e47d99ad 100644 --- a/tests/cli_test.py +++ b/tests/cli_test.py @@ -13,7 +13,7 @@ class TestClass: (['-l', 'default', '-f', - 'test'], "Test File Does not exist in test_list") + 'test'], "This suite file doesn't exist under benchmarks/suite/") ]) def test_cli_error(self, capfd, test_input, expected): k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel', 'PWD': '/home'}) diff --git a/tests/spawn_vm_test.py b/tests/spawn_vm_test.py index 7890abd1..fca7dd00 100644 --- a/tests/spawn_vm_test.py +++ b/tests/spawn_vm_test.py @@ -17,9 +17,7 @@ class StackMock(MagicMock): {'output_key': 'instance_ip_1', "output_value": "172.10.0.154"}, {"output_key": "instance_PIP_1", - "output_value": "10.10.17.5"}, - {'output_key': 'KeyPair_PublicKey', - "output_value": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpwIBAAKCAQEAqCiHcrLBXtxG0LhnKndU7VIVpYxORmv0d4tvujkWOkYuagiW\nU/MTRk0zhRvFQDVPEs0Jrj/BIecqm6fjjT6dZ/H7JLYGaqJitRkoupKgBsMSIqUz\nrR0ekOlfXZ6N+Ud8k6s+qjc7BO4b1ezz78jHisC5o0GCkUV0ECx64Re1fO+oKs1c\nfL9aaexahJUYN3J48pazQz+imc2x/G9nuqHX3cqEszmxnT4jwv//In1GjHy2AyXw\n1oA5F6wZoQCSrXc2BditU+1tlVhEkPFt5JgiHUpY8T8mYbroT7JH6xjcGSKUN+HG\nN8PXNUTD1VAQfwHpkfsGMfDyzjytCXsoTEOqnwIDAQABAoIBAAEL/4vfQQTuKiKy\ngzHofEbd8/SL4xDdKzBzVca7BEBon3FZjFYJdV1CrcduXNQBgPSFAkJrczBa2BEQ\nAoKmmSREhWO9Hl0blbG67l36+7QPEtXUYXX6cG5Ghal3izq6DzR8JG+62Es3kETM\nrNgZT+S1PnKdvcpZvFc9b6ZnF2InuTbrmNVBZKrhdWOJ5tCwRGKKUl6BHoJH3yu0\nT5hUW277e1LYHx+hZtoZ98ToC+LGe6/M8a8y6VLYpcQlX2AtVXeGDalomunF+p3f\nuY6din6s4lq1gSJz03PTpUbwiuhYCTe8Xkseu74Y+XYYJXPHopFju0Ewd6p0Db9Q\nJzzxCoECggCBAM2ox9zyrDc/Vlc0bb9SciFGUd/nEJF89+UHy98bAkpo22zNZIDg\nfacSgkg/6faZD+KrOU0I5W7m2B5t6w2fNHHik6NYGSLQ1JhgbXELGV7X/qECDL02\nctPaf+8o+dYoZja2LdJNASq2nmEmPI3LSHhzAt4dWY4W+geXiHt4iWVHAoIAgQDR\nUdN09xv4U+stWqNcSfgjtx6boEUE8Ky7pyj+LrZKG0L61Jy9cSDP0x0rCtkW9vVR\n6RjidWM/DHQ5cl6aq+7pPy20/OqtqttFYT4R+C3AoAnRSaNzPD9a80C2gjv7WEz0\nPPFstWkI1gsN71KKRx7e6NIa9CNn5x9iE+SGfjgb6QKCAIBXylzG7LCnRNpOj4rp\nyP//RE1fDvv7nyUTF6jnrFfl+6zvXR4yBaKd10DWJrJxGhW15PGo+Ms39EL9el6E\nihmRI+9yIwFX411dToxpXRuPaRTBFmbpvnx2Ayfpp8w+pzA62rnktApzeVFSl0fy\nH3zoLfBjcJPyG8zPwNf6HRJJsQKCAIAE2S5asTaWo+r4m/bYtmXm/eDZnfa7TI/T\nsOWELbTPNp5wjOgsgyhNaAhu7MtmesXn5cxLwohP94vhoMKMNptMD8iRPqJ471Iw\n4zW62NLGeW6AyIHes3CMPMIs+AtHoR33MkotSG5sY/jRk8+HoGoYo6/qK+l+CJ5z\neR579wR5sQKCAIAvPWq+bvcPTDKUU1Fe/Y/GyWoUA+uSqmCdORBkK38lALFGphxj\nfDz9dXskimqW+A9hOPOS8dm8YcVvi/TLXVE5Vsx9VkOg6z6AZBQpgNXGfOgpju4W\nbjER7bQaASatuWQyCxbA9oNlAUdSeOhGTxeFLkLj7hNMd6tLjfd8w7A/hA==\n-----END RSA PRIVATE KEY-----\n"}] + "output_value": "10.10.17.5"}] class HeatMock(MagicMock): @@ -44,16 +42,15 @@ class TestClass: ]) @mock.patch('func.spawn_vm.Env_setup') @mock.patch('func.spawn_vm.AvailabilityZone') - @mock.patch('func.spawn_vm.client', autospec=True) @mock.patch('func.spawn_vm.keystoneclient.v2_0', autospec=True) @mock.patch('func.spawn_vm.heatclient.client', autospec=True) def test_create_zones_success(self, mock_heat, mock_keystone, - mock_nova_client, mock_zone, - mock_setup, test_input, expected): - mock_nova_client.Client.return_value = Mock() + mock_zone, mock_setup, test_input, expected): + open('./config/QtipKey.pub', 'a').close() mock_heat.Client.return_value = Mock(stacks=HeatMock()) k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel'}) k.start() SpawnVM(test_input) k.stop() + os.remove('./config/QtipKey.pub') mock_setup.ip_pw_list.append.assert_called_with(expected[0]) diff --git a/utils/transform/__init__.py b/utils/transform/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/utils/transform/__init__.py diff --git a/utils/transform/dpi_transform.py b/utils/transform/dpi_transform.py new file mode 100644 index 00000000..ee29d8e2 --- /dev/null +++ b/utils/transform/dpi_transform.py @@ -0,0 +1,47 @@ +import os +import pickle +import datetime + +sum_dpi_pps = float(0) +sum_dpi_bps = float(0) + +for x in range(1, 11): + dpi_result_pps = float( + os.popen( + "cat $HOME/qtip_result/dpi_dump.txt | grep 'nDPI throughput:' | awk 'NR=='" + + str(x) + + " | awk '{print $3}'").read().lstrip()) + dpi_result_bps = float( + os.popen( + "cat $HOME/qtip_result/dpi_dump.txt | grep 'nDPI throughput:' | awk 'NR=='" + + str(x) + + " | awk '{print $7}'").read().rstrip()) + + if (dpi_result_pps > 100): + dpi_result_pps = dpi_result_pps / 1000 + + if (dpi_result_bps > 100): + dpi_result_bps = dpi_result_bps / 1000 + + 
sum_dpi_pps += dpi_result_pps + sum_dpi_bps += dpi_result_bps + +dpi_result_pps = sum_dpi_pps / 10 +dpi_result_bps = sum_dpi_bps / 10 + +host = os.popen("hostname").read().rstrip() +log_time_stamp = str(datetime.datetime.utcnow().isoformat()) + +os.popen( + "cat $HOME/qtip_result/dpi_dump.txt > $HOME/qtip_result/" + + host + + "-" + + log_time_stamp + + ".log") + +home_dir = str(os.popen("echo $HOME").read().rstrip()) +host = os.popen("echo $HOSTNAME") +result = {'pps': round(dpi_result_pps, 3), + 'bps': round(dpi_result_bps, 3)} +with open('./result_temp', 'w+') as result_file: + pickle.dump(result, result_file) diff --git a/utils/transform/final_report.py b/utils/transform/final_report.py new file mode 100644 index 00000000..274742d4 --- /dev/null +++ b/utils/transform/final_report.py @@ -0,0 +1,24 @@ +import pickle +import json +import datetime +import os +import sys + +home_dir = str((os.popen("echo $HOME").read().rstrip())) + +with open('./sys_info_temp', 'r') as sys_info_f: + sys_info_dict = pickle.load(sys_info_f) +with open('./result_temp', 'r') as result_f: + result_dict = pickle.load(result_f) + +host_name = (os.popen("hostname").read().rstrip()) +benchmark_name = str(sys.argv[1]) +testcase_name = str(sys.argv[2]) +report_time_stamp = str(datetime.datetime.utcnow().isoformat()) +final_dict = {"name": testcase_name, + "time": report_time_stamp, + "system_information": sys_info_dict, + "details": result_dict} + +with open('./' + host_name + '-' + report_time_stamp + '.json', 'w+') as result_json: + json.dump(final_dict, result_json, indent=4, sort_keys=True) diff --git a/utils/transform/fio_transform.py b/utils/transform/fio_transform.py new file mode 100755 index 00000000..5ecac823 --- /dev/null +++ b/utils/transform/fio_transform.py @@ -0,0 +1,29 @@ +import json +import pickle +import os +import datetime + + +def get_fio_job_result(fio_job_data): + return {'read': {'io_bytes': fio_job_data["read"]["io_bytes"], + 'io_ps': fio_job_data["read"]["iops"], + 'io_runtime_millisec': fio_job_data["read"]["runtime"], + 'mean_io_latenchy_microsec': fio_job_data["read"]["lat"]["mean"]}, + 'write': {'io_bytes': fio_job_data["write"]["io_bytes"], + 'io_ps': fio_job_data["write"]["iops"], + 'io_runtime_millisec': fio_job_data["write"]["runtime"], + 'mean_io_latenchy_microsec': fio_job_data["write"]["lat"]["mean"]}} + + +with open("fio_result.json") as fio_raw: + fio_data = json.load(fio_raw) + +fio_result_dict = {} +for x, result in enumerate(map(get_fio_job_result, fio_data["jobs"])): + fio_result_dict['job_{0}'.format(x)] = result + +host_name = (os.popen("hostname").read().rstrip()) +report_time = str(datetime.datetime.utcnow().isoformat()) +os.system("mv fio_result.json " + str(host_name) + "-" + report_time + ".log") +with open('./result_temp', 'w+') as out_fio_result: + pickle.dump(fio_result_dict, out_fio_result) diff --git a/utils/transform/iperf_transform.py b/utils/transform/iperf_transform.py new file mode 100644 index 00000000..b52e4634 --- /dev/null +++ b/utils/transform/iperf_transform.py @@ -0,0 +1,27 @@ +import json
+import datetime
+import pickle
+with open('iperf_raw.json', 'r') as ifile:
+ raw_iperf_data = json.loads(ifile.read().rstrip())
+
+bits_sent = raw_iperf_data['end']['sum_sent']['bits_per_second']
+bits_received = raw_iperf_data['end']['sum_received']['bits_per_second']
+total_byte_sent = raw_iperf_data['end']['sum_sent']['bytes']
+total_byte_received = raw_iperf_data['end']['sum_received']['bytes']
+cpu_host_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['host_total']
+cpu_remote_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
+
+time_stamp = str(datetime.datetime.utcnow().isoformat())
+
+result = {'version': raw_iperf_data['start']['version'],
+ 'bandwidth': {'sender_throughput': bits_sent,
+ 'received_throughput': bits_received},
+ 'cpu': {'cpu_host': cpu_host_total_percent,
+ 'cpu_remote': cpu_remote_total_percent}
+ }
+
+with open('iperf_raw-' + time_stamp + '.log', 'w+') as ofile:
+ ofile.write(json.dumps(raw_iperf_data))
+
+with open('./result_temp', 'w+') as result_file:
+ pickle.dump(result, result_file)
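The keys read above ('end', 'sum_sent', 'sum_received', 'cpu_utilization_percent') come from iperf3's JSON output. A hedged sketch of how the iperf_raw.json consumed by this transform could be produced (the server address 10.20.0.5 is a placeholder; iperf3 must be installed on both endpoints)
::

    import os

    # -J makes iperf3 emit the JSON document parsed by iperf_transform.py.
    os.system("iperf3 -c 10.20.0.5 -t 30 -J > iperf_raw.json")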
diff --git a/utils/transform/ramspeed_transform.py b/utils/transform/ramspeed_transform.py new file mode 100644 index 00000000..960f84fc --- /dev/null +++ b/utils/transform/ramspeed_transform.py @@ -0,0 +1,41 @@ +import os +import pickle +import datetime + +intmem_copy = os.popen("cat Intmem | grep 'BatchRun Copy' | awk '{print $4}'").read().rstrip() +intmem_scale = os.popen("cat Intmem | grep 'BatchRun Scale' | awk '{print $4}'").read().rstrip() +intmem_add = os.popen("cat Intmem | grep 'BatchRun Add' | awk '{print $4}'").read().rstrip() +intmem_triad = os.popen("cat Intmem | grep 'BatchRun Triad' | awk '{print $4}'").read().rstrip() +intmem_average = os.popen("cat Intmem | grep 'BatchRun AVERAGE' | awk '{print $4}'").read().rstrip() + +print intmem_copy +print intmem_average + +floatmem_copy = os.popen("cat Floatmem | grep 'BatchRun Copy' | awk '{print $4}'").read().rstrip() +floatmem_scale = os.popen("cat Floatmem | grep 'BatchRun Scale' | awk '{print $4}'").read().rstrip() +floatmem_add = os.popen("cat Floatmem | grep 'BatchRun Add' | awk '{print $4}'").read().rstrip() +floatmem_triad = os.popen("cat Floatmem | grep 'BatchRun Triad' | awk '{print $4}'").read().rstrip() +floatmem_average = os.popen("cat Floatmem | grep 'BatchRun AVERAGE' | awk '{print $4}'").read().rstrip() + +print floatmem_copy +print floatmem_average + +hostname = os.popen("hostname").read().rstrip() +time_stamp = str(datetime.datetime.utcnow().isoformat()) + +os.system("mv Intmem " + hostname + "-" + time_stamp + ".log") +os.system("cat Floatmem >> " + hostname + "-" + time_stamp + ".log") + +result = {"int_bandwidth": {"copy": intmem_copy, + "add": intmem_add, + "scale": intmem_scale, + "triad": intmem_triad, + "average": intmem_average}, + "float_bandwidth": {"copy": floatmem_copy, + "add": floatmem_add, + "scale": floatmem_scale, + "triad": floatmem_triad, + "average": floatmem_average}} + +with open('./result_temp', 'w+') as result_file: + pickle.dump(result, result_file) diff --git a/utils/transform/ssl_transform.py b/utils/transform/ssl_transform.py new file mode 100644 index 00000000..de84d24b --- /dev/null +++ b/utils/transform/ssl_transform.py @@ -0,0 +1,54 @@ +import os +import pickle +import datetime + +openssl_version = os.popen("cat RSA_dump | head -1").read().rstrip() +rsa_512_sps = os.popen( + "cat RSA_dump | grep '512 bits ' | awk '{print $6}' ").read().rstrip() +rsa_512_vps = os.popen( + "cat RSA_dump | grep '512 bits ' | awk '{print $7}' ").read().rstrip() +rsa_1024_sps = os.popen( + "cat RSA_dump | grep '1024 bits ' | awk '{print $6}' ").read().rstrip() +rsa_1024_vps = os.popen( + "cat RSA_dump | grep '1024 bits ' | awk '{print $7}' ").read().rstrip() +rsa_2048_sps = os.popen( + "cat RSA_dump | grep '2048 bits ' | awk '{print $6}' ").read().rstrip() +rsa_2048_vps = os.popen( + "cat RSA_dump | grep '2048 bits ' | awk '{print $7}' ").read().rstrip() +rsa_4096_sps = os.popen( + "cat RSA_dump | grep '4096 bits ' | awk '{print $6}' ").read().rstrip() +rsa_4096_vps = os.popen( + "cat RSA_dump | grep '4096 bits ' | awk '{print $7}' ").read().rstrip() + +aes_16B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $2}' ").read().rstrip() +aes_64B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $3}' ").read().rstrip() +aes_256B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $4}' ").read().rstrip() +aes_1024B = os.popen( + "cat AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $5}' ").read().rstrip() +aes_8192B = os.popen( + "cat 
AES-128-CBC_dump | grep 'aes-128-cbc ' | awk '{print $6}' ").read().rstrip() + +hostname = os.popen("hostname").read().rstrip() +time_stamp = str(datetime.datetime.utcnow().isoformat()) + +os.system("mv RSA_dump " + hostname + "-" + time_stamp + ".log") +os.system("cat AES-128-CBC_dump >> " + hostname + "-" + time_stamp + ".log") + +result = {"version": [openssl_version], + "rsa_sig": {"512_bits": rsa_512_sps, + "1024_bits": rsa_1024_sps, + "2048_bits": rsa_2048_sps, + "4096_bits": rsa_4096_sps, + "unit": "sig/sec"}, + "aes_128_cbc": {"16B_block": aes_16B, + "64B_block": aes_64B, + "256B_block": aes_256B, + "1024B_block": aes_1024B, + "8192B_block": aes_8192B, + "unit": "B/sec"}} + +with open('./result_temp', 'w+') as result_file: + pickle.dump(result, result_file) diff --git a/utils/transform/ubench_transform.py b/utils/transform/ubench_transform.py new file mode 100644 index 00000000..ab5fe171 --- /dev/null +++ b/utils/transform/ubench_transform.py @@ -0,0 +1,32 @@ +import os +import json +import pickle + +total_cpu = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $1;}' | awk 'NR==1'").read().rstrip() + +cpu_1 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $6;}' | awk 'NR==1'").read().rstrip() + + +cpu_2 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'of tests' | awk '{print $6;}' | awk 'NR==2'").read().rstrip() + + +index_1 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'Index Score (Partial Only) ' | awk '{print $7;}' | awk 'NR==1'").read().rstrip() +index_2 = os.popen( + "cat $HOME/tempT/UnixBench/results/* | grep 'Index Score (Partial Only) ' | awk '{print $7;}' | awk 'NR==2'").read().rstrip() + + +result = {"n_cpu": total_cpu, + "single": {"n_para_test": cpu_1, + "score": index_1}, + "multi": {"n_para_test": cpu_2, + "score": index_2} + } + +with open('result_temp', 'w+') as result_file: + pickle.dump(result, result_file) +print json.dumps(result, indent=4, sort_keys=True) +# print result.items()
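All of the new utils/transform scripts share the same handoff convention: parse the raw benchmark dump, pickle a metrics dict to ./result_temp, and let final_report.py merge it with ./sys_info_temp into a timestamped JSON report. A minimal sketch of that handoff (the metrics dict is illustrative, not real benchmark output)
::

    import json
    import pickle

    # Stage 1: a transform script pickles whatever it parsed.
    with open('./result_temp', 'w+') as result_file:
        pickle.dump({'score': '1234.5'}, result_file)

    # Stage 2: final_report.py-style consumption of the intermediate file.
    with open('./result_temp', 'r') as result_file:
        details = pickle.load(result_file)
    print json.dumps({'details': details}, indent=4, sort_keys=True)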