-rw-r--r--  .gitignore | 3
-rw-r--r--  INFO | 1
-rw-r--r--  INFO.yaml | 65
-rwxr-xr-x  ci/create_glance_image.sh | 15
-rwxr-xr-x  ci/create_storperf_flavor.sh | 2
-rwxr-xr-x  ci/daily.sh | 17
-rwxr-xr-x  ci/generate-admin-rc.sh | 19
-rwxr-xr-x  ci/verify-build.sh | 2
-rwxr-xr-x  ci/verify.sh | 19
-rw-r--r--  cli.py | 186
-rw-r--r--  docker/local-docker-compose.yaml | 3
-rw-r--r--  docker/storperf-graphite/Dockerfile | 2
-rw-r--r--  docker/storperf-httpfrontend/Dockerfile | 96
-rw-r--r--  docker/storperf-httpfrontend/html/3rd_party/css/bootstrap.min.css (renamed from docker/storperf-httpfrontend/html/css/bootstrap.min.css) | 0
-rw-r--r--  docker/storperf-httpfrontend/html/index.html | 4
-rw-r--r--  docker/storperf-master/Dockerfile | 28
-rw-r--r--  docker/storperf-master/requirements.pip | 17
-rw-r--r--  docker/storperf-master/rest_server.py | 393
-rw-r--r--  docker/storperf-master/storperf/carbon/converter.py | 10
-rw-r--r--  docker/storperf-master/storperf/carbon/emitter.py | 6
-rw-r--r--  docker/storperf-master/storperf/db/configuration_db.py | 120
-rw-r--r--  docker/storperf-master/storperf/db/graphite_db.py | 2
-rw-r--r--  docker/storperf-master/storperf/db/job_db.py | 4
-rw-r--r--  docker/storperf-master/storperf/fio/fio_invoker.py | 45
-rw-r--r--  docker/storperf-master/storperf/resources/hot/agent-group.yaml | 28
-rw-r--r--  docker/storperf-master/storperf/resources/hot/storperf-agent.yaml | 29
-rw-r--r--  docker/storperf-master/storperf/resources/hot/storperf-volume.yaml | 57
-rw-r--r--  docker/storperf-master/storperf/storperf_master.py | 612
-rw-r--r--  docker/storperf-master/storperf/test_executor.py | 282
-rw-r--r--  docker/storperf-master/storperf/utilities/data_handler.py | 22
-rw-r--r--  docker/storperf-master/storperf/utilities/ip_helper.py | 27
-rw-r--r--  docker/storperf-master/storperf/utilities/math.py | 67
-rw-r--r--  docker/storperf-master/storperf/utilities/steady_state.py | 15
-rw-r--r--  docker/storperf-master/storperf/workloads/_base_workload.py | 33
-rw-r--r--  docker/storperf-master/storperf/workloads/_custom_workload.py | 36
-rw-r--r--  docker/storperf-master/storperf/workloads/_ssd_preconditioning.py | 17
-rw-r--r--  docker/storperf-master/tests/carbon_tests/emitter_test.py | 21
-rw-r--r--  docker/storperf-master/tests/carbon_tests/json_to_carbon_test.py | 4
-rw-r--r--  docker/storperf-master/tests/db_tests/configuration_db_test.py | 71
-rw-r--r--  docker/storperf-master/tests/db_tests/graphite_db_test.py | 3
-rw-r--r--  docker/storperf-master/tests/db_tests/job_db_test.py | 3
-rw-r--r--  docker/storperf-master/tests/fio_tests/fio_invoker_test.py | 14
-rw-r--r--  docker/storperf-master/tests/storperf_master_test.py | 37
-rw-r--r--  docker/storperf-master/tests/utilities_tests/data_handler_test.py | 20
-rw-r--r--  docker/storperf-master/tests/utilities_tests/ip_helper_test.py | 39
-rw-r--r--  docker/storperf-master/tests/utilities_tests/math_range_test.py | 12
-rw-r--r--  docker/storperf-master/tests/utilities_tests/math_slope_series_test.py | 48
-rw-r--r--  docker/storperf-master/tests/utilities_tests/math_slope_test.py | 5
-rw-r--r--  docker/storperf-master/tests/workload_tests/workload_subclass_test.py | 20
-rw-r--r--  docker/storperf-reporting/Dockerfile | 12
-rw-r--r--  docker/storperf-reporting/src/app.py | 8
-rw-r--r--  docker/storperf-reporting/src/static/3rd_party/css/bootstrap.min.css (renamed from docker/storperf-reporting/src/static/css/bootstrap.min.css) | 0
-rw-r--r--  docker/storperf-reporting/src/static/3rd_party/js/Chart.min.js (renamed from docker/storperf-reporting/src/static/js/Chart.min.js) | 0
-rw-r--r--  docker/storperf-reporting/src/static/3rd_party/js/bootstrap.min.js (renamed from docker/storperf-reporting/src/static/js/bootstrap.min.js) | 0
-rw-r--r--  docker/storperf-reporting/src/static/3rd_party/js/jquery-2.1.3.min.js (renamed from docker/storperf-reporting/src/static/js/jquery-2.1.3.min.js) | 0
-rw-r--r--  docker/storperf-reporting/src/static/3rd_party/js/jquery.bootpag.min.js (renamed from docker/storperf-reporting/src/static/js/jquery.bootpag.min.js) | 0
-rw-r--r--  docker/storperf-reporting/src/static/3rd_party/js/plotly-latest.min.js (renamed from docker/storperf-reporting/src/static/js/plotly-latest.min.js) | 0
-rw-r--r--  docker/storperf-reporting/src/templates/index.html | 10
-rw-r--r--  docker/storperf-reporting/src/templates/plot_jobs.html | 22
-rw-r--r--  docker/storperf-reporting/src/templates/plot_multi_data.html | 12
-rw-r--r--  docker/storperf-reporting/src/templates/plot_tables.html | 24
-rw-r--r--  docker/storperf-swaggerui/Dockerfile | 78
-rw-r--r--  docker/storperf-workloadagent/Dockerfile | 37
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/index.rst | 6
-rw-r--r--  docs/release/release-notes/index.rst | 8
-rw-r--r--  docs/release/release-notes/release-notes.rst | 83
-rw-r--r--  docs/requirements.txt | 2
-rw-r--r--  docs/testing/developer/devguide/gerrit.rst (renamed from docs/dev/gerrit.rst) | 0
-rw-r--r--  docs/testing/developer/devguide/ide.rst (renamed from docs/dev/ide.rst) | 0
-rw-r--r--  docs/testing/developer/devguide/index.rst (renamed from docs/dev/index.rst) | 0
-rw-r--r--  docs/testing/developer/devguide/initial.rst (renamed from docs/dev/initial.rst) | 0
-rw-r--r--  docs/testing/developer/devguide/unit_tests.rst (renamed from docs/dev/unit_tests.rst) | 0
-rwxr-xr-x  docs/testing/user/installation.rst | 176
-rw-r--r--  docs/testing/user/introduction.rst | 229
-rw-r--r--  docs/testing/user/storperf-reporting.rst | 2
-rw-r--r--  docs/testing/user/test-usage.rst | 422
-rw-r--r--  tox.ini | 19
79 files changed, 2509 insertions(+), 1226 deletions(-)
diff --git a/.gitignore b/.gitignore
index 2dde665..9ea0deb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,5 +10,6 @@ cover
storperf.egg-info
*.db
ci/job
-docs_build
docs_output
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
index d87adb2..82418cb 100644
--- a/INFO
+++ b/INFO
@@ -13,6 +13,7 @@ Committers:
mark.beierl@emc.com
jose.lausuch@ericsson.com
taseer94@gmail.com
+shrenik.jain@research.iiit.ac.in
Link to TSC approval of the project:
http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-09-15-13.59.log.html
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..7426d67
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,65 @@
+---
+project: 'Storage Performance Benchmarking for NFVI (storperf)'
+project_creation_date: '2015-09-15'
+project_category: 'Integration and Testing'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_storperf_ptl
+ name: 'Mark Beierl'
+ email: 'mark.beierl@dell.com'
+ company: 'dell.com'
+ id: 'mbeierl'
+ timezone: 'EST'
+primary_contact: *opnfv_storperf_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/STORPERF'
+ key: 'STORPERF'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[storperf]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-storperf'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: https://wiki.opnfv.org/display/meetings/Storperf+Team+Weekly+Meeting
+ url: https://global.gotomeeting.com/join/819733085
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: '14:00 UTC'
+repositories:
+ - 'storperf'
+committers:
+ - <<: *opnfv_storperf_ptl
+ - name: 'Jose Lausuch'
+ email: 'jalausuch@suse.com'
+ company: 'suse.com'
+ id: 'jose.lausuch'
+ - name: 'Taseer Ahmed'
+ email: 'taseer94@gmail.com'
+ company: 'gmail.com'
+ id: 'linux_geek'
+ - name: 'Shrenik Jain'
+ email: 'shrenik.jain@research.iiit.ac.in'
+ company: 'research.iiit.ac.in'
+ id: 'shrenikjain38'
+tsc:
+ # yamllint disable rule:line-length
+ approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-09-15-13.59.log.html'
+ changes:
+ - type: 'promotion'
+ name: 'Jose Lausuch'
+ link: 'http://lists.opnfv.org/pipermail/opnfv-tech-discuss/2015-December/007109.html'
+ - type: 'removal'
+ name: 'Edgar Stpierre'
+ link: 'https://lists.opnfv.org/pipermail/opnfv-tsc/2017-April/003419.html'
+ - type: 'removal'
+ name: 'Ferenc Farkas'
+ link: 'https://lists.opnfv.org/pipermail/opnfv-tsc/2017-August/003680.html'
+ - type: 'promotion'
+ name: 'Taseer Ahmed'
+ link: 'https://gerrit.opnfv.org/gerrit/#/c/38979/'
+ # yamllint enable rule:line-length
diff --git a/ci/create_glance_image.sh b/ci/create_glance_image.sh
index 9181a05..bb2a869 100755
--- a/ci/create_glance_image.sh
+++ b/ci/create_glance_image.sh
@@ -13,7 +13,7 @@ mkdir -p job
ARCH="${ARCH:-$(uname -m)}"
-IMAGE_NAME="Ubuntu 17.04 ${ARCH}"
+IMAGE_NAME="Ubuntu 16.04 ${ARCH}"
echo "Checking for ${IMAGE_NAME} in Glance"
@@ -24,14 +24,14 @@ then
case "${ARCH}" in
aarch64)
- FILE=ubuntu-17.04-server-cloudimg-arm64.img
+ FILE=ubuntu-16.04-server-cloudimg-arm64-disk1.img
PROPERTIES="--property hw_firmware_type=uefi --property hw_video_model=vga"
;;
armhf)
- FILE=ubuntu-17.04-server-cloudimg-armhf.img
+ FILE=ubuntu-16.04-server-cloudimg-armhf-disk1.img
;;
x86_64)
- FILE=ubuntu-17.04-server-cloudimg-amd64.img
+ FILE=ubuntu-16.04-server-cloudimg-amd64-disk1.img
;;
*)
echo "Unsupported architecture: ${ARCH}"
@@ -39,7 +39,12 @@ then
;;
esac
- wget --continue -q "https://cloud-images.ubuntu.com/releases/17.04/release/${FILE}"
+ echo wget --continue -q "https://cloud-images.ubuntu.com/releases/16.04/release/${FILE}"
+ wget --continue -q "https://cloud-images.ubuntu.com/releases/16.04/release/${FILE}"
+ if [ ! -e "${FILE}" ] ; then
+ echo https://cloud-images.ubuntu.com/releases/16.04/release/${FILE} not found
+ exit 1
+ fi
openstack image create "${IMAGE_NAME}" --disk-format qcow2 --public \
${PROPERTIES} \
--container-format bare --file "${FILE}"
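The openstack CLI call above can also be expressed through the openstacksdk cloud layer; a minimal sketch, assuming openstacksdk is installed and the OS_* variables produced by generate-admin-rc.sh are exported (image name and file shown for the x86_64 case):

# Sketch only: openstacksdk equivalent of the "openstack image create"
# call above. conn.create_image uploads and registers the image;
# wait=True blocks until Glance reports it active.
import openstack

conn = openstack.connect()  # reads OS_AUTH_URL, OS_USERNAME, OS_PASSWORD...
conn.create_image(
    "Ubuntu 16.04 x86_64",
    filename="ubuntu-16.04-server-cloudimg-amd64-disk1.img",
    disk_format="qcow2",
    container_format="bare",
    wait=True,
)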
diff --git a/ci/create_storperf_flavor.sh b/ci/create_storperf_flavor.sh
index f25d56d..4f751f4 100755
--- a/ci/create_storperf_flavor.sh
+++ b/ci/create_storperf_flavor.sh
@@ -10,8 +10,6 @@
echo "Checking for StorPerf flavor"
-openstack flavor delete storperf
-
FLAVOUR=`openstack flavor list | grep "storperf"`
if [ -z "$FLAVOUR" ]
then
diff --git a/ci/daily.sh b/ci/daily.sh
index 0cafbe8..1e99709 100755
--- a/ci/daily.sh
+++ b/ci/daily.sh
@@ -23,17 +23,16 @@ fi
rm -rf "${WORKSPACE}/ci/job/releng"
git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng "${WORKSPACE}/ci/job/releng"
+rm -rf "${WORKSPACE}/ci/job/joid"
+git clone --depth 1 https://gerrit.opnfv.org/gerrit/joid "${WORKSPACE}/ci/job/joid"
-virtualenv "${WORKSPACE}/ci/job/storperf_daily_venv"
+python3 -m venv "${WORKSPACE}/ci/job/storperf_daily_venv"
# shellcheck source=/dev/null
source "${WORKSPACE}/ci/job/storperf_daily_venv/bin/activate"
-pip install --upgrade setuptools==33.1.1
-pip install cryptography==1.7.2
-pip install functools32==3.2.3.post2
-pip install pytz==2016.10
-pip install osc_lib==1.3.0
-pip install python-openstackclient==3.7.0
+python3 -m pip install --upgrade setuptools==40.5.0
+python3 -m pip install python-openstackclient==3.16.1
+python3 -m pip install python-heatclient==1.16.1
"${WORKSPACE}/ci/generate-admin-rc.sh"
@@ -90,14 +89,14 @@ JOB=$("${WORKSPACE}/ci/start_job.sh" \
curl -s -X GET "http://127.0.0.1:5000/api/v1.0/jobs?id=${JOB}&type=status" \
-o "${WORKSPACE}/ci/job/status.json"
-JOB_STATUS=$(grep -A2 $JOB "${WORKSPACE}/ci/job/status.json" | awk '/Status/ {print $2}' | cut -d\" -f2)
+JOB_STATUS=$(awk '/Status/ {print $2}' "${WORKSPACE}/ci/job/status.json" | cut -d\" -f2)
while [ "${JOB_STATUS}" != "Completed" ]
do
sleep 600
mv "${WORKSPACE}/ci/job/status.json" "${WORKSPACE}/ci/job/old-status.json"
curl -s -X GET "http://127.0.0.1:5000/api/v1.0/jobs?id=${JOB}&type=status" \
-o "${WORKSPACE}/ci/job/status.json"
- JOB_STATUS=$(grep -A2 $JOB "${WORKSPACE}/ci/job/status.json" | awk '/Status/ {print $2}' | cut -d\" -f2)
+ JOB_STATUS=$(awk '/Status/ {print $2}' "${WORKSPACE}/ci/job/status.json" | cut -d\" -f2)
if diff "${WORKSPACE}/ci/job/status.json" "${WORKSPACE}/ci/job/old-status.json" >/dev/null
then
cat "${WORKSPACE}/ci/job/status.json"
diff --git a/ci/generate-admin-rc.sh b/ci/generate-admin-rc.sh
index 6767fab..1a7ffed 100755
--- a/ci/generate-admin-rc.sh
+++ b/ci/generate-admin-rc.sh
@@ -17,16 +17,15 @@ CA_CERT=""
INSTALLER="$(./detect_installer.sh)"
case $INSTALLER in
joid)
- OS_AUTH_URL=http://$(juju status keystone | grep public | awk '{print $2}'):5000/v2.0
- OS_USERNAME=admin
- OS_PASSWORD=openstack
- cat << EOF > job/openstack.rc
-export OS_AUTH_URL=$OS_AUTH_URL
-export OS_USERNAME=$OS_USERNAME
-export OS_PASSWORD=$OS_PASSWORD
-export OS_TENANT_NAME=admin
-export OS_PROJECT_NAME=admin
-EOF
+ CUR_DIR="$(pwd)"
+ set -x
+ export JOB_DIR="${CUR_DIR}/job"
+ sed -i '/echo_info "Creating external network with neutron"/i \
+exit 0' job/joid/ci/openstack.sh
+ sed -i "s|~/joid_config/admin-openrc|${JOB_DIR}/openstack.rc|g" job/joid/ci/openstack.sh
+ cd job/joid/ci
+ ./openstack.sh
+ cd "${CUR_DIR}"
;;
fuel)
INSTALLER_IP=$(sudo virsh net-dumpxml mcpcontrol | grep 'cfg01' | cut -d"'" -f6)
diff --git a/ci/verify-build.sh b/ci/verify-build.sh
index c98fea4..5230cee 100755
--- a/ci/verify-build.sh
+++ b/ci/verify-build.sh
@@ -35,6 +35,8 @@ export ARCH=${ARCH}
echo Using $ARCH architecture
+export CURRENT_UID=$(id -u):$(id -g)
+
docker-compose -f local-docker-compose.yaml down
docker-compose -f local-docker-compose.yaml build
docker-compose -f local-docker-compose.yaml up -d
diff --git a/ci/verify.sh b/ci/verify.sh
index 996a3af..deaafb5 100755
--- a/ci/verify.sh
+++ b/ci/verify.sh
@@ -15,18 +15,17 @@ then
WORKSPACE="$HOME"
fi
-virtualenv $WORKSPACE/storperf_venv
+python3 -m venv $WORKSPACE/storperf_venv
source $WORKSPACE/storperf_venv/bin/activate
-pip install --upgrade setuptools==33.1.1
-pip install autoflake==0.6.6
-pip install autopep8==1.2.2
-pip install coverage==4.1
-pip install cryptography==1.7.2
-pip install flake8==2.5.4
-pip install mock==1.3.0
-pip install nose==1.3.7
-pip install -r docker/storperf-master/requirements.pip
+python3 -m pip install --upgrade setuptools
+python3 -m pip install autoflake==1.2
+python3 -m pip install autopep8==1.3.5
+python3 -m pip install coverage==4.5.1
+python3 -m pip install flake8==3.5.0
+python3 -m pip install mock==2.0.0
+python3 -m pip install nose==1.3.7
+python3 -m pip install -r docker/storperf-master/requirements.pip
final_rc=0
diff --git a/cli.py b/cli.py
deleted file mode 100644
index fda05c2..0000000
--- a/cli.py
+++ /dev/null
@@ -1,186 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 EMC and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-"""
-"""
-
-from storperf.storperf_master import StorPerfMaster
-from threading import Thread
-import cPickle
-import getopt
-import json
-import logging
-import logging.config
-import logging.handlers
-import requests
-import socket
-import struct
-import sys
-
-
-class Usage(Exception):
- pass
-
-
-def event(event_string):
- logging.getLogger(__name__).info(event_string)
-
-
-class LogRecordStreamHandler(object):
-
- def __init__(self):
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- self.socket.bind((
- 'localhost', logging.handlers.DEFAULT_UDP_LOGGING_PORT))
- self.level = logging.INFO
-
- def read_logs(self):
- try:
- while True:
- datagram = self.socket.recv(8192)
- chunk = datagram[0:4]
- struct.unpack(">L", chunk)[0]
- chunk = datagram[4:]
- obj = cPickle.loads(chunk)
- record = logging.makeLogRecord(obj)
- if (record.levelno >= self.level):
- logger = logging.getLogger(record.name)
- logger.handle(record)
-
- except Exception as e:
- print "ERROR: " + str(e)
- finally:
- self.socket.close()
-
-
-def main(argv=None):
- verbose = False
- debug = False
- report = None
- erase = False
- terminate = False
- options = {}
-
- storperf = StorPerfMaster()
-
- if argv is None:
- argv = sys.argv
- try:
- try:
- opts, args = getopt.getopt(argv[1:], "t:w:r:f:escvdTh",
- ["target=",
- "workload=",
- "report=",
- "configure=",
- "erase",
- "nossd",
- "nowarm",
- "verbose",
- "debug",
- "terminate",
- "help",
- ])
- except getopt.error, msg:
- raise Usage(msg)
-
- configuration = None
- options['workload'] = None
-
- for o, a in opts:
- if o in ("-h", "--help"):
- print __doc__
- return 0
- elif o in ("-t", "--target"):
- options['filename'] = a
- elif o in ("-v", "--verbose"):
- verbose = True
- elif o in ("-d", "--debug"):
- debug = True
- elif o in ("-s", "--nossd"):
- options['nossd'] = a
- elif o in ("-c", "--nowarm"):
- options['nowarm'] = False
- elif o in ("-w", "--workload"):
- options['workload'] = a
- elif o in ("-r", "--report"):
- report = a
- elif o in ("-e", "--erase"):
- erase = True
- elif o in ("-T", "--terminate"):
- terminate = True
- elif o in ("-f", "--configure"):
- configuration = dict(x.split('=') for x in a.split(','))
-
- if (debug) or (verbose):
- udpserver = LogRecordStreamHandler()
-
- if (debug):
- udpserver.level = logging.DEBUG
-
- logging.basicConfig(format="%(asctime)s - %(name)s - " +
- "%(levelname)s - %(message)s")
-
- t = Thread(target=udpserver.read_logs, args=())
- t.setDaemon(True)
- t.start()
-
- if (erase):
- response = requests.delete(
- 'http://127.0.0.1:5000/api/v1.0/configurations')
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
- return 0
-
- if (terminate):
- response = requests.delete(
- 'http://127.0.0.1:5000/api/v1.0/jobs')
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
- return 0
-
- if (configuration is not None):
- response = requests.post(
- 'http://127.0.0.1:5000/api/v1.0/configurations', json=configuration)
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
-
- if (report is not None):
- print "Fetching report for %s..." % (report,)
- response = requests.get(
- 'http://127.0.0.1:5000/api/v1.0/jobs?id=%s' % (report,))
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
- content = json.loads(response.content)
- print content
- else:
- print "Calling start..."
- response = requests.post(
- 'http://127.0.0.1:5000/api/v1.0/jobs', json=options)
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
-
- content = json.loads(response.content)
- print "Started job id: " + content['job_id']
-
- except Usage as e:
- print >> sys.stderr, str(e)
- print >> sys.stderr, "For help use --help"
- return 2
-
- except Exception as e:
- print >> sys.stderr, str(e)
- return 2
-
-
-if __name__ == "__main__":
- sys.exit(main())
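The deleted CLI was a thin wrapper over the REST API, so the same start and report flows remain reachable directly; a minimal sketch using the endpoints the removed code called (payload values are illustrative only):

# Sketch of the job start/report flows formerly wrapped by cli.py.
import requests

BASE = "http://127.0.0.1:5000/api/v1.0"

def start_job(options):
    r = requests.post("%s/jobs" % BASE, json=options)
    r.raise_for_status()
    return r.json()["job_id"]

def fetch_report(job_id):
    r = requests.get("%s/jobs" % BASE, params={"id": job_id})
    r.raise_for_status()
    return r.json()

# e.g. start_job({"target": "/dev/vdb", "workload": "rw"})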
diff --git a/docker/local-docker-compose.yaml b/docker/local-docker-compose.yaml
index 6daa6e2..a4b69b4 100644
--- a/docker/local-docker-compose.yaml
+++ b/docker/local-docker-compose.yaml
@@ -17,8 +17,10 @@ services:
args:
ARCH: ${ARCH}
env_file: ${ENV_FILE}
+ user: ${CURRENT_UID}
volumes:
- ./storperf-master/:/storperf
+ - ./certs:/etc/ssl/certs/
links:
- storperf-graphite
@@ -28,6 +30,7 @@ services:
context: storperf-reporting
args:
ARCH: ${ARCH}
+ user: ${CURRENT_UID}
volumes:
- ./storperf-reporting/:/home/opnfv/storperf-reporting
diff --git a/docker/storperf-graphite/Dockerfile b/docker/storperf-graphite/Dockerfile
index b566458..c2ffa81 100644
--- a/docker/storperf-graphite/Dockerfile
+++ b/docker/storperf-graphite/Dockerfile
@@ -18,6 +18,8 @@ ARG ARCH=x86_64
ARG ALPINE_VERSION=v3.5
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
+RUN ulimit -n 1024
+
# Install basic stuff =)
RUN apk add --no-cache \
bash \
diff --git a/docker/storperf-httpfrontend/Dockerfile b/docker/storperf-httpfrontend/Dockerfile
index 9b5b5f9..6f072b0 100644
--- a/docker/storperf-httpfrontend/Dockerfile
+++ b/docker/storperf-httpfrontend/Dockerfile
@@ -13,100 +13,8 @@
##
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
-FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
-
-# This comes from https://github.com/nginxinc/docker-nginx/blob/14c1b938737cf4399a6bb039bc506957dce562ae/stable/alpine/Dockerfile
-# Is is cloned here so that we can use multiarch alpine
-
-MAINTAINER NGINX Docker Maintainers "docker-maint@nginx.com"
-
-ENV NGINX_VERSION 1.8.1
-
-ENV GPG_KEYS B0F4253373F8F6F510D42178520A9993A1C052F8
-ENV CONFIG "\
- --prefix=/etc/nginx \
- --sbin-path=/usr/sbin/nginx \
- --conf-path=/etc/nginx/nginx.conf \
- --error-log-path=/var/log/nginx/error.log \
- --http-log-path=/var/log/nginx/access.log \
- --pid-path=/var/run/nginx.pid \
- --lock-path=/var/run/nginx.lock \
- --http-client-body-temp-path=/var/cache/nginx/client_temp \
- --http-proxy-temp-path=/var/cache/nginx/proxy_temp \
- --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \
- --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \
- --http-scgi-temp-path=/var/cache/nginx/scgi_temp \
- --user=nginx \
- --group=nginx \
- --with-http_ssl_module \
- --with-http_realip_module \
- --with-http_addition_module \
- --with-http_sub_module \
- --with-http_dav_module \
- --with-http_flv_module \
- --with-http_mp4_module \
- --with-http_gunzip_module \
- --with-http_gzip_static_module \
- --with-http_random_index_module \
- --with-http_secure_link_module \
- --with-http_stub_status_module \
- --with-http_auth_request_module \
- --with-mail \
- --with-mail_ssl_module \
- --with-file-aio \
- --with-http_spdy_module \
- --with-ipv6 \
- "
-
-RUN \
- addgroup -S nginx \
- && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \
- && apk add --no-cache --virtual .build-deps \
- gcc \
- libc-dev \
- make \
- openssl-dev \
- pcre-dev \
- zlib-dev \
- linux-headers \
- curl \
- gnupg
-
-RUN gpg --keyserver pgp.mit.edu --recv-keys "$GPG_KEYS" || \
- gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$GPG_KEYS" || \
- gpg --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys "$GPG_KEYS"
-
-RUN curl -fSL http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \
- && curl -fSL http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \
- && gpg --verify nginx.tar.gz.asc \
- && mkdir -p /usr/src \
- && tar -zxC /usr/src -f nginx.tar.gz \
- && rm nginx.tar.gz* \
- && rm -r /root/.gnupg \
- && cd /usr/src/nginx-$NGINX_VERSION \
- && ./configure $CONFIG --with-debug \
- && make \
- && mv objs/nginx objs/nginx-debug \
- && ./configure $CONFIG \
- && make \
- && make install \
- && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \
- && strip /usr/sbin/nginx* \
- && runDeps="$( \
- scanelf --needed --nobanner /usr/sbin/nginx \
- | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
- | sort -u \
- | xargs -r apk info --installed \
- | sort -u \
- )" \
- && apk add --virtual .nginx-rundeps $runDeps \
- && apk del .build-deps \
- && rm -rf /usr/src/nginx-* \
- \
- # forward request and error logs to docker log collector
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log
+ARG ALPINE_VERSION=v3.10
+FROM nginx:alpine
EXPOSE 80 443
diff --git a/docker/storperf-httpfrontend/html/css/bootstrap.min.css b/docker/storperf-httpfrontend/html/3rd_party/css/bootstrap.min.css
index ed3905e..ed3905e 100644
--- a/docker/storperf-httpfrontend/html/css/bootstrap.min.css
+++ b/docker/storperf-httpfrontend/html/3rd_party/css/bootstrap.min.css
diff --git a/docker/storperf-httpfrontend/html/index.html b/docker/storperf-httpfrontend/html/index.html
index 3b41653..d07ec97 100644
--- a/docker/storperf-httpfrontend/html/index.html
+++ b/docker/storperf-httpfrontend/html/index.html
@@ -17,7 +17,7 @@ StorPerf Home
<head>
<title>Storperf</title>
<!-- Latest compiled and minified CSS -->
-<link rel="stylesheet" href="./css/bootstrap.min.css">
+<link rel="stylesheet" href="./3rd_party/css/bootstrap.min.css">
</head>
@@ -47,4 +47,4 @@ elaboration about Storperf. Open to Suggestions.</p>
</body>
-</html>
\ No newline at end of file
+</html>
diff --git a/docker/storperf-master/Dockerfile b/docker/storperf-master/Dockerfile
index eaaf811..a2e1a1d 100644
--- a/docker/storperf-master/Dockerfile
+++ b/docker/storperf-master/Dockerfile
@@ -16,10 +16,12 @@
#
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
+ARG ALPINE_VERSION=v3.10
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION as storperf-builder
-LABEL version="5.0" description="OPNFV Storperf Docker container"
+RUN ulimit -n 1024
+
+LABEL version="8.0" description="OPNFV Storperf Docker container"
ARG BRANCH=master
@@ -28,6 +30,7 @@ ENV repos_dir /home/opnfv/repos
RUN apk --no-cache add --update \
git \
alpine-sdk \
+ coreutils \
linux-headers \
libaio \
libaio-dev \
@@ -37,35 +40,34 @@ RUN apk --no-cache add --update \
RUN git config --global http.sslVerify false
RUN git clone http://git.kernel.dk/fio.git ${repos_dir}/fio
RUN cd ${repos_dir}/fio && git checkout tags/fio-2.99
-RUN cd ${repos_dir}/fio && EXTFLAGS="-static" make install
+RUN cd ${repos_dir}/fio && EXTFLAGS="-static" make -j $(grep -c ^processor /proc/cpuinfo) install
# Build StorPerf
RUN apk --no-cache add --update \
libffi-dev \
libressl-dev \
- python \
- py-pip \
- python-dev \
+ python3=3.7.5-r1 \
+ python3-dev=3.7.5-r1 \
alpine-sdk \
- linux-headers \
- bash
+ linux-headers
# Install StorPerf
COPY requirements.pip /storperf/
-RUN pip install --upgrade setuptools==33.1.1
-RUN pip install -r /storperf/requirements.pip
+RUN python3 -m pip install --upgrade setuptools==33.1.1
+RUN python3 -m pip install -r /storperf/requirements.pip
# Build stripped down StorPerf image
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION as storperf-master
RUN apk --no-cache add --update \
- python \
+ libressl-dev \
+ python3=3.7.5-r1 \
bash
-COPY --from=storperf-builder /usr/lib/python2.7/site-packages /usr/lib/python2.7/site-packages
+COPY --from=storperf-builder /usr/lib/python3.7/site-packages /usr/lib/python3.7/site-packages
COPY --from=storperf-builder /usr/local/bin/fio /usr/local/bin/fio
COPY . /storperf
@@ -77,4 +79,4 @@ RUN chmod 600 storperf/resources/ssh/storperf_rsa
EXPOSE 5000
# Entry point
-CMD [ "python", "./rest_server.py" ]
+CMD [ "python3", "./rest_server.py" ]
diff --git a/docker/storperf-master/requirements.pip b/docker/storperf-master/requirements.pip
index a591e84..020a1d5 100644
--- a/docker/storperf-master/requirements.pip
+++ b/docker/storperf-master/requirements.pip
@@ -1,11 +1,10 @@
-pyyaml==3.10
-flask==0.10
-flask_cors==3.0.2
-flask-restful==0.3.5
-flask-restful-swagger==0.19
-flask-swagger==0.2.12
+flask==1.0.2
+flask_cors==3.0.6
+flask-restful==0.3.6
+flask-restful-swagger==0.20.1
+flask-swagger==0.2.13
html2text==2016.1.8
-paramiko==2.0.2
-requests==2.13.0
-scp==0.10.2
+paramiko==2.4.1
+requests==2.19.1
+scp==0.11.0
git+https://gerrit.opnfv.org/gerrit/snaps#egg=snaps
diff --git a/docker/storperf-master/rest_server.py b/docker/storperf-master/rest_server.py
index 67d2d05..7606eca 100644
--- a/docker/storperf-master/rest_server.py
+++ b/docker/storperf-master/rest_server.py
@@ -10,7 +10,6 @@
import json
import logging.config
import os
-import sys
from flask import abort, Flask, request, jsonify
from flask_cors import CORS
@@ -18,6 +17,7 @@ from flask_restful import Resource, Api, fields
from flask_restful_swagger import swagger
from storperf.storperf_master import StorPerfMaster
+import flask
class ReverseProxied(object):
@@ -37,6 +37,7 @@ class ReverseProxied(object):
:param app: the WSGI application
'''
+
def __init__(self, app):
self.app = app
@@ -98,8 +99,12 @@ class ConfigurationRequestModel:
'agent_flavor': fields.String,
'agent_image': fields.String,
'public_network': fields.String,
+ 'volume_count': fields.Integer,
'volume_size': fields.Integer,
+ 'volume_type': fields.String,
'availability_zone': fields.String,
+ 'subnet_CIDR': fields.String,
+ 'stack_name': fields.String,
'username': fields.String,
'password': fields.String
}
@@ -114,8 +119,13 @@ class ConfigurationResponseModel:
'public_network': fields.String,
'stack_created': fields.Boolean,
'stack_id': fields.String,
+ 'volume_count': fields.Integer,
'volume_size': fields.Integer,
- 'availability_zone': fields.String
+ 'volume_type': fields.String,
+ 'availability_zone': fields.String,
+ 'subnet_CIDR': fields.String,
+ 'stack_name': fields.String,
+ 'slave_addresses': fields.Nested
}
@@ -127,22 +137,48 @@ class Configure(Resource):
self.logger = logging.getLogger(__name__)
@swagger.operation(
- notes='Fetch the current agent configuration',
+ notes='''Fetch the current agent configuration.
+
+ This API is in sunset until the next OPNFV release.''',
+ parameters=[
+ {
+ "name": "stack_name",
+ "description": "The name of the stack to use, defaults to" +
+ "StorPerfAgentGroup or the last stack named",
+ "required": False,
+ "type": "string",
+ "allowMultiple": False,
+ "paramType": "query"
+ }],
type=ConfigurationResponseModel.__name__
)
def get(self):
- return jsonify({'agent_count': storperf.agent_count,
+ stack_name = request.args.get('stack_name')
+ if stack_name:
+ storperf.stack_name = stack_name
+
+ json = jsonify({'agent_count': storperf.agent_count,
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
'public_network': storperf.public_network,
+ 'volume_count': storperf.volume_count,
'volume_size': storperf.volume_size,
+ 'volume_type': storperf.volume_type,
'stack_created': storperf.is_stack_created,
'availability_zone': storperf.availability_zone,
+ 'subnet_CIDR': storperf.subnet_CIDR,
+ 'stack_name': storperf.stack_name,
+ 'slave_addresses': storperf.slave_addresses,
'stack_id': storperf.stack_id})
+ response = flask.make_response(json)
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
@swagger.operation(
notes='''Set the current agent configuration and create a stack in
- the controller. Returns once the stack create is completed.''',
+ the controller. Returns once the stack create is completed.
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "configuration",
@@ -162,6 +198,10 @@ class Configure(Resource):
abort(400, "ERROR: No data specified")
try:
+ # Note this must be first in order to be able to create
+ # more than one stack in the same StorPerf instance.
+ if ('stack_name' in request.json):
+ storperf.stack_name = request.json['stack_name']
if ('agent_count' in request.json):
storperf.agent_count = request.json['agent_count']
if ('agent_flavor' in request.json):
@@ -170,10 +210,16 @@ class Configure(Resource):
storperf.agent_image = request.json['agent_image']
if ('public_network' in request.json):
storperf.public_network = request.json['public_network']
+ if ('volume_count' in request.json):
+ storperf.volume_count = request.json['volume_count']
if ('volume_size' in request.json):
storperf.volume_size = request.json['volume_size']
+ if ('volume_type' in request.json):
+ storperf.volume_type = request.json['volume_type']
if ('availability_zone' in request.json):
storperf.availability_zone = request.json['availability_zone']
+ if ('subnet_CIDR' in request.json):
+ storperf.subnet_CIDR = request.json['subnet_CIDR']
if ('username' in request.json):
storperf.username = request.json['username']
if ('password' in request.json):
@@ -183,23 +229,36 @@ class Configure(Resource):
if storperf.stack_id is None:
abort(400, storperf.status_reason)
- return jsonify({'agent_count': storperf.agent_count,
- 'agent_flavor': storperf.agent_flavor,
- 'agent_image': storperf.agent_image,
- 'availability_zone': storperf.availability_zone,
- 'public_network': storperf.public_network,
- 'volume_size': storperf.volume_size,
- 'stack_id': storperf.stack_id})
+ return self.get()
except Exception as e:
+ self.logger.exception(e)
abort(400, str(e))
@swagger.operation(
- notes='Deletes the agent configuration and the stack'
+ notes='''Deletes the agent configuration and the stack
+
+ This API is in sunset until the next OPNFV release.''',
+ parameters=[
+ {
+ "name": "stack_name",
+ "description": "The name of the stack to delete, defaults to" +
+ "StorPerfAgentGroup or the last stack named",
+ "required": False,
+ "type": "string",
+ "allowMultiple": False,
+ "paramType": "query"
+ }]
)
def delete(self):
+ stack_name = request.args.get('stack_name')
+ if stack_name:
+ storperf.stack_name = stack_name
try:
- return jsonify({'stack_id': storperf.delete_stack()})
+ json = jsonify({'stack_id': storperf.delete_stack()})
+ response = flask.make_response(json)
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
except Exception as e:
self.logger.exception(e)
abort(400, str(e))
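A request exercising the configuration fields added above might look like the following sketch; all values are examples, and slave_addresses is echoed back per the new response model:

# Illustrative POST to /api/v1.0/configurations with the new
# volume_count/volume_type/subnet_CIDR/stack_name fields.
import requests

config = {
    "agent_count": 2,
    "agent_image": "Ubuntu 16.04 x86_64",   # example image name
    "public_network": "ext-net",            # example network name
    "volume_count": 2,
    "volume_size": 4,
    "volume_type": "ceph",                  # example Cinder volume type
    "subnet_CIDR": "172.16.0.0/16",
    "stack_name": "StorPerfAgentGroup",
}
r = requests.post("http://127.0.0.1:5000/api/v1.0/configurations",
                  json=config)
print(r.json()["slave_addresses"])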
@@ -213,7 +272,8 @@ class WorkloadModel:
"steady_state_samples": fields.Integer,
'workload': fields.String,
'queue_depths': fields.String,
- 'block_sizes': fields.String
+ 'block_sizes': fields.String,
+ 'stack_name': fields.String
}
@@ -306,6 +366,10 @@ following parameters:
for any single test iteration.
"workload":if specified, the workload to run. Defaults to all.
+
+"stack_name": This field is in sunset until the next OPNVF release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named.
""",
"required": True,
"type": "WorkloadModel",
@@ -328,9 +392,13 @@ for any single test iteration.
if not request.json:
abort(400, "ERROR: Missing configuration data")
+ storperf.reset_values()
self.logger.info(request.json)
try:
+ if ('stack_name' in request.json):
+ storperf.stack_name = request.json['stack_name']
+ storperf.stackless = False
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
@@ -342,10 +410,10 @@ for any single test iteration.
storperf.queue_depths = request.json['queue_depths']
if ('block_sizes' in request.json):
storperf.block_sizes = request.json['block_sizes']
+ storperf.workloads = None
+ storperf.custom_workloads = None
if ('workload' in request.json):
storperf.workloads = request.json['workload']
- else:
- storperf.workloads = None
if ('metadata' in request.json):
metadata = request.json['metadata']
else:
@@ -369,8 +437,6 @@ for any single test iteration.
]
)
def delete(self):
- self.logger.info("Threads: %s" % sys._current_frames())
- print sys._current_frames()
try:
return jsonify({'Slaves': storperf.terminate_workloads()})
except Exception as e:
@@ -378,6 +444,281 @@ for any single test iteration.
@swagger.model
+class WorkloadsBodyModel:
+ resource_fields = {
+ "rw": fields.String(default="randrw")
+ }
+ required = ['rw']
+
+
+@swagger.model
+@swagger.nested(
+ name=WorkloadsBodyModel.__name__)
+class WorkloadsNameModel:
+ resource_fields = {
+ "name": fields.Nested(WorkloadsBodyModel.resource_fields)
+ }
+
+
+@swagger.model
+@swagger.nested(
+ workloads=WorkloadsNameModel.__name__)
+class WorkloadV2Model:
+ resource_fields = {
+ 'target': fields.String,
+ 'deadline': fields.Integer,
+ "steady_state_samples": fields.Integer,
+ 'workloads': fields.Nested(WorkloadsNameModel.resource_fields),
+ 'queue_depths': fields.String,
+ 'block_sizes': fields.String,
+ 'stack_name': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List
+ }
+ required = ['workloads']
+
+
+class Job_v2(Resource):
+
+ """Job API"""
+
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+
+ @swagger.operation(
+ parameters=[
+ {
+ "name": "body",
+ "description": """Start execution of a workload with the
+following parameters:
+
+"target": The target device to profile",
+
+"deadline": if specified, the maximum duration in minutes
+for any single test iteration.
+
+"workloads": A JSON formatted map of workload names and parameters for FIO.
+
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
+ """,
+ "required": True,
+ "type": "WorkloadV2Model",
+ "paramType": "body"
+ }
+ ],
+ type=WorkloadResponseModel.__name__,
+ responseMessages=[
+ {
+ "code": 200,
+ "message": "Job submitted"
+ },
+ {
+ "code": 400,
+ "message": "Missing configuration data"
+ }
+ ]
+ )
+ def post(self):
+ if not request.json:
+ abort(400, "ERROR: Missing job data")
+
+ self.logger.info(request.json)
+ storperf.reset_values()
+
+ try:
+ if ('stack_name' in request.json):
+ storperf.stack_name = request.json['stack_name']
+ if ('target' in request.json):
+ storperf.filename = request.json['target']
+ if ('deadline' in request.json):
+ storperf.deadline = request.json['deadline']
+ if ('steady_state_samples' in request.json):
+ storperf.steady_state_samples = request.json[
+ 'steady_state_samples']
+ if ('queue_depths' in request.json):
+ storperf.queue_depths = request.json['queue_depths']
+ if ('block_sizes' in request.json):
+ storperf.block_sizes = request.json['block_sizes']
+ storperf.workloads = None
+ storperf.custom_workloads = None
+ if ('workload' in request.json):
+ storperf.workloads = request.json['workload']
+ if ('workloads' in request.json):
+ storperf.custom_workloads = request.json['workloads']
+ if ('metadata' in request.json):
+ metadata = request.json['metadata']
+ else:
+ metadata = {}
+
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+
+ job_id = storperf.execute_workloads(metadata)
+
+ return jsonify({'job_id': job_id})
+
+ except Exception as e:
+ self.logger.exception(e)
+ abort(400, str(e))
+
+
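A sketch of a v2 job submission; per the description above, a null stack_name skips Heat entirely and the named workloads map feeds fio directly (all values illustrative):

# Illustrative POST to the new /api/v2.0/jobs endpoint.
import requests

job = {
    "stack_name": None,                     # null bypasses Heat lookups
    "slave_addresses": ["10.0.0.10", "10.0.0.11"],  # example addresses
    "username": "storperf",
    "password": "example-password",
    "block_sizes": "4k,16k",
    "queue_depths": "1,8",
    "workloads": {
        "burst_write": {"rw": "randwrite"},
        "mixed": {"rw": "randrw"},
    },
}
r = requests.post("http://127.0.0.1:5000/api/v2.0/jobs", json=job)
print(r.json()["job_id"])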
+@swagger.model
+class WarmUpModel:
+ resource_fields = {
+ 'stack_name': fields.String,
+ 'target': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List,
+ 'mkfs': fields.String,
+ 'mount_device': fields.String,
+ 'filesize': fields.String,
+ 'nrfiles': fields.String,
+ 'numjobs': fields.String,
+ }
+
+
+class Initialize(Resource):
+
+ """Disk initialization API"""
+
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+
+ @swagger.operation(
+ parameters=[
+ {
+ "name": "body",
+ "description": """Fill the target with random data. If no
+target is specified, it will default to /dev/vdb
+
+"target": The target device to use.
+
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
+"mkfs": if specified, the command to execute in order to create a filesystem
+on the target device (eg: mkfs.ext4)
+
+"mount_point": if specified, the directory to use when mounting the device.
+
+"filesize": if specified, the size of the files to create when profiling
+a filesystem.
+
+"nrfiles": if specified, the number of files to create when profiling
+a filesystem
+
+"numjobs": if specified, the number of jobs for when profiling
+a filesystem
+ """,
+ "required": False,
+ "type": "WarmUpModel",
+ "paramType": "body"
+ }
+ ],
+ type=WorkloadResponseModel.__name__,
+ notes='''Initialize the target device or file by filling it to
+ capacity with random data. This is similar to the jobs API,
+ but does not have a deadline or steady state. It also
+ uses a predefined block size and queue depth.''',
+ responseMessages=[
+ {
+ "code": 200,
+ "message": "Job submitted"
+ },
+ {
+ "code": 400,
+ "message": "Missing configuration data"
+ }
+ ]
+ )
+ def post(self):
+ self.logger.info(request.json)
+ storperf.reset_values()
+
+ try:
+ warm_up_args = {
+ 'rw': 'randwrite',
+ 'direct': "1",
+ 'loops': "1"
+ }
+ storperf.queue_depths = "8"
+ storperf.block_sizes = "16k"
+
+ if request.json:
+ if 'target' in request.json:
+ storperf.filename = request.json['target']
+ if 'stack_name' in request.json:
+ storperf.stack_name = request.json['stack_name']
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+ if 'mkfs' in request.json:
+ storperf.mkfs = request.json['mkfs']
+ if 'mount_device' in request.json:
+ storperf.mount_device = request.json['mount_device']
+ if 'filesize' in request.json:
+ warm_up_args['filesize'] = str(request.json['filesize'])
+ if 'nrfiles' in request.json:
+ warm_up_args['nrfiles'] = str(request.json['nrfiles'])
+ if 'numjobs' in request.json:
+ warm_up_args['numjobs'] = str(request.json['numjobs'])
+
+ storperf.workloads = None
+ storperf.custom_workloads = {
+ '_warm_up': warm_up_args
+ }
+ self.logger.info(storperf.custom_workloads)
+ job_id = storperf.execute_workloads()
+
+ return jsonify({'job_id': job_id})
+
+ except Exception as e:
+ self.logger.exception(e)
+ abort(400, str(e))
+
+
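A sketch of the initialization call; the fixed randwrite/queue-depth-8/16k profile comes from the code above, and the field values here are examples only:

# Illustrative POST to /api/v1.0/initializations (fills the target
# with random data before profiling).
import requests

init = {
    "target": "/dev/vdb",
    "mkfs": "mkfs.ext4",      # per the description above (eg: mkfs.ext4)
    "filesize": "2G",
    "nrfiles": "5",
    "numjobs": "2",
}
r = requests.post("http://127.0.0.1:5000/api/v1.0/initializations",
                  json=init)
print(r.json()["job_id"])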
+@swagger.model
class QuotaModel:
resource_fields = {
@@ -392,12 +733,18 @@ class Quota(Resource):
notes='''Fetch the current Cinder volume quota. This value limits
the number of volumes that can be created, and by extension, defines
the maximum number of agents that can be created for any given test
- scenario''',
+ scenario
+
+
+ This API is in sunset until the next OPNFV release.''',
type=QuotaModel.__name__
)
def get(self):
- quota = storperf.volume_quota
- return jsonify({'quota': quota})
+ quota = [] # storperf.volume_quota
+ # return jsonify({'quota': quota})
+ response = flask.make_response(jsonify({'quota': quota}))
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
def setup_logging(default_path='logging.json',
@@ -423,8 +770,10 @@ def setup_logging(default_path='logging.json',
api.add_resource(Configure, "/api/v1.0/configurations")
+api.add_resource(Initialize, "/api/v1.0/initializations")
api.add_resource(Quota, "/api/v1.0/quotas")
api.add_resource(Job, "/api/v1.0/jobs")
+api.add_resource(Job_v2, "/api/v2.0/jobs")
api.add_resource(Logs, "/api/v1.0/logs")
if __name__ == "__main__":
diff --git a/docker/storperf-master/storperf/carbon/converter.py b/docker/storperf-master/storperf/carbon/converter.py
index 623c144..4b5e6aa 100644
--- a/docker/storperf-master/storperf/carbon/converter.py
+++ b/docker/storperf-master/storperf/carbon/converter.py
@@ -32,12 +32,12 @@ class Converter(object):
def resurse_to_flat_dictionary(self, json, prefix=None):
if type(json) == dict:
- for k, v in json.items():
+ for k, v in list(json.items()):
if prefix is None:
- key = k.decode("utf-8").replace(" ", "_")
+ key = k.replace(" ", "_")
else:
- key = prefix + "." + k.decode("utf-8").replace(" ", "_")
- if hasattr(v, '__iter__'):
+ key = prefix + "." + k.replace(" ", "_")
+ if type(v) is list or type(v) is dict:
self.resurse_to_flat_dictionary(v, key)
else:
self.flat_dictionary[key] = str(v).replace(" ", "_")
@@ -45,7 +45,7 @@ class Converter(object):
index = 0
for v in json:
index += 1
- if hasattr(v, '__iter__'):
+ if type(v) is list or type(v) is dict:
self.resurse_to_flat_dictionary(
v, prefix + "." + str(index))
else:
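The explicit list/dict checks above replace hasattr(v, '__iter__'), which Python 3 strings also satisfy; the recursion flattens nested fio output into dotted keys, roughly as sketched here:

# Illustrative flattening, assuming Converter() initializes an empty
# flat_dictionary.
from storperf.carbon.converter import Converter

c = Converter()
c.resurse_to_flat_dictionary(
    {"jobs": [{"read": {"iops": 5301, "lat ns": {"mean": 42}}}]})
# Expected contents of c.flat_dictionary (list items are 1-based,
# spaces in keys become underscores):
#   {"jobs.1.read.iops": "5301", "jobs.1.read.lat_ns.mean": "42"}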
diff --git a/docker/storperf-master/storperf/carbon/emitter.py b/docker/storperf-master/storperf/carbon/emitter.py
index b196709..13503b2 100644
--- a/docker/storperf-master/storperf/carbon/emitter.py
+++ b/docker/storperf-master/storperf/carbon/emitter.py
@@ -40,19 +40,19 @@ class CarbonMetricTransmitter():
message = "%s %s %s\n" \
% (key, value, timestamp)
self.logger.debug("Metric: " + message.strip())
- carbon_socket.send(message)
+ carbon_socket.send(message.encode('utf-8'))
except ValueError:
self.logger.debug("Ignoring non numeric metric %s %s"
% (key, value))
message = "%s.commit-marker %s %s\n" \
% (commit_marker, timestamp, timestamp)
- carbon_socket.send(message)
+ carbon_socket.send(message.encode('utf-8'))
self.logger.debug("Marker %s" % message.strip())
self.logger.info("Sent metrics to %s:%s with timestamp %s"
% (self.host, self.port, timestamp))
- except Exception, e:
+ except Exception as e:
self.logger.error("While notifying carbon %s:%s %s"
% (self.host, self.port, e))
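The bytes sent above follow Carbon's plaintext protocol, one "key value timestamp" line per metric; a standalone sketch, where the host and the conventional plaintext port 2003 are assumptions:

# Minimal Carbon plaintext send; Python 3 sockets require bytes,
# hence the utf-8 encode mirrored from the emitter change above.
import socket
import time

def send_metric(key, value, host="127.0.0.1", port=2003):
    timestamp = int(time.time())
    message = "%s %s %s\n" % (key, value, timestamp)
    with socket.create_connection((host, port)) as sock:
        sock.send(message.encode('utf-8'))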
diff --git a/docker/storperf-master/storperf/db/configuration_db.py b/docker/storperf-master/storperf/db/configuration_db.py
deleted file mode 100644
index 5b996c7..0000000
--- a/docker/storperf-master/storperf/db/configuration_db.py
+++ /dev/null
@@ -1,120 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 EMC and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-from sqlite3 import OperationalError
-from threading import Lock
-import logging
-import sqlite3
-
-db_mutex = Lock()
-
-
-class ConfigurationDB(object):
-
- db_name = "StorPerfConfig.db"
-
- def __init__(self):
- """
- Creates the StorPerfConfig.db and configuration tables on demand
- """
-
- self.logger = logging.getLogger(__name__)
- self.logger.debug("Connecting to " + ConfigurationDB.db_name)
- with db_mutex:
- db = sqlite3.connect(ConfigurationDB.db_name)
-
- cursor = db.cursor()
- try:
- cursor.execute('''CREATE TABLE configuration
- (configuration_name text,
- key text,
- value text)''')
- self.logger.debug("Created configuration table")
- except OperationalError:
- self.logger.debug("Configuration table exists")
-
- cursor.execute('SELECT * FROM configuration')
- db.commit()
- db.close()
-
- def delete_configuration_value(self, configuration_name, key):
- """Deletes the value associated with the given key
- """
-
- with db_mutex:
- db = sqlite3.connect(ConfigurationDB.db_name)
- cursor = db.cursor()
-
- cursor.execute("delete from configuration where "
- "configuration_name=? and key=?",
- (configuration_name, key))
-
- self.logger.debug("Deleted " + configuration_name + ":" + key)
-
- db.commit()
- db.close()
-
- def get_configuration_value(self, configuration_name, key):
- """Returns a string representation of the value stored
- with this key under the given configuration name.
- """
-
- with db_mutex:
- db = sqlite3.connect(ConfigurationDB.db_name)
- cursor = db.cursor()
-
- cursor.execute(
- """select value from configuration
- where configuration_name = ?
- and key = ?""",
- (configuration_name, key,))
-
- row = cursor.fetchone()
-
- return_value = None
-
- if (row is None):
- self.logger.debug(
- configuration_name + ":" + key + " does not exist")
- else:
- self.logger.debug(
- configuration_name + ":" + key + " is " + str(row[0]))
- return_value = str(row[0])
-
- db.close()
-
- return return_value
-
- def set_configuration_value(self, configuration_name, key, value):
- """Updates or creates the key under the given configuration
- name so that it holds the value specified.
- """
-
- if (value is None):
- return self.delete_configuration_value(configuration_name, key)
-
- with db_mutex:
- value = str(value)
-
- db = sqlite3.connect(ConfigurationDB.db_name)
- cursor = db.cursor()
-
- cursor.execute("delete from configuration where "
- "configuration_name=? and key=?",
- (configuration_name, key))
-
- cursor.execute(
- """insert into configuration(configuration_name, key, value)
- values (?,?,?)""", (configuration_name, key, value))
-
- self.logger.debug(
- configuration_name + ":" + key + " set to " + value)
-
- db.commit()
- db.close()
diff --git a/docker/storperf-master/storperf/db/graphite_db.py b/docker/storperf-master/storperf/db/graphite_db.py
index 8ebd22e..59b9f5d 100644
--- a/docker/storperf-master/storperf/db/graphite_db.py
+++ b/docker/storperf-master/storperf/db/graphite_db.py
@@ -41,7 +41,7 @@ class GraphiteDB(object):
start = end - duration
request = ("http://%s:%s/graphite/render/?target="
- "%s(%s.*.jobs.1.%s.%s)"
+ "%s(%s.*.jobs.*.%s.%s)"
"&format=json"
"&from=%s"
"&until=%s"
diff --git a/docker/storperf-master/storperf/db/job_db.py b/docker/storperf-master/storperf/db/job_db.py
index eb35cac..c3632e4 100644
--- a/docker/storperf-master/storperf/db/job_db.py
+++ b/docker/storperf-master/storperf/db/job_db.py
@@ -220,7 +220,7 @@ class JobDB(object):
db = sqlite3.connect(JobDB.db_name)
cursor = db.cursor()
- for param, value in params.iteritems():
+ for param, value in params.items():
cursor.execute(
"""insert into job_params
(job_id,
@@ -265,7 +265,7 @@ class JobDB(object):
break
try:
data = json.loads(row[1])
- except:
+ except Exception:
data = row[1]
params[row[0]] = data
db.close()
diff --git a/docker/storperf-master/storperf/fio/fio_invoker.py b/docker/storperf-master/storperf/fio/fio_invoker.py
index 0360ea2..bb81eef 100644
--- a/docker/storperf-master/storperf/fio/fio_invoker.py
+++ b/docker/storperf-master/storperf/fio/fio_invoker.py
@@ -11,6 +11,7 @@ import json
import logging
from threading import Thread
import paramiko
+from storperf.utilities import ip_helper
class FIOInvoker(object):
@@ -23,6 +24,7 @@ class FIOInvoker(object):
self.callback_id = None
self.terminated = False
self.metadata = var_dict
+ self.stderr = []
@property
def remote_host(self):
@@ -44,6 +46,8 @@ class FIOInvoker(object):
self.json_body = ""
try:
for line in iter(stdout.readline, b''):
+ if type(line) == bytes:
+ line = line.decode('utf-8')
if line.startswith("fio"):
line = ""
continue
@@ -60,13 +64,13 @@ class FIOInvoker(object):
"Event listener callback")
event_listener(
self.callback_id, json_metric)
- except Exception, e:
+ except Exception as e:
self.logger.exception(
"Notifying listener %s: %s",
self.callback_id, e)
self.logger.debug(
"Event listener callback complete")
- except Exception, e:
+ except Exception as e:
self.logger.error("Error parsing JSON: %s", e)
except IOError:
pass # We might have read from the closed socket, ignore it
@@ -77,7 +81,9 @@ class FIOInvoker(object):
def stderr_handler(self, stderr):
self.logger.debug("Started")
for line in iter(stderr.readline, b''):
- self.logger.error("FIO Error: %s", line.rstrip())
+ if len(line) > 0:
+ self.logger.error("FIO Error: %s", line.rstrip())
+ self.stderr.append(line.rstrip())
# Sometime, FIO gets stuck and will give us this message:
# fio: job 'sequential_read' hasn't exited in 60 seconds,
@@ -125,6 +131,9 @@ class FIOInvoker(object):
self.logger.debug("Joining stdout handler")
tout.join()
self.logger.debug("Ended")
+ if exit_status != 0:
+ return self.stderr
+ return None
def terminate(self):
self.logger.debug("Terminating fio on " + self.remote_host)
@@ -132,10 +141,12 @@ class FIOInvoker(object):
ssh = self._ssh_client()
- command = "sudo killall fio"
-
- self.logger.debug("Executing on %s: %s" % (self.remote_host, command))
- (_, stdout, stderr) = ssh.exec_command(command)
+ kill_commands = ['sudo killall fio',
+ 'sudo pkill fio']
+ for command in kill_commands:
+ self.logger.debug("Executing on %s: %s" %
+ (self.remote_host, command))
+ (_, stdout, stderr) = ssh.exec_command(command)
for line in stdout.readlines():
self.logger.debug(line.strip())
@@ -148,13 +159,25 @@ class FIOInvoker(object):
def _ssh_client(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ address, port = ip_helper.parse_address_and_port(self.remote_host)
if 'username' in self.metadata and 'password' in self.metadata:
- ssh.connect(self.remote_host,
+ ssh.connect(address,
+ port=port,
+ username=self.metadata['username'],
+ password=self.metadata['password'],
+ timeout=5)
+ return ssh
+ elif 'username' in self.metadata and 'ssh_key' in self.metadata:
+ ssh.connect(address,
+ port=port,
username=self.metadata['username'],
- password=self.metadata['password'])
+ pkey=self.metadata['ssh_key'],
+ timeout=5)
return ssh
else:
- ssh.connect(self.remote_host, username='storperf',
+ ssh.connect(address,
+ port=port,
+ username='storperf',
key_filename='storperf/resources/ssh/storperf_rsa',
- timeout=2)
+ timeout=5)
return ssh
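parse_address_and_port comes from the new storperf/utilities/ip_helper.py, whose body is not part of this section; a hypothetical sketch of its behavior, inferred from its use with ssh.connect above:

# Hypothetical sketch of ip_helper.parse_address_and_port; the real
# implementation is in storperf/utilities/ip_helper.py (not shown here).
def parse_address_and_port(remote, default_port=22):
    if ':' in remote and '.' in remote:   # e.g. "10.0.0.10:2222"
        address, port = remote.rsplit(':', 1)
        return address, int(port)
    return remote, default_port           # bare address: default ssh port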
diff --git a/docker/storperf-master/storperf/resources/hot/agent-group.yaml b/docker/storperf-master/storperf/resources/hot/agent-group.yaml
index 3c02e31..f09d95a 100644
--- a/docker/storperf-master/storperf/resources/hot/agent-group.yaml
+++ b/docker/storperf-master/storperf/resources/hot/agent-group.yaml
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-heat_template_version: 2013-05-23
+heat_template_version: newton
parameters:
public_network:
@@ -20,6 +20,12 @@ parameters:
agent_image:
type: string
default: 'StorPerf Ubuntu 14.04'
+ volume_count:
+ type: number
+ default: 0
+ constraints:
+ - range: { min: 0, max: 512 }
+ description: must be between 0 and 512 volumes.
volume_size:
type: number
description: Size of the volume to be created.
@@ -27,6 +33,9 @@ parameters:
constraints:
- range: { min: 1, max: 1024 }
description: must be between 1 and 1024 Gb.
+ volume_type:
+ type: string
+ default: 'None'
agent_count:
type: number
default: 1
@@ -36,6 +45,12 @@ parameters:
availability_zone:
type: string
default: nova
+ subnet_CIDR:
+ type: string
+ default: '172.16.0.0/16'
+ keypair_name:
+ type: string
+ default: storperf_agent_keypair
resources:
slaves:
@@ -54,21 +69,20 @@ resources:
availability_zone: {get_param: availability_zone},
storperf_open_security_group: {get_resource: storperf_open_security_group},
key_name: {get_resource: storperf_key_pair},
- volume_size: {get_param: volume_size}
+ volume_count: {get_param: volume_count},
+ volume_size: {get_param: volume_size},
+ volume_type: {get_param: volume_type}
}
}
storperf_network:
type: OS::Neutron::Net
- properties:
- name: storperf-network
storperf_subnet:
type: OS::Neutron::Subnet
properties:
network_id: { get_resource: storperf_network }
- cidr: 172.16.0.0/16
- gateway_ip: 172.16.0.1
+ cidr: { get_param: subnet_CIDR}
storperf_network_router:
type: OS::Neutron::Router
@@ -86,7 +100,7 @@ resources:
type: OS::Nova::KeyPair
properties:
save_private_key: true
- name: storperf_agent_keypair
+ name: { get_param: keypair_name}
storperf_open_security_group:
type: OS::Neutron::SecurityGroup
diff --git a/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml b/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml
index 7841e8c..7a0a9e9 100644
--- a/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml
+++ b/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-heat_template_version: 2013-05-23
+heat_template_version: newton
parameters:
flavor:
@@ -24,6 +24,13 @@ parameters:
default: storperf
storperf_open_security_group:
type: string
+ volume_count:
+ type: number
+ description: Number of volumes to be created
+ default: 1
+ constraints:
+ - range: { min: 0, max: 1024 }
+ description: must be between 0 and 1024.
volume_size:
type: number
description: Size of the volume to be created.
@@ -31,6 +38,8 @@ parameters:
constraints:
- range: { min: 1, max: 1024 }
description: must be between 1 and 1024 Gb.
+ volume_type:
+ type: string
agent_network:
type: string
constraints:
@@ -87,15 +96,17 @@ resources:
port_id: { get_resource: storperf_agent_port }
agent_volume:
- type: OS::Cinder::Volume
- properties:
- size: { get_param: volume_size }
-
- agent_volume_att:
- type: OS::Cinder::VolumeAttachment
+ type: OS::Heat::ResourceGroup
properties:
- instance_uuid: { get_resource: storperf_agent }
- volume_id: { get_resource: agent_volume}
+ count: { get_param: volume_count }
+ resource_def: {
+ type: "storperf-volume.yaml",
+ properties: {
+ volume_size: { get_param: volume_size },
+ volume_type: { get_param: volume_type },
+ agent_instance_uuid: { get_resource: storperf_agent }
+ }
+ }
outputs:
storperf_agent_ip:
diff --git a/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml b/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml
new file mode 100644
index 0000000..d64d0c2
--- /dev/null
+++ b/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml
@@ -0,0 +1,57 @@
+##############################################################################
+# Copyright (c) 2018 Dell EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+heat_template_version: newton
+
+parameters:
+ volume_size:
+ type: number
+ description: Size of the volume to be created.
+ default: 1
+ constraints:
+ - range: { min: 1, max: 1024 }
+ description: must be between 1 and 1024 GB.
+ volume_type:
+ type: string
+ default: None
+ agent_instance_uuid:
+ type: string
+
+conditions: {
+ 'without_type': {equals: [{get_param: volume_type}, 'None']},
+ 'with_type': {not: {equals: [{get_param: volume_type}, 'None']}}
+}
+
+resources:
+ agent_volume_type:
+ type: OS::Cinder::Volume
+ condition: 'with_type'
+ properties:
+ size: { get_param: volume_size }
+ volume_type: { get_param: volume_type}
+
+ agent_volume_type_att:
+ type: OS::Cinder::VolumeAttachment
+ condition: 'with_type'
+ properties:
+ instance_uuid: { get_param: agent_instance_uuid }
+ volume_id: { get_resource: agent_volume_type}
+
+ agent_volume:
+ type: OS::Cinder::Volume
+ condition: 'without_type'
+ properties:
+ size: { get_param: volume_size }
+
+ agent_volume_att:
+ type: OS::Cinder::VolumeAttachment
+ condition: 'without_type'
+ properties:
+ instance_uuid: { get_param: agent_instance_uuid }
+ volume_id: { get_resource: agent_volume}
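The two conditions are mutually exclusive, keyed on the literal string 'None' (not a YAML null): a typed volume plus attachment is created when a volume_type is supplied, otherwise the plain pair is used. A minimal Python mirror of the selection:

    def volume_resources(volume_type='None'):
        # Mirrors the 'with_type'/'without_type' conditions in the template
        if volume_type == 'None':
            return ['agent_volume', 'agent_volume_att']
        return ['agent_volume_type', 'agent_volume_type_att']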
diff --git a/docker/storperf-master/storperf/storperf_master.py b/docker/storperf-master/storperf/storperf_master.py
index 45d5d89..73f8f0d 100644
--- a/docker/storperf-master/storperf/storperf_master.py
+++ b/docker/storperf-master/storperf/storperf_master.py
@@ -7,23 +7,26 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from datetime import datetime
-import logging
-import os
-import socket
-from threading import Thread
-from time import sleep
-import paramiko
+from datetime import datetime
+from io import StringIO
+from multiprocessing.pool import ThreadPool
from scp import SCPClient
from snaps.config.stack import StackConfig
from snaps.openstack.create_stack import OpenStackHeatStack
from snaps.openstack.os_credentials import OSCreds
-
-from storperf.db.configuration_db import ConfigurationDB
+from snaps.openstack.utils import heat_utils, cinder_utils, glance_utils
+from snaps.thread_utils import worker_pool
from storperf.db.job_db import JobDB
from storperf.test_executor import TestExecutor
-from snaps.openstack.utils import heat_utils
+from storperf.utilities import ip_helper
+from time import sleep
+import json
+import logging
+import os
+import paramiko
+import socket
+import uuid
class ParameterError(Exception):
@@ -35,143 +38,264 @@ class StorPerfMaster(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
- self.configuration_db = ConfigurationDB()
- self.job_db = JobDB()
+ self.reset_values()
+ self.job_db = JobDB()
self.stack_settings = StackConfig(
- name='StorPerfAgent',
+ name=self.stack_name,
template_path='storperf/resources/hot/agent-group.yaml')
- self.os_creds = OSCreds(username=os.environ.get('OS_USERNAME'),
- password=os.environ.get('OS_PASSWORD'),
- auth_url=os.environ.get('OS_AUTH_URL'),
- project_name=os.environ.get('OS_PROJECT_NAME'))
+ self.os_creds = OSCreds(
+ username=os.environ.get('OS_USERNAME'),
+ password=os.environ.get('OS_PASSWORD'),
+ auth_url=os.environ.get('OS_AUTH_URL'),
+ identity_api_version=os.environ.get('OS_IDENTITY_API_VERSION'),
+ user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME'),
+ user_domain_id=os.environ.get('OS_USER_DOMAIN_ID'),
+ region_name=os.environ.get('OS_REGION_NAME'),
+ project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME'),
+ project_domain_id=os.environ.get('OS_PROJECT_DOMAIN_ID'),
+ project_name=os.environ.get('OS_PROJECT_NAME'))
+
+ self.logger.debug("OSCreds: %s" % self.os_creds)
self.heat_stack = OpenStackHeatStack(self.os_creds,
self.stack_settings)
- self._test_executor = TestExecutor()
- self._last_openstack_auth = datetime.now()
+
+ self._snaps_pool = worker_pool(20)
+
+ def reset_values(self):
+ self._stack_name = 'StorPerfAgentGroup'
+ self.username = None
+ self.password = None
+ self._ssh_key = None
+ self._test_executor = None
+ self._agent_count = 1
+ self._agent_image = None
+ self._agent_flavor = None
+ self._availability_zone = None
+ self._public_network = None
+ self._volume_count = 1
+ self._volume_size = 1
+ self._volume_type = None
+ self._slave_addresses = []
+ self._filename = None
+ self._deadline = None
+ self._steady_state_samples = 10
+ self._queue_depths = "1"
+ self._block_sizes = "4096"
+ self._workload_modules = []
+ self._custom_workloads = []
+ self._subnet_CIDR = '172.16.0.0/16'
+ self.slave_info = {}
+ self.stackless = False
+ self.mkfs = None
+ self.mount_device = None
+ self._last_snaps_check_time = None
+ self._cached_stack_id = None
+
+ @property
+ def volume_count(self):
+ self._get_stack_info()
+ return self._volume_count
+
+ @volume_count.setter
+ def volume_count(self, value):
+ if (self.stack_id is not None):
+ raise ParameterError(
+ "ERROR: Cannot change volume count after stack is created")
+ self._volume_count = value
@property
def volume_size(self):
- value = self.configuration_db.get_configuration_value(
- 'stack',
- 'volume_size')
- if (value is None):
- self.volume_size = 1
- value = 1
- return int(value)
+ self._get_stack_info()
+ return self._volume_size
@volume_size.setter
def volume_size(self, value):
if (self.stack_id is not None):
raise ParameterError(
"ERROR: Cannot change volume size after stack is created")
+ self._volume_size = value
- self.configuration_db.set_configuration_value(
- 'stack',
- 'volume_size',
- value)
+ @property
+ def volume_type(self):
+ self._get_stack_info()
+ return self._volume_type
+
+ @volume_type.setter
+ def volume_type(self, value):
+ if (self.stack_id is not None):
+ raise ParameterError(
+ "ERROR: Cannot change volume type after stack is created")
+ self._volume_type = value
@property
- def agent_count(self):
- value = self.configuration_db.get_configuration_value(
- 'stack',
- 'agent_count')
+ def stack_name(self):
+ return self._stack_name
- if (value is None):
- self.agent_count = 1
- value = 1
- return int(value)
+ @stack_name.setter
+ def stack_name(self, value):
+ if value is None:
+ self.stackless = True
+ else:
+ self.stackless = False
+ self._stack_name = value
+ self.stack_settings.name = self.stack_name
+ self.stack_id = None
+ self._last_snaps_check_time = None
+
+ @property
+ def subnet_CIDR(self):
+ return self._subnet_CIDR
+
+ @subnet_CIDR.setter
+ def subnet_CIDR(self, value):
+ if (self.stack_id is not None):
+ raise ParameterError(
+ "ERROR: Cannot change subnet CIDR after stack is created")
+ self._subnet_CIDR = value
+
+ @property
+ def agent_count(self):
+ self._get_stack_info()
+ return self._agent_count
@agent_count.setter
def agent_count(self, value):
if (self.stack_id is not None):
raise ParameterError(
"ERROR: Cannot change agent count after stack is created")
-
- self.configuration_db.set_configuration_value(
- 'stack',
- 'agent_count',
- value)
+ self._agent_count = value
@property
def agent_image(self):
- value = self.configuration_db.get_configuration_value(
- 'stack',
- 'agent_image')
-
- if (value is None):
- value = 'Ubuntu 14.04'
- self.agent_image = value
-
- return value
+ self._get_stack_info()
+ return self._agent_image
@agent_image.setter
def agent_image(self, value):
if (self.stack_id is not None):
raise ParameterError(
"ERROR: Cannot change agent image after stack is created")
-
- self.configuration_db.set_configuration_value(
- 'stack',
- 'agent_image',
- value)
+ self._agent_image = value
@property
def public_network(self):
- return self.configuration_db.get_configuration_value(
- 'stack',
- 'public_network')
+ self._get_stack_info()
+ return self._public_network
@public_network.setter
def public_network(self, value):
if (self.stack_id is not None):
raise ParameterError(
"ERROR: Cannot change public network after stack is created")
-
- self.configuration_db.set_configuration_value(
- 'stack',
- 'public_network',
- value)
+ self._public_network = value
@property
def agent_flavor(self):
- return self.configuration_db.get_configuration_value(
- 'stack',
- 'agent_flavor')
+ self._get_stack_info()
+ return self._agent_flavor
@agent_flavor.setter
def agent_flavor(self, value):
if (self.stack_id is not None):
raise ParameterError(
"ERROR: Cannot change flavor after stack is created")
+ self._agent_flavor = value
- self.configuration_db.set_configuration_value(
- 'stack',
- 'agent_flavor',
- value)
+ @property
+ def slave_addresses(self):
+ return self._slave_addresses
+
+ @slave_addresses.setter
+ def slave_addresses(self, value):
+ self._slave_addresses = value
@property
def stack_id(self):
+ self._get_stack_info()
+ return self._cached_stack_id
+
+ @stack_id.setter
+ def stack_id(self, value):
+ self._cached_stack_id = value
+
+ def _get_stack_info(self):
+ if self.stackless:
+ self._cached_stack_id = None
+ return None
+
+ if self._last_snaps_check_time is not None:
+ time_since_check = datetime.now() - self._last_snaps_check_time
+ if time_since_check.total_seconds() < 60:
+ return self._cached_stack_id
+
self.heat_stack.initialize()
+
if self.heat_stack.get_stack() is not None:
- return self.heat_stack.get_stack().id
+ self._cached_stack_id = self.heat_stack.get_stack().id
+ cinder_cli = cinder_utils.cinder_client(self.os_creds)
+ glance_cli = glance_utils.glance_client(self.os_creds)
+
+ router_worker = self._snaps_pool.apply_async(
+ self.heat_stack.get_router_creators)
+
+ vm_inst_creators = self.heat_stack.get_vm_inst_creators()
+ self._agent_count = len(vm_inst_creators)
+ vm1 = vm_inst_creators[0]
+ self._availability_zone = \
+ vm1.instance_settings.availability_zone
+ self._agent_flavor = vm1.instance_settings.flavor.name
+
+ self._slave_addresses = []
+ for instance in vm_inst_creators:
+ floating_ip = instance.get_floating_ip()
+ self._slave_addresses.append(floating_ip.ip)
+ self.logger.debug("Found VM at %s" % floating_ip.ip)
+
+ server = vm1.get_vm_inst()
+
+ image_worker = self._snaps_pool.apply_async(
+ glance_utils.get_image_by_id, (glance_cli, server.image_id))
+
+ self._volume_count = len(server.volume_ids)
+ if self._volume_count > 0:
+ volume_id = server.volume_ids[0]['id']
+ volume = cinder_utils.get_volume_by_id(
+ cinder_cli, volume_id)
+ self.logger.debug("Volume id %s, size=%s, type=%s" %
+ (volume.id,
+ volume.size,
+ volume.type))
+ self._volume_size = volume.size
+ self._volume_type = volume.type
+
+ image = image_worker.get()
+ self._agent_image = image.name
+
+ router_creators = router_worker.get()
+ router1 = router_creators[0]
+ self._public_network = \
+ router1.router_settings.external_gateway
+
+ self._last_snaps_check_time = datetime.now()
else:
- return None
+ self._cached_stack_id = None
+
+ return self._cached_stack_id
@property
def availability_zone(self):
- return self.configuration_db.get_configuration_value(
- 'stack',
- 'availability_zone')
+ self._get_stack_info()
+ return self._availability_zone
@availability_zone.setter
def availability_zone(self, value):
- self.configuration_db.set_configuration_value(
- 'stack',
- 'availability_zone',
- value)
+ if (self.stack_id is not None):
+ raise ParameterError(
+ "ERROR: Cannot change zone after stack is created")
+ self._availability_zone = value
@property
def volume_quota(self):
@@ -180,93 +304,81 @@ class StorPerfMaster(object):
@property
def filename(self):
- return self._test_executor.filename
+ return self._filename
@filename.setter
def filename(self, value):
- self._test_executor.filename = value
+ self._filename = value
@property
def deadline(self):
- return self._test_executor.deadline
+ return self._deadline
@deadline.setter
def deadline(self, value):
- self._test_executor.deadline = value
+ self._deadline = value
@property
def steady_state_samples(self):
- return self._test_executor.steady_state_samples
+ return self._steady_state_samples
@steady_state_samples.setter
def steady_state_samples(self, value):
- self._test_executor.steady_state_samples = value
+ self._steady_state_samples = value
@property
def queue_depths(self):
- return self._test_executor.queue_depths
+ return self._queue_depths
@queue_depths.setter
def queue_depths(self, value):
- self._test_executor.queue_depths = value
+ self._queue_depths = value
@property
def block_sizes(self):
- return self._test_executor.block_sizes
+ return self._block_sizes
@block_sizes.setter
def block_sizes(self, value):
- self._test_executor.block_sizes = value
-
- @property
- def is_stack_created(self):
- return (self.stack_id is not None and
- self.heat_stack.get_status() == u'CREATE_COMPLETE')
+ self._block_sizes = value
@property
def workloads(self):
- return self.configuration_db.get_configuration_value(
- 'workload',
- 'workloads')
+ return self._workload_modules
@workloads.setter
def workloads(self, value):
- self._test_executor.register_workloads(value)
+ executor = TestExecutor()
+ executor.register_workloads(value)
+ self._workload_modules = value
- self.configuration_db.set_configuration_value(
- 'workload',
- 'workloads',
- str(self._test_executor.workload_modules))
+ @property
+ def custom_workloads(self):
+ return self._custom_workloads
+
+ @custom_workloads.setter
+ def custom_workloads(self, value):
+ self.logger.info("Custom workloads = %s" % value)
+ self._custom_workloads = value
@property
- def username(self):
- return self.configuration_db.get_configuration_value(
- 'stack',
- 'username'
- )
-
- @username.setter
- def username(self, value):
- self.configuration_db.set_configuration_value(
- 'stack',
- 'username',
- value
- )
+ def ssh_key(self):
+ if self._ssh_key is None:
+ return None
+ key = StringIO(self._ssh_key)
+ pkey = paramiko.RSAKey.from_private_key(key)
+ key.close()
+ return pkey
+
+ @ssh_key.setter
+ def ssh_key(self, value):
+ self._ssh_key = value
@property
- def password(self):
- return self.configuration_db.get_configuration_value(
- 'stack',
- 'password'
- )
-
- @password.setter
- def password(self, value):
- self.configuration_db.set_configuration_value(
- 'stack',
- 'password',
- value
- )
+ def is_stack_created(self):
+ return (self.stack_id is not None and
+ (self.heat_stack.get_status() == u'CREATE_COMPLETE' or
+ self.heat_stack.get_status() == u'UPDATE_COMPLETE'))
def get_logs(self, lines=None):
LOG_DIR = './storperf.log'
@@ -286,22 +398,40 @@ class StorPerfMaster(object):
return logs
def create_stack(self):
+ self.stackless = False
+
self.stack_settings.resource_files = [
- 'storperf/resources/hot/storperf-agent.yaml']
+ 'storperf/resources/hot/storperf-agent.yaml',
+ 'storperf/resources/hot/storperf-volume.yaml']
self.stack_settings.env_values = self._make_parameters()
try:
- self.heat_stack.create()
- except Exception:
- heat_cli = heat_utils.heat_client(self.os_creds)
- res = heat_utils.get_resources(heat_cli,
- self.heat_stack.get_stack().id)
+ self.heat_stack.create(block=True)
+ except Exception as e:
self.logger.error("Stack creation failed")
- for resource in res:
- status = resource.status
- self.logger.error("%s: %s" % (resource.name, status))
- if status == u'CREATE_FAILED':
- self.delete_stack()
- raise Exception(resource.status_reason)
+ self.logger.exception(e)
+ heat_cli = heat_utils.heat_client(self.os_creds)
+ if self.heat_stack.get_stack() is not None:
+ res = heat_utils.get_resources(heat_cli,
+ self.heat_stack.get_stack().id)
+ reason = ""
+ failed = False
+ for resource in res:
+ if resource.status == u'CREATE_FAILED':
+ failed = True
+ reason += "%s: %s " % (resource.name,
+ resource.status_reason)
+ self.logger.error("%s - %s: %s" % (resource.name,
+ resource.status,
+ resource.status_reason))
+
+ if failed:
+ try:
+ self.heat_stack.clean()
+ except Exception:
+ pass
+ raise Exception(reason)
+ else:
+ raise e
def delete_stack(self):
if self._test_executor is not None:
@@ -310,49 +440,93 @@ class StorPerfMaster(object):
stack_id = None
if (self.stack_id is not None):
stack_id = self.stack_id
- self.heat_stack.clean()
+ try:
+ self.heat_stack.clean()
+ except Exception as e:
+ self.logger.error("Stack creation failed")
+ raise Exception(e)
+ self.stack_id = None
return stack_id
- def execute_workloads(self, metadata={}):
- if (self.stack_id is None):
- raise ParameterError("ERROR: Stack does not exist")
+ def executor_event(self, executor):
+ if executor.terminated:
+ self._test_executor = None
- if (not self._test_executor.terminated and
- self._test_executor.job_id is not None):
+ def execute_workloads(self, metadata={}):
+ if (self._test_executor is not None and
+ (not self._test_executor.terminated and
+ self._test_executor.job_id is not None)):
raise Exception("ERROR: Job {} is already running".format(
self._test_executor.job_id))
- outputs = self.heat_stack.get_outputs()
- slaves = outputs[0].value
+ if (not self.stackless and
+ self.stack_id is None):
+ raise ParameterError("ERROR: Stack %s does not exist" %
+ self.stack_name)
- setup_threads = []
+ self._test_executor = TestExecutor()
+ self._test_executor.register(self.executor_event)
+ self._test_executor.register_workloads(self._workload_modules)
+ self._test_executor.custom_workloads = self.custom_workloads
+ self._test_executor.block_sizes = self._block_sizes
+ self._test_executor.filename = self._filename
+ self._test_executor.deadline = self._deadline
+ self._test_executor.steady_state_samples = self._steady_state_samples
+ self._test_executor.queue_depths = self._queue_depths
+ slaves = self._slave_addresses
+
+ setup_pool = ThreadPool(processes=len(slaves))
+
+ workers = []
for slave in slaves:
- t = Thread(target=self._setup_slave, args=(slave,))
- setup_threads.append(t)
- t.start()
+ worker = setup_pool.apply_async(
+ self._setup_slave, (slave,))
+ workers.append(worker)
- for thread in setup_threads:
- thread.join()
+ for worker in workers:
+ worker.get()
- self._test_executor.slaves = slaves
+ setup_pool.close()
+ self._test_executor.slaves = slaves
+ self._test_executor.volume_count = self.volume_count
params = metadata
- params['agent_count'] = self.agent_count
+ params['agent_count'] = len(slaves)
+ params['agent_flavor'] = self.agent_flavor
+ params['agent_image'] = self.agent_image
+ params['agent_info'] = json.dumps(self.slave_info)
+ params['availability_zone'] = self.availability_zone
+ params['block_sizes'] = self.block_sizes
+ params['deadline'] = self.deadline
params['public_network'] = self.public_network
+ params['stack_name'] = self.stack_name
+ params['steady_state_samples'] = self.steady_state_samples
+ params['subnet_CIDR'] = self.subnet_CIDR
+ params['target'] = self.filename
+ params['volume_count'] = self.volume_count
params['volume_size'] = self.volume_size
- if self.username and self.password:
+ params['volume_type'] = self.volume_type
+ if self.username:
params['username'] = self.username
+ if self.password:
params['password'] = self.password
+ if self.ssh_key:
+ params['ssh_key'] = self.ssh_key
job_id = self._test_executor.execute(params)
+ self.slave_info = {}
return job_id
def terminate_workloads(self):
- return self._test_executor.terminate()
+ if self._test_executor is not None:
+ return self._test_executor.terminate()
+ else:
+ return True
def fetch_results(self, job_id):
- if self._test_executor.job_db.job_id == job_id:
+ if (self._test_executor is not None and
+ self._test_executor.job_db.job_id == job_id):
return self._test_executor.metadata['details']['metrics']
workload_params = self.job_db.fetch_workload_params(job_id)
@@ -365,7 +539,19 @@ class StorPerfMaster(object):
return self.job_db.fetch_workload_params(job_id)
def fetch_job_status(self, job_id):
- return self._test_executor.execution_status(job_id)
+ results = {}
+
+ if (self._test_executor is not None and
+ self._test_executor.job_id == job_id):
+ results['Status'] = 'Running'
+ results['Workloads'] = self._test_executor.workload_status
+ else:
+ jobs = self.job_db.fetch_jobs()
+ for job in jobs:
+ if job == job_id:
+ results['Status'] = "Completed"
+
+ return results
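Illustrative return shapes for fetch_job_status (an unknown job id yields an empty dict):

    # running:   {'Status': 'Running', 'Workloads': {...per-run states...}}
    # completed: {'Status': 'Completed'}
    # unknown:   {}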
def fetch_all_jobs(self, metrics_type):
job_list = self.job_db.fetch_jobs()
@@ -393,7 +579,8 @@ class StorPerfMaster(object):
timer = 10
while not alive:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- result = s.connect_ex((slave, 22))
+ host, port = ip_helper.parse_address_and_port(slave)
+ result = s.connect_ex((host, port))
s.close()
if result:
@@ -410,13 +597,31 @@ class StorPerfMaster(object):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.username and self.password:
- ssh.connect(slave,
- username=self.username,
- password=self.password)
+ ssh.connect(
+ host,
+ port=port,
+ username=self.username,
+ password=self.password,
+ timeout=2)
+ elif self.username and self.ssh_key:
+ ssh.connect(
+ host,
+ port=port,
+ username=self.username,
+ pkey=self.ssh_key,
+ timeout=2)
else:
- ssh.connect(slave, username='storperf',
- key_filename='storperf/resources/ssh/storperf_rsa',
- timeout=2)
+ ssh.connect(
+ host,
+ port=port,
+ username='storperf',
+ key_filename='storperf/resources/ssh/storperf_rsa',
+ timeout=2)
+
+ uname = self._get_uname(ssh)
+ logger.debug("Slave uname is %s" % uname)
+ self.slave_info[slave] = {}
+ self.slave_info[slave]['uname'] = uname
available = self._check_root_fs(ssh)
logger.debug("Available space on / is %s" % available)
@@ -435,6 +640,16 @@ class StorPerfMaster(object):
logger.debug("Transferring fio to %s" % slave)
scp.put('/usr/local/bin/fio', '~/')
+ if self.mkfs is not None:
+ self._mkfs(ssh, logger)
+
+ if self.mount_device is not None:
+ self._mount(ssh, logger)
+
+ def _get_uname(self, ssh):
+ (_, stdout, _) = ssh.exec_command("uname -a")
+ return stdout.readline()
+
def _check_root_fs(self, ssh):
(_, stdout, _) = ssh.exec_command("df /")
stdout.readline()
@@ -443,6 +658,59 @@ class StorPerfMaster(object):
available = lines[3]
return int(available)
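_check_root_fs reads 'df /', skips the header line, and takes the fourth whitespace-separated field, i.e. the Available column in 1K blocks. For example (illustrative df output):

    line = "/dev/vda1  10188052 2683948 6963720 28% /"
    available = int(line.split()[3])   # 6963720 (1K blocks free on /)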
+ def _mkfs(self, ssh, logger):
+ command = "sudo umount %s" % (self.mount_device)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, ''):
+ logger.info(line)
+ for line in iter(stderr.readline, ''):
+ logger.error(line)
+
+ command = "sudo mkfs.%s %s" % (self.mkfs, self.mount_device)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ rc = stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, ''):
+ logger.info(line)
+ error_messages = ""
+ for line in iter(stderr.readline, ''):
+ logger.error(line)
+ error_messages += line.rstrip()
+
+ if rc != 0:
+ raise Exception(
+ "Error executing on {0}: {1}".format(
+ command, error_messages))
+
+ def _mount(self, ssh, logger):
+ command = "sudo mkdir -p %s" % (self.filename)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, ''):
+ logger.info(line)
+ for line in iter(stderr.readline, ''):
+ logger.error(line)
+
+ command = "sudo mount %s %s" % (self.mount_device, self.filename)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ rc = stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, ''):
+ logger.info(line)
+ error_messages = ""
+ for line in iter(stderr.readline, ''):
+ logger.error(line)
+ error_messages += line.rstrip()
+
+ if rc != 0:
+ raise Exception(
+ "Could not mount {0}: {1}".format(
+ self.mount_device, error_messages))
+
def _resize_root_fs(self, ssh, logger):
command = "sudo /usr/sbin/resize2fs /dev/vda1"
logger.info("Attempting %s" % command)
@@ -454,10 +722,16 @@ class StorPerfMaster(object):
logger.error(line)
def _make_parameters(self):
+ random_str = uuid.uuid4().hex[:6].upper()
heat_parameters = {}
heat_parameters['public_network'] = self.public_network
heat_parameters['agent_count'] = self.agent_count
+ heat_parameters['volume_count'] = self.volume_count
heat_parameters['volume_size'] = self.volume_size
+ heat_parameters['keypair_name'] = 'storperf_agent_keypair' + random_str
+ heat_parameters['subnet_CIDR'] = self.subnet_CIDR
+ if self.volume_type is not None:
+ heat_parameters['volume_type'] = self.volume_type
heat_parameters['agent_image'] = self.agent_image
heat_parameters['agent_flavor'] = self.agent_flavor
heat_parameters['availability_zone'] = self.availability_zone
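A sketch of the env_values this yields (all values illustrative; volume_type is only included when one was set):

    heat_parameters = {
        'public_network': 'external',
        'agent_count': 2,
        'volume_count': 1,
        'volume_size': 10,
        'keypair_name': 'storperf_agent_keypair9A1C2F',
        'subnet_CIDR': '172.16.0.0/16',
        'volume_type': 'ceph',
        'agent_image': 'Ubuntu 16.04',
        'agent_flavor': 'storperf',
        'availability_zone': 'nova',
    }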
diff --git a/docker/storperf-master/storperf/test_executor.py b/docker/storperf-master/storperf/test_executor.py
index 9ed6386..cb7e478 100644
--- a/docker/storperf-master/storperf/test_executor.py
+++ b/docker/storperf-master/storperf/test_executor.py
@@ -11,6 +11,7 @@ import copy
import imp
import json
import logging
+from multiprocessing.pool import ThreadPool
from os import listdir
import os
from os.path import isfile, join
@@ -25,17 +26,23 @@ from storperf.db.job_db import JobDB
from storperf.fio.fio_invoker import FIOInvoker
from storperf.utilities.data_handler import DataHandler
from storperf.utilities.thread_gate import ThreadGate
+from storperf.workloads._custom_workload import _custom_workload
class UnknownWorkload(Exception):
pass
+class InvalidWorkloadName(Exception):
+ pass
+
+
class TestExecutor(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.workload_modules = []
+ self._custom_workloads = {}
self.filename = None
self.deadline = None
self.steady_state_samples = 10
@@ -43,7 +50,6 @@ class TestExecutor(object):
self.end_time = None
self.current_workload = None
self.workload_status = {}
- self.result_url = None
self._queue_depths = [1, 4, 8]
self._block_sizes = [512, 4096, 16384]
self.event_listeners = set()
@@ -53,6 +59,7 @@ class TestExecutor(object):
self.job_db = JobDB()
self._slaves = []
self._terminated = False
+ self._volume_count = 1
self._workload_executors = []
self._workload_thread = None
self._thread_gate = None
@@ -62,7 +69,7 @@ class TestExecutor(object):
try:
installer = os.environ['INSTALLER_TYPE']
except KeyError:
- self.logger.error("Cannot determine installer")
+ self.logger.warn("Cannot determine installer")
installer = "Unknown_installer"
self.metadata = {}
@@ -88,6 +95,25 @@ class TestExecutor(object):
self._slaves = slaves
@property
+ def volume_count(self):
+ return self._volume_count
+
+ @volume_count.setter
+ def volume_count(self, volume_count):
+ self.logger.debug("Set volume count to: " + str(volume_count))
+ self._volume_count = volume_count
+
+ @property
+ def custom_workloads(self):
+ return self._custom_workloads
+
+ @custom_workloads.setter
+ def custom_workloads(self, custom_workloads):
+ self.logger.debug("Set custom workloads to: %s " %
+ custom_workloads)
+ self._custom_workloads = custom_workloads
+
+ @property
def queue_depths(self):
return ','.join(self._queue_depths)
@@ -142,7 +168,7 @@ class TestExecutor(object):
self.logger.debug("Notifying event listener %s",
event_listener)
event_listener(self)
- except Exception, e:
+ except Exception as e:
self.logger.exception("While notifying listener %s", e)
def register_workloads(self, workloads):
@@ -175,7 +201,7 @@ class TestExecutor(object):
"ERROR: Unknown workload: " + workload)
if workload_module not in self.workload_modules:
self.workload_modules.append(workload_module)
- except ImportError, err:
+ except ImportError as err:
raise UnknownWorkload("ERROR: " + str(err))
def load_from_file(self, uri):
@@ -183,19 +209,23 @@ class TestExecutor(object):
path, fname = os.path.split(uri)
mname, _ = os.path.splitext(fname)
no_ext = os.path.join(path, mname)
- self.logger.debug("Looking for: " + no_ext)
if os.path.exists(no_ext + '.pyc'):
- self.logger.debug("Loading compiled: " + mname + " from " + no_ext)
return imp.load_compiled(mname, no_ext + '.pyc')
if os.path.exists(no_ext + '.py'):
- self.logger.debug("Compiling: " + mname + " from " + no_ext)
return imp.load_source(mname, no_ext + '.py')
return None
def execute(self, metadata):
self.job_db.create_job_id()
- self.job_db.record_workload_params(metadata)
self._setup_metadata(metadata)
+ try:
+ self.test_params()
+ except Exception as e:
+ self.terminate()
+ raise e
+ stripped_metadata = metadata.copy()
+ stripped_metadata.pop('ssh_key', None)
+ self.job_db.record_workload_params(stripped_metadata)
self._workload_thread = Thread(target=self.execute_workloads,
args=(),
name="Workload thread")
@@ -215,38 +245,63 @@ class TestExecutor(object):
terminated_hosts.append(workload.remote_host)
return terminated_hosts
- def execution_status(self, job_id):
-
- result = {}
- status = "Completed"
-
- if self.job_db.job_id == job_id and self._terminated is False:
- status = "Running"
-
- result['Status'] = status
- result['Workloads'] = self.workload_status
- result['TestResultURL'] = self.result_url
+ def test_params(self):
+ workloads = self._create_workload_matrix()
+ for current_workload in workloads:
+ workload = current_workload['workload']
+ self.logger.info("Testing FIO parameters for %s"
+ % current_workload)
+ result = self._execute_workload(current_workload,
+ workload,
+ parse_only=True)
+ if result:
+ message = result[0]
+ self.logger.error("FIO parameter validation failed")
+ raise Exception("Workload parameter validation failed %s"
+ % message)
+
+ def _execute_workload(self, current_workload, workload, parse_only=False):
+ workload.options['iodepth'] = str(current_workload['queue-depth'])
+ workload.options['bs'] = str(current_workload['blocksize'])
+ self._workload_executors = []
+ slave_threads = []
+ thread_pool = ThreadPool(processes=len(self.slaves) *
+ self.volume_count)
- else:
- jobs = self.job_db.fetch_jobs()
- self.logger.info("Jobs")
- self.logger.info(jobs)
- for job in jobs:
- if self.job_db.job_id == job_id and self._terminated is False:
- status = "Running"
- result['Status'] = status
- result['Workloads'] = self.workload_status
- result['TestResultURL'] = self.result_url
- else:
- result[job] = {}
- result[job]['Status'] = "Completed"
+ for slave in self.slaves:
+ volume_number = 0
+ while volume_number < self.volume_count:
+ slave_workload = copy.copy(current_workload['workload'])
+ slave_workload.remote_host = slave
+ last_char_of_filename = chr(
+ ord(slave_workload.filename[-1:]) + volume_number)
+ slave_workload.filename = ("%s%s" %
+ (slave_workload.filename[:-1],
+ last_char_of_filename))
+ self.logger.debug("Device to profile on %s: %s" %
+ (slave, slave_workload.filename))
+ self._workload_executors.append(slave_workload)
- return result
+ worker = thread_pool.apply_async(
+ self.execute_on_node, (slave_workload, parse_only))
+ slave_threads.append(worker)
+ volume_number += 1
+
+ final_result = None
+ for slave_thread in slave_threads:
+ self.logger.debug("Waiting on %s" % slave_thread)
+ result = slave_thread.get()
+ self.logger.debug("Done waiting for %s, exit status %s" %
+ (slave_thread, result))
+ if result:
+ final_result = result
+ return final_result
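When multiple volumes are attached, each executor gets its own target device, derived by advancing the last character of the base filename once per volume. With the default '/dev/vdb' base:

    base = '/dev/vdb'
    for volume_number in range(3):
        print(base[:-1] + chr(ord(base[-1:]) + volume_number))
    # /dev/vdb
    # /dev/vdc
    # /dev/vdd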
def execute_workloads(self):
self._terminated = False
self.logger.info("Starting job %s" % (self.job_db.job_id))
- self.event_listeners.clear()
data_handler = DataHandler()
self.register(data_handler.data_event)
@@ -257,12 +312,14 @@ class TestExecutor(object):
workloads = self._create_workload_matrix()
for current_workload in workloads:
+ if self._terminated:
+ continue
+
workload = current_workload['workload']
- self._thread_gate = ThreadGate(len(self.slaves),
- workload.options['status-interval'])
+ self._thread_gate = ThreadGate(
+ len(self.slaves) * min(1, self.volume_count),
+ float(workload.options['status-interval']))
- if self._terminated:
- return
self.current_workload = current_workload['name']
self.logger.info("Starting run %s" % self.current_workload)
@@ -277,27 +334,7 @@ class TestExecutor(object):
t = Thread(target=scheduler.run, args=())
t.start()
- workload.options['iodepth'] = str(current_workload['queue-depth'])
- workload.options['bs'] = str(current_workload['blocksize'])
-
- slave_threads = []
- for slave in self.slaves:
- slave_workload = copy.copy(current_workload['workload'])
- slave_workload.remote_host = slave
-
- self._workload_executors.append(slave_workload)
-
- t = Thread(target=self.execute_on_node,
- args=(slave_workload,),
- name="%s worker" % slave)
- t.daemon = False
- t.start()
- slave_threads.append(t)
-
- for slave_thread in slave_threads:
- self.logger.debug("Waiting on %s" % slave_thread)
- slave_thread.join()
- self.logger.debug("Done waiting for %s" % slave_thread)
+ self._execute_workload(current_workload, workload)
if not scheduler.empty():
try:
@@ -320,62 +357,111 @@ class TestExecutor(object):
report = {'report': json.dumps(self.metadata)}
self.job_db.record_workload_params(report)
self.job_db.job_id = None
- if self.result_url is not None:
- self.logger.info("Results can be found at %s" % self.result_url)
def _create_workload_matrix(self):
workloads = []
- for workload_module in self.workload_modules:
- workload_name = getattr(workload_module, "__name__")
-
- constructorMethod = getattr(workload_module, workload_name)
- workload = constructorMethod()
- if (self.filename is not None):
- workload.filename = self.filename
- workload.id = self.job_db.job_id
-
- if (self.filename is not None):
- workload.filename = self.filename
-
- if (workload_name.startswith("_")):
- iodepths = [8, ]
- blocksizes = [16384, ]
- else:
- iodepths = self._queue_depths
- blocksizes = self._block_sizes
+ if self._custom_workloads:
+ for workload_name in self._custom_workloads.keys():
+ real_name = workload_name
+ if real_name.startswith('_'):
+ real_name = real_name.replace('_', '')
+ self.logger.info("--- real_name: %s" % real_name)
+
+ if not real_name.isalnum():
+ raise InvalidWorkloadName(
+ "Workload name must be alphanumeric only: %s" %
+ real_name)
+ workload = _custom_workload()
+ workload.options['name'] = real_name
+ workload.name = workload_name
+ if (self.filename is not None):
+ workload.filename = self.filename
+ workload.id = self.job_db.job_id
+
+ workload_params = self._custom_workloads[workload_name]
+ for param, value in workload_params.items():
+ if param == "readwrite":
+ param = "rw"
+ if param in workload.fixed_options:
+ self.logger.warn("Skipping fixed option %s" % param)
+ continue
+ workload.options[param] = value
+
+ for blocksize in self._block_sizes:
+ for iodepth in self._queue_depths:
+
+ name = '%s.%s.queue-depth.%s.block-size.%s' % \
+ (self.job_db.job_id, workload_name, iodepth,
+ blocksize)
+ self.workload_status[name] = "Pending"
+
+ workload.options['bs'] = blocksize
+ workload.options['iodepth'] = iodepth
+
+ parameters = {'queue-depth': iodepth,
+ 'blocksize': blocksize,
+ 'name': name,
+ 'workload_name': workload_name,
+ 'status': 'Pending',
+ 'workload': workload}
+
+ self.logger.info("Workload %s=%s" %
+ (name, workload.options))
+
+ workloads.append(parameters)
+ else:
+ for workload_module in self.workload_modules:
+ workload_name = getattr(workload_module, "__name__")
+
+ constructorMethod = getattr(workload_module, workload_name)
+ workload = constructorMethod()
+ if (self.filename is not None):
+ workload.filename = self.filename
+ workload.id = self.job_db.job_id
+
+ if (workload_name.startswith("_")):
+ iodepths = [8, ]
+ blocksizes = [16384, ]
+ else:
+ iodepths = self._queue_depths
+ blocksizes = self._block_sizes
- for blocksize in blocksizes:
- for iodepth in iodepths:
+ for blocksize in blocksizes:
+ for iodepth in iodepths:
- name = '%s.%s.queue-depth.%s.block-size.%s' % \
- (self.job_db.job_id, workload_name, iodepth, blocksize)
- self.workload_status[name] = "Pending"
+ name = '%s.%s.queue-depth.%s.block-size.%s' % \
+ (self.job_db.job_id, workload_name, iodepth,
+ blocksize)
+ self.workload_status[name] = "Pending"
- parameters = {'queue-depth': iodepth,
- 'blocksize': blocksize,
- 'name': name,
- 'workload_name': workload_name,
- 'status': 'Pending',
- 'workload': workload}
+ parameters = {'queue-depth': iodepth,
+ 'blocksize': blocksize,
+ 'name': name,
+ 'workload_name': workload_name,
+ 'status': 'Pending',
+ 'workload': workload}
- self.logger.info("Workload %s=%s" % (name, parameters))
+ self.logger.info("Workload %s=%s" % (name, parameters))
- workloads.append(parameters)
+ workloads.append(parameters)
return workloads
- def execute_on_node(self, workload):
+ def execute_on_node(self, workload, parse_only=False):
invoker = FIOInvoker(self.metadata)
- invoker.register(self.event)
workload.invoker = invoker
self.logger.info("Starting " + workload.fullname)
- self.job_db.start_workload(workload)
- workload.execute()
- self.job_db.end_workload(workload)
- invoker.unregister(self.event)
+ if not parse_only:
+ invoker.register(self.event)
+ self.job_db.start_workload(workload)
+ result = workload.execute(parse_only)
+ if not parse_only:
+ self.job_db.end_workload(workload)
+ invoker.unregister(self.event)
self.logger.info("Ended " + workload.fullname)
+ return result
diff --git a/docker/storperf-master/storperf/utilities/data_handler.py b/docker/storperf-master/storperf/utilities/data_handler.py
index b85517f..98ae640 100644
--- a/docker/storperf-master/storperf/utilities/data_handler.py
+++ b/docker/storperf-master/storperf/utilities/data_handler.py
@@ -59,6 +59,12 @@ class DataHandler(object):
metrics[metric][io_type]['series'] = series
metrics[metric][io_type]['steady_state'] = steady
+ metrics[metric][io_type]['series_slope'] = \
+ math.slope_series(series)
+ metrics[metric][io_type]['series_min'] = \
+ math.min_series(series)
+ metrics[metric][io_type]['series_max'] = \
+ math.max_series(series)
treated_data = DataTreatment.data_treatment(series)
metrics[metric][io_type]['slope'] = \
@@ -151,14 +157,14 @@ class DataHandler(object):
test_db = os.environ.get('TEST_DB_URL')
if test_db is not None:
self.logger.info("Pushing results to %s" % (test_db))
- try:
- response = test_results_db.push_results_to_db(
- test_db,
- executor.metadata,
- self.logger)
- executor.result_url = response['href']
- except Exception:
- self.logger.exception("Error pushing results into Database")
+ stripped_metadata = executor.metadata
+ stripped_metadata.pop("ssh_key", None)
+ response = test_results_db.push_results_to_db(
+ test_db,
+ stripped_metadata,
+ self.logger)
+ if response:
+ self.logger.info("Results reference: %s" % response['href'])
def _determine_criteria(self, metadata):
steady_state = True
diff --git a/docker/storperf-master/storperf/utilities/ip_helper.py b/docker/storperf-master/storperf/utilities/ip_helper.py
new file mode 100644
index 0000000..06087b0
--- /dev/null
+++ b/docker/storperf-master/storperf/utilities/ip_helper.py
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2019 VMware and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+def parse_address_and_port(address):
+ port = 22
+ if '.' in address:
+ # this is IPv4
+ if ':' in address:
+ host = address.split(':')[0]
+ port = int(address.split(':')[1])
+ else:
+ host = address
+ else:
+ if ']' in address:
+ # this is IPv6
+ host = address.split(']')[0].split('[')[1]
+ port = int(address.split(']')[1].split(':')[1])
+ else:
+ host = address
+ return (host, port)
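A quick sketch of the parser's behaviour (the port defaults to 22; IPv6 addresses must be bracketed to carry a port):

    from storperf.utilities import ip_helper

    ip_helper.parse_address_and_port("192.168.1.10")       # ('192.168.1.10', 22)
    ip_helper.parse_address_and_port("192.168.1.10:2222")  # ('192.168.1.10', 2222)
    ip_helper.parse_address_and_port("fe80::1")            # ('fe80::1', 22)
    ip_helper.parse_address_and_port("[fe80::1]:2222")     # ('fe80::1', 2222)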
diff --git a/docker/storperf-master/storperf/utilities/math.py b/docker/storperf-master/storperf/utilities/math.py
index 8e04134..173c39e 100644
--- a/docker/storperf-master/storperf/utilities/math.py
+++ b/docker/storperf-master/storperf/utilities/math.py
@@ -8,6 +8,9 @@
##############################################################################
import copy
+RANGE_DEVIATION = 0.20
+SLOPE_DEVIATION = 0.10
+
def slope(data_series):
"""
@@ -114,3 +117,67 @@ def average(data_series):
average = data_sum / float(m)
return average
+
+
+def slope_series(data_series):
+ """
+ This function returns the trend line implied by the series' average
+ and slope, evaluated at each sample (samples assumed evenly spaced).
+ """
+
+ new_series = []
+ average_series = []
+ for point in data_series:
+ average_series.append(point[1])
+
+ avg = average(average_series)
+ slp = slope(data_series)
+
+ if slp is None or avg is None:
+ return new_series
+
+ multiplier = float(len(data_series) + 1) / 2.0 - len(data_series)
+ for index, _ in data_series:
+ new_value = avg + (slp * multiplier)
+ new_series.append([index, new_value])
+ multiplier += 1
+
+ return new_series
+
+
+def min_series(data_series):
+ """
+ This function returns a copy of the series with every value set
+ to the lower steady-state bound (average - RANGE_DEVIATION)
+ """
+
+ new_series = []
+ average_series = []
+ for point in data_series:
+ average_series.append(point[1])
+ avg = average(average_series)
+ low = avg - (avg * RANGE_DEVIATION)
+
+ for index, _ in data_series:
+ new_series.append([index, low])
+
+ return new_series
+
+
+def max_series(data_series):
+ """
+ This function returns a copy of the series with every value set
+ to the upper steady-state bound (average + RANGE_DEVIATION)
+ """
+
+ new_series = []
+ average_series = []
+ for point in data_series:
+ average_series.append(point[1])
+ avg = average(average_series)
+ high = avg + (avg * RANGE_DEVIATION)
+
+ for index, _ in data_series:
+ new_series.append([index, high])
+
+ return new_series
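Taken together these give the reporting side three reference series to plot against the measured data: the fitted trend line and the ±20% steady-state envelope around the average. A small worked example (assuming slope() is the least-squares slope used by the steady-state check):

    from storperf.utilities import math as storperf_math

    data = [[1, 5.0], [2, 6.0], [3, 7.0]]  # average 6.0, slope 1.0
    storperf_math.slope_series(data)   # [[1, 5.0], [2, 6.0], [3, 7.0]], the trend line
    storperf_math.min_series(data)     # every value 6.0 - 20% = 4.8
    storperf_math.max_series(data)     # every value 6.0 + 20% = 7.2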
diff --git a/docker/storperf-master/storperf/utilities/steady_state.py b/docker/storperf-master/storperf/utilities/steady_state.py
index 13f609d..23a74d7 100644
--- a/docker/storperf-master/storperf/utilities/steady_state.py
+++ b/docker/storperf-master/storperf/utilities/steady_state.py
@@ -9,7 +9,8 @@
import logging
from storperf.utilities import data_treatment as DataTreatment
-from storperf.utilities import math as math
+from storperf.utilities import math
+from storperf.utilities.math import RANGE_DEVIATION, SLOPE_DEVIATION
def steady_state(data_series):
@@ -38,15 +39,19 @@ def steady_state(data_series):
average_value is not None):
# Verification of the Steady State conditions following the SNIA
# definition
- range_condition = abs(range_value) <= 0.20 * abs(average_value)
- slope_condition = abs(slope_value) <= 0.10 * abs(average_value)
+ range_condition = abs(range_value) <= RANGE_DEVIATION * \
+ abs(average_value)
+ slope_condition = abs(slope_value) <= SLOPE_DEVIATION * \
+ abs(average_value)
steady_state = range_condition and slope_condition
logger.debug("Range %s <= %s?" % (abs(range_value),
- (0.20 * abs(average_value))))
+ (RANGE_DEVIATION *
+ abs(average_value))))
logger.debug("Slope %s <= %s?" % (abs(slope_value),
- (0.10 * abs(average_value))))
+ (SLOPE_DEVIATION *
+ abs(average_value))))
logger.debug("Steady State? %s" % steady_state)
else:
steady_state = False
diff --git a/docker/storperf-master/storperf/workloads/_base_workload.py b/docker/storperf-master/storperf/workloads/_base_workload.py
index d5282d7..5aa596e 100644
--- a/docker/storperf-master/storperf/workloads/_base_workload.py
+++ b/docker/storperf-master/storperf/workloads/_base_workload.py
@@ -30,8 +30,9 @@ class _base_workload(object):
self.invoker = None
self.remote_host = None
self.id = None
+ self.name = self.__class__.__name__
- def execute(self):
+ def execute(self, parse_only=False):
if self.invoker is None:
raise ValueError("No invoker has been set")
@@ -43,19 +44,29 @@ class _base_workload(object):
self.options['size'] = "100%"
self.logger.debug(
"Profiling a device, using 100% of " + self.filename)
+ self.options['filename'] = self.filename
else:
- self.options['size'] = self.default_filesize
+ if 'size' not in self.options:
+ self.options['size'] = self.default_filesize
self.logger.debug("Profiling a filesystem, using " +
- self.default_filesize + " file")
-
- self.options['filename'] = self.filename
+ self.options['size'] + " file")
+ if not self.filename.endswith('/'):
+ self.filename = self.filename + "/"
+ self.options['directory'] = self.filename
+ self.options['filename_format'] = "'storperf.$jobnum.$filenum'"
self.setup()
- for key, value in self.options.iteritems():
- args.append('--' + key + "=" + value)
+ for key, value in self.options.items():
+ if value is not None:
+ args.append('--' + key + "=" + str(value))
+ else:
+ args.append('--' + key)
+
+ if parse_only:
+ args.append('--parse-only')
- self.invoker.execute(args)
+ return self.invoker.execute(args)
def terminate(self):
if self.invoker is not None:
@@ -74,9 +85,11 @@ class _base_workload(object):
@property
def fullname(self):
+ host_file = self.remote_host + "." + self.filename
+ host_file = host_file.replace(".", "-").replace("/", "-")
return ("%s.%s.queue-depth.%s.block-size.%s.%s"
% (str(self.id),
- self.__class__.__name__,
+ self.name,
str(self.options['iodepth']),
str(self.options['bs']),
- str(self.remote_host).replace(".", "-")))
+ host_file))
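Because fullname now embeds both the host and the target device, dots and slashes are folded to dashes so the result stays a valid Graphite metric segment. For example (illustrative values):

    host_file = ("10.0.0.1" + "." + "/dev/vdb").replace(".", "-").replace("/", "-")
    # -> '10-0-0-1--dev-vdb', so a run is named e.g.
    # '<job-id>.rw.queue-depth.8.block-size.4096.10-0-0-1--dev-vdb'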
diff --git a/docker/storperf-master/storperf/workloads/_custom_workload.py b/docker/storperf-master/storperf/workloads/_custom_workload.py
new file mode 100644
index 0000000..5cd37b3
--- /dev/null
+++ b/docker/storperf-master/storperf/workloads/_custom_workload.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+from storperf.workloads import _base_workload
+
+
+class _custom_workload(_base_workload._base_workload):
+
+ def __init__(self):
+ self.logger = logging.getLogger(self.__class__.__name__)
+ self.default_filesize = "1G"
+ self.filename = '/dev/vdb'
+ self.fixed_options = {
+ 'output-format': 'json',
+ 'status-interval': '60'
+ }
+ self.options = {
+ 'ioengine': 'libaio',
+ 'loops': '200',
+ 'direct': '1',
+ 'numjobs': '1',
+ 'rw': 'read',
+ 'bs': '64k',
+ 'iodepth': '1'
+ }
+ self.options.update(self.fixed_options)
+ self.invoker = None
+ self.remote_host = None
+ self.id = None
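fixed_options is merged into options last at construction time, and _create_workload_matrix above skips any user-supplied parameter whose key is fixed, so JSON output and the 60-second status interval cannot be overridden by a custom workload definition. Sketch of the merge semantics:

    options = {'rw': 'read', 'output-format': 'text'}   # user attempt
    fixed_options = {'output-format': 'json', 'status-interval': '60'}
    options.update(fixed_options)
    # options['output-format'] == 'json', the fixed value wins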
diff --git a/docker/storperf-master/storperf/workloads/_ssd_preconditioning.py b/docker/storperf-master/storperf/workloads/_ssd_preconditioning.py
deleted file mode 100644
index cce3c31..0000000
--- a/docker/storperf-master/storperf/workloads/_ssd_preconditioning.py
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 EMC and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from storperf.workloads import _base_workload
-
-
-class _ssd_preconditioning(_base_workload._base_workload):
-
- def setup(self):
- self.options['name'] = 'ssd_preconditioning'
- self.options['rw'] = 'randwrite'
- self.options['loops'] = '1'
diff --git a/docker/storperf-master/tests/carbon_tests/emitter_test.py b/docker/storperf-master/tests/carbon_tests/emitter_test.py
index f5a78d1..7ea515b 100644
--- a/docker/storperf-master/tests/carbon_tests/emitter_test.py
+++ b/docker/storperf-master/tests/carbon_tests/emitter_test.py
@@ -11,7 +11,7 @@ import json
from time import strptime
import unittest
-import mock
+from unittest import mock
from storperf.carbon import converter
from storperf.carbon.emitter import CarbonMetricTransmitter
@@ -69,9 +69,15 @@ class CarbonMetricTransmitterTest(unittest.TestCase):
emitter.carbon_port = self.listen_port
emitter.transmit_metrics(result, None)
+ element = ""
+ for element in data:
+ element = element.decode('utf-8')
+ if element.startswith("host.run-name"):
+ break
+
self.assertEqual("host.run-name.key 123.0 975542400\n",
- data[1],
- data[1])
+ element,
+ data)
@mock.patch("socket.socket")
@mock.patch("time.gmtime")
@@ -90,9 +96,14 @@ class CarbonMetricTransmitterTest(unittest.TestCase):
emitter.carbon_port = self.listen_port
emitter.transmit_metrics(result, None)
+ element = ""
+ for element in data:
+ element = element.decode('utf-8')
+ if element.startswith("None.commit-marker"):
+ break
self.assertEqual("None.commit-marker 975542400 975542400\n",
- data[1],
- data[1])
+ element,
+ data)
@mock.patch("socket.socket")
def test_connect_fails(self, mock_socket):
diff --git a/docker/storperf-master/tests/carbon_tests/json_to_carbon_test.py b/docker/storperf-master/tests/carbon_tests/json_to_carbon_test.py
index 523ff77..6043f10 100644
--- a/docker/storperf-master/tests/carbon_tests/json_to_carbon_test.py
+++ b/docker/storperf-master/tests/carbon_tests/json_to_carbon_test.py
@@ -7,10 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from storperf.carbon.converter import Converter
import json
import unittest
+from storperf.carbon.converter import Converter
+
class JSONToCarbonTest(unittest.TestCase):
@@ -112,5 +113,6 @@ class JSONToCarbonTest(unittest.TestCase):
"value", result["list_with_spaces.1.key"],
result["list_with_spaces.1.key"])
+
if __name__ == '__main__':
unittest.main()
diff --git a/docker/storperf-master/tests/db_tests/configuration_db_test.py b/docker/storperf-master/tests/db_tests/configuration_db_test.py
deleted file mode 100644
index d8b021a..0000000
--- a/docker/storperf-master/tests/db_tests/configuration_db_test.py
+++ /dev/null
@@ -1,71 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 EMC and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-from storperf.db.configuration_db import ConfigurationDB
-import os
-import unittest
-
-
-class ConfigurationDBTest(unittest.TestCase):
-
- def setUp(self):
- ConfigurationDB.db_name = __name__ + '.db'
- try:
- os.remove(ConfigurationDB.db_name)
- except OSError:
- pass
- self.config_db = ConfigurationDB()
-
- def tearDown(self):
- try:
- os.remove(ConfigurationDB.db_name)
- except OSError:
- pass
-
- def test_create_key(self):
- expected = "ABCDE-12345"
-
- self.config_db.set_configuration_value(
- "test", "key", expected)
-
- actual = self.config_db.get_configuration_value(
- "test", "key")
-
- self.assertEqual(
- expected, actual, "Did not expect: " + str(actual))
-
- def test_update_key(self):
- expected = "ABCDE-12345"
-
- self.config_db.set_configuration_value(
- "test", "key", "initial_value")
-
- self.config_db.set_configuration_value(
- "test", "key", expected)
-
- actual = self.config_db.get_configuration_value(
- "test", "key")
-
- self.assertEqual(
- expected, actual, "Did not expect: " + str(actual))
-
- def test_deleted_key(self):
- expected = None
-
- self.config_db.set_configuration_value(
- "test", "key", "initial_value")
-
- self.config_db.delete_configuration_value(
- "test", "key")
-
- actual = self.config_db.get_configuration_value(
- "test", "key")
-
- self.assertEqual(
- expected, actual, "Did not expect: " + str(actual))
diff --git a/docker/storperf-master/tests/db_tests/graphite_db_test.py b/docker/storperf-master/tests/db_tests/graphite_db_test.py
index d5fbbfc..2fabfd4 100644
--- a/docker/storperf-master/tests/db_tests/graphite_db_test.py
+++ b/docker/storperf-master/tests/db_tests/graphite_db_test.py
@@ -9,8 +9,7 @@
import unittest
-import mock
-
+from unittest import mock
from storperf.db.graphite_db import GraphiteDB
diff --git a/docker/storperf-master/tests/db_tests/job_db_test.py b/docker/storperf-master/tests/db_tests/job_db_test.py
index 25fda1f..5201963 100644
--- a/docker/storperf-master/tests/db_tests/job_db_test.py
+++ b/docker/storperf-master/tests/db_tests/job_db_test.py
@@ -11,8 +11,7 @@ import os
import sqlite3
import unittest
-import mock
-
+from unittest import mock
from storperf.db.job_db import JobDB
from storperf.workloads.rr import rr
diff --git a/docker/storperf-master/tests/fio_tests/fio_invoker_test.py b/docker/storperf-master/tests/fio_tests/fio_invoker_test.py
index 4672651..3a30500 100644
--- a/docker/storperf-master/tests/fio_tests/fio_invoker_test.py
+++ b/docker/storperf-master/tests/fio_tests/fio_invoker_test.py
@@ -7,11 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from StringIO import StringIO
import json
import unittest
from storperf.fio.fio_invoker import FIOInvoker
+from io import BytesIO
class Test(unittest.TestCase):
@@ -34,7 +34,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(self.simple_dictionary, self.metric)
@@ -43,7 +43,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
terminating = "fio: terminating on signal 2\n"
- output = StringIO(terminating + string + "\n")
+ output = BytesIO((terminating + string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(self.simple_dictionary, self.metric)
@@ -52,7 +52,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = "{'key': 'value'}"
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(None, self.metric)
@@ -61,7 +61,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = "{'key':\n}"
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(None, self.metric)
@@ -71,7 +71,7 @@ class Test(unittest.TestCase):
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
self.fio_invoker.terminated = True
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(None, self.metric)
@@ -81,7 +81,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(self.simple_dictionary, self.metric)
diff --git a/docker/storperf-master/tests/storperf_master_test.py b/docker/storperf-master/tests/storperf_master_test.py
index f328982..1edac6d 100644
--- a/docker/storperf-master/tests/storperf_master_test.py
+++ b/docker/storperf-master/tests/storperf_master_test.py
@@ -7,45 +7,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import os
import unittest
-import mock
+from unittest.mock import patch
-from storperf.db.configuration_db import ConfigurationDB
from storperf.storperf_master import StorPerfMaster
-class MockStack(object):
-
- def __init__(self):
- pass
-
- def get_stack(self):
- return None
-
-
class StorPerfMasterTest(unittest.TestCase):
def setUp(self):
- ConfigurationDB.db_name = __name__ + '.db'
- try:
- os.remove(ConfigurationDB.db_name)
- except OSError:
- pass
- with mock.patch("storperf.storperf_master.OSCreds"), \
- mock.patch(
+ with patch("storperf.storperf_master.OSCreds"), \
+ patch(
"storperf.storperf_master.OpenStackHeatStack") as oshs:
oshs.return_value.get_stack.return_value = None
self.storperf = StorPerfMaster()
- def tearDown(self):
- try:
- os.remove(ConfigurationDB.db_name)
- except OSError:
- pass
-
def test_agent_count(self):
expected = 10
@@ -82,6 +60,15 @@ class StorPerfMasterTest(unittest.TestCase):
self.assertEqual(
expected, actual, "Did not expect: " + str(actual))
+ def test_volume_type(self):
+ expected = 'tripleo-ceph'
+
+ self.storperf.volume_type = expected
+ actual = self.storperf.volume_type
+
+ self.assertEqual(
+ expected, actual, "Did not expect: " + str(actual))
+
def test_agent_network(self):
expected = "ABCDEF"
diff --git a/docker/storperf-master/tests/utilities_tests/data_handler_test.py b/docker/storperf-master/tests/utilities_tests/data_handler_test.py
index 35150dd..7e8cbcc 100644
--- a/docker/storperf-master/tests/utilities_tests/data_handler_test.py
+++ b/docker/storperf-master/tests/utilities_tests/data_handler_test.py
@@ -10,7 +10,7 @@
import os
import unittest
-import mock
+from unittest import mock
from storperf.utilities.data_handler import DataHandler
@@ -311,10 +311,10 @@ class DataHandlerTest(unittest.TestCase):
def test_pass_criteria(self):
metadata = {
"details": {
- "steady_state": {
- "_warm_up.queue-depth.8.block-size.16384": False,
- "rw.queue-depth.4.block-size.16384": True
- }
+ "steady_state": {
+ "_warm_up.queue-depth.8.block-size.16384": False,
+ "rw.queue-depth.4.block-size.16384": True
+ }
},
}
criteria = self.data_handler._determine_criteria(metadata)
@@ -325,11 +325,11 @@ class DataHandlerTest(unittest.TestCase):
def test_fail_criteria(self):
metadata = {
"details": {
- "steady_state": {
- "_warm_up.queue-depth.8.block-size.16384": False,
- "rw.queue-depth.4.block-size.16384": True,
- "rw.queue-depth.8.block-size.16384": False
- }
+ "steady_state": {
+ "_warm_up.queue-depth.8.block-size.16384": False,
+ "rw.queue-depth.4.block-size.16384": True,
+ "rw.queue-depth.8.block-size.16384": False
+ }
},
}
criteria = self.data_handler._determine_criteria(metadata)
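The two tests above define the pass/fail rule only by example. A sketch of logic consistent with them, an assumption rather than the project's actual _determine_criteria (the PASS/FAIL labels are likewise illustrative): every steady_state entry must be True, except warm-up workloads, whose names start with an underscore and are ignored.

    def determine_criteria(metadata):
        # Warm-up workloads (leading underscore) do not count against the
        # verdict; every remaining entry must have reached steady state.
        steady_state = metadata['details']['steady_state']
        relevant = [ok for name, ok in steady_state.items()
                    if not name.startswith('_')]
        return 'PASS' if all(relevant) else 'FAIL'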
diff --git a/docker/storperf-master/tests/utilities_tests/ip_helper_test.py b/docker/storperf-master/tests/utilities_tests/ip_helper_test.py
new file mode 100644
index 0000000..f2d662b
--- /dev/null
+++ b/docker/storperf-master/tests/utilities_tests/ip_helper_test.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Dell EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from storperf.utilities import ip_helper
+
+
+class Test(unittest.TestCase):
+
+ def testNoPortInIPv4(self):
+ host, port = ip_helper.parse_address_and_port("127.0.0.1")
+ self.assertEqual("127.0.0.1", host)
+ self.assertEqual(22, port)
+
+ def testPortInIPv4(self):
+ host, port = ip_helper.parse_address_and_port("127.0.0.1:2222")
+ self.assertEqual("127.0.0.1", host)
+ self.assertEqual(2222, port)
+
+ def testNoPortInIPv6(self):
+ host, port = ip_helper.parse_address_and_port(
+ "1fe80::58bb:c8b:f2f2:c888")
+ self.assertEqual("1fe80::58bb:c8b:f2f2:c888",
+ host)
+ self.assertEqual(22, port)
+
+ def testPortInIPv6(self):
+ host, port = ip_helper.parse_address_and_port(
+ "[1fe80::58bb:c8b:f2f2:c888]:2222")
+ self.assertEqual("1fe80::58bb:c8b:f2f2:c888",
+ host)
+ self.assertEqual(2222, port)
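The new tests pin down the helper's contract: a bare address defaults to SSH port 22, host:port splits on the colon, and an IPv6 address must be bracketed to carry a port. A minimal sketch that satisfies these cases, assumed rather than copied from the project's ip_helper module:

    def parse_address_and_port(address, default_port=22):
        if address.startswith('['):
            # Bracketed IPv6, optionally followed by :port
            host, _, port = address.partition(']:')
            return host.strip('[]'), int(port) if port else default_port
        if address.count(':') == 1:
            # IPv4 address or hostname with an explicit port
            host, port = address.split(':')
            return host, int(port)
        # Bare host, or unbracketed IPv6 (two or more colons): no port given
        return address, default_port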
diff --git a/docker/storperf-master/tests/utilities_tests/math_range_test.py b/docker/storperf-master/tests/utilities_tests/math_range_test.py
index 90519e7..bbbbcf9 100644
--- a/docker/storperf-master/tests/utilities_tests/math_range_test.py
+++ b/docker/storperf-master/tests/utilities_tests/math_range_test.py
@@ -118,3 +118,15 @@ class MathRangeTest(unittest.TestCase):
data_series.insert(randrange(len(data_series)), -18954.98)
actual = Range.range_value(data_series)
self.assertEqual(expected, actual)
+
+ def test_min_series(self):
+ expected = [[1, 5.6], [2, 5.6], [3, 5.6], [4, 5.6]]
+ data_series = [[1, 6], [2, 5], [3, 7], [4, 10]]
+ actual = Range.min_series(data_series)
+ self.assertEqual(expected, actual)
+
+ def test_max_series(self):
+ expected = [[1, 8.4], [2, 8.4], [3, 8.4], [4, 8.4]]
+ data_series = [[1, 6], [2, 5], [3, 7], [4, 10]]
+ actual = Range.max_series(data_series)
+ self.assertEqual(expected, actual)
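The expected values in both new tests are consistent with a flat band 20% below and 20% above the average of the y values, with the original x labels preserved. A sketch of that reading, inferred from this one data set rather than a confirmed formula from the project's Range class:

    def _average_y(data_series):
        return sum(float(p[1]) for p in data_series) / len(data_series)

    def min_series(data_series):
        # Flat line 20% below the average y value, x labels unchanged.
        low = _average_y(data_series) * 0.8
        return [[p[0], low] for p in data_series]

    def max_series(data_series):
        # Flat line 20% above the average y value, x labels unchanged.
        high = _average_y(data_series) * 1.2
        return [[p[0], high] for p in data_series]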
diff --git a/docker/storperf-master/tests/utilities_tests/math_slope_series_test.py b/docker/storperf-master/tests/utilities_tests/math_slope_series_test.py
new file mode 100644
index 0000000..cfa6efe
--- /dev/null
+++ b/docker/storperf-master/tests/utilities_tests/math_slope_series_test.py
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright (c) 2016 CENGN and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+from storperf.utilities import math
+
+
+class MathSlopeSeriesTest(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ pass
+
+ def test_slope_empty_series(self):
+ expected = []
+ actual = math.slope_series([])
+ self.assertEqual(expected, actual)
+
+ def test_slope_integer_series(self):
+ expected = [[1, 4.9], [2, 6.3], [3, 7.7], [4, 9.1]]
+ actual = math.slope_series([[1, 6], [2, 5], [3, 7], [4, 10]])
+ self.assertEqual(expected, actual)
+
+ def test_slope_mix_series(self):
+ expected = [[1, 4.9], [2, 6.3], [3, 7.7], [4, 9.1]]
+ actual = math.slope_series([[1.0, 6], [2, 5.0], [3, 7], [4.0, 10]])
+ self.assertEqual(expected, actual)
+
+ def test_slope_0_in_y(self):
+ expected = [
+ [15.5, 0.8333333333333333],
+ [16.5, 0.3333333333333333],
+ [17.5, -0.16666666666666669]]
+ actual = math.slope_series([[15.5, 1], [16.5, 0], [17.5, 0]])
+ self.assertEqual(expected, actual)
+
+ def test_slope_gaps_in_x(self):
+ expected = [
+ [1, 1.3571428571428572],
+ [2, 2.0],
+ [4, 2.642857142857143]]
+ actual = math.slope_series([[1, 1], [2, 2], [4, 3]])
+ self.assertEqual(expected, actual)
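These expected values can be reproduced, modulo floating-point rounding, by fitting an ordinary least-squares line to the samples, starting the series at the fitted value for the first sample, and advancing by one slope step per sample rather than per x unit, which is what makes the gaps-in-x case come out as it does. A sketch of that behavior, an inference rather than the project's actual math module:

    def slope(data_series):
        # Ordinary least-squares slope over [x, y] pairs; None below 2 points.
        n = len(data_series)
        if n < 2:
            return None
        xs = [float(p[0]) for p in data_series]
        ys = [float(p[1]) for p in data_series]
        mean_x, mean_y = sum(xs) / n, sum(ys) / n
        num = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys))
        den = sum((x - mean_x) ** 2 for x in xs)
        return num / den if den else None

    def slope_series(data_series):
        if not data_series:
            return []
        m = slope(data_series)
        if m is None:
            return []
        xs = [float(p[0]) for p in data_series]
        ys = [float(p[1]) for p in data_series]
        # Fitted value at the first sample, then one slope step per sample.
        start = sum(ys) / len(ys) + m * (xs[0] - sum(xs) / len(xs))
        return [[p[0], start + m * i] for i, p in enumerate(data_series)]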
diff --git a/docker/storperf-master/tests/utilities_tests/math_slope_test.py b/docker/storperf-master/tests/utilities_tests/math_slope_test.py
index 24d5cd7..5c286ad 100644
--- a/docker/storperf-master/tests/utilities_tests/math_slope_test.py
+++ b/docker/storperf-master/tests/utilities_tests/math_slope_test.py
@@ -21,6 +21,11 @@ class MathSlopeTest(unittest.TestCase):
actual = Slope.slope([])
self.assertEqual(expected, actual)
+ def test_slope_one_series(self):
+ expected = None
+ actual = Slope.slope([[1, 0.0]])
+ self.assertEqual(expected, actual)
+
def test_slope_integer_series(self):
expected = 1.4
actual = Slope.slope([[1, 6], [2, 5], [3, 7], [4, 10]])
diff --git a/docker/storperf-master/tests/workload_tests/workload_subclass_test.py b/docker/storperf-master/tests/workload_tests/workload_subclass_test.py
index e9e47f3..c61fe74 100644
--- a/docker/storperf-master/tests/workload_tests/workload_subclass_test.py
+++ b/docker/storperf-master/tests/workload_tests/workload_subclass_test.py
@@ -22,33 +22,35 @@ class WorkloadSubclassTest(unittest.TestCase):
def test_local_name(self):
workload = rr()
self.assertEqual(workload.fullname,
- "None.rr.queue-depth.1.block-size.64k.None",
+ "None.rr.queue-depth.1.block-size.64k.None--dev-vdb",
workload.fullname)
def test_remote_name(self):
workload = rw()
workload.remote_host = "192.168.0.1"
- self.assertEqual(workload.fullname,
- "None.rw.queue-depth.1.block-size.64k.192-168-0-1",
- workload.fullname)
+ self.assertEqual(
+ workload.fullname,
+ "None.rw.queue-depth.1.block-size.64k.192-168-0-1--dev-vdb",
+ workload.fullname)
def test_blocksize(self):
workload = rs()
workload.options["bs"] = "4k"
self.assertEqual(workload.fullname,
- "None.rs.queue-depth.1.block-size.4k.None",
+ "None.rs.queue-depth.1.block-size.4k.None--dev-vdb",
workload.fullname)
def test_queue_depth(self):
workload = wr()
workload.options["iodepth"] = "8"
self.assertEqual(workload.fullname,
- "None.wr.queue-depth.8.block-size.64k.None",
+ "None.wr.queue-depth.8.block-size.64k.None--dev-vdb",
workload.fullname)
def test_id(self):
workload = ws()
workload.id = "workloadid"
- self.assertEqual(workload.fullname,
- "workloadid.ws.queue-depth.1.block-size.64k.None",
- workload.fullname)
+ self.assertEqual(
+ workload.fullname,
+ "workloadid.ws.queue-depth.1.block-size.64k.None--dev-vdb",
+ workload.fullname)
diff --git a/docker/storperf-reporting/Dockerfile b/docker/storperf-reporting/Dockerfile
index ac507a6..6d017ae 100644
--- a/docker/storperf-reporting/Dockerfile
+++ b/docker/storperf-reporting/Dockerfile
@@ -16,20 +16,22 @@
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
+ARG ALPINE_VERSION=v3.10
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
MAINTAINER Mark Beierl <mark.beierl@dell.com>
-LABEL version="0.1" description="OPNFV Storperf Reporting Container"
+LABEL version="8.0" description="OPNFV Storperf Reporting Container"
ARG BRANCH=master
-RUN apk add --update python py-pip
+RUN ulimit -n 1024
+
+RUN apk add --update python3=3.7.5-r1
COPY . /home/opnfv/storperf-reporting
WORKDIR /home/opnfv/storperf-reporting/src
-RUN pip install -r /home/opnfv/storperf-reporting/requirements.txt
+RUN python3 -m pip install -r /home/opnfv/storperf-reporting/requirements.txt
-CMD ["python", "app.py"]
+CMD ["python3", "app.py"]
EXPOSE 5000
diff --git a/docker/storperf-reporting/src/app.py b/docker/storperf-reporting/src/app.py
index 79baa33..1c48eb0 100644
--- a/docker/storperf-reporting/src/app.py
+++ b/docker/storperf-reporting/src/app.py
@@ -128,14 +128,14 @@ def file_not_found():
return redirect(url_for('index'))
-@app.route('/reporting/js/<path:path>')
+@app.route('/reporting/3rd_party/js/<path:path>')
def js(path):
- return send_from_directory('static/js/', path)
+ return send_from_directory('static/3rd_party/js/', path)
-@app.route('/reporting/css/<path:path>')
+@app.route('/reporting/3rd_party/css/<path:path>')
def css(path):
- return send_from_directory('static/css/', path)
+ return send_from_directory('static/3rd_party/css/', path)
@app.route('/reporting/images/<path:path>')
diff --git a/docker/storperf-reporting/src/static/css/bootstrap.min.css b/docker/storperf-reporting/src/static/3rd_party/css/bootstrap.min.css
index cd1c616..cd1c616 100644
--- a/docker/storperf-reporting/src/static/css/bootstrap.min.css
+++ b/docker/storperf-reporting/src/static/3rd_party/css/bootstrap.min.css
diff --git a/docker/storperf-reporting/src/static/js/Chart.min.js b/docker/storperf-reporting/src/static/3rd_party/js/Chart.min.js
index 3e93936..3e93936 100644
--- a/docker/storperf-reporting/src/static/js/Chart.min.js
+++ b/docker/storperf-reporting/src/static/3rd_party/js/Chart.min.js
diff --git a/docker/storperf-reporting/src/static/js/bootstrap.min.js b/docker/storperf-reporting/src/static/3rd_party/js/bootstrap.min.js
index c8f82e5..c8f82e5 100644
--- a/docker/storperf-reporting/src/static/js/bootstrap.min.js
+++ b/docker/storperf-reporting/src/static/3rd_party/js/bootstrap.min.js
diff --git a/docker/storperf-reporting/src/static/js/jquery-2.1.3.min.js b/docker/storperf-reporting/src/static/3rd_party/js/jquery-2.1.3.min.js
index 25714ed..25714ed 100644
--- a/docker/storperf-reporting/src/static/js/jquery-2.1.3.min.js
+++ b/docker/storperf-reporting/src/static/3rd_party/js/jquery-2.1.3.min.js
diff --git a/docker/storperf-reporting/src/static/js/jquery.bootpag.min.js b/docker/storperf-reporting/src/static/3rd_party/js/jquery.bootpag.min.js
index 324b1a3..324b1a3 100644
--- a/docker/storperf-reporting/src/static/js/jquery.bootpag.min.js
+++ b/docker/storperf-reporting/src/static/3rd_party/js/jquery.bootpag.min.js
diff --git a/docker/storperf-reporting/src/static/js/plotly-latest.min.js b/docker/storperf-reporting/src/static/3rd_party/js/plotly-latest.min.js
index 753a7ba..753a7ba 100644
--- a/docker/storperf-reporting/src/static/js/plotly-latest.min.js
+++ b/docker/storperf-reporting/src/static/3rd_party/js/plotly-latest.min.js
diff --git a/docker/storperf-reporting/src/templates/index.html b/docker/storperf-reporting/src/templates/index.html
index ab4e539..65da38d 100644
--- a/docker/storperf-reporting/src/templates/index.html
+++ b/docker/storperf-reporting/src/templates/index.html
@@ -4,10 +4,10 @@
<head>
<title>Graphing Module | Storperf</title>
<link rel="icon" href="/reporting/images/OPNVF_ProjectIcons_2400x2400_Storperf.png">
- <link rel="stylesheet" href="/reporting/css/bootstrap.min.css">
- <script src="/reporting/js/jquery-2.1.3.min.js"></script>
- <script src="/reporting/js/bootstrap.min.js"></script>
- <script src="/reporting/js/jquery.bootpag.min.js"></script>
+ <link rel="stylesheet" href="/reporting/3rd_party/css/bootstrap.min.css">
+ <script src="/reporting/3rd_party/js/jquery-2.1.3.min.js"></script>
+ <script src="/reporting/3rd_party/js/bootstrap.min.js"></script>
+ <script src="/reporting/3rd_party/js/jquery.bootpag.min.js"></script>
</head>
<body>
@@ -37,4 +37,4 @@
</div>
</body>
-</html>
\ No newline at end of file
+</html>
diff --git a/docker/storperf-reporting/src/templates/plot_jobs.html b/docker/storperf-reporting/src/templates/plot_jobs.html
index d2d3613..f10dd6c 100644
--- a/docker/storperf-reporting/src/templates/plot_jobs.html
+++ b/docker/storperf-reporting/src/templates/plot_jobs.html
@@ -4,12 +4,12 @@
<head>
<meta charset="utf-8" />
<title>Chart | Storperf</title>
- <link rel="stylesheet" href="/reporting/css/bootstrap.min.css">
- <script src="/reporting/js/jquery-2.1.3.min.js"></script>
- <script src="/reporting/js/bootstrap.min.js"></script>
- <script src="/reporting/js/jquery.bootpag.min.js"></script>
- <script src="/reporting/js/plotly-latest.min.js"></script>
- <script src="/reporting/js/Chart.min.js"></script>
+ <link rel="stylesheet" href="/reporting/3rd_party/css/bootstrap.min.css">
+ <script src="/reporting/3rd_party/js/jquery-2.1.3.min.js"></script>
+ <script src="/reporting/3rd_party/js/bootstrap.min.js"></script>
+ <script src="/reporting/3rd_party/js/jquery.bootpag.min.js"></script>
+ <script src="/reporting/3rd_party/js/plotly-latest.min.js"></script>
+ <script src="/reporting/3rd_party/js/Chart.min.js"></script>
<style>
/* The Modal (background) */
.modal {
@@ -88,8 +88,8 @@ ul {
text += "<div class='row well' id='row-" + key[i] + "'>";
text += "<h4> Job ID: " + key[i] + "</h4>";
text += "<button type='button' class='btn btn-default btn-lg' id='para-"+ key[i] +
- "' onclick=add_info('" + key[i] + "')> Click here to view details </button>";
- text += "</div>";
+ "' onclick=add_info('" + key[i] + "')> Click here to view details </button>";
+ text += "</div>";
}
$("#content").html(text);
}
@@ -112,8 +112,8 @@ ul {
text += "<h4 onclick=add_test('" + keys[i] + "')>" + keys[i] + "</h4>";
text += "</div>";
}
- }
- $("#content").html(text);
+ }
+ $("#content").html(text);
}
function add_test(key){
if(document.getElementById("test-" + key) == null){
@@ -264,4 +264,4 @@ ul {
</script>
</body>
-</html>
\ No newline at end of file
+</html>
diff --git a/docker/storperf-reporting/src/templates/plot_multi_data.html b/docker/storperf-reporting/src/templates/plot_multi_data.html
index 9a81599..f35c368 100644
--- a/docker/storperf-reporting/src/templates/plot_multi_data.html
+++ b/docker/storperf-reporting/src/templates/plot_multi_data.html
@@ -4,12 +4,12 @@
<head>
<meta charset="utf-8" />
<title>Chart | Storperf</title>
- <link rel="stylesheet" href="/reporting/css/bootstrap.min.css">
- <script src="/reporting/js/jquery-2.1.3.min.js"></script>
- <script src="/reporting/js/bootstrap.min.js"></script>
- <script src="/reporting/js/jquery.bootpag.min.js"></script>
- <script src="/reporting/js/plotly-latest.min.js"></script>
- <script src="/reporting/js/Chart.min.js"></script>
+ <link rel="stylesheet" href="/reporting/3rd_party/css/bootstrap.min.css">
+ <script src="/reporting/3rd_party/js/jquery-2.1.3.min.js"></script>
+ <script src="/reporting/3rd_party/js/bootstrap.min.js"></script>
+ <script src="/reporting/3rd_party/js/jquery.bootpag.min.js"></script>
+ <script src="/reporting/3rd_party/js/plotly-latest.min.js"></script>
+ <script src="/reporting/3rd_party/js/Chart.min.js"></script>
</head>
<body>
diff --git a/docker/storperf-reporting/src/templates/plot_tables.html b/docker/storperf-reporting/src/templates/plot_tables.html
index ecdf764..536bb42 100644
--- a/docker/storperf-reporting/src/templates/plot_tables.html
+++ b/docker/storperf-reporting/src/templates/plot_tables.html
@@ -4,12 +4,12 @@
<head>
<meta charset="utf-8" />
<title>Chart | Storperf</title>
- <link rel="stylesheet" href="/reporting/css/bootstrap.min.css">
- <script src="/reporting/js/jquery-2.1.3.min.js"></script>
- <script src="/reporting/js/bootstrap.min.js"></script>
- <script src="/reporting/js/jquery.bootpag.min.js"></script>
- <script src="/reporting/js/plotly-latest.min.js"></script>
- <script src="/reporting/js/Chart.min.js"></script>
+ <link rel="stylesheet" href="/reporting/3rd_party/css/bootstrap.min.css">
+ <script src="/reporting/3rd_party/js/jquery-2.1.3.min.js"></script>
+ <script src="/reporting/3rd_party/js/bootstrap.min.js"></script>
+ <script src="/reporting/3rd_party/js/jquery.bootpag.min.js"></script>
+ <script src="/reporting/3rd_party/js/plotly-latest.min.js"></script>
+ <script src="/reporting/3rd_party/js/Chart.min.js"></script>
<style>
</style>
</head>
@@ -41,8 +41,8 @@
text += "<h4> Start Date : " + results["start_date"] + "</h4>";
text += "<h4> Criteria : " + results["criteria"] + "</h4>";
text += "<h4> Build Tag : " + results["build_tag"] + "</h4>";
- text += "<button type='button' class='btn btn-default btn-lg' onclick=add_info()> Click here to view details </button>";
- text += "</div>";
+ text += "<button type='button' class='btn btn-default btn-lg' onclick=add_info()> Click here to view details </button>";
+ text += "</div>";
text += "<div id='info'></div>";
$("#content").html(text);
}
@@ -55,7 +55,7 @@
text += "<h4> Build Tag : " + results["build_tag"] + "</h4>";
text += "<button type='button' class='btn btn-default btn-lg' onclick=create_block_report()> Steady State Convergence Report - All Block Sizes </button>";
text += "<div class='row' id='block_report'></div>"
- text += "</div>";
+ text += "</div>";
text += "<div id='info'></div>";
for (var i = 0; i < keys.length; i++ ){
text += "<div class='row' id='row-"+ keys[i] +"'>";
@@ -89,7 +89,7 @@
for( item in report_data[key][test] ){
var state = report_data[key][test][item]["steady_state"];
text += "<li onclick=create_modal('" + key + "','" + test + "','" + item + "') id='";
- text += key + "-" + test + "-" + item + "'>" + item + " (steady state: " + state +")";
+ text += key + "-" + test + "-" + item + "'>" + item + " (steady state: " + state +")";
text += "<br><div id='modal-" + key + "-" + test + "-" + item + "'>";
text += '<div class="modal-content-'+ key + '-' + test + '-' + item +'">';
text += '<div id="modal-text-'+ key + '-' + test + '-' + item +'"></div>';
@@ -280,7 +280,7 @@
for(i in report_data){
if(report_data[i][test] == undefined && test == 'lat_ns.mean')
test = 'lat.mean';
- data[i] = report_data[i][test][key];
+ data[i] = report_data[i][test][key];
}
document.getElementById('close-block-report-'+key+'-'+test).style.display = 'block';
var modal = document.getElementById('modal-block-report-'+key+'-'+test);
@@ -327,4 +327,4 @@
</script>
</body>
-</html>
\ No newline at end of file
+</html>
diff --git a/docker/storperf-swaggerui/Dockerfile b/docker/storperf-swaggerui/Dockerfile
index 14c9fe1..9f82890 100644
--- a/docker/storperf-swaggerui/Dockerfile
+++ b/docker/storperf-swaggerui/Dockerfile
@@ -13,78 +13,10 @@
##
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
-FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
-
-# This is from https://github.com/nodejs/docker-node/blob/f547c4c7281027d5d90f4665815140126e1f70d5/8.2/alpine/Dockerfile
-
-ENV NPM_CONFIG_LOGLEVEL info
-ENV NODE_VERSION 8.2.1
-
-RUN addgroup -g 1000 node \
- && adduser -u 1000 -G node -s /bin/sh -D node \
- && apk add --no-cache \
- libstdc++ \
- && apk add --no-cache --virtual .build-deps \
- binutils-gold \
- curl \
- g++ \
- gcc \
- gnupg \
- libgcc \
- linux-headers \
- make \
- python \
- # gpg keys listed at https://github.com/nodejs/node#release-team
- && for key in \
- 9554F04D7259F04124DE6B476D5A82AC7E37093B \
- 94AE36675C464D64BAFA68DD7434390BDBE9B9C5 \
- FD3A5288F042B6850C66B31F09FE44734EB7990E \
- 71DCFD284A79C3B38668286BC97EC7A07EDE3FC1 \
- DD8F2338BAE7501E3DD5AC78C273792F7D83545D \
- B9AE9905FFD7803F25714661B63B535A4C206CA9 \
- C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8 \
- 56730D5401028683275BD23C23EFEFE93C4CFFFE \
- ; do \
- gpg --keyserver pgp.mit.edu --recv-keys "$key" || \
- gpg --keyserver keyserver.pgp.com --recv-keys "$key" || \
- gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" ; \
- done \
- && curl -SLO "https://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION.tar.xz" \
- && curl -SLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/SHASUMS256.txt.asc" \
- && gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc \
- && grep " node-v$NODE_VERSION.tar.xz\$" SHASUMS256.txt | sha256sum -c - \
- && tar -xf "node-v$NODE_VERSION.tar.xz" \
- && cd "node-v$NODE_VERSION" \
- && ./configure \
- && make -j$(getconf _NPROCESSORS_ONLN) \
- && make install \
- && apk del .build-deps \
- && cd .. \
- && rm -Rf "node-v$NODE_VERSION" \
- && rm "node-v$NODE_VERSION.tar.xz" SHASUMS256.txt.asc SHASUMS256.txt
-
-ENV YARN_VERSION 0.27.5
-
-RUN apk add --no-cache --virtual .build-deps-yarn curl gnupg tar \
- && for key in \
- 6A010C5166006599AA17F08146C2130DFD2497F5 \
- ; do \
- gpg --keyserver pgp.mit.edu --recv-keys "$key" || \
- gpg --keyserver keyserver.pgp.com --recv-keys "$key" || \
- gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" ; \
- done \
- && curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
- && curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
- && gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
- && mkdir -p /opt/yarn \
- && tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
- && ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
- && ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
- && rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
-&& apk del .build-deps-yarn
-
+ARG ALPINE_VERSION=v3.10
+FROM node:10-alpine
+RUN ulimit -n 1024
# This is from https://github.com/schickling/dockerfiles/blob/master/swagger-ui/Dockerfile
@@ -104,8 +36,8 @@ ENV PORT 80
WORKDIR /app
-RUN apk add --no-cache openssl
-RUN wget -qO- https://github.com/swagger-api/swagger-ui/archive/$VERSION.tar.gz | tar xvz
+RUN apk add --no-cache openssl curl
+RUN curl -SL https://github.com/swagger-api/swagger-ui/archive/$VERSION.tar.gz | tar xvz
RUN cp -r $FOLDER/dist/* . && rm -rf $FOLDER
RUN npm config set unsafe-perm true
RUN npm install -g http-server
diff --git a/docker/storperf-workloadagent/Dockerfile b/docker/storperf-workloadagent/Dockerfile
new file mode 100644
index 0000000..e6662a9
--- /dev/null
+++ b/docker/storperf-workloadagent/Dockerfile
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2019 VMware and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Docker container for workload
+#
+# Purpose: docker image for Storperf to control as a synthetic workload
+#
+# Maintained by Mark Beierl
+# Build:
+# $ docker build -t opnfv/storperf-workloadagent:tag .
+#
+
+ARG ARCH=x86_64
+ARG ALPINE_VERSION=v3.10
+FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
+
+RUN apk add --no-cache --upgrade \
+ logrotate \
+ openssh-client \
+ openssh-server \
+ sudo
+
+RUN sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config
+
+RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
+RUN ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa
+RUN ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa
+
+RUN echo root:password | chpasswd
+
+CMD /usr/sbin/sshd -D -e
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..3c4453e
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import *
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..a665899
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: STORPERF
diff --git a/docs/index.rst b/docs/index.rst
index 5ed81af..c25a577 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -39,15 +39,15 @@ StorPerf User Guide
./testing/user/storperf-reporting.rst
******************************
-StorPerf Dev Guide
+StorPerf Developer Guide
******************************
.. toctree::
- :caption: StorPerf Dev Guide
+ :caption: StorPerf Developer Guide
:maxdepth: 5
:numbered: 5
- ./dev/index.rst
+ ./testing/developer/devguide/index.rst
Indices
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index bf71c6c..6122cf0 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -1,11 +1,13 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-****************************
+.. _storperf-releasenotes:
+
+**********************
StorPerf Release Notes
-****************************
+**********************
.. toctree::
:maxdepth: 2
- release-notes
+ release-notes.rst
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 8611171..0303fb4 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
-This document provides the release notes for Euphrates 1.0 of StorPerf.
+This document provides the release notes for Iruya 1.0 of StorPerf.
.. contents::
:depth: 3
@@ -17,7 +17,7 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
-| 2017-10-06 | Euphrates 1.0 | Mark Beierl | |
+| 2020-01-10 | Iruya 1.0 | Mark Beierl | |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
@@ -25,9 +25,14 @@ Version history
Important notes
----------------
-This is the release where StorPerf is not delivered as a single container but
-is delivered as a series of networked containers. StorPerf must be run using
-docker-compose.
+Heat stack support is being sunsetted in StorPerf Iruya. Once Kali is released,
+StorPerf will no longer support the following APIs:
+
+* /configurations
+* /quota
+
+Additionally, the stack_name parameter will no longer be used. This also means
+that all tests must run using IP addresses.
Summary
--------
@@ -36,10 +41,10 @@ StorPerf is a standalone framework that uses OpenStack to measure Cinder volume
performance. If desired, it can push results to the OPNFV Test Results DB, or
the embedded Graphite web interface can be used to perform ad hoc queries.
-This release changes to docker-compose framework and adds the StorPerf
-reporting module. It also marks a change from microsecond (:math:`\mu`\s) to
-nano-second (ns) precision for all reported latencies. This is denoted by a change
-from lat.mean to lat_ns.mean for read and write metrics.
+This release provides the ability to use existing servers (virtual or physical)
+as the targets for workload execution. All that is required is the IP address
+and the SSH key or username/password for StorPerf to be able to log in and
+start FIO workloads.
Release Data
-------------
@@ -48,16 +53,16 @@ Release Data
| **Project** | StorPerf |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/commit-ID** | storperf/euphrates.1.0 |
+| **Repo/tag** | opnfv-9.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Euphrates base release |
+| **Release designation** | Iruya.9 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2017-10-06 |
+| **Release date** | Jan 10, 2020 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Euphrates release 1.0 |
+| **Purpose of the delivery** | Regular release |
| | |
+--------------------------------------+--------------------------------------+
@@ -67,39 +72,22 @@ Version change
Module version changes
-----------------------
-No changes to any modules.
+All modules have been upgraded to use Python 3.
Reason for version
===================
+* Timed release schedule
+
Features additions
-------------------
-* STORPERF-125 - StorPerf container decomposition
-* STORPERF-141 - Create a series of graphs to support SNIA targers
-* STORPERF-94 - Logs can now be viewed via the API. One has the choice to either view the complete length of logs,
- or limit himself to just a few lines.
-* STORPERF-193 - Support for ARM: StorPerf images for ARM and x86_64
- are published on docker hub with the architecture in the image tag.
-* STORPERF-174 - Container base switched to Alpine
-* STORPERF-92 - Allow flavor to be set in stack create
-* STORPERF-178 - Add ability to specify availability zone
-* STORPERF-175 - Support for different agent OS, such as Cirros
-
+* STORPERF-268 - Allow user to specify a list of IP addresses for StorPerf tests
Bug Fixes
----------
-The following minor bugs have been fixed:
-
-* STORPERF-56 - Cannot delete stack if create failed
-* STORPERF-180 - No details if stack create failed
-* STORPERF-186 - Duplicate entries for _warm_up with status query
-* STORPERF-197 - FIO 2.2.10 hangs when statically built
-* STORPERF-216 - Incorrect key names posted to testresults DB
-
-
-See JIRA for full `change log <https://jira.opnfv.org/jira/secure/ReleaseNote.jspa?projectId=11002&version=11227>`_
+None
Deliverables
=============
@@ -108,24 +96,27 @@ Software
---------
- `StorPerf master image <https://hub.docker.com/r/opnfv/storperf-master/>`_
- (tag: x86_64-euphrates.1.0 or aarch64-euphrates.1.0)
+ (tag: x86_64-opnfv-9.0.0 or aarch64-opnfv-9.0.0)
- `StorPerf swaggerui <https://hub.docker.com/r/opnfv/storperf-swaggerui/>`_
- (tag: x86_64-euphrates.1.0 or aarch64-euphrates.1.0)
+ (tag: x86_64-opnfv-9.0.0 or aarch64-opnfv-9.0.0)
- `StorPerf graphite image <https://hub.docker.com/r/opnfv/storperf-graphite/>`_
- (tag: x86_64-euphrates.1.0 or aarch64-euphrates.1.0)
+ (tag: x86_64-opnfv-9.0.0 or aarch64-opnfv-9.0.0)
- `StorPerf reporting image <https://hub.docker.com/r/opnfv/storperf-reporting/>`_
- (tag: x86_64-euphrates.1.0 or aarch64-euphrates.1.0)
+ (tag: x86_64-opnfv-9.0.0 or aarch64-opnfv-9.0.0)
- `StorPerf Http-Frontend image <https://hub.docker.com/r/opnfv/storperf-httpfrontend/>`_
- (tag: x86_64-euphrates.1.0 or aarch64-euphrates.1.0)
+ (tag: x86_64-opnfv-9.0.0 or aarch64-opnfv-9.0.0)
Documentation
--------------
-- `User Guide <http://docs.opnfv.org/en/latest/submodules/storperf/docs/testing/user/index.html>`_
+- :ref:`User Guide <storperf-userguide>`
+
+Note: The quotas and configurations APIs are being sunsetted with the next
+release.
Known Limitations, Issues and Workarounds
------------------------------------------
@@ -140,9 +131,13 @@ Known issues
* Cirros target VMs do not always mount Cinder volumes on first boot. Sometimes
a reboot of the VM is required to properly attach the Cinder volume to /dev/vdb
+* A bug in the Linux kernel can prevent Cinder volumes from attaching to VMs
+ using ARM architecture. Specifying the following properties in Glance for
+ the ARM based image will work around this problem. Note: this will cause
+ the device to show up as a SCSI device and therefore will be /dev/sdb instead
+ of /dev/vdb.
+.. code-block:: console
+
+   --property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
-Test Result
-===========
-- `OPNFV Test Results DB <http://testresults.opnfv.org/reporting/euphrates/storperf/status-apex.html>`_
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/docs/dev/gerrit.rst b/docs/testing/developer/devguide/gerrit.rst
index b227d21..b227d21 100644
--- a/docs/dev/gerrit.rst
+++ b/docs/testing/developer/devguide/gerrit.rst
diff --git a/docs/dev/ide.rst b/docs/testing/developer/devguide/ide.rst
index 3af4b6c..3af4b6c 100644
--- a/docs/dev/ide.rst
+++ b/docs/testing/developer/devguide/ide.rst
diff --git a/docs/dev/index.rst b/docs/testing/developer/devguide/index.rst
index 48000cf..48000cf 100644
--- a/docs/dev/index.rst
+++ b/docs/testing/developer/devguide/index.rst
diff --git a/docs/dev/initial.rst b/docs/testing/developer/devguide/initial.rst
index 04b1c45..04b1c45 100644
--- a/docs/dev/initial.rst
+++ b/docs/testing/developer/devguide/initial.rst
diff --git a/docs/dev/unit_tests.rst b/docs/testing/developer/devguide/unit_tests.rst
index 98ed3ce..98ed3ce 100644
--- a/docs/dev/unit_tests.rst
+++ b/docs/testing/developer/devguide/unit_tests.rst
diff --git a/docs/testing/user/installation.rst b/docs/testing/user/installation.rst
index 046da42..7f56244 100755
--- a/docs/testing/user/installation.rst
+++ b/docs/testing/user/installation.rst
@@ -8,11 +8,20 @@ StorPerf Installation Guide
OpenStack Prerequisites
===========================
-If you do not have an Ubuntu 16.04 image in Glance, you will need to add one.
-You also need to create the StorPerf flavor, or choose one that closely
-matches. For Ubuntu 16.04, it must have a minimum of a 4 GB disk. It should
-also have about 8 GB RAM to support FIO's memory mapping of written data blocks
-to ensure 100% coverage of the volume under test.
+StorPerf can be instructed to use OpenStack APIs in order to manage a
+Heat stack of virtual machines and Cinder volumes, or it can be run in
+stackless mode, where it does not need to know anything about OpenStack.
+
+When running in OpenStack mode, there will need to be an external network
+with floating IPs available to assign to the VMs, as well as a Glance image
+that can be used to boot the VMs. This can be almost any Linux based
+image, as long as it can either accept OpenStack metadata for injecting
+the SSH key, or it has known SSH credentials as part of the base image.
+
+The flavor for the image should provide enough disk space for the initial
+boot, along with additional space if profiling of the Glance backing is
+desired. It should also provide at least 8 GB RAM to support FIO's memory
+mapping of written data blocks.
There are scripts in the storperf/ci directory to assist, or you can use the
following code snippets:
@@ -34,9 +43,10 @@ code snippets:
OpenStack Credentials
~~~~~~~~~~~~~~~~~~~~~
-You must have your OpenStack Controller environment variables defined and passed to
-the StorPerf container. The easiest way to do this is to put the rc file contents
-into a clean file called admin.rc that looks similar to this for V2 authentication:
+Unless running in stackless mode, the OpenStack Controller environment
+variables must be defined and passed to the StorPerf container. The easiest
+way to do this is to put the rc file contents into a clean file called
+admin.rc that looks similar to this for V2 authentication:
.. code-block:: console
@@ -78,8 +88,8 @@ Planning
StorPerf is delivered as a series of Docker containers managed by
docker-compose. There are two possible methods for installation:
-#. Run container on bare metal
-#. Run container in a VM
+#. Run the containers on bare metal
+#. Run the containers in a VM
Requirements:
@@ -89,7 +99,7 @@ Requirements:
* Host has access to the OpenStack Controller API
* Host must have internet connectivity for downloading docker image
* Enough OpenStack floating IPs must be available to match your agent count
-* A local directory for holding the Carbon DB Whisper files
+* Optionally, a local directory for holding the Carbon DB Whisper files
Local disk is used for the Carbon DB storage, as the default size of the docker
container is only 10 GB. Here is an example of how to create a local storage
@@ -117,7 +127,7 @@ http://storperf:5000/graphite
Running StorPerf Container
==========================
-**As of Euphrates (development) release (June 2017), StorPerf has
+**As of Euphrates release (June 2017), StorPerf has
changed to use docker-compose in order to start its services.**
Docker compose requires a local file to be created in order to define the
@@ -142,12 +152,16 @@ which should result in:
.. code-block:: console
- 968c0c2d7c0e24f6777c33b37d9b4fd885575155069fb760405ec8214b2eb672 docker-compose.yaml
+ 69856e9788bec36308a25303ec9154ed68562e126788a47d54641d68ad22c8b9 docker-compose.yaml
To run, you must specify two environment variables:
-* ENV_FILE, which points to your OpenStack admin.rc as noted above.
-* CARBON_DIR, which points to a directory that will be mounted to store the raw metrics.
+* ENV_FILE, which points to your OpenStack admin.rc as noted above. If running
+ in stackless mode only, it is possible to remove the ENV_FILE reference from
+ the docker-compose.yaml file.
+* CARBON_DIR, which points to a directory that will be mounted to store the
+ raw metrics. If desired, the CARBON_DIR can be removed from the
+ docker-compose.yaml file, causing metrics to be kept in the container only.
* TAG, which specifies the Docker tag for the build (e.g. latest, danube.3.0, etc).
The following command will start all the StorPerf services:
@@ -175,7 +189,7 @@ which should result in:
.. code-block:: console
- 00649e02237d27bf0b40d1a66160a68a56c9f5e1ceb78d7858e30715cf4350e3 create-compose.py
+ 327cad2a7b3a3ca37910978005c743799313c2b90709e4a3f142286a06e53f57 create-compose.py
Note: The script runs fine on Python 3. Install the Python future package to avoid errors on Python 2.
@@ -187,79 +201,123 @@ Note: The script will run fine on python3. Install python future package to avoi
Docker Exec
~~~~~~~~~~~
-If needed, the container can be entered with docker exec. This is not normally
+If needed, any StorPerf container can be entered with docker exec. This is not normally
required.
.. code-block:: console
- docker exec -it storperf-master bash
+ docker exec -it storperf-master /bin/bash
-Pulling StorPerf Container
-==========================
+Pulling StorPerf Containers
+===========================
-Master (Euphrates)
-~~~~~~~~~~~~~~~~~~
+The tags for StorPerf can be found here: https://hub.docker.com/r/opnfv/storperf-master/tags/
-StorPerf has switched to docker-compose in the latest version. The tag for
-pulling the latest master Euphrates container is:
+Master (latest)
+~~~~~~~~~~~~~~~
-.. code-block:: bash
+This tag represents StorPerf at its most current state of development. While
+self-tests have been run, there is no guarantee that all features are
+functional, and there may be bugs.
- docker pull opnfv/storperf-master:latest
- docker pull opnfv/storperf-reporting:latest
- docker pull opnfv/storperf-httpfrontend:latest
- docker pull opnfv/storperf-swaggerui:latest
- docker pull opnfv/storperf-graphite:latest
+Documentation for latest can be found using the latest label at:
-However, by itself, this will no longer provide full functionality. Full
-instructions are provided in the Running StorPerf Container section of this
-document.
+:ref:`User Guide <storperf-userguide>`
+For x86_64 based systems, use:
-Danube
-~~~~~~
+.. code-block:: console
-The tag for the latest stable Danube is be:
+ TAG=x86_64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-.. code-block:: bash
+For 64 bit ARM based systems, use:
- docker pull opnfv/storperf:danube.3.0
+.. code-block:: console
-Colorado
-~~~~~~~~
+ TAG=aarch64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-The tag for the latest stable Colorado release is:
-.. code-block:: bash
+Release (stable)
+~~~~~~~~~~~~~~~~
- docker pull opnfv/storperf:colorado.0.1
+This tag represents StorPerf at its most recent stable release. There are
+no known bugs; known issues and workarounds are documented in the release
+notes. Issues found here should be reported in JIRA:
-Brahmaputra
-~~~~~~~~~~~
+https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=3
-The tag for the latest stable Brahmaputra release is:
+For x86_64 based systems, use:
-.. code-block:: bash
+.. code-block:: console
- docker pull opnfv/storperf:brahmaputra.1.2
+ TAG=x86_64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-StorPerf on ARM Processors
-==========================
+For 64 bit ARM based systems, use:
-StorPerf now supports docker images on ARM processors as well. However, at the moment
-there is no upstream image on DockerHub. The user needs to manually build it. Firstly,
-clone StorPerf repository from GitHub
+.. code-block:: console
-.. code-block:: bash
+ TAG=aarch64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
+
+
+
+Fraser (opnfv-6.0.0)
+~~~~~~~~~~~~~~~~~~~~
+
+This tag represents the 6th OPNFV release and the 5th StorPerf release. There
+are no known bugs; known issues and workarounds are documented in the release
+notes. Documentation can be found under the Fraser label at:
+
+http://docs.opnfv.org/en/stable-fraser/submodules/storperf/docs/testing/user/index.html
+
+Issues found here should be reported against release 6.0.0 in JIRA:
+
+https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=3
+
+For x86_64 based systems, use:
+
+.. code-block:: console
+
+ TAG=x86_64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
+
+For 64 bit ARM based systems, use:
+
+.. code-block:: console
+
+ TAG=aarch64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
+
+
+
+Euphrates (opnfv-5.0.0)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This tag represents the 5th OPNFV release and the 4th StorPerf release. There
+are no known bugs; known issues and workarounds are documented in the release
+notes. Documentation can be found under the Euphrates label at:
+
+http://docs.opnfv.org/en/stable-euphrates/submodules/storperf/docs/testing/user/index.html
+
+Issues found here should be reported against release 5.0.0 in JIRA:
+
+https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=3
+
+For x86_64 based systems, use:
+
+.. code-block:: console
- git clone https://git.opnfv.org/storperf
- cd storperf/docker/
+ TAG=x86_64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-Next, build and setup the docker images
+For 64 bit ARM based systems, use:
.. code-block:: console
- TAG=aarch64 ENV_FILE=./admin.rc CARBON_DIR=./carbon docker-compose -f local-docker-compose.yaml build
- TAG=aarch64 ENV_FILE=./admin.rc CARBON_DIR=./carbon docker-compose -f local-docker-compose.yaml up -d
+ TAG=aarch64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
diff --git a/docs/testing/user/introduction.rst b/docs/testing/user/introduction.rst
index 49e3220..c864edc 100644
--- a/docs/testing/user/introduction.rst
+++ b/docs/testing/user/introduction.rst
@@ -25,18 +25,18 @@ performance metrics in the shortest reasonable time.
How Does StorPerf Work?
=======================
-Once launched, StorPerf presents you with a ReST interface, along with a
+Once launched, StorPerf presents a ReST interface, along with a
`Swagger UI <https://swagger.io/swagger-ui/>`_ that makes it easier to
-form HTTP ReST requests. Issuing an HTTP POST to the configurations API
-causes StorPerf to talk to your OpenStack's heat service to create a new stack
-with as many agent VMs and attached Cinder volumes as you specify.
+form HTTP ReST requests.
-After the stack is created, you can issue one or more jobs by issuing a POST
-to the jobs ReST API. The job is the smallest unit of work that StorPerf
-can use to measure the disk's performance.
+StorPerf enables us to run FIO on multiple VMs, containers or bare
+metal servers by providing a recent release of FIO, copying it to the
+target system and running the specified I/O workloads. It also provides a
+simple API to initialize the target device and fill it with random data
+to ensure that performance is measured against real data, not blank media.
-While the job is running, StorPerf collects the performance metrics from each
-of the disks under test every minute. Once the trend of metrics match the
+While an FIO job is running, StorPerf collects the performance metrics from
+each of the jobs every minute. Once the trend of metrics match the
criteria specified in the SNIA methodology, the job automatically terminates
and the valid set of metrics are available for querying.
@@ -45,8 +45,210 @@ measured start to "flat line" and stay within that range for the specified
amount of time, then the metrics are considered to be indicative of a
repeatable level of performance.
-What Data Can I Get?
-====================
+With OpenStack Heat
+~~~~~~~~~~~~~~~~~~~
+
+StorPerf provides an API to interact with OpenStack Heat to automatically
+create a set of target VMs and Cinder volumes. The Configurations API is
+used to specify how many VMs and volumes to create, as well as the size of
+each Cinder volume.
+
+Without OpenStack Heat
+~~~~~~~~~~~~~~~~~~~~~~
+
+StorPerf can also use IP addresses or DNS names to connect to systems that
+have already been provisioned by any external provider, including OpenStack.
+By specifying a stack name of 'null' in the JSON payload, StorPerf will look
+for a list of IP addresses and credentials to use in order to SSH to the
+target systems. In this way, StorPerf can be used to profile bare metal,
+containers that have SSH enabled, or VMs running under OpenStack, VMware ESXi,
+VIO, Microsoft Hyper-V, or anything else. The only requirement is that
+the target be capable of accepting and authenticating SSH connections, and that
+it is Linux based, as currently the FIO supplied by StorPerf is not compiled
+to run under Microsoft Windows or other non-Linux operating systems.
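For illustration, a stackless run might supply the target addresses and credentials directly in the request body. The sketch below uses Python's requests library; the endpoint and field names (slave_addresses in particular) are assumptions made here for illustration, not something this passage confirms:

    import requests

    payload = {
        "stack_name": None,        # JSON null selects stackless mode
        "slave_addresses": ["10.0.0.21", "10.0.0.22"],  # assumed field name
        "username": "storperf",    # or SSH key material, per target setup
        "password": "secret",
    }
    # Hypothetical endpoint; adjust host and port to your deployment.
    requests.post("http://storperf:5000/api/v1.0/jobs", json=payload)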
+
+
+StorPerf Testing Guidelines
+===========================
+
+First of all, StorPerf is not able to give pointers on how to tune a
+Cinder implementation, as there are far too many backends (Ceph, NFS, LVM,
+etc), each with their own methods of tuning. StorPerf is here to assist in
+getting a reliable performance measurement by encoding the test
+specification from SNIA, and helping present the results in a way that makes
+sense.
+
+Having said that, there are some general guidelines that we can present to
+assist with planning a performance test.
+
+Workload Modelling
+------------------
+
+This is an important item to address as there are many parameters to how
+data is accessed. Databases typically use a fixed block size and tend to
+manage their data so that sequential access is more likely. GPS image tiles
+can be around 20-60kb and will be accessed by reading the file in full, with
+no easy way to predict what tiles will be needed next. Some programs are
+able to submit I/O asynchronously where others need to have different threads
+and may be synchronous. There is no one size fits all here, so knowing what
+type of I/O pattern we need to model is critical to getting realistic
+measurements.
+
+System Under Test
+-----------------
+
+The unfortunate part is that StorPerf does not have any knowledge about the
+underlying OpenStack itself – we can only see what is available through
+OpenStack APIs, and none of them provide details about the underlying
+storage implementation. As the test executor, we need to know
+information such as: the number of disks or storage nodes; the amount of RAM
+available for caching; the type of connection to the storage and bandwidth
+available.
+
+Measure Storage, not Cache
+--------------------------
+
+As part of the test data size, we need to ensure that we prevent
+caching from interfering in the measurements. The total size of the data
+set in the test must exceed the total size of all the disk cache memory
+available by a certain amount in order to ensure we are forcing non-cached
+I/O. There is no exact science here, but if we balance test duration against
+cache hit ratio, it can be argued that 20% cache hit is good enough and
+increasing file size would result in diminishing returns. Let’s break this
+number down a bit. Given a cache size of 10GB, we could write, then read the
+following dataset sizes:
+
+* 10GB gives 100% cache hit
+* 20GB gives 50% cache hit
+* 50GB gives 20% cache hit
+* 100GB gives 10% cache hit
+
+This means that for the first test, 100% of the results are unreliable due to
+cache. At 50GB, the true performance without cache has only a 20% margin of
+error. Given the fact that the 100GB would take twice as long, and that we
+are only reducing the margin of error by 10%, we recommend this as the best
+tradeoff.
+
+How much cache do we actually have? This depends on the storage device being
+used. For hardware NAS or other arrays, it should be fairly easy to get the
+number from the manufacturer, but for software defined storage, it can be
+harder to determine. Let’s take Ceph as an example. Ceph runs as software
+on the bare metal server and therefore has access to all the RAM available on
+the server to use as its cache. Well, not exactly all the memory. We have
+to take into account the memory consumed by the operating system, by the Ceph
+processes, as well as any other processes running on the same system. In the
+case of hyper-converged Ceph, where workload VMs and Ceph run on the systems,
+it can become quite difficult to predict. Ultimately, the amount of memory
+that is left over is the cache for that single Ceph instance. We now need to
+add the memory available from all the other Ceph storage nodes in the
+environment. Time for another example: given 3 Ceph storage nodes with
+256GB RAM each. Let's set aside a portion for the OS and other processes,
+leaving approximately 240GB per node. This gives us 3 x 240 or 720GB total RAM
+available for cache. The total amount of data we want to write in order to
+initialize our Cinder volumes would then be 5 x 720, or 3,600 GB. The
+following illustrates some ways to allocate the data:
+
+* 1 VM with one 3,600 GB volume
+* 10 VMs, each with one 360 GB volume
+* 2 VMs, each with five 360 GB volumes
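The sizing chain above is simple enough to script. A back-of-the-envelope sketch using the example figures (three Ceph nodes with roughly 240 GB of cache each, and a data set of five times the total cache for a roughly 20% cache-hit ratio):

    total_cache_gb = 3 * 240           # three Ceph nodes, ~240 GB cache each
    dataset_gb = total_cache_gb * 5    # 5x cache => ~20% cache-hit ratio

    # A few ways to spread the 3,600 GB across VMs and volumes:
    layouts = [(1, 1), (10, 1), (2, 5)]    # (VM count, volumes per VM)
    for vms, vols in layouts:
        size_gb = dataset_gb / (vms * vols)
        print("%2d VM(s) x %d volume(s) of %.0f GB each" % (vms, vols, size_gb))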
+
+Back to Modelling
+-----------------
+
+Now that we know there is 3.6 TB of data to be written, we need to go back to
+the workload model to determine how we are going to write it. Factors to
+consider:
+
+* Number of Volumes. We might be simulating a single database of 3.6 TB, so
+ only 1 Cinder volume is needed to represent this. Or, we might be
+ simulating a web server farm where there are hundreds of processes
+ accessing many different volumes. In this case, we divide the 3.6 TB by
+ the number of volumes, making each volume smaller.
+* Number of Virtual Machines. We might have one monster VM that will drive
+ all our I/O in the system, or maybe there are hundreds of VMs, each with
+ their own individual volume. Using Ceph as an example again, we know that
+ it allows for a single VM to consume all the Ceph resources, which can be
+ perceived as a problem in terms of multi-tenancy and scaling. A common
+ practice to mitigate this is to use Cinder to throttle IOPS at the VM
+ level. If this technique is being used in the environment under test, we
+ must adjust the number of VMs used in the test accordingly.
+* Block Size. We need to know if the application is managing the volume as a
+ raw device (ie: /dev/vdb) or as a filesystem mounted over the device.
+ Different filesystems have their own block sizes: ext4 only allows 1024,
+ 2048 or 4096 as the block size. Typically the larger the block, the better
+ the throughput, however as blocks must be written as an atomic unit, larger
+ block sizes can also reduce effective throughput by having to pad the block
+ if the content is smaller than the actual block size.
+* I/O Depth. This represents the amount of I/O that the application can
+ issue simultaneously. In a multi-threaded app, or one that uses
+ asynchronous I/O, it is possible to have multiple read or write requests
+ outstanding at the same time. For example, with software defined storage
+ where there is an Ethernet network between the client and the storage,
+ the storage would have a higher latency for each I/O, but is capable of
+ accepting many requests in parallel. With an I/O depth of 1, we spend
+ time waiting for the network latency before a response comes back. With
+ higher I/O depth, we can get more throughput despite each I/O having higher
+ latency. Typically, we do not see applications that would go beyond a
+ queue depth of 8, however this is not a firm rule.
+* Data Access Pattern. We need to know if the application typically reads
+ data sequentially or randomly, as well as what the mixture of read vs.
+ write is. It is possible to measure read by itself, or write by itself,
+ but this is not typical behavior for applications. It is useful for
+ determining the potential maximum throughput of a given type of operation.
+
+Fastest Path to Results
+-----------------------
+
+Once we have the information gathered, we can now start executing some tests.
+Let’s take some of the points discussed above and describe our system:
+
+* OpenStack deployment with 3 Control nodes, 5 Compute nodes and 3 dedicated
+ Ceph storage nodes.
+* Ceph nodes each have 240 GB RAM available to be used as cache.
+* Our application writes directly to the raw device (/dev/vdb)
+* There will be 10 instances of the application running, each with its own
+ volume.
+* Our application can use block sizes of 4k or 64k.
+* Our application is capable of maintaining up to 6 I/O operations
+ simultaneously.
+
+The first thing we know is that we want to keep our cache hit ratio around
+20%, so we will be moving 3,600 GB of data. We also know this will take a
+significant amount of time, so here is where StorPerf helps.
+
+First, we use the configurations API to launch our 10 virtual machines each
+with a 360 GB volume. Next comes the most time consuming part: we call the
+initializations API to fill each one of these volumes with random data. By
+preloading the data, we ensure a number of things:
+
+* The storage device has had to fully allocate all of the space for our
+ volumes. This is especially important for software defined storage like
+ Ceph, which is smart enough to know if data is being read from a block that
+ has never been written. No data on disk means no disk read is needed and
+ the response is immediate.
+* The RAM cache has been overrun multiple times. Only 20% of what was
+ written can possibly remain in cache.
+
+This last part is important as we can now use StorPerf’s implementation of
+SNIA’s steady state algorithm to ensure our follow up tests execute as
+quickly as possible. Given the fact that 80% of the data in any given test
+results in a cache miss, we can run multiple tests in a row without having
+to re-initialize or invalidate the cache again in between test runs. We can
+also mix and match the types of workloads to be run in a single performance
+job submission.
+
+Now we can submit a job to the jobs API to execute a 70%/30% mix of
+read/write, with a block size of 4k and an I/O queue depth of 6. This job
+will run until either the maximum time has expired, or until StorPerf detects
+steady state has been reached, at which point it will immediately complete
+and report the results of the measurements.
+
+StorPerf uses FIO as its workload engine, so whatever workload parameters we
+would like to use with FIO can be passed directly through via StorPerf’s jobs
+API.
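For the 70/30 example above, the underlying FIO option names are standard. Expressed as a Python mapping of the kind one might pass through to FIO (the exact payload shape of the jobs API is not shown in this passage, so treat the wrapper as illustrative):

    workload_options = {
        "rw": "randrw",       # random mixed read/write
        "rwmixread": 70,      # 70% reads, 30% writes
        "bs": "4k",           # 4 KiB block size
        "iodepth": 6,         # up to 6 outstanding I/Os per job
    }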
+
+What Data Can We Get?
+=====================
StorPerf provides the following metrics:
@@ -57,4 +259,9 @@ StorPerf provides the following metrics:
These metrics are available for every job, and for the specific workloads,
I/O loads and I/O types (read, write) associated with the job.
+For each metric, StorPerf also provides the set of samples that were
+collected along with the slope, min and max values that can be used for
+plotting or comparison.
+
As of this time, StorPerf only provides textual reports of the metrics.
+
diff --git a/docs/testing/user/storperf-reporting.rst b/docs/testing/user/storperf-reporting.rst
index 4094829..5531e0a 100644
--- a/docs/testing/user/storperf-reporting.rst
+++ b/docs/testing/user/storperf-reporting.rst
@@ -21,7 +21,7 @@ About this project
Usage
=======
-* Enter the URL for the location of the data for which you want to generate the report.
+* Enter the URL for the location of the data for which you want to generate the report (e.g. http://StorPerf:5000/api/v1.0/jobs?type=metadata).
* Note: You can test the module using the testdata present in the directory ``storperf-reporting/src/static/testdata``. Instead of the URL, enter the filename present in the testdata directory, e.g. **local-data.json**
* After entering the URL, you are taken to the page showing the details of the all the jobs present in the data.
* Click on the *Click here to view details* to see the different block sizes for the respective job.
diff --git a/docs/testing/user/test-usage.rst b/docs/testing/user/test-usage.rst
index 8048cff..0fb3a6c 100644
--- a/docs/testing/user/test-usage.rst
+++ b/docs/testing/user/test-usage.rst
@@ -31,62 +31,137 @@ The typical test execution follows this pattern:
#. Execute one or more performance runs
#. Delete the environment
-Configure The Environment
-=========================
+OpenStack or Stackless
+======================
+StorPerf provides the option of controlling the OpenStack environment
+via a Heat stack, or it can run in stackless mode, where it connects
+directly to the IP addresses supplied, regardless of how the slave
+was created or whether it is an OpenStack VM at all.
+
+Note: Stack support in StorPerf will be deprecated as of the next release.
+
+Configure The Environment for OpenStack Usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following pieces of information are required to prepare the environment:
-- The number of VMs/Cinder volumes to create
-- The Glance image that holds the VM operating system to use. StorPerf has
- only been tested with Ubuntu 16.04
-- The OpenStack flavor to use when creating the VMs
-- The name of the public network that agents will use
-- The size, in gigabytes, of the Cinder volumes to create
+- The number of VMs/Cinder volumes to create.
+- The Cinder volume type (optional) to use when creating the volumes.
+- The Glance image that holds the VM operating system to use.
+- The OpenStack flavor to use when creating the VMs.
+- The name of the public network that agents will use.
+- The size, in gigabytes, of the Cinder volumes to create.
+- The number of Cinder volumes to attach to each VM.
- The availability zone (optional) in which the VM is to be launched. Defaults to **nova**.
- The username (optional) if we specify a custom image.
- The password (optional) for the above image.
+**Note**: on ARM-based platforms, a kernel bug can prevent VMs from
+properly attaching Cinder volumes. There are two known workarounds:
+
+#. Create the environment with 0 Cinder volumes attached, and after the VMs
+ have finished booting, modify the stack to have 1 or more Cinder volumes.
+   See the section on Changing Stack Parameters later in this guide.
+#. Add the following image metadata to Glance. This will cause the Cinder
+ volume to be mounted as a SCSI device, and therefore your target will be
+   /dev/sdb, etc., instead of /dev/vdb. You will need to specify this in
+   your warm-up and workload jobs.
+
+.. code-block:: bash
+
+   --property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
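+
+For example, using the unified OpenStack client (the image name here is
+hypothetical), these properties could be applied as:
+
+.. code-block:: bash
+
+   openstack image set --property hw_disk_bus=scsi \
+       --property hw_scsi_model=virtio-scsi ubuntu-16.04-arm64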
+
+
The ReST API is a POST to http://StorPerf:5000/api/v1.0/configurations and
takes a JSON payload as follows.
.. code-block:: json
- {
- "agent_count": int,
- "agent_flavor": string
- "agent_image": string,
- "public_network": string,
- "volume_size": int,
- "availability_zone": string,
- "username": string,
- "password": string
- }
+ {
+ "agent_count": int,
+ "agent_flavor": "string",
+ "agent_image": "string",
+ "availability_zone": "string",
+ "password": "string",
+ "public_network": "string",
+ "username": "string",
+ "volume_count": int,
+ "volume_size": int,
+ "volume_type": "string"
+ }
This call will block until the stack is created, at which point it will return
-the OpenStack heat stack id.
+the OpenStack heat stack id as well as the IP addresses of the slave agents.
+
+
+Configure The Environment for Stackless Usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To configure the environment for stackless usage, the slaves must be
+fully operational (i.e., a Linux operating system is running and the slaves
+are reachable via TCP/IP address or hostname).
+
+It is not necessary to use the Configurations API; instead, define the
+stack name as 'null' in any of the other APIs. This instructs StorPerf not to
+gather information about the stack from OpenStack, and to simply use the
+supplied IP addresses and credentials to communicate with the slaves.
+
+A slave can be a container (provided we can SSH to it), a VM running in any
+hypervisor, or even a bare metal server. In the bare metal case, it even
+allows for performing RADOS or RBD performance tests using the appropriate
+FIO engine.
-Initialize the Cinder Volumes
+If the slave SSH server is listening on a port other than 22, the port number
+can be specified as part of the address as follows:
+
+IPv4 example for port 2222:
+
+.. code-block:: text
+
+   192.168.1.10:2222
+
+IPv6 example for port 2222:
+
+.. code-block:: text
+
+   [1fe80::58bb:c8b:f2f2:c888]:2222
+
+Helper Container Image for Workloads
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A new Docker container is provided with StorPerf that can be used to test
+in Docker or Kubernetes environments. It has hard-coded credentials
+of root/password with an SSH server built in, so be cautious about security
+concerns when using this image. It listens internally on port 22, so that
+port must be exposed to a free port on the host in order for StorPerf to
+reach the synthetic workload container.
+
+.. code-block:: bash
+
+   docker run --name=storperf-workloadagent -p 2222:22 \
+       opnfv/storperf-workloadagent:latest
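+
+Assuming the container's host is reachable from StorPerf at 10.0.0.5 (a
+hypothetical address), the slave could then be used in stackless mode with
+the image's built-in credentials, for example:
+
+.. code-block:: bash
+
+   curl -X POST http://StorPerf:5000/api/v1.0/initializations \
+        -H "Content-Type: application/json" \
+        -d '{"stack_name": null, "username": "root", "password": "password",
+             "slave_addresses": ["10.0.0.5:2222"], "target": "/filename.dat"}'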
+
+Initialize the Target Volumes
=============================
Before executing a test run for the purpose of measuring performance, it is
-necessary to fill the Cinder volume with random data. Failure to execute this
+necessary to fill the volume or file with random data. Failure to execute this
step can result in meaningless numbers, especially for read performance. Most
Cinder drivers are smart enough to know what blocks contain data, and which do
not. Uninitialized blocks return "0" immediately without actually reading from
the volume.
-Initiating the data fill looks the same as a regular performance test, but uses
-the special workload called "_warm_up". StorPerf will never push _warm_up
-data to the OPNFV Test Results DB, nor will it terminate the run on steady state.
-It is guaranteed to run to completion, which fills 100% of the volume with
+Initiating the data fill behaves like a regular performance run, but
+tags the data with a special workload name called "_warm_up". It is
+designed to run to completion, filling 100% of the specified target with
random data.
-The ReST API is a POST to http://StorPerf:5000/api/v1.0/jobs and
-takes a JSON payload as follows.
+The ReST API is a POST to http://StorPerf:5000/api/v1.0/initializations and
+takes a JSON payload as follows. The body is optional unless your target
+is something other than /dev/vdb. For example, to profile a Glance
+ephemeral storage file, you could specify the target as "/filename.dat",
+a file that then gets created on the root filesystem.
.. code-block:: json
{
- "workload": "_warm_up"
+ "target": "/dev/vdb"
}
This will return a job ID as follows.
@@ -100,6 +175,137 @@ This will return a job ID as follows.
This job ID can be used to query the state to determine when it has completed.
See the section on querying jobs for more information.
+Authentication and Slave Selection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It is possible to run the Initializations API against a subset of the slaves
+known to the stack, or to run it in stackless mode, where StorPerf
+connects via SSH directly to the IP addresses supplied. The following
+keys are available:
+
+slave_addresses
+ (optional) A list of IP addresses or hostnames to use as targets. If
+ omitted, and StorPerf is not running in stackless mode, the full list of
+ IP addresses from the OpenStack Heat stack is used.
+
+stack_name
+ (optional) Either the name of the stack in Heat to use, or null if running
+ in stackless mode.
+
+username
+ (optional) The username to supply to SSH when logging in. This defaults to
+ 'storperf' if not supplied.
+
+password
+ (optional) The password to supply to SSH when logging in. If omitted, the
+ SSH key is used instead.
+
+ssh_private_key
+ (optional) The SSH private key to supply to SSH when logging in. If omitted,
+ the default StorPerf private key is used.
+
+This shows an example of stackless mode run against a single bare metal
+server reachable by IP address:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+    "stack_name": null
+ }
+
+
+Filesystems and Mounts
+~~~~~~~~~~~~~~~~~~~~~~
+
+It is also possible to instruct StorPerf to create a file system on a device
+and mount that as the target directory. The filesystem can be anything
+supported by the target slave OS and it is possible to pass specific arguments
+to the mkfs command. The following additional keys are available in the
+Initializations API for file system control:
+
+mkfs
+ The type and arguments to pass for creating a filesystem
+
+mount_device
+ The target device on which to make the file system. The file system will
+ be mounted on the target specified.
+
+The following example shows the forced creation (-f) of an XFS filesystem
+on device /dev/sdb, and mounting that device on /storperf/filesystem.
+
+**Note** If any of the commands (mkfs, mount) fail for any reason, the
+Initializations API will return with a 400 code and the body of the response
+will contain the error message.
+
+.. code-block:: json
+
+ {
+ "target": "/storperf/filesystem",
+ "mkfs": "xfs -f",
+    "mount_device": "/dev/sdb"
+ }
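+
+On the slave, this is roughly equivalent to running the following (a
+sketch; the exact commands are internal to StorPerf):
+
+.. code-block:: bash
+
+   mkfs.xfs -f /dev/sdb
+   mkdir -p /storperf/filesystem   # assuming the mount point must be created
+   mount /dev/sdb /storperf/filesystem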
+
+
+Initializing Filesystems
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Just like we need to fill Cinder volumes with data, if we want to profile
+files on a mounted file system, we need to initialize the file sets with
+random data prior to starting a performance run. The Initializations API
+can also be used to create test data sets.
+
+**Note**: be sure to use the same parameters for the number of files, file
+sizes and jobs in both the Initializations API and the Jobs API, or you may
+end up with incorrect results in the Job performance run.
+
+The following keys are available in the Initializations API for file creation:
+
+filesize
+ The size of each file to be created and filled with random data.
+
+nrfiles
+ The number of files per job to create.
+
+numjobs
+ The number of independent instances of FIO to launch.
+
+Example:
+
+.. code-block:: json
+
+ {
+ "target": "/storperf/filesystem",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+This would create 100 (10 nrfiles x 10 numjobs) 2G files in the directory
+/storperf/filesystem.
+
+Putting it all together, the following example combines stackless slave
+selection, filesystem creation and file initialization in a single
+Initializations API request:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "mkfs": "ext4",
+ "mount_device": "/dev/sdb",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+
Execute a Performance Run
=========================
Performance runs can execute either a single workload, or iterate over a matrix
@@ -112,12 +318,152 @@ rr
rs
Read, Sequential. 100% read of sequential blocks of data
rw
- Read / Write Mix, Random. 70% random read, 30% random write
+ Read / Write Mix, Sequential. 70% sequential read, 30% sequential write
wr
Write, Random. 100% write of random blocks
ws
Write, Sequential. 100% write of sequential blocks.
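+
+A v1.0 job request that iterates over several of these workload types might
+look like the following sketch (assuming comma-delimited lists, as used for
+block sizes and queue depths):
+
+.. code-block:: bash
+
+   curl -X POST http://StorPerf:5000/api/v1.0/jobs \
+        -H "Content-Type: application/json" \
+        -d '{"workload": "rs,ws,rw", "block_sizes": "4k,64k",
+             "queue_depths": "1,6"}'
+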
+Custom Workload Types
+~~~~~~~~~~~~~~~~~~~~~
+New in Gambia (7.0), you can specify custom workload parameters for StorPerf
+to pass on to FIO. This is available in the /api/v2.0/jobs API, and takes
+a different format than the default v1.0 API.
+
+The format is as follows:
+
+.. code-block:: json
+
+ "workloads": {
+ "name": {
+ "fio argument": "fio value"
+ }
+ }
+
+The name is used the same way 'rr', 'rs', 'rw', etc. are used, but can be
+any arbitrary alphanumeric string, allowing you to identify the job later.
+Following the name is a series of arguments to pass on to FIO. The most
+important of these is the actual I/O operation to perform. From the `FIO
+manual`__, there are a number of different workloads:
+
+.. _FIO_IOP: http://git.kernel.dk/cgit/fio/tree/HOWTO#n985
+__ FIO_IOP_
+
+* read
+* write
+* trim
+* randread
+* etc
+
+This is an example of how the original 'ws' workload looks in the new format:
+
+.. code-block:: json
+
+ "workloads": {
+ "ws": {
+ "rw": "write"
+ }
+ }
+
+Using this format, it is now possible to initiate any combination of I/O
+workload types. For example, a mix of 60% reads and 40% writes scattered
+randomly throughout the volume being profiled would be:
+
+.. code-block:: json
+
+ "workloads": {
+ "6040randrw": {
+ "rw": "randrw",
+ "rwmixread": "60"
+ }
+ }
+
+Additional arguments can be added as needed. Here is an example of random
+writes, with 25% duplicated blocks, followed by a second run of 75/25% mixed
+reads and writes. This can be used to test the deduplication capabilities
+of the underlying storage driver.
+
+.. code-block:: json
+
+ "workloads": {
+ "dupwrite": {
+ "rw": "randwrite",
+ "dedupe_percentage": "25"
+ },
+ "7525randrw": {
+ "rw": "randrw",
+ "rwmixread": "75",
+ "dedupe_percentage": "25"
+ }
+ }
+
+There is no limit on the number of workloads and additional FIO arguments
+that can be specified.
+
+Note that as in v1.0, the list of workloads will be iterated over with the
+block sizes and queue depths specified.
+
+StorPerf also validates the arguments given, prior to returning a Job ID
+from the ReST call. If an argument fails validation, the error will be
+returned in the payload of the response.
+
+File System Profiling
+~~~~~~~~~~~~~~~~~~~~~
+
+As noted in the Initializations API, files in a file system should be
+initialized prior to executing a performance run, and the number of jobs,
+files and size of files should match the initialization. Given the following
+Initializations API call:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "mkfs": "ext4",
+ "mount_device": "/dev/sdb",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+The corresponding call to the Jobs API would appear as follows:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "block_sizes": "4k",
+ "queue_depths": "8",
+ "workloads": {
+ "readwritemix": {
+ "rw": "rw",
+ "filesize": "2G",
+ "nrfiles": "10",
+ "numjobs": "10"
+ }
+ }
+ }
+
+**Note**: the queue depths and block sizes, as well as the I/O pattern (rw),
+can change, but the filesize, nrfiles, numjobs and slave addresses must
+match the initialization, or the performance run may produce skewed results
+from reading uninitialized data. StorPerf explicitly allows this mismatch
+so that it is possible to visualize performance when the files or disks
+have not been properly initialized.
+
+
Block Sizes
~~~~~~~~~~~
A comma delimited list of the different block sizes to use when reading and
@@ -165,6 +511,24 @@ is required in order to push results to the OPNFV Test Results DB:
"test_case": "snia_steady_state"
}
+Changing Stack Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~
+While StorPerf currently does not support changing the parameters of the
+stack directly, it is possible to change the stack using the OpenStack client
+library. The following parameters can be changed:
+
+- agent_count: to increase or decrease the number of VMs.
+- volume_count: to change the number of Cinder volumes per VM.
+- volume_size: to increase the size of each volume. Note: Cinder cannot shrink volumes.
+
+Increasing the number of agents or volumes, or increasing the size of the volumes
+will require you to kick off a new _warm_up job to initialize the newly
+allocated volumes.
+
+The following is an example of how to change the stack using the heat client:
+
+.. code-block:: bash
+
+   heat stack-update StorPerfAgentGroup --existing -P "volume_count=2"
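+
+With newer OpenStack clients, the equivalent unified CLI command would be
+roughly:
+
+.. code-block:: bash
+
+   openstack stack update --existing --parameter "volume_count=2" \
+       StorPerfAgentGroup
+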
Query Jobs Information
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..840ce6a
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck