-rwxr-xr-x  ci/daily.sh | 16
-rwxr-xr-x  ci/verify-build.sh | 2
-rwxr-xr-x  ci/verify.sh | 18
-rw-r--r--  cli.py | 186
-rw-r--r--  docker/local-docker-compose.yaml | 3
-rw-r--r--  docker/storperf-httpfrontend/Dockerfile | 2
-rw-r--r--  docker/storperf-master/Dockerfile | 23
-rw-r--r--  docker/storperf-master/rest_server.py | 161
-rw-r--r--  docker/storperf-master/storperf/carbon/converter.py | 10
-rw-r--r--  docker/storperf-master/storperf/carbon/emitter.py | 6
-rw-r--r--  docker/storperf-master/storperf/db/graphite_db.py | 2
-rw-r--r--  docker/storperf-master/storperf/db/job_db.py | 2
-rw-r--r--  docker/storperf-master/storperf/fio/fio_invoker.py | 36
-rw-r--r--  docker/storperf-master/storperf/resources/hot/agent-group.yaml | 2
-rw-r--r--  docker/storperf-master/storperf/resources/hot/storperf-agent.yaml | 2
-rw-r--r--  docker/storperf-master/storperf/resources/hot/storperf-volume.yaml | 2
-rw-r--r--  docker/storperf-master/storperf/storperf_master.py | 189
-rw-r--r--  docker/storperf-master/storperf/test_executor.py | 27
-rw-r--r--  docker/storperf-master/storperf/utilities/data_handler.py | 4
-rw-r--r--  docker/storperf-master/storperf/utilities/ip_helper.py | 27
-rw-r--r--  docker/storperf-master/storperf/workloads/_base_workload.py | 19
-rw-r--r--  docker/storperf-master/storperf/workloads/_custom_workload.py | 2
-rw-r--r--  docker/storperf-master/tests/carbon_tests/emitter_test.py | 21
-rw-r--r--  docker/storperf-master/tests/db_tests/graphite_db_test.py | 3
-rw-r--r--  docker/storperf-master/tests/db_tests/job_db_test.py | 3
-rw-r--r--  docker/storperf-master/tests/fio_tests/fio_invoker_test.py | 14
-rw-r--r--  docker/storperf-master/tests/storperf_master_test.py | 6
-rw-r--r--  docker/storperf-master/tests/utilities_tests/data_handler_test.py | 20
-rw-r--r--  docker/storperf-master/tests/utilities_tests/ip_helper_test.py | 39
-rw-r--r--  docker/storperf-reporting/Dockerfile | 10
-rw-r--r--  docker/storperf-swaggerui/Dockerfile | 2
-rw-r--r--  docker/storperf-workloadagent/Dockerfile | 37
-rw-r--r--  docs/release/release-notes/index.rst | 8
-rw-r--r--  docs/release/release-notes/release-notes.rst | 73
-rwxr-xr-x  docs/testing/user/installation.rst | 38
-rw-r--r--  docs/testing/user/introduction.rst | 229
-rw-r--r--  docs/testing/user/test-usage.rst | 257
-rw-r--r--  tox.ini | 2
38 files changed, 1074 insertions, 429 deletions
diff --git a/ci/daily.sh b/ci/daily.sh
index ada8cfd..1e99709 100755
--- a/ci/daily.sh
+++ b/ci/daily.sh
@@ -26,17 +26,13 @@ git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng "${WORKSPACE}/ci/job/
rm -rf "${WORKSPACE}/ci/job/joid"
git clone --depth 1 https://gerrit.opnfv.org/gerrit/joid "${WORKSPACE}/ci/job/joid"
-virtualenv "${WORKSPACE}/ci/job/storperf_daily_venv"
+python3 -m venv "${WORKSPACE}/ci/job/storperf_daily_venv"
# shellcheck source=/dev/null
source "${WORKSPACE}/ci/job/storperf_daily_venv/bin/activate"
-pip install --upgrade setuptools==33.1.1
-pip install cryptography==1.7.2
-pip install functools32==3.2.3.post2
-pip install pytz==2016.10
-pip install osc_lib==1.3.0
-pip install python-openstackclient==3.7.0
-pip install python-heatclient==1.16.0
+python3 -m pip install --upgrade setuptools==40.5.0
+python3 -m pip install python-openstackclient==3.16.1
+python3 -m pip install python-heatclient==1.16.1
"${WORKSPACE}/ci/generate-admin-rc.sh"
@@ -93,14 +89,14 @@ JOB=$("${WORKSPACE}/ci/start_job.sh" \
curl -s -X GET "http://127.0.0.1:5000/api/v1.0/jobs?id=${JOB}&type=status" \
-o "${WORKSPACE}/ci/job/status.json"
-JOB_STATUS=$(grep -A2 $JOB "${WORKSPACE}/ci/job/status.json" | awk '/Status/ {print $2}' | cut -d\" -f2)
+JOB_STATUS=$(awk '/Status/ {print $2}' "${WORKSPACE}/ci/job/status.json" | cut -d\" -f2)
while [ "${JOB_STATUS}" != "Completed" ]
do
sleep 600
mv "${WORKSPACE}/ci/job/status.json" "${WORKSPACE}/ci/job/old-status.json"
curl -s -X GET "http://127.0.0.1:5000/api/v1.0/jobs?id=${JOB}&type=status" \
-o "${WORKSPACE}/ci/job/status.json"
- JOB_STATUS=$(grep -A2 $JOB "${WORKSPACE}/ci/job/status.json" | awk '/Status/ {print $2}' | cut -d\" -f2)
+ JOB_STATUS=$(awk '/Status/ {print $2}' "${WORKSPACE}/ci/job/status.json" | cut -d\" -f2)
if diff "${WORKSPACE}/ci/job/status.json" "${WORKSPACE}/ci/job/old-status.json" >/dev/null
then
cat "${WORKSPACE}/ci/job/status.json"
diff --git a/ci/verify-build.sh b/ci/verify-build.sh
index c98fea4..5230cee 100755
--- a/ci/verify-build.sh
+++ b/ci/verify-build.sh
@@ -35,6 +35,8 @@ export ARCH=${ARCH}
echo Using $ARCH architecture
+export CURRENT_UID=$(id -u):$(id -g)
+
docker-compose -f local-docker-compose.yaml down
docker-compose -f local-docker-compose.yaml build
docker-compose -f local-docker-compose.yaml up -d
diff --git a/ci/verify.sh b/ci/verify.sh
index 40e94b6..deaafb5 100755
--- a/ci/verify.sh
+++ b/ci/verify.sh
@@ -15,17 +15,17 @@ then
WORKSPACE="$HOME"
fi
-virtualenv $WORKSPACE/storperf_venv
+python3 -m venv $WORKSPACE/storperf_venv
source $WORKSPACE/storperf_venv/bin/activate
-pip install --upgrade setuptools
-pip install autoflake==1.2
-pip install autopep8==1.3.5
-pip install coverage==4.5.1
-pip install flake8==3.5.0
-pip install mock==2.0.0
-pip install nose==1.3.7
-pip install -r docker/storperf-master/requirements.pip
+python3 -m pip install --upgrade setuptools
+python3 -m pip install autoflake==1.2
+python3 -m pip install autopep8==1.3.5
+python3 -m pip install coverage==4.5.1
+python3 -m pip install flake8==3.5.0
+python3 -m pip install mock==2.0.0
+python3 -m pip install nose==1.3.7
+python3 -m pip install -r docker/storperf-master/requirements.pip
final_rc=0
diff --git a/cli.py b/cli.py
deleted file mode 100644
index fda05c2..0000000
--- a/cli.py
+++ /dev/null
@@ -1,186 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 EMC and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-"""
-"""
-
-from storperf.storperf_master import StorPerfMaster
-from threading import Thread
-import cPickle
-import getopt
-import json
-import logging
-import logging.config
-import logging.handlers
-import requests
-import socket
-import struct
-import sys
-
-
-class Usage(Exception):
- pass
-
-
-def event(event_string):
- logging.getLogger(__name__).info(event_string)
-
-
-class LogRecordStreamHandler(object):
-
- def __init__(self):
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- self.socket.bind((
- 'localhost', logging.handlers.DEFAULT_UDP_LOGGING_PORT))
- self.level = logging.INFO
-
- def read_logs(self):
- try:
- while True:
- datagram = self.socket.recv(8192)
- chunk = datagram[0:4]
- struct.unpack(">L", chunk)[0]
- chunk = datagram[4:]
- obj = cPickle.loads(chunk)
- record = logging.makeLogRecord(obj)
- if (record.levelno >= self.level):
- logger = logging.getLogger(record.name)
- logger.handle(record)
-
- except Exception as e:
- print "ERROR: " + str(e)
- finally:
- self.socket.close()
-
-
-def main(argv=None):
- verbose = False
- debug = False
- report = None
- erase = False
- terminate = False
- options = {}
-
- storperf = StorPerfMaster()
-
- if argv is None:
- argv = sys.argv
- try:
- try:
- opts, args = getopt.getopt(argv[1:], "t:w:r:f:escvdTh",
- ["target=",
- "workload=",
- "report=",
- "configure=",
- "erase",
- "nossd",
- "nowarm",
- "verbose",
- "debug",
- "terminate",
- "help",
- ])
- except getopt.error, msg:
- raise Usage(msg)
-
- configuration = None
- options['workload'] = None
-
- for o, a in opts:
- if o in ("-h", "--help"):
- print __doc__
- return 0
- elif o in ("-t", "--target"):
- options['filename'] = a
- elif o in ("-v", "--verbose"):
- verbose = True
- elif o in ("-d", "--debug"):
- debug = True
- elif o in ("-s", "--nossd"):
- options['nossd'] = a
- elif o in ("-c", "--nowarm"):
- options['nowarm'] = False
- elif o in ("-w", "--workload"):
- options['workload'] = a
- elif o in ("-r", "--report"):
- report = a
- elif o in ("-e", "--erase"):
- erase = True
- elif o in ("-T", "--terminate"):
- terminate = True
- elif o in ("-f", "--configure"):
- configuration = dict(x.split('=') for x in a.split(','))
-
- if (debug) or (verbose):
- udpserver = LogRecordStreamHandler()
-
- if (debug):
- udpserver.level = logging.DEBUG
-
- logging.basicConfig(format="%(asctime)s - %(name)s - " +
- "%(levelname)s - %(message)s")
-
- t = Thread(target=udpserver.read_logs, args=())
- t.setDaemon(True)
- t.start()
-
- if (erase):
- response = requests.delete(
- 'http://127.0.0.1:5000/api/v1.0/configurations')
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
- return 0
-
- if (terminate):
- response = requests.delete(
- 'http://127.0.0.1:5000/api/v1.0/jobs')
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
- return 0
-
- if (configuration is not None):
- response = requests.post(
- 'http://127.0.0.1:5000/api/v1.0/configurations', json=configuration)
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
-
- if (report is not None):
- print "Fetching report for %s..." % (report,)
- response = requests.get(
- 'http://127.0.0.1:5000/api/v1.0/jobs?id=%s' % (report,))
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
- content = json.loads(response.content)
- print content
- else:
- print "Calling start..."
- response = requests.post(
- 'http://127.0.0.1:5000/api/v1.0/jobs', json=options)
- if (response.status_code == 400):
- content = json.loads(response.content)
- raise Usage(content['message'])
-
- content = json.loads(response.content)
- print "Started job id: " + content['job_id']
-
- except Usage as e:
- print >> sys.stderr, str(e)
- print >> sys.stderr, "For help use --help"
- return 2
-
- except Exception as e:
- print >> sys.stderr, str(e)
- return 2
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/docker/local-docker-compose.yaml b/docker/local-docker-compose.yaml
index 6daa6e2..a4b69b4 100644
--- a/docker/local-docker-compose.yaml
+++ b/docker/local-docker-compose.yaml
@@ -17,8 +17,10 @@ services:
args:
ARCH: ${ARCH}
env_file: ${ENV_FILE}
+ user: ${CURRENT_UID}
volumes:
- ./storperf-master/:/storperf
+ - ./certs:/etc/ssl/certs/
links:
- storperf-graphite
@@ -28,6 +30,7 @@ services:
context: storperf-reporting
args:
ARCH: ${ARCH}
+ user: ${CURRENT_UID}
volumes:
- ./storperf-reporting/:/home/opnfv/storperf-reporting
diff --git a/docker/storperf-httpfrontend/Dockerfile b/docker/storperf-httpfrontend/Dockerfile
index 95188b5..6f072b0 100644
--- a/docker/storperf-httpfrontend/Dockerfile
+++ b/docker/storperf-httpfrontend/Dockerfile
@@ -13,7 +13,7 @@
##
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
+ARG ALPINE_VERSION=v3.10
FROM nginx:alpine
EXPOSE 80 443
diff --git a/docker/storperf-master/Dockerfile b/docker/storperf-master/Dockerfile
index 9764a8d..a2e1a1d 100644
--- a/docker/storperf-master/Dockerfile
+++ b/docker/storperf-master/Dockerfile
@@ -16,12 +16,12 @@
#
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
+ARG ALPINE_VERSION=v3.10
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION as storperf-builder
RUN ulimit -n 1024
-LABEL version="7.0" description="OPNFV Storperf Docker container"
+LABEL version="8.0" description="OPNFV Storperf Docker container"
ARG BRANCH=master
@@ -47,28 +47,27 @@ RUN cd ${repos_dir}/fio && EXTFLAGS="-static" make -j $(grep -c ^processor /proc
RUN apk --no-cache add --update \
libffi-dev \
libressl-dev \
- python \
- py-pip \
- python-dev \
+ python3=3.7.5-r1 \
+ python3-dev=3.7.5-r1 \
alpine-sdk \
- linux-headers \
- bash
+ linux-headers
# Install StorPerf
COPY requirements.pip /storperf/
-RUN pip install --upgrade setuptools==33.1.1
-RUN pip install -r /storperf/requirements.pip
+RUN python3 -m pip install --upgrade setuptools==33.1.1
+RUN python3 -m pip install -r /storperf/requirements.pip
# Build stripped down StorPerf image
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION as storperf-master
RUN apk --no-cache add --update \
- python \
+ libressl-dev \
+ python3=3.7.5-r1 \
bash
-COPY --from=storperf-builder /usr/lib/python2.7/site-packages /usr/lib/python2.7/site-packages
+COPY --from=storperf-builder /usr/lib/python3.7/site-packages /usr/lib/python3.7/site-packages
COPY --from=storperf-builder /usr/local/bin/fio /usr/local/bin/fio
COPY . /storperf
@@ -80,4 +79,4 @@ RUN chmod 600 storperf/resources/ssh/storperf_rsa
EXPOSE 5000
# Entry point
-CMD [ "python", "./rest_server.py" ]
+CMD [ "python3", "./rest_server.py" ]
diff --git a/docker/storperf-master/rest_server.py b/docker/storperf-master/rest_server.py
index 92b6c85..7606eca 100644
--- a/docker/storperf-master/rest_server.py
+++ b/docker/storperf-master/rest_server.py
@@ -10,7 +10,6 @@
import json
import logging.config
import os
-import sys
from flask import abort, Flask, request, jsonify
from flask_cors import CORS
@@ -18,6 +17,7 @@ from flask_restful import Resource, Api, fields
from flask_restful_swagger import swagger
from storperf.storperf_master import StorPerfMaster
+import flask
class ReverseProxied(object):
@@ -137,7 +137,9 @@ class Configure(Resource):
self.logger = logging.getLogger(__name__)
@swagger.operation(
- notes='Fetch the current agent configuration',
+ notes='''Fetch the current agent configuration.
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "stack_name",
@@ -155,7 +157,7 @@ class Configure(Resource):
if stack_name:
storperf.stack_name = stack_name
- return jsonify({'agent_count': storperf.agent_count,
+ json = jsonify({'agent_count': storperf.agent_count,
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
'public_network': storperf.public_network,
@@ -168,10 +170,15 @@ class Configure(Resource):
'stack_name': storperf.stack_name,
'slave_addresses': storperf.slave_addresses,
'stack_id': storperf.stack_id})
+ response = flask.make_response(json)
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
@swagger.operation(
notes='''Set the current agent configuration and create a stack in
- the controller. Returns once the stack create is completed.''',
+ the controller. Returns once the stack create is completed.
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "configuration",
@@ -229,7 +236,9 @@ class Configure(Resource):
abort(400, str(e))
@swagger.operation(
- notes='Deletes the agent configuration and the stack',
+ notes='''Deletes the agent configuration and the stack
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "stack_name",
@@ -246,7 +255,10 @@ class Configure(Resource):
if stack_name:
storperf.stack_name = stack_name
try:
- return jsonify({'stack_id': storperf.delete_stack()})
+ json = jsonify({'stack_id': storperf.delete_stack()})
+ response = flask.make_response(json)
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
except Exception as e:
self.logger.exception(e)
abort(400, str(e))
@@ -355,7 +367,8 @@ for any single test iteration.
"workload":if specified, the workload to run. Defaults to all.
-"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
+"stack_name": This field is in sunset until the next OPNVF release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
the last stack named.
""",
"required": True,
@@ -379,11 +392,13 @@ the last stack named.
if not request.json:
abort(400, "ERROR: Missing configuration data")
+ storperf.reset_values()
self.logger.info(request.json)
try:
if ('stack_name' in request.json):
storperf.stack_name = request.json['stack_name']
+ storperf.stackless = False
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
@@ -422,7 +437,6 @@ the last stack named.
]
)
def delete(self):
- self.logger.info("Threads: %s" % sys._current_frames())
try:
return jsonify({'Slaves': storperf.terminate_workloads()})
except Exception as e:
@@ -439,7 +453,7 @@ class WorkloadsBodyModel:
@swagger.model
@swagger.nested(
- name=WorkloadsBodyModel.__name__)
+ name=WorkloadsBodyModel.__name__)
class WorkloadsNameModel:
resource_fields = {
"name": fields.Nested(WorkloadsBodyModel.resource_fields)
@@ -448,7 +462,7 @@ class WorkloadsNameModel:
@swagger.model
@swagger.nested(
- workloads=WorkloadsNameModel.__name__)
+ workloads=WorkloadsNameModel.__name__)
class WorkloadV2Model:
resource_fields = {
'target': fields.String,
@@ -457,7 +471,11 @@ class WorkloadV2Model:
'workloads': fields.Nested(WorkloadsNameModel.resource_fields),
'queue_depths': fields.String,
'block_sizes': fields.String,
- 'stack_name': fields.String
+ 'stack_name': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List
}
required = ['workloads']
@@ -483,8 +501,21 @@ for any single test iteration.
"workloads": A JSON formatted map of workload names and parameters for FIO.
-"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
-the last stack named.
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
""",
"required": True,
"type": "WorkloadV2Model",
@@ -505,9 +536,10 @@ the last stack named.
)
def post(self):
if not request.json:
- abort(400, "ERROR: Missing configuration data")
+ abort(400, "ERROR: Missing job data")
self.logger.info(request.json)
+ storperf.reset_values()
try:
if ('stack_name' in request.json):
@@ -534,6 +566,15 @@ the last stack named.
else:
metadata = {}
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+
job_id = storperf.execute_workloads(metadata)
return jsonify({'job_id': job_id})
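(Editorial aside on the new stackless fields documented above: a minimal client-side sketch. The host uses the 127.0.0.1:5000 address seen elsewhere in this change and the v1.0 jobs path from ci/daily.sh as a placeholder; the credentials, addresses and workload values are purely illustrative, not part of this commit.)

    # Hypothetical job submission exercising the new stackless fields.
    # Field names come from WorkloadV2Model above; all values are placeholders.
    import requests

    job_request = {
        "stack_name": None,                       # bypass Heat, use slave_addresses
        "username": "storperf",                   # illustrative credentials
        "password": "example-password",
        "slave_addresses": ["192.168.1.10", "[fe80::1]:2222"],
        "target": "/dev/vdb",
        "queue_depths": "8",
        "block_sizes": "16384",
        "workloads": {"rw": {"rw": "randrw"}},    # example workload map
    }

    response = requests.post("http://127.0.0.1:5000/api/v1.0/jobs",
                             json=job_request, timeout=30)
    print(response.json())                        # e.g. {"job_id": "..."}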
@@ -547,7 +588,16 @@ the last stack named.
class WarmUpModel:
resource_fields = {
'stack_name': fields.String,
- 'target': fields.String
+ 'target': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List,
+ 'mkfs': fields.String,
+ 'mount_point': fields.String,
+ 'file_size': fields.String,
+ 'nrfiles': fields.String,
+ 'numjobs': fields.String,
}
@@ -565,10 +615,36 @@ class Initialize(Resource):
"description": """Fill the target with random data. If no
target is specified, it will default to /dev/vdb
-"target": The target device or file to fill with random data.
+"target": The target device to use.
-"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
-the last stack named.
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
+"mkfs": if specified, the command to execute in order to create a filesystem
+on the target device (eg: mkfs.ext4)
+
+"mount_point": if specified, the directory to use when mounting the device.
+
+"filesize": if specified, the size of the files to create when profiling
+a filesystem.
+
+"nrfiles": if specified, the number of files to create when profiling
+a filesystem
+
+"numjobs": if specified, the number of jobs for when profiling
+a filesystem
""",
"required": False,
"type": "WarmUpModel",
@@ -593,17 +669,46 @@ the last stack named.
)
def post(self):
self.logger.info(request.json)
+ storperf.reset_values()
try:
+ warm_up_args = {
+ 'rw': 'randwrite',
+ 'direct': "1",
+ 'loops': "1"
+ }
+ storperf.queue_depths = "8"
+ storperf.block_sizes = "16k"
+
if request.json:
if 'target' in request.json:
storperf.filename = request.json['target']
if 'stack_name' in request.json:
storperf.stack_name = request.json['stack_name']
- storperf.queue_depths = "8"
- storperf.block_sizes = "16k"
- storperf.workloads = "_warm_up"
- storperf.custom_workloads = None
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+ if 'mkfs' in request.json:
+ storperf.mkfs = request.json['mkfs']
+ if 'mount_device' in request.json:
+ storperf.mount_device = request.json['mount_device']
+ if 'filesize' in request.json:
+ warm_up_args['filesize'] = str(request.json['filesize'])
+ if 'nrfiles' in request.json:
+ warm_up_args['nrfiles'] = str(request.json['nrfiles'])
+ if 'numjobs' in request.json:
+ warm_up_args['numjobs'] = str(request.json['numjobs'])
+
+ storperf.workloads = None
+ storperf.custom_workloads = {
+ '_warm_up': warm_up_args
+ }
+ self.logger.info(storperf.custom_workloads)
job_id = storperf.execute_workloads()
return jsonify({'job_id': job_id})
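(Similarly, a hedged sketch of an initialization request using the filesystem options handled by Initialize.post() above. The route, key path and all values are placeholders; note the handler reads 'mount_device' even though the description text says 'mount_point'.)

    # Hypothetical warm-up request against an existing server.
    import requests

    with open("/path/to/id_rsa") as key_file:     # illustrative key location
        private_key = key_file.read()

    warm_up_request = {
        "stack_name": None,
        "username": "storperf",
        "ssh_private_key": private_key,
        "slave_addresses": ["192.168.1.10"],
        "target": "/mnt/storperf",                # directory used for fio files
        "mkfs": "ext4",                           # runs "mkfs.ext4 <device>" on the slave
        "mount_device": "/dev/vdb",
        "filesize": "2G",
        "nrfiles": "4",
        "numjobs": "2",
    }

    response = requests.post("http://127.0.0.1:5000/api/v1.0/initializations",
                             json=warm_up_request, timeout=30)
    print(response.json())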
@@ -628,12 +733,18 @@ class Quota(Resource):
notes='''Fetch the current Cinder volume quota. This value limits
the number of volumes that can be created, and by extension, defines
the maximum number of agents that can be created for any given test
- scenario''',
+ scenario
+
+
+ This API is in sunset until the next OPNFV release.''',
type=QuotaModel.__name__
)
def get(self):
- quota = storperf.volume_quota
- return jsonify({'quota': quota})
+ quota = [] # storperf.volume_quota
+ # return jsonify({'quota': quota})
+ response = flask.make_response(jsonify({'quota': quota}))
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
def setup_logging(default_path='logging.json',
diff --git a/docker/storperf-master/storperf/carbon/converter.py b/docker/storperf-master/storperf/carbon/converter.py
index 623c144..4b5e6aa 100644
--- a/docker/storperf-master/storperf/carbon/converter.py
+++ b/docker/storperf-master/storperf/carbon/converter.py
@@ -32,12 +32,12 @@ class Converter(object):
def resurse_to_flat_dictionary(self, json, prefix=None):
if type(json) == dict:
- for k, v in json.items():
+ for k, v in list(json.items()):
if prefix is None:
- key = k.decode("utf-8").replace(" ", "_")
+ key = k.replace(" ", "_")
else:
- key = prefix + "." + k.decode("utf-8").replace(" ", "_")
- if hasattr(v, '__iter__'):
+ key = prefix + "." + k.replace(" ", "_")
+ if type(v) is list or type(v) is dict:
self.resurse_to_flat_dictionary(v, key)
else:
self.flat_dictionary[key] = str(v).replace(" ", "_")
@@ -45,7 +45,7 @@ class Converter(object):
index = 0
for v in json:
index += 1
- if hasattr(v, '__iter__'):
+ if type(v) is list or type(v) is dict:
self.resurse_to_flat_dictionary(
v, prefix + "." + str(index))
else:
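(Aside on the Python 3 flattening behaviour above: nested keys are joined with ".", spaces become "_", and list entries are numbered from 1. A minimal sketch, with an invented input and the flat dictionary prepared explicitly on the instance:)

    # Minimal sketch of resurse_to_flat_dictionary; input and prefix are illustrative.
    from storperf.carbon.converter import Converter

    c = Converter()
    c.flat_dictionary = {}          # assumed target dict for this sketch
    c.resurse_to_flat_dictionary(
        {"run name": "demo", "jobs": [{"read": {"iops": 1000}}]},
        prefix="myrun")
    print(c.flat_dictionary)
    # {'myrun.run_name': 'demo', 'myrun.jobs.1.read.iops': '1000'}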
diff --git a/docker/storperf-master/storperf/carbon/emitter.py b/docker/storperf-master/storperf/carbon/emitter.py
index b196709..13503b2 100644
--- a/docker/storperf-master/storperf/carbon/emitter.py
+++ b/docker/storperf-master/storperf/carbon/emitter.py
@@ -40,19 +40,19 @@ class CarbonMetricTransmitter():
message = "%s %s %s\n" \
% (key, value, timestamp)
self.logger.debug("Metric: " + message.strip())
- carbon_socket.send(message)
+ carbon_socket.send(message.encode('utf-8'))
except ValueError:
self.logger.debug("Ignoring non numeric metric %s %s"
% (key, value))
message = "%s.commit-marker %s %s\n" \
% (commit_marker, timestamp, timestamp)
- carbon_socket.send(message)
+ carbon_socket.send(message.encode('utf-8'))
self.logger.debug("Marker %s" % message.strip())
self.logger.info("Sent metrics to %s:%s with timestamp %s"
% (self.host, self.port, timestamp))
- except Exception, e:
+ except Exception as e:
self.logger.error("While notifying carbon %s:%s %s"
% (self.host, self.port, e))
diff --git a/docker/storperf-master/storperf/db/graphite_db.py b/docker/storperf-master/storperf/db/graphite_db.py
index 8ebd22e..59b9f5d 100644
--- a/docker/storperf-master/storperf/db/graphite_db.py
+++ b/docker/storperf-master/storperf/db/graphite_db.py
@@ -41,7 +41,7 @@ class GraphiteDB(object):
start = end - duration
request = ("http://%s:%s/graphite/render/?target="
- "%s(%s.*.jobs.1.%s.%s)"
+ "%s(%s.*.jobs.*.%s.%s)"
"&format=json"
"&from=%s"
"&until=%s"
diff --git a/docker/storperf-master/storperf/db/job_db.py b/docker/storperf-master/storperf/db/job_db.py
index b029a35..c3632e4 100644
--- a/docker/storperf-master/storperf/db/job_db.py
+++ b/docker/storperf-master/storperf/db/job_db.py
@@ -220,7 +220,7 @@ class JobDB(object):
db = sqlite3.connect(JobDB.db_name)
cursor = db.cursor()
- for param, value in params.iteritems():
+ for param, value in params.items():
cursor.execute(
"""insert into job_params
(job_id,
diff --git a/docker/storperf-master/storperf/fio/fio_invoker.py b/docker/storperf-master/storperf/fio/fio_invoker.py
index a361eec..bb81eef 100644
--- a/docker/storperf-master/storperf/fio/fio_invoker.py
+++ b/docker/storperf-master/storperf/fio/fio_invoker.py
@@ -11,6 +11,7 @@ import json
import logging
from threading import Thread
import paramiko
+from storperf.utilities import ip_helper
class FIOInvoker(object):
@@ -45,6 +46,8 @@ class FIOInvoker(object):
self.json_body = ""
try:
for line in iter(stdout.readline, b''):
+ if type(line) == bytes:
+ line = line.decode('utf-8')
if line.startswith("fio"):
line = ""
continue
@@ -78,7 +81,8 @@ class FIOInvoker(object):
def stderr_handler(self, stderr):
self.logger.debug("Started")
for line in iter(stderr.readline, b''):
- self.logger.error("FIO Error: %s", line.rstrip())
+ if len(line) > 0:
+ self.logger.error("FIO Error: %s", line.rstrip())
self.stderr.append(line.rstrip())
# Sometime, FIO gets stuck and will give us this message:
@@ -137,10 +141,12 @@ class FIOInvoker(object):
ssh = self._ssh_client()
- command = "sudo killall fio"
-
- self.logger.debug("Executing on %s: %s" % (self.remote_host, command))
- (_, stdout, stderr) = ssh.exec_command(command)
+ kill_commands = ['sudo killall fio',
+ 'sudo pkill fio']
+ for command in kill_commands:
+ self.logger.debug("Executing on %s: %s" %
+ (self.remote_host, command))
+ (_, stdout, stderr) = ssh.exec_command(command)
for line in stdout.readlines():
self.logger.debug(line.strip())
@@ -153,13 +159,25 @@ class FIOInvoker(object):
def _ssh_client(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ address, port = ip_helper.parse_address_and_port(self.remote_host)
if 'username' in self.metadata and 'password' in self.metadata:
- ssh.connect(self.remote_host,
+ ssh.connect(address,
+ port=port,
+ username=self.metadata['username'],
+ password=self.metadata['password'],
+ timeout=5)
+ return ssh
+ elif 'username' in self.metadata and 'ssh_key' in self.metadata:
+ ssh.connect(address,
+ port=port,
username=self.metadata['username'],
- password=self.metadata['password'])
+ pkey=self.metadata['ssh_key'],
+ timeout=5)
return ssh
else:
- ssh.connect(self.remote_host, username='storperf',
+ ssh.connect(address,
+ port=port,
+ username='storperf',
key_filename='storperf/resources/ssh/storperf_rsa',
- timeout=2)
+ timeout=5)
return ssh
diff --git a/docker/storperf-master/storperf/resources/hot/agent-group.yaml b/docker/storperf-master/storperf/resources/hot/agent-group.yaml
index c82ae17..f09d95a 100644
--- a/docker/storperf-master/storperf/resources/hot/agent-group.yaml
+++ b/docker/storperf-master/storperf/resources/hot/agent-group.yaml
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-heat_template_version: 2017-09-01
+heat_template_version: newton
parameters:
public_network:
diff --git a/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml b/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml
index 6314514..7a0a9e9 100644
--- a/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml
+++ b/docker/storperf-master/storperf/resources/hot/storperf-agent.yaml
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-heat_template_version: 2017-09-01
+heat_template_version: newton
parameters:
flavor:
diff --git a/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml b/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml
index cbdd861..d64d0c2 100644
--- a/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml
+++ b/docker/storperf-master/storperf/resources/hot/storperf-volume.yaml
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-heat_template_version: 2017-09-01
+heat_template_version: newton
parameters:
volume_size:
diff --git a/docker/storperf-master/storperf/storperf_master.py b/docker/storperf-master/storperf/storperf_master.py
index 0c7e559..73f8f0d 100644
--- a/docker/storperf-master/storperf/storperf_master.py
+++ b/docker/storperf-master/storperf/storperf_master.py
@@ -7,16 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from datetime import datetime
-import logging
-import os
-import socket
-from threading import Thread
-from time import sleep
-import paramiko
+from datetime import datetime
+from io import StringIO
+from multiprocessing.pool import ThreadPool
from scp import SCPClient
-
from snaps.config.stack import StackConfig
from snaps.openstack.create_stack import OpenStackHeatStack
from snaps.openstack.os_credentials import OSCreds
@@ -24,7 +19,13 @@ from snaps.openstack.utils import heat_utils, cinder_utils, glance_utils
from snaps.thread_utils import worker_pool
from storperf.db.job_db import JobDB
from storperf.test_executor import TestExecutor
+from storperf.utilities import ip_helper
+from time import sleep
import json
+import logging
+import os
+import paramiko
+import socket
import uuid
@@ -37,8 +38,9 @@ class StorPerfMaster(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
+ self.reset_values()
+
self.job_db = JobDB()
- self._stack_name = 'StorPerfAgentGroup'
self.stack_settings = StackConfig(
name=self.stack_name,
template_path='storperf/resources/hot/agent-group.yaml')
@@ -59,21 +61,24 @@ class StorPerfMaster(object):
self.heat_stack = OpenStackHeatStack(self.os_creds,
self.stack_settings)
+
+ self._snaps_pool = worker_pool(20)
+
+ def reset_values(self):
+ self._stack_name = 'StorPerfAgentGroup'
self.username = None
self.password = None
+ self._ssh_key = None
self._test_executor = None
self._agent_count = 1
- self._agent_image = "Ubuntu 14.04"
- self._agent_flavor = "storperf"
+ self._agent_image = None
+ self._agent_flavor = None
self._availability_zone = None
self._public_network = None
self._volume_count = 1
self._volume_size = 1
self._volume_type = None
- self._cached_stack_id = None
- self._last_snaps_check_time = None
self._slave_addresses = []
- self._thread_pool = worker_pool(20)
self._filename = None
self._deadline = None
self._steady_state_samples = 10
@@ -83,6 +88,11 @@ class StorPerfMaster(object):
self._custom_workloads = []
self._subnet_CIDR = '172.16.0.0/16'
self.slave_info = {}
+ self.stackless = False
+ self.mkfs = None
+ self.mount_device = None
+ self._last_snaps_check_time = None
+ self._cached_stack_id = None
@property
def volume_count(self):
@@ -126,10 +136,14 @@ class StorPerfMaster(object):
@stack_name.setter
def stack_name(self, value):
- self._stack_name = value
- self.stack_settings.name = self.stack_name
- self.stack_id = None
- self._last_snaps_check_time = None
+ if value is None:
+ self.stackless = True
+ else:
+ self.stackless = False
+ self._stack_name = value
+ self.stack_settings.name = self.stack_name
+ self.stack_id = None
+ self._last_snaps_check_time = None
@property
def subnet_CIDR(self):
@@ -194,6 +208,10 @@ class StorPerfMaster(object):
def slave_addresses(self):
return self._slave_addresses
+ @slave_addresses.setter
+ def slave_addresses(self, value):
+ self._slave_addresses = value
+
@property
def stack_id(self):
self._get_stack_info()
@@ -204,6 +222,10 @@ class StorPerfMaster(object):
self._cached_stack_id = value
def _get_stack_info(self):
+ if self.stackless:
+ self._cached_stack_id = None
+ return None
+
if self._last_snaps_check_time is not None:
time_since_check = datetime.now() - self._last_snaps_check_time
if time_since_check.total_seconds() < 60:
@@ -216,7 +238,7 @@ class StorPerfMaster(object):
cinder_cli = cinder_utils.cinder_client(self.os_creds)
glance_cli = glance_utils.glance_client(self.os_creds)
- router_worker = self._thread_pool.apply_async(
+ router_worker = self._snaps_pool.apply_async(
self.heat_stack.get_router_creators)
vm_inst_creators = self.heat_stack.get_vm_inst_creators()
@@ -234,7 +256,7 @@ class StorPerfMaster(object):
server = vm1.get_vm_inst()
- image_worker = self._thread_pool.apply_async(
+ image_worker = self._snaps_pool.apply_async(
glance_utils.get_image_by_id, (glance_cli, server.image_id))
self._volume_count = len(server.volume_ids)
@@ -340,6 +362,19 @@ class StorPerfMaster(object):
self._custom_workloads = value
@property
+ def ssh_key(self):
+ if self._ssh_key is None:
+ return None
+ key = StringIO(self._ssh_key)
+ pkey = paramiko.RSAKey.from_private_key(key)
+ key.close()
+ return pkey
+
+ @ssh_key.setter
+ def ssh_key(self, value):
+ self._ssh_key = value
+
+ @property
def is_stack_created(self):
return (self.stack_id is not None and
(self.heat_stack.get_status() == u'CREATE_COMPLETE' or
@@ -363,6 +398,8 @@ class StorPerfMaster(object):
return logs
def create_stack(self):
+ self.stackless = False
+
self.stack_settings.resource_files = [
'storperf/resources/hot/storperf-agent.yaml',
'storperf/resources/hot/storperf-volume.yaml']
@@ -422,7 +459,8 @@ class StorPerfMaster(object):
raise Exception("ERROR: Job {} is already running".format(
self._test_executor.job_id))
- if (self.stack_id is None):
+ if (not self.stackless and
+ self.stack_id is None):
raise ParameterError("ERROR: Stack %s does not exist" %
self.stack_name)
@@ -438,20 +476,23 @@ class StorPerfMaster(object):
slaves = self._slave_addresses
- setup_threads = []
+ setup_pool = ThreadPool(processes=len(slaves))
+ workers = []
for slave in slaves:
- t = Thread(target=self._setup_slave, args=(slave,))
- setup_threads.append(t)
- t.start()
+ worker = setup_pool.apply_async(
+ self._setup_slave, (slave,))
+ workers.append(worker)
+
+ for worker in workers:
+ worker.get()
- for thread in setup_threads:
- thread.join()
+ setup_pool.close()
self._test_executor.slaves = slaves
self._test_executor.volume_count = self.volume_count
params = metadata
- params['agent_count'] = self.agent_count
+ params['agent_count'] = len(slaves)
params['agent_flavor'] = self.agent_flavor
params['agent_image'] = self.agent_image
params['agent_info'] = json.dumps(self.slave_info)
@@ -466,9 +507,12 @@ class StorPerfMaster(object):
params['volume_count'] = self.volume_count
params['volume_size'] = self.volume_size
params['volume_type'] = self.volume_type
- if self.username and self.password:
+ if self.username:
params['username'] = self.username
+ if self.password:
params['password'] = self.password
+ if self.ssh_key:
+ params['ssh_key'] = self.ssh_key
job_id = self._test_executor.execute(params)
self.slave_info = {}
@@ -535,7 +579,8 @@ class StorPerfMaster(object):
timer = 10
while not alive:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- result = s.connect_ex((slave, 22))
+ host, port = ip_helper.parse_address_and_port(slave)
+ result = s.connect_ex((host, port))
s.close()
if result:
@@ -552,13 +597,26 @@ class StorPerfMaster(object):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.username and self.password:
- ssh.connect(slave,
- username=self.username,
- password=self.password)
+ ssh.connect(
+ host,
+ port=port,
+ username=self.username,
+ password=self.password,
+ timeout=2)
+ elif self.username and self.ssh_key:
+ ssh.connect(
+ host,
+ port=port,
+ username=self.username,
+ pkey=self.ssh_key,
+ timeout=2)
else:
- ssh.connect(slave, username='storperf',
- key_filename='storperf/resources/ssh/storperf_rsa',
- timeout=2)
+ ssh.connect(
+ slave,
+ port=port,
+ username='storperf',
+ key_filename='storperf/resources/ssh/storperf_rsa',
+ timeout=2)
uname = self._get_uname(ssh)
logger.debug("Slave uname is %s" % uname)
@@ -582,6 +640,12 @@ class StorPerfMaster(object):
logger.debug("Transferring fio to %s" % slave)
scp.put('/usr/local/bin/fio', '~/')
+ if self.mkfs is not None:
+ self._mkfs(ssh, logger)
+
+ if self.mount_device is not None:
+ self._mount(ssh, logger)
+
def _get_uname(self, ssh):
(_, stdout, _) = ssh.exec_command("uname -a")
return stdout.readline()
@@ -594,6 +658,59 @@ class StorPerfMaster(object):
available = lines[3]
return int(available)
+ def _mkfs(self, ssh, logger):
+ command = "sudo umount %s" % (self.mount_device)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+
+ command = "sudo mkfs.%s %s" % (self.mkfs, self.mount_device)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ rc = stdout.channel.recv_exit_status()
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ error_messages = ""
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+ error_messages += line.rstrip()
+
+ if rc != 0:
+ raise Exception(
+ "Error executing on {0}: {1}".format(
+ command, error_messages))
+
+ def _mount(self, ssh, logger):
+ command = "sudo mkdir -p %s" % (self.filename)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+
+ command = "sudo mount %s %s" % (self.mount_device, self.filename)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ rc = stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ error_messages = ""
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+ error_messages += line.rstrip()
+
+ if rc != 0:
+ raise Exception(
+ "Could not mount {0}: {1}".format(
+ self.mount_device, error_messages))
+
def _resize_root_fs(self, ssh, logger):
command = "sudo /usr/sbin/resize2fs /dev/vda1"
logger.info("Attempting %s" % command)
diff --git a/docker/storperf-master/storperf/test_executor.py b/docker/storperf-master/storperf/test_executor.py
index f7b577e..cb7e478 100644
--- a/docker/storperf-master/storperf/test_executor.py
+++ b/docker/storperf-master/storperf/test_executor.py
@@ -217,18 +217,19 @@ class TestExecutor(object):
def execute(self, metadata):
self.job_db.create_job_id()
+ self._setup_metadata(metadata)
try:
self.test_params()
except Exception as e:
self.terminate()
raise e
- self._setup_metadata(metadata)
- self.job_db.record_workload_params(metadata)
+ stripped_metadata = metadata.copy()
+ stripped_metadata.pop('ssh_key', None)
+ self.job_db.record_workload_params(stripped_metadata)
self._workload_thread = Thread(target=self.execute_workloads,
args=(),
name="Workload thread")
self._workload_thread.start()
- # seems to be hanging here
return self.job_db.job_id
def terminate(self):
@@ -315,8 +316,9 @@ class TestExecutor(object):
continue
workload = current_workload['workload']
- self._thread_gate = ThreadGate(len(self.slaves),
- workload.options['status-interval'])
+ self._thread_gate = ThreadGate(
+ len(self.slaves) * min(1, self.volume_count),
+ float(workload.options['status-interval']))
self.current_workload = current_workload['name']
@@ -360,20 +362,25 @@ class TestExecutor(object):
workloads = []
if self._custom_workloads:
- for workload_name in self._custom_workloads.iterkeys():
- if not workload_name.isalnum():
+ for workload_name in self._custom_workloads.keys():
+ real_name = workload_name
+ if real_name.startswith('_'):
+ real_name = real_name.replace('_', '')
+ self.logger.info("--- real_name: %s" % real_name)
+
+ if not real_name.isalnum():
raise InvalidWorkloadName(
"Workload name must be alphanumeric only: %s" %
- workload_name)
+ real_name)
workload = _custom_workload()
- workload.options['name'] = workload_name
+ workload.options['name'] = real_name
workload.name = workload_name
if (self.filename is not None):
workload.filename = self.filename
workload.id = self.job_db.job_id
workload_params = self._custom_workloads[workload_name]
- for param, value in workload_params.iteritems():
+ for param, value in workload_params.items():
if param == "readwrite":
param = "rw"
if param in workload.fixed_options:
diff --git a/docker/storperf-master/storperf/utilities/data_handler.py b/docker/storperf-master/storperf/utilities/data_handler.py
index 6e87781..98ae640 100644
--- a/docker/storperf-master/storperf/utilities/data_handler.py
+++ b/docker/storperf-master/storperf/utilities/data_handler.py
@@ -157,9 +157,11 @@ class DataHandler(object):
test_db = os.environ.get('TEST_DB_URL')
if test_db is not None:
self.logger.info("Pushing results to %s" % (test_db))
+ stripped_metadata = executor.metadata
+ stripped_metadata.pop("ssh_key", None)
response = test_results_db.push_results_to_db(
test_db,
- executor.metadata,
+ stripped_metadata,
self.logger)
if response:
self.logger.info("Results reference: %s" % response['href'])
diff --git a/docker/storperf-master/storperf/utilities/ip_helper.py b/docker/storperf-master/storperf/utilities/ip_helper.py
new file mode 100644
index 0000000..06087b0
--- /dev/null
+++ b/docker/storperf-master/storperf/utilities/ip_helper.py
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2019 VMware and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+def parse_address_and_port(address):
+ port = 22
+ if '.' in address:
+ # this is IPv4
+ if ':' in address:
+ host = address.split(':')[0]
+ port = int(address.split(':')[1])
+ else:
+ host = address
+ else:
+ if ']' in address:
+ # this is IPv6
+ host = address.split(']')[0].split('[')[1]
+ port = int(address.split(']')[1].split(':')[1])
+ else:
+ host = address
+ return (host, port)
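(A short usage sketch for the new helper; the same behaviour is exercised by the unit tests added later in this change.)

    # Sketch: parse_address_and_port defaults to SSH port 22 unless a port is
    # given as host:port (IPv4) or [host]:port (IPv6).
    from storperf.utilities import ip_helper

    print(ip_helper.parse_address_and_port("192.168.1.10"))       # ('192.168.1.10', 22)
    print(ip_helper.parse_address_and_port("192.168.1.10:2222"))  # ('192.168.1.10', 2222)
    print(ip_helper.parse_address_and_port("[fe80::1]:2222"))     # ('fe80::1', 2222)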
diff --git a/docker/storperf-master/storperf/workloads/_base_workload.py b/docker/storperf-master/storperf/workloads/_base_workload.py
index 9b04314..5aa596e 100644
--- a/docker/storperf-master/storperf/workloads/_base_workload.py
+++ b/docker/storperf-master/storperf/workloads/_base_workload.py
@@ -44,17 +44,24 @@ class _base_workload(object):
self.options['size'] = "100%"
self.logger.debug(
"Profiling a device, using 100% of " + self.filename)
+ self.options['filename'] = self.filename
else:
- self.options['size'] = self.default_filesize
+ if 'size' not in self.options:
+ self.options['size'] = self.default_filesize
self.logger.debug("Profiling a filesystem, using " +
- self.default_filesize + " file")
-
- self.options['filename'] = self.filename
+ self.options['size'] + " file")
+ if not self.filename.endswith('/'):
+ self.filename = self.filename + "/"
+ self.options['directory'] = self.filename
+ self.options['filename_format'] = "'storperf.$jobnum.$filenum'"
self.setup()
- for key, value in self.options.iteritems():
- args.append('--' + key + "=" + value)
+ for key, value in self.options.items():
+ if value is not None:
+ args.append('--' + key + "=" + str(value))
+ else:
+ args.append('--' + key)
if parse_only:
args.append('--parse-only')
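(To illustrate the reworked option handling above: None-valued options become bare flags, and filesystem targets now use 'directory' plus 'filename_format' instead of 'filename'. A standalone sketch mirroring that loop, with illustrative option values only:)

    # Standalone sketch of how fio arguments are assembled for a filesystem target.
    options = {
        "ioengine": "libaio",
        "direct": "1",
        "size": "1G",
        "directory": "/mnt/storperf/",
        "filename_format": "'storperf.$jobnum.$filenum'",
        "group_reporting": None,          # None-valued option becomes a bare flag
    }

    args = []
    for key, value in options.items():
        if value is not None:
            args.append("--" + key + "=" + str(value))
        else:
            args.append("--" + key)
    print(args)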
diff --git a/docker/storperf-master/storperf/workloads/_custom_workload.py b/docker/storperf-master/storperf/workloads/_custom_workload.py
index 9e0100d..5cd37b3 100644
--- a/docker/storperf-master/storperf/workloads/_custom_workload.py
+++ b/docker/storperf-master/storperf/workloads/_custom_workload.py
@@ -18,12 +18,12 @@ class _custom_workload(_base_workload._base_workload):
self.default_filesize = "1G"
self.filename = '/dev/vdb'
self.fixed_options = {
- 'loops': '200',
'output-format': 'json',
'status-interval': '60'
}
self.options = {
'ioengine': 'libaio',
+ 'loops': '200',
'direct': '1',
'numjobs': '1',
'rw': 'read',
diff --git a/docker/storperf-master/tests/carbon_tests/emitter_test.py b/docker/storperf-master/tests/carbon_tests/emitter_test.py
index f5a78d1..7ea515b 100644
--- a/docker/storperf-master/tests/carbon_tests/emitter_test.py
+++ b/docker/storperf-master/tests/carbon_tests/emitter_test.py
@@ -11,7 +11,7 @@ import json
from time import strptime
import unittest
-import mock
+from unittest import mock
from storperf.carbon import converter
from storperf.carbon.emitter import CarbonMetricTransmitter
@@ -69,9 +69,15 @@ class CarbonMetricTransmitterTest(unittest.TestCase):
emitter.carbon_port = self.listen_port
emitter.transmit_metrics(result, None)
+ element = ""
+ for element in data:
+ element = element.decode('utf-8')
+ if element.startswith("host.run-name"):
+ break
+
self.assertEqual("host.run-name.key 123.0 975542400\n",
- data[1],
- data[1])
+ element,
+ data)
@mock.patch("socket.socket")
@mock.patch("time.gmtime")
@@ -90,9 +96,14 @@ class CarbonMetricTransmitterTest(unittest.TestCase):
emitter.carbon_port = self.listen_port
emitter.transmit_metrics(result, None)
+ element = ""
+ for element in data:
+ element = element.decode('utf-8')
+ if element.startswith("None.commit-marker"):
+ break
self.assertEqual("None.commit-marker 975542400 975542400\n",
- data[1],
- data[1])
+ element,
+ data)
@mock.patch("socket.socket")
def test_connect_fails(self, mock_socket):
diff --git a/docker/storperf-master/tests/db_tests/graphite_db_test.py b/docker/storperf-master/tests/db_tests/graphite_db_test.py
index d5fbbfc..2fabfd4 100644
--- a/docker/storperf-master/tests/db_tests/graphite_db_test.py
+++ b/docker/storperf-master/tests/db_tests/graphite_db_test.py
@@ -9,8 +9,7 @@
import unittest
-import mock
-
+from unittest import mock
from storperf.db.graphite_db import GraphiteDB
diff --git a/docker/storperf-master/tests/db_tests/job_db_test.py b/docker/storperf-master/tests/db_tests/job_db_test.py
index 25fda1f..5201963 100644
--- a/docker/storperf-master/tests/db_tests/job_db_test.py
+++ b/docker/storperf-master/tests/db_tests/job_db_test.py
@@ -11,8 +11,7 @@ import os
import sqlite3
import unittest
-import mock
-
+from unittest import mock
from storperf.db.job_db import JobDB
from storperf.workloads.rr import rr
diff --git a/docker/storperf-master/tests/fio_tests/fio_invoker_test.py b/docker/storperf-master/tests/fio_tests/fio_invoker_test.py
index 4672651..3a30500 100644
--- a/docker/storperf-master/tests/fio_tests/fio_invoker_test.py
+++ b/docker/storperf-master/tests/fio_tests/fio_invoker_test.py
@@ -7,11 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from StringIO import StringIO
import json
import unittest
from storperf.fio.fio_invoker import FIOInvoker
+from io import BytesIO
class Test(unittest.TestCase):
@@ -34,7 +34,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(self.simple_dictionary, self.metric)
@@ -43,7 +43,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
terminating = "fio: terminating on signal 2\n"
- output = StringIO(terminating + string + "\n")
+ output = BytesIO((terminating + string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(self.simple_dictionary, self.metric)
@@ -52,7 +52,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = "{'key': 'value'}"
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(None, self.metric)
@@ -61,7 +61,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = "{'key':\n}"
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(None, self.metric)
@@ -71,7 +71,7 @@ class Test(unittest.TestCase):
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
self.fio_invoker.terminated = True
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(None, self.metric)
@@ -81,7 +81,7 @@ class Test(unittest.TestCase):
self.fio_invoker.register(self.event)
string = json.dumps(self.simple_dictionary, indent=4, sort_keys=True)
- output = StringIO(string + "\n")
+ output = BytesIO((string + "\n").encode('utf-8'))
self.fio_invoker.stdout_handler(output)
self.assertEqual(self.simple_dictionary, self.metric)
diff --git a/docker/storperf-master/tests/storperf_master_test.py b/docker/storperf-master/tests/storperf_master_test.py
index 03009d1..1edac6d 100644
--- a/docker/storperf-master/tests/storperf_master_test.py
+++ b/docker/storperf-master/tests/storperf_master_test.py
@@ -9,7 +9,7 @@
import unittest
-import mock
+from unittest.mock import patch
from storperf.storperf_master import StorPerfMaster
@@ -17,8 +17,8 @@ from storperf.storperf_master import StorPerfMaster
class StorPerfMasterTest(unittest.TestCase):
def setUp(self):
- with mock.patch("storperf.storperf_master.OSCreds"), \
- mock.patch(
+ with patch("storperf.storperf_master.OSCreds"), \
+ patch(
"storperf.storperf_master.OpenStackHeatStack") as oshs:
oshs.return_value.get_stack.return_value = None
diff --git a/docker/storperf-master/tests/utilities_tests/data_handler_test.py b/docker/storperf-master/tests/utilities_tests/data_handler_test.py
index 35150dd..7e8cbcc 100644
--- a/docker/storperf-master/tests/utilities_tests/data_handler_test.py
+++ b/docker/storperf-master/tests/utilities_tests/data_handler_test.py
@@ -10,7 +10,7 @@
import os
import unittest
-import mock
+from unittest import mock
from storperf.utilities.data_handler import DataHandler
@@ -311,10 +311,10 @@ class DataHandlerTest(unittest.TestCase):
def test_pass_criteria(self):
metadata = {
"details": {
- "steady_state": {
- "_warm_up.queue-depth.8.block-size.16384": False,
- "rw.queue-depth.4.block-size.16384": True
- }
+ "steady_state": {
+ "_warm_up.queue-depth.8.block-size.16384": False,
+ "rw.queue-depth.4.block-size.16384": True
+ }
},
}
criteria = self.data_handler._determine_criteria(metadata)
@@ -325,11 +325,11 @@ class DataHandlerTest(unittest.TestCase):
def test_fail_criteria(self):
metadata = {
"details": {
- "steady_state": {
- "_warm_up.queue-depth.8.block-size.16384": False,
- "rw.queue-depth.4.block-size.16384": True,
- "rw.queue-depth.8.block-size.16384": False
- }
+ "steady_state": {
+ "_warm_up.queue-depth.8.block-size.16384": False,
+ "rw.queue-depth.4.block-size.16384": True,
+ "rw.queue-depth.8.block-size.16384": False
+ }
},
}
criteria = self.data_handler._determine_criteria(metadata)
diff --git a/docker/storperf-master/tests/utilities_tests/ip_helper_test.py b/docker/storperf-master/tests/utilities_tests/ip_helper_test.py
new file mode 100644
index 0000000..f2d662b
--- /dev/null
+++ b/docker/storperf-master/tests/utilities_tests/ip_helper_test.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Dell EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from storperf.utilities import ip_helper
+
+
+class Test(unittest.TestCase):
+
+ def testNoPortInIPv4(self):
+ host, port = ip_helper.parse_address_and_port("127.0.0.1")
+ self.assertEqual("127.0.0.1", host)
+ self.assertEqual(22, port)
+
+ def testPortInIPv4(self):
+ host, port = ip_helper.parse_address_and_port("127.0.0.1:2222")
+ self.assertEqual("127.0.0.1", host)
+ self.assertEqual(2222, port)
+
+ def testNoPortInIPv6(self):
+ host, port = ip_helper.parse_address_and_port(
+ "1fe80::58bb:c8b:f2f2:c888")
+ self.assertEqual("1fe80::58bb:c8b:f2f2:c888",
+ host)
+ self.assertEqual(22, port)
+
+ def testPortInIPv6(self):
+ host, port = ip_helper.parse_address_and_port(
+ "[1fe80::58bb:c8b:f2f2:c888]:2222")
+ self.assertEqual("1fe80::58bb:c8b:f2f2:c888",
+ host)
+ self.assertEqual(2222, port)
diff --git a/docker/storperf-reporting/Dockerfile b/docker/storperf-reporting/Dockerfile
index ff28dd1..6d017ae 100644
--- a/docker/storperf-reporting/Dockerfile
+++ b/docker/storperf-reporting/Dockerfile
@@ -16,22 +16,22 @@
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
+ARG ALPINE_VERSION=v3.10
FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
MAINTAINER Mark Beierl <mark.beierl@dell.com>
-LABEL version="0.1" description="OPNFV Storperf Reporting Container"
+LABEL version="8.0" description="OPNFV Storperf Reporting Container"
ARG BRANCH=master
RUN ulimit -n 1024
-RUN apk add --update python py-pip
+RUN apk add --update python3=3.7.5-r1
COPY . /home/opnfv/storperf-reporting
WORKDIR /home/opnfv/storperf-reporting/src
-RUN pip install -r /home/opnfv/storperf-reporting/requirements.txt
+RUN python3 -m pip install -r /home/opnfv/storperf-reporting/requirements.txt
-CMD ["python", "app.py"]
+CMD ["python3", "app.py"]
EXPOSE 5000
diff --git a/docker/storperf-swaggerui/Dockerfile b/docker/storperf-swaggerui/Dockerfile
index 5d58a30..9f82890 100644
--- a/docker/storperf-swaggerui/Dockerfile
+++ b/docker/storperf-swaggerui/Dockerfile
@@ -13,7 +13,7 @@
##
ARG ARCH=x86_64
-ARG ALPINE_VERSION=v3.6
+ARG ALPINE_VERSION=v3.10
FROM node:10-alpine
RUN ulimit -n 1024
diff --git a/docker/storperf-workloadagent/Dockerfile b/docker/storperf-workloadagent/Dockerfile
new file mode 100644
index 0000000..e6662a9
--- /dev/null
+++ b/docker/storperf-workloadagent/Dockerfile
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2019 VMware and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Docker container for workload
+#
+# Purpose: docker image for Storperf to control as a synthetic workload
+#
+# Maintained by Mark Beierl
+# Build:
+# $ docker build -t opnfv/storperf-workloadagent:tag .
+#
+
+ARG ARCH=x86_64
+ARG ALPINE_VERSION=v3.10
+FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
+
+RUN apk add --no-cache --upgrade \
+ logrotate \
+ openssh-client \
+ openssh-server \
+ sudo
+
+RUN sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config
+
+RUN echo "root ALL=(ALL) ALL" >> /etc/sudoers
+RUN ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa
+RUN ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa
+
+RUN echo root:password | chpasswd
+
+CMD /usr/sbin/sshd -D -e
\ No newline at end of file
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index bf71c6c..6122cf0 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -1,11 +1,13 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-****************************
+.. _storperf-releasenotes:
+
+**********************
StorPerf Release Notes
-****************************
+**********************
.. toctree::
:maxdepth: 2
- release-notes
+ release-notes.rst
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 91e5666..0303fb4 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
-This document provides the release notes for Fraser 2.0 of StorPerf.
+This document provides the release notes for Iruya 1.0 of StorPerf.
.. contents::
:depth: 3
@@ -17,7 +17,7 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
-| 2018-06-29 | Fraser 3.0 | Mark Beierl | |
+| 2020-01-10 | Iruya 1.0 | Mark Beierl | |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
@@ -25,11 +25,14 @@ Version history
Important notes
----------------
-StorPerf has added the ability to specify the number of Cinder Volumes per
-agent VM to test. The name of the device that the volume is attached to
-has been appended to the host IP address in the metrics so that it can be
-tracked independently.
+Heat stack support is being sunsetted in StorPerf Iruya. Once Kali is released,
+StorPerf will no longer support the following APIs:
+
+* /configurations
+* /quota
+
+Additionally, the stack_name parameter will no longer be used. This also means
+that all tests must run using IP addresses.
Summary
--------
@@ -38,22 +41,10 @@ StorPerf is a standalone framework that uses OpenStack to measure Cinder volume
performance. If desired, it can push results to the OPNFV Test Results DB, or
the embedded Graphite web interface can be used to perform ad hoc queries.
-This release allows for changing of stack attributes from the OpenStack CLI.
-Using a command such as
-
-.. code-block::
- heat stack-update StorPerfAgentGroup --existing -P "agent_count=6"
-
-will change the existing stack to use 6 agents. Note that StorPerf can take
-up to 1 minute after the stack update is complete before detecting the new
-values. Please use a GET of the configurations API to test for updated
-values prior to submitting a new test.
-
-The following command changes the number of volumes per agent:
-
-.. code-block::
- heat stack-update StorPerfAgentGroup --existing -P "volume_count=2"
-
+This release provides the ability to use existing servers (virtual or physical)
+as the targets for workload execution. All that is required is the IP address
+and the SSH key or username/password for StorPerf to be able to log in and
+start FIO workloads.
Release Data
-------------
@@ -62,17 +53,17 @@ Release Data
| **Project** | StorPerf |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | opnfv-6.2.0 |
+| **Repo/tag** | opnfv-9.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser 6.2 |
+| **Release designation** | Iruya.9 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | June 29 2018 |
+| **Release date** | Jan 10, 2020 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Improvements to stack detection |
-| | speed. |
+| **Purpose of the delivery** | Regular release |
+| | |
+--------------------------------------+--------------------------------------+
Version change
@@ -81,24 +72,23 @@ Version change
Module version changes
-----------------------
-No changes to any modules.
+All modules have been upgraded to use python3.
Reason for version
===================
-* Loading stack properties from OpenStack could take minutes or longer
- depending on the stack size. This version includes changes from SNAPS
- to take advantage of parallel OpenStack object lookups.
+* Timed release schedule
Features additions
-------------------
-* STORPERF-239 - Add IP addresses of slaves to configurations API
-* STORPERF-245 - Change to use multithreaded SNAPS
+* STORPERF-268 - Allow user to specify list of IP addresses for StorPerf test
Bug Fixes
----------
+None
+
Deliverables
=============
@@ -106,25 +96,28 @@ Software
---------
- `StorPerf master image <https://hub.docker.com/r/opnfv/storperf-master/>`_
- (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
+ (tag: x86_64-opnfv-8.0.0 or aarch64-opnfv-8.0.0)
- `StorPerf swaggerui <https://hub.docker.com/r/opnfv/storperf-swaggerui/>`_
- (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
+ (tag: x86_64-opnfv-8.0.0 or aarch64-opnfv-8.0.0)
- `StorPerf graphite image <https://hub.docker.com/r/opnfv/storperf-graphite/>`_
- (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
+ (tag: x86_64-opnfv-8.0.0 or aarch64-opnfv-8.0.0)
- `StorPerf reporting image <https://hub.docker.com/r/opnfv/storperf-reporting/>`_
- (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
+ (tag: x86_64-opnfv-8.0.0 or aarch64-opnfv-8.0.0)
- `StorPerf Http-Frontend image <https://hub.docker.com/r/opnfv/storperf-httpfrontend/>`_
- (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
+ (tag: x86_64-opnfv-8.0.0 or aarch64-opnfv-8.0.0)
Documentation
--------------
- :ref:`User Guide <storperf-userguide>`
+Note: The quotas and configurations APIs are being sunsetted with the next
+release.
+
Known Limitations, Issues and Workarounds
------------------------------------------
@@ -148,7 +141,3 @@ Known issues
--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
-Test Result
-===========
-
-- `OPNFV Test Results DB <http://testresults.opnfv.org/reporting/fraser/storperf/status-apex.html>`_
diff --git a/docs/testing/user/installation.rst b/docs/testing/user/installation.rst
index c129fee..7f56244 100755
--- a/docs/testing/user/installation.rst
+++ b/docs/testing/user/installation.rst
@@ -8,11 +8,20 @@ StorPerf Installation Guide
OpenStack Prerequisites
===========================
-If you do not have an Ubuntu 16.04 image in Glance, you will need to add one.
-You also need to create the StorPerf flavor, or choose one that closely
-matches. For Ubuntu 16.04, it must have a minimum of a 4 GB disk. It should
-also have about 8 GB RAM to support FIO's memory mapping of written data blocks
-to ensure 100% coverage of the volume under test.
+StorPerf can be instructed to use OpenStack APIs in order to manage a
+Heat stack of virtual machines and Cinder volumes, or it can be run in
+stackless mode, where it does not need to know anything about OpenStack.
+
+When running in OpenStack mode, there will need to be an external network
+with floating IPs available to assign to the VMs, as well as a Glance image
+that can be used to boot the VMs. This can be almost any Linux based
+image, as long as it can either accept OpenStack metadata for injecting
+the SSH key, or it has known SSH credentials as part of the base image.
+
+The flavor for the image should provide enough disk space for the initial
+boot, along with additional space if profiling of the Glance backing is
+desired. It should also provide at least 8 GB RAM to support FIO's memory
+mapping of written data blocks.
There are scripts in the storperf/ci directory to assist, or you can use the following
code snippets:
@@ -34,9 +43,10 @@ code snippets:
OpenStack Credentials
~~~~~~~~~~~~~~~~~~~~~
-You must have your OpenStack Controller environment variables defined and passed to
-the StorPerf container. The easiest way to do this is to put the rc file contents
-into a clean file called admin.rc that looks similar to this for V2 authentication:
+Unless running in stackless mode, the OpenStack Controller environment
+variables must be defined and passed to the StorPerf container. The easiest
+way to do this is to put the rc file contents into a clean file called
+admin.rc that looks similar to this for V2 authentication:
.. code-block:: console
@@ -89,7 +99,7 @@ Requirements:
* Host has access to the OpenStack Controller API
* Host must have internet connectivity for downloading docker image
* Enough OpenStack floating IPs must be available to match your agent count
-* A local directory for holding the Carbon DB Whisper files
+* Optionally, a local directory for holding the Carbon DB Whisper files
Local disk is used for the Carbon DB storage, as the default size of the docker
container is only 10g. Here is an example of how to create a local storage
@@ -117,7 +127,7 @@ http://storperf:5000/graphite
Running StorPerf Container
==========================
-**As of Euphrates (development) release (June 2017), StorPerf has
+**As of the Euphrates release (June 2017), StorPerf has
changed to use docker-compose in order to start its services.**
Docker compose requires a local file to be created in order to define the
@@ -146,8 +156,12 @@ which should result in:
To run, you must specify the following environment variables:
-* ENV_FILE, which points to your OpenStack admin.rc as noted above.
-* CARBON_DIR, which points to a directory that will be mounted to store the raw metrics.
+* ENV_FILE, which points to your OpenStack admin.rc as noted above. If running
+ in stackless mode only, it is possible to remove the ENV_FILE reference from
+ the docker-compose.yaml file.
+* CARBON_DIR, which points to a directory that will be mounted to store the
+ raw metrics. If desired, the CARBON_DIR can be removed from the
+ docker-compose.yaml file, causing metrics to be kept in the container only.
* TAG, which specifies the Docker tag for the build (ie: latest, danube.3.0, etc).
The following command will start all the StorPerf services:
diff --git a/docs/testing/user/introduction.rst b/docs/testing/user/introduction.rst
index 49e3220..c864edc 100644
--- a/docs/testing/user/introduction.rst
+++ b/docs/testing/user/introduction.rst
@@ -25,18 +25,18 @@ performance metrics in the shortest reasonable time.
How Does StorPerf Work?
=======================
-Once launched, StorPerf presents you with a ReST interface, along with a
+Once launched, StorPerf presents a ReST interface, along with a
`Swagger UI <https://swagger.io/swagger-ui/>`_ that makes it easier to
-form HTTP ReST requests. Issuing an HTTP POST to the configurations API
-causes StorPerf to talk to your OpenStack's heat service to create a new stack
-with as many agent VMs and attached Cinder volumes as you specify.
+form HTTP ReST requests.
-After the stack is created, you can issue one or more jobs by issuing a POST
-to the jobs ReST API. The job is the smallest unit of work that StorPerf
-can use to measure the disk's performance.
+StorPerf enables us to run FIO on multiple VMs, containers or bare
+metal servers by providing a recent release of FIO, copying it to the
+target system and running the specified I/O workloads. It also provides a
+simple API to initialize the target device and fill it with random data
+to ensure that performance is measured against real data, not blank media.
-While the job is running, StorPerf collects the performance metrics from each
-of the disks under test every minute. Once the trend of metrics match the
+While an FIO job is running, StorPerf collects the performance metrics from
+each of the jobs every minute. Once the trend of metrics matches the
criteria specified in the SNIA methodology, the job automatically terminates
and the valid set of metrics are available for querying.
@@ -45,8 +45,210 @@ measured start to "flat line" and stay within that range for the specified
amount of time, then the metrics are considered to be indicative of a
repeatable level of performance.
-What Data Can I Get?
-====================
+With OpenStack Heat
+~~~~~~~~~~~~~~~~~~~
+
+StorPerf provides an API to interact with OpenStack Heat to automatically
+create a set of target VMs and Cinder volumes. The Configurations API is
+used to specify how many VMs and volumes to create, as well as the size of
+each Cinder volume.
+
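+A hedged sketch of such a request is shown below. The agent_count and
+volume_count keys mirror the Heat stack parameters of the same name; the
+other field names, image and network values, and the exact
+/api/v1.0/configurations path are assumptions that should be confirmed
+against the Swagger UI of the running instance.
+
+.. code-block:: python
+
+    import requests
+
+    # Illustrative payload only; consult the Swagger UI for the exact schema.
+    payload = {
+        "agent_count": 2,
+        "agent_image": "Ubuntu 16.04",
+        "public_network": "external",
+        "volume_count": 1,
+        "volume_size": 10
+    }
+    response = requests.post(
+        "http://127.0.0.1:5000/api/v1.0/configurations", json=payload)
+    print(response.json())
+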
+Without OpenStack Heat
+~~~~~~~~~~~~~~~~~~~~~~
+
+StorPerf can also use IP addresses or DNS names to connect to systems that
+have already been provisioned by any external provider, including OpenStack.
+By specifying a stack name of 'null' in the JSON payload, StorPerf will look
+for a list of IP addresses and credentials to use in order to SSH to the
+target systems. In this way, StorPerf can be used to profile bare metal,
+containers that have SSH enabled, or VMs running under OpenStack, VMware ESXi,
+VIO, Microsoft Hyper-V, or anything else. The only requirement is that
+the target be capable of accepting and authenticating SSH connections, and that
+it is Linux based, as currently the FIO supplied by StorPerf is not compiled
+to run under Microsoft Windows or other non-Linux operating systems.
+
+
+StorPerf Testing Guidelines
+===========================
+
+First of all, StorPerf is not able to give pointers on how to tune a
+Cinder implementation, as there are far too many backends (Ceph, NFS, LVM,
+etc), each with their own methods of tuning. StorPerf is here to assist in
+getting a reliable performance measurement by encoding the test
+specification from SNIA, and helping present the results in a way that makes
+sense.
+
+Having said that, there are some general guidelines that we can present to
+assist with planning a performance test.
+
+Workload Modelling
+------------------
+
+This is an important item to address as there are many parameters to how
+data is accessed. Databases typically use a fixed block size and tend to
+manage their data so that sequential access is more likely. GPS image tiles
+can be around 20-60kb and will be accessed by reading the file in full, with
+no easy way to predict what tiles will be needed next. Some programs are
+able to submit I/O asynchronously, while others need to use multiple threads
+and may be synchronous. There is no one size fits all here, so knowing what
+type of I/O pattern we need to model is critical to getting realistic
+measurements.
+
+System Under Test
+-----------------
+
+The unfortunate part is that StorPerf does not have any knowledge about the
+underlying OpenStack itself – we can only see what is available through
+OpenStack APIs, and none of them provide details about the underlying
+storage implementation. As the test executor, we need to know
+information such as: the number of disks or storage nodes; the amount of RAM
+available for caching; the type of connection to the storage and bandwidth
+available.
+
+Measure Storage, not Cache
+--------------------------
+
+As part of the test data size, we need to ensure that we prevent
+caching from interfering in the measurements. The total size of the data
+set in the test must exceed the total size of all the disk cache memory
+available by a certain amount in order to ensure we are forcing non-cached
+I/O. There is no exact science here, but if we balance test duration against
+cache hit ratio, it can be argued that 20% cache hit is good enough and
+increasing file size would result in diminishing returns. Let’s break this
+number down a bit. Given a cache size of 10GB, we could write, then read the
+following dataset sizes:
+
+* 10GB gives 100% cache hit
+* 20GB gives 50% cache hit
+* 50GB gives 20% cache hit
+* 100GB gives 10% cache hit
+
+This means that for the first test, 100% of the results are unreliable due to
+cache. At 50GB, the true performance without cache has only a 20% margin of
+error. Given the fact that the 100GB would take twice as long, and that we
+are only reducing the margin of error by 10%, we recommend this as the best
+tradeoff.
+
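+The tradeoff above is simple to recompute for other cache sizes. The
+following sketch (not part of StorPerf itself) just restates the ratio,
+where the expected cache hit percentage is the cache size divided by the
+dataset size:
+
+.. code-block:: python
+
+    cache_gb = 10
+    for dataset_gb in (10, 20, 50, 100):
+        hit_pct = 100 * cache_gb / dataset_gb
+        print("%d GB dataset -> ~%d%% cache hit" % (dataset_gb, hit_pct))
+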
+How much cache do we actually have? This depends on the storage device being
+used. For hardware NAS or other arrays, it should be fairly easy to get the
+number from the manufacturer, but for software defined storage, it can be
+harder to determine. Let’s take Ceph as an example. Ceph runs as software
+on the bare metal server and therefore has access to all the RAM available on
+the server to use as its cache. Well, not exactly all the memory. We have
+to take into account the memory consumed by the operating system, by the Ceph
+processes, as well as any other processes running on the same system. In the
+case of hyper-converged Ceph, where workload VMs and Ceph run on the systems,
+it can become quite difficult to predict. Ultimately, the amount of memory
+that is left over is the cache for that single Ceph instance. We now need to
+add the memory available from all the other Ceph storage nodes in the
+environment. Time for another example: given 3 Ceph storage nodes with
+256GB RAM each. Let’s take 20% off for the OS and other processes,
+leaving approximately 240GB per node. This gives us 3 x 240 or 720GB total RAM
+available for cache. The total amount of data we want to write in order to
+initialize our Cinder volumes would then be 5 x 720, or 3,600 GB. The
+following illustrates some ways to allocate the data:
+
+* 1 VM with 1 3,600 GB volume
+* 10 VMs each with 1 360 GB volume
+* 2 VMs each with 5 360 GB volumes
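+
+The same sizing rule can be scripted for planning purposes. This sketch is
+not part of StorPerf; it simply multiplies the per-node cache estimate from
+the example above by the number of nodes, and then by five to target a
+roughly 20% cache hit ratio:
+
+.. code-block:: python
+
+    nodes = 3
+    cache_per_node_gb = 240      # RAM left per node after OS and processes
+    total_cache_gb = nodes * cache_per_node_gb       # 720 GB
+    dataset_gb = total_cache_gb * 5                  # ~20% cache hit target
+    print("Cache: %d GB, data to write: %d GB" % (total_cache_gb, dataset_gb))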
+
+Back to Modelling
+-----------------
+
+Now that we know there is 3.6 TB of data to be written, we need to go back to
+the workload model to determine how we are going to write it. Factors to
+consider:
+
+* Number of Volumes. We might be simulating a single database of 3.6 TB, so
+ only 1 Cinder volume is needed to represent this. Or, we might be
+ simulating a web server farm where there are hundreds of processes
+ accessing many different volumes. In this case, we divide the 3.6 TB by
+ the number of volumes, making each volume smaller.
+* Number of Virtual Machines. We might have one monster VM that will drive
+ all our I/O in the system, or maybe there are hundreds of VMs, each with
+ their own individual volume. Using Ceph as an example again, we know that
+ it allows for a single VM to consume all the Ceph resources, which can be
+ perceived as a problem in terms of multi-tenancy and scaling. A common
+ practice to mitigate this is to use Cinder to throttle IOPS at the VM
+ level. If this technique is being used in the environment under test, we
+ must adjust the number of VMs used in the test accordingly.
+* Block Size. We need to know if the application is managing the volume as a
+ raw device (ie: /dev/vdb) or as a filesystem mounted over the device.
+ Different filesystems have their own block sizes: ext4 only allows 1024,
+ 2048 or 4096 as the block size. Typically the larger the block, the better
+ the throughput, however as blocks must be written as an atomic unit, larger
+ block sizes can also reduce effective throughput by having to pad the block
+ if the content is smaller than the actual block size.
+* I/O Depth. This represents the amount of I/O that the application can
+ issue simultaneously. In a multi-threaded app, or one that uses
+ asynchronous I/O, it is possible to have multiple read or write requests
+ outstanding at the same time. For example, with software defined storage
+ where there is an Ethernet network between the client and the storage,
+ the storage would have a higher latency for each I/O, but is capable of
+ accepting many requests in parallel. With an I/O depth of 1, we spend
+ time waiting for the network latency before a response comes back. With
+ higher I/O depth, we can get more throughput despite each I/O having higher
+ latency. Typically, we do not see applications that would go beyond a
+ queue depth of 8, however this is not a firm rule.
+* Data Access Pattern. We need to know if the application typically reads
+ data sequentially or randomly, as well as what the mixture of read vs.
+ write is. It is possible to measure read by itself, or write by itself,
+ but this is not typical behavior for applications. It is useful for
+ determining the potential maximum throughput of a given type of operation.
+
+Fastest Path to Results
+-----------------------
+
+Once we have the information gathered, we can now start executing some tests.
+Let’s take some of the points discussed above and describe our system:
+
+* OpenStack deployment with 3 Control nodes, 5 Compute nodes and 3 dedicated
+ Ceph storage nodes.
+* Ceph nodes each have 240 GB RAM available to be used as cache.
+* Our application writes directly to the raw device (/dev/vdb)
+* There will be 10 instances of the application running, each with its own
+ volume.
+* Our application can use block sizes of 4k or 64k.
+* Our application is capable of maintaining up to 6 I/O operations
+ simultaneously.
+
+The first thing we know is that we want to keep our cache hit ratio around
+20%, so we will be moving 3,600 GB of data. We also know this will take a
+significant amount of time, so here is where StorPerf helps.
+
+First, we use the configurations API to launch our 10 virtual machines each
+with a 360 GB volume. Next comes the most time consuming part: we call the
+initializations API to fill each one of these volumes with random data. By
+preloading the data, we ensure a number of things:
+
+* The storage device has had to fully allocate all of the space for our
+ volumes. This is especially important for software defined storage like
+ Ceph, which is smart enough to know if data is being read from a block that
+ has never been written. No data on disk means no disk read is needed and
+ the response is immediate.
+* The RAM cache has been overrun multiple times. Only 20% of what was
+ written can possibly remain in cache.
+
+This last part is important as we can now use StorPerf’s implementation of
+SNIA’s steady state algorithm to ensure our follow up tests execute as
+quickly as possible. Given the fact that 80% of the data in any given test
+results in a cache miss, we can run multiple tests in a row without having
+to re-initialize or invalidate the cache again in between test runs. We can
+also mix and match the types of workloads to be run in a single performance
+job submission.
+
+Now we can submit a job to the jobs API to execute a 70%/30% mix of
+read/write, with a block size of 4k and an I/O queue depth of 6. This job
+will run until either the maximum time has expired, or until StorPerf detects
+steady state has been reached, at which point it will immediately complete
+and report the results of the measurements.
+
+StorPerf uses FIO as its workload engine, so whatever workload parameters we
+would like to use with FIO can be passed directly through via StorPerf’s jobs
+API.
+
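+As a sketch of that final submission (the host and the /api/v1.0/jobs path
+are assumptions for a local deployment; the payload shape mirrors the Jobs
+API examples in the test usage guide, and rwmixread is a standard FIO option
+passed straight through):
+
+.. code-block:: python
+
+    import requests
+
+    job = {
+        "block_sizes": "4k",
+        "queue_depths": "6",
+        "workloads": {
+            "readwritemix": {
+                "rw": "rw",          # mixed sequential read/write in FIO
+                "rwmixread": "70"    # 70% reads, 30% writes
+            }
+        }
+    }
+    response = requests.post("http://127.0.0.1:5000/api/v1.0/jobs", json=job)
+    print("Job ID:", response.json())
+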
+What Data Can We Get?
+=====================
StorPerf provides the following metrics:
@@ -57,4 +259,9 @@ StorPerf provides the following metrics:
These metrics are available for every job, and for the specific workloads,
I/O loads and I/O types (read, write) associated with the job.
+For each metric, StorPerf also provides the set of samples that were
+collected along with the slope, min and max values that can be used for
+plotting or comparison.
+
As of this time, StorPerf only provides textual reports of the metrics.
+
diff --git a/docs/testing/user/test-usage.rst b/docs/testing/user/test-usage.rst
index 41cbbbd..0fb3a6c 100644
--- a/docs/testing/user/test-usage.rst
+++ b/docs/testing/user/test-usage.rst
@@ -31,8 +31,17 @@ The typical test execution follows this pattern:
#. Execute one or more performance runs
#. Delete the environment
-Configure The Environment
-=========================
+OpenStack or Stackless
+======================
+StorPerf provides the option of controlling the OpenStack environment
+via a Heat Stack, or it can run in stackless mode, where it connects
+directly to the IP addresses supplied, regardless of how the slave
+was created or even if it is an OpenStack VM.
+
+Note: Stack support in StorPerf will be deprecated as of the next release.
+
+Configure The Environment for OpenStack Usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following pieces of information are required to prepare the environment:
@@ -51,12 +60,12 @@ The following pieces of information are required to prepare the environment:
VMs from properly attaching Cinder volumes. There are two known workarounds:
#. Create the environment with 0 Cinder volumes attached, and after the VMs
- have finished booting, modify the stack to have 1 or more Cinder volumes.
- See section on Changing Stack Parameters later in this guide.
+ have finished booting, modify the stack to have 1 or more Cinder volumes.
+ See section on Changing Stack Parameters later in this guide.
#. Add the following image metadata to Glance. This will cause the Cinder
- volume to be mounted as a SCSI device, and therefore your target will be
- /dev/sdb, etc, instead of /dev/vdb. You will need to specify this in your
- warm up and workload jobs.
+ volume to be mounted as a SCSI device, and therefore your target will be
+ /dev/sdb, etc, instead of /dev/vdb. You will need to specify this in your
+ warm up and workload jobs.
.. code-block:
--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
@@ -83,6 +92,52 @@ takes a JSON payload as follows.
This call will block until the stack is created, at which point it will return
the OpenStack heat stack id as well as the IP addresses of the slave agents.
+
+Configure The Environment for Stackless Usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To configure the environment for stackless usage, the slaves must be
+fully operational (ie: a Linux operating system is running and they are
+reachable via a TCP/IP address or hostname).
+
+It is not necessary to use the Configurations API; instead, define the
+stack name as 'null' in any of the other APIs. This instructs StorPerf not to
+gather information about the stack from OpenStack, and to simply use the
+supplied IP addresses and credentials to communicate with the slaves.
+
+A slave can be a container (provided we can SSH to it), a VM running in any
+hypervisor, or even a bare metal server. In the bare metal case, it even
+allows for performing RADOS or RBD performance tests using the appropriate
+FIO engine.
+
+If the slave SSH server is listening to a port other than 22, the port number
+can be specified as part of the address as follows:
+
+IPv4 example for port 2222:
+
+.. code-block::
+
+   192.168.1.10:2222
+
+IPv6 example for port 2222:
+
+.. code-block::
+
+   [1fe80::58bb:c8b:f2f2:c888]:2222
+
+Helper Container Image for Workloads
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A new docker container is provided with StorPerf that can be used to test
+under docker or Kubernetes environments. It has hard coded credentials
+of root/password with an SSH server built in, so be cautious about security
+concerns when using this image. It listens internally on port 22, so that
+port must be exposed to a free port on the host in order for StorPerf to
+reach the synthetic workload container.
+
+.. code-block:: bash
+
+   docker run --name=storperf-workloadagent -p 2222:22 \
+      opnfv/storperf-workloadagent:latest
+
Initialize the Target Volumes
=============================
Before executing a test run for the purpose of measuring performance, it is
@@ -120,6 +175,137 @@ This will return a job ID as follows.
This job ID can be used to query the state to determine when it has completed.
See the section on querying jobs for more information.
+Authentication and Slave Selection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It is possible to run the Initializations API against a subset of the slaves
+known to the stack, or to run it in stackless mode, where StorPerf
+connects directly to the IP addresses supplied via SSH. The following
+keys are available:
+
+slave_addresses
+ (optional) A list of IP addresses or hostnames to use as targets. If
+ omitted, and StorPerf is not running in stackless mode, the full list of
+ IP addresses from the OpenStack Heat stack is used.
+
+stack_name
+ (optional) Either the name of the stack in Heat to use, or null if running
+ in stackless mode.
+
+username
+ (optional) The username to supply to SSH when logging in. This defaults to
+ 'storperf' if not supplied.
+
+password
+ (optional) The password to supply to SSH when logging in. If omitted, the
+ SSH key is used instead.
+
+ssh_private_key
+ (optional) The SSH private key to supply to SSH when logging in. If omitted,
+ the default StorPerf private key is used.
+
+This shows an example of stackless mode going against a single bare metal
+server reachable by IP address:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+    "stack_name": null
+ }
+
+
+Filesystems and Mounts
+~~~~~~~~~~~~~~~~~~~~~~
+
+It is also possible to instruct StorPerf to create a file system on a device
+and mount that as the target directory. The filesystem can be anything
+supported by the target slave OS and it is possible to pass specific arguments
+to the mkfs command. The following additional keys are available in the
+Initializations API for file system control:
+
+mkfs
+ The type and arguments to pass for creating a filesystem
+
+mount_device
+ The target device on which to make the file system. The file system will
+ be mounted on the target specified.
+
+The following example shows the forced creation (-f) of an XFS filesystem
+on device /dev/sdb, and mounting that device on /storperf/filesystem.
+
+**Note** If any of the commands (mkfs, mount) fail for any reason, the
+Initializations API will return with a 400 code and the body of the response
+will contain the error message.
+
+.. code-block:: json
+
+ {
+ "target": "/storperf/filesystem",
+ "mkfs": "xfs -f",
+    "mount_device": "/dev/sdb"
+ }
+
+
+Initializing Filesystems
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Just like we need to fill Cinder volumes with data, if we want to profile
+files on a mounted file system, we need to initialize the file sets with
+random data prior to starting a performance run. The Initializations API
+can also be used to create test data sets.
+
+**Note** be sure to use the same parameters for the number of files, sizes
+and jobs in both the Initializations API and the Jobs API, or you will end
+up with possibly incorrect results in the Job performance run.
+
+The following keys are available in the Initializations API for file creation:
+
+filesize
+ The size of each file to be created and filled with random data.
+
+nrfiles
+ The number of files per job to create.
+
+numjobs
+ The number of independent instances of FIO to launch.
+
+Example:
+
+.. code-block:: json
+
+ {
+ "target": "/storperf/filesystem",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+This would create 100 (10 nrfiles x 10 numjobs) 2G files in the directory
+/storperf/filesystem.
+
+The following example combines the stackless credentials, filesystem creation
+and file initialization keys in a single Initializations API call:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "mkfs": "ext4",
+ "mount_device": "/dev/sdb",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+
Execute a Performance Run
=========================
Performance runs can execute either a single workload, or iterate over a matrix
@@ -221,6 +407,63 @@ StorPerf will also do a verification of the arguments given prior to returning
a Job ID from the ReST call. If an argument fails validation, the error
will be returned in the payload of the response.
+File System Profiling
+~~~~~~~~~~~~~~~~~~~~~
+
+As noted in the Initializations API, files in a file system should be
+initialized prior to executing a performance run, and the number of jobs,
+files and size of files should match the initialization. Given the following
+Initializations API call:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "mkfs": "ext4",
+ "mount_device": "/dev/sdb",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+The corresponding call to the Jobs API would appear as follows:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "block_sizes": "4k",
+ "queue_depths": "8",
+ "workloads": {
+ "readwritemix": {
+ "rw": "rw",
+ "filesize": "2G",
+ "nrfiles": "10",
+ "numjobs": "10"
+ }
+ }
+ }
+
+**Note** the queue depths and block sizes as well as the I/O pattern (rw)
+can change, but the filesize, nrfiles, numjobs and slave addresses must
+match the initialization or the performance run could contain skewed results
+due to disk initialization. StorPerf explicitly allows for the mismatch
+of these so that it is possible to visualize performance when the files
+or disks have not been properly initialized.
+
+
Block Sizes
~~~~~~~~~~~
A comma delimited list of the different block sizes to use when reading and
diff --git a/tox.ini b/tox.ini
index 69aa189..840ce6a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,6 +6,7 @@ envlist =
skipsdist = true
[testenv:docs]
+basepython = python3
deps = -rdocs/requirements.txt
commands =
sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
@@ -13,5 +14,6 @@ commands =
whitelist_externals = echo
[testenv:docs-linkcheck]
+basepython = python3
deps = -rdocs/requirements.txt
commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck