summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMark Beierl <mark.beierl@dell.com>2018-11-05 18:38:17 +0000
committerGerrit Code Review <gerrit@opnfv.org>2018-11-05 18:38:17 +0000
commitc7842191ede010b05366204668cb3c80518461e5 (patch)
tree010c60e684d8b3f591d7af926caf899fb0994e94
parentc46cd425bd8fd2c9b652a14d2e2400e3de644fbe (diff)
parentbf29ec6e9a5f742d71e7d5cafe009b7223f46782 (diff)
Merge "Add Stackless Support"
-rw-r--r--docker/storperf-master/rest_server.py114
-rw-r--r--docker/storperf-master/storperf/fio/fio_invoker.py5
-rw-r--r--docker/storperf-master/storperf/storperf_master.py175
-rw-r--r--docker/storperf-master/storperf/test_executor.py18
-rw-r--r--docker/storperf-master/storperf/utilities/data_handler.py4
-rw-r--r--docker/storperf-master/storperf/workloads/_base_workload.py17
-rw-r--r--docker/storperf-master/storperf/workloads/_custom_workload.py2
-rwxr-xr-xdocs/testing/user/installation.rst38
-rw-r--r--docs/testing/user/introduction.rst39
-rw-r--r--docs/testing/user/test-usage.rst229
10 files changed, 558 insertions, 83 deletions
diff --git a/docker/storperf-master/rest_server.py b/docker/storperf-master/rest_server.py
index 92b6c85..ce3a41c 100644
--- a/docker/storperf-master/rest_server.py
+++ b/docker/storperf-master/rest_server.py
@@ -379,11 +379,13 @@ the last stack named.
if not request.json:
abort(400, "ERROR: Missing configuration data")
+ storperf.reset_values()
self.logger.info(request.json)
try:
if ('stack_name' in request.json):
storperf.stack_name = request.json['stack_name']
+ storperf.stackless = False
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
@@ -439,7 +441,7 @@ class WorkloadsBodyModel:
@swagger.model
@swagger.nested(
- name=WorkloadsBodyModel.__name__)
+ name=WorkloadsBodyModel.__name__)
class WorkloadsNameModel:
resource_fields = {
"name": fields.Nested(WorkloadsBodyModel.resource_fields)
@@ -448,7 +450,7 @@ class WorkloadsNameModel:
@swagger.model
@swagger.nested(
- workloads=WorkloadsNameModel.__name__)
+ workloads=WorkloadsNameModel.__name__)
class WorkloadV2Model:
resource_fields = {
'target': fields.String,
@@ -457,7 +459,11 @@ class WorkloadV2Model:
'workloads': fields.Nested(WorkloadsNameModel.resource_fields),
'queue_depths': fields.String,
'block_sizes': fields.String,
- 'stack_name': fields.String
+ 'stack_name': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List
}
required = ['workloads']
@@ -484,7 +490,19 @@ for any single test iteration.
"workloads": A JSON formatted map of workload names and parameters for FIO.
"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
-the last stack named.
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
""",
"required": True,
"type": "WorkloadV2Model",
@@ -505,9 +523,10 @@ the last stack named.
)
def post(self):
if not request.json:
- abort(400, "ERROR: Missing configuration data")
+ abort(400, "ERROR: Missing job data")
self.logger.info(request.json)
+ storperf.reset_values()
try:
if ('stack_name' in request.json):
@@ -534,6 +553,15 @@ the last stack named.
else:
metadata = {}
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+
job_id = storperf.execute_workloads(metadata)
return jsonify({'job_id': job_id})
@@ -547,7 +575,15 @@ the last stack named.
class WarmUpModel:
resource_fields = {
'stack_name': fields.String,
- 'target': fields.String
+ 'target': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List,
+ 'mkfs': fields.String,
+ 'mount_point': fields.String,
+ 'file_size': fields.String,
+ 'file_count': fields.String
}
@@ -565,10 +601,35 @@ class Initialize(Resource):
"description": """Fill the target with random data. If no
target is specified, it will default to /dev/vdb
-"target": The target device or file to fill with random data.
+"target": The target device to use.
"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
-the last stack named.
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
+"mkfs": if specified, the command to execute in order to create a filesystem
+on the target device (eg: mkfs.ext4)
+
+"mount_point": if specified, the directory to use when mounting the device.
+
+"filesize": if specified, the size of the files to create when profiling
+a filesystem.
+
+"nrfiles": if specified, the number of files to create when profiling
+a filesystem
+
+"numjobs": if specified, the number of jobs for when profiling
+a filesystem
""",
"required": False,
"type": "WarmUpModel",
@@ -593,17 +654,46 @@ the last stack named.
)
def post(self):
self.logger.info(request.json)
+ storperf.reset_values()
try:
+ warm_up_args = {
+ 'rw': 'randwrite',
+ 'direct': "1",
+ 'loops': "1"
+ }
+ storperf.queue_depths = "8"
+ storperf.block_sizes = "16k"
+
if request.json:
if 'target' in request.json:
storperf.filename = request.json['target']
if 'stack_name' in request.json:
storperf.stack_name = request.json['stack_name']
- storperf.queue_depths = "8"
- storperf.block_sizes = "16k"
- storperf.workloads = "_warm_up"
- storperf.custom_workloads = None
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+ if 'mkfs' in request.json:
+ storperf.mkfs = request.json['mkfs']
+ if 'mount_device' in request.json:
+ storperf.mount_device = request.json['mount_device']
+ if 'filesize' in request.json:
+ warm_up_args['filesize'] = str(request.json['filesize'])
+ if 'nrfiles' in request.json:
+ warm_up_args['nrfiles'] = str(request.json['nrfiles'])
+ if 'numjobs' in request.json:
+ warm_up_args['numjobs'] = str(request.json['numjobs'])
+
+ storperf.workloads = None
+ storperf.custom_workloads = {
+ '_warm_up': warm_up_args
+ }
+ self.logger.info(storperf.custom_workloads)
job_id = storperf.execute_workloads()
return jsonify({'job_id': job_id})
diff --git a/docker/storperf-master/storperf/fio/fio_invoker.py b/docker/storperf-master/storperf/fio/fio_invoker.py
index a361eec..c665598 100644
--- a/docker/storperf-master/storperf/fio/fio_invoker.py
+++ b/docker/storperf-master/storperf/fio/fio_invoker.py
@@ -158,6 +158,11 @@ class FIOInvoker(object):
username=self.metadata['username'],
password=self.metadata['password'])
return ssh
+ elif 'username' in self.metadata and 'ssh_key' in self.metadata:
+ ssh.connect(self.remote_host,
+ username=self.metadata['username'],
+ pkey=self.metadata['ssh_key'])
+ return ssh
else:
ssh.connect(self.remote_host, username='storperf',
key_filename='storperf/resources/ssh/storperf_rsa',
diff --git a/docker/storperf-master/storperf/storperf_master.py b/docker/storperf-master/storperf/storperf_master.py
index 0c7e559..76c4807 100644
--- a/docker/storperf-master/storperf/storperf_master.py
+++ b/docker/storperf-master/storperf/storperf_master.py
@@ -7,25 +7,26 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import StringIO
from datetime import datetime
+import json
import logging
+from multiprocessing.pool import ThreadPool
import os
import socket
-from threading import Thread
from time import sleep
+import uuid
import paramiko
from scp import SCPClient
-
from snaps.config.stack import StackConfig
from snaps.openstack.create_stack import OpenStackHeatStack
from snaps.openstack.os_credentials import OSCreds
from snaps.openstack.utils import heat_utils, cinder_utils, glance_utils
from snaps.thread_utils import worker_pool
+
from storperf.db.job_db import JobDB
from storperf.test_executor import TestExecutor
-import json
-import uuid
class ParameterError(Exception):
@@ -37,8 +38,9 @@ class StorPerfMaster(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
+ self.reset_values()
+
self.job_db = JobDB()
- self._stack_name = 'StorPerfAgentGroup'
self.stack_settings = StackConfig(
name=self.stack_name,
template_path='storperf/resources/hot/agent-group.yaml')
@@ -59,21 +61,26 @@ class StorPerfMaster(object):
self.heat_stack = OpenStackHeatStack(self.os_creds,
self.stack_settings)
+
+ self._cached_stack_id = None
+ self._last_snaps_check_time = None
+ self._snaps_pool = worker_pool(20)
+
+ def reset_values(self):
+ self._stack_name = 'StorPerfAgentGroup'
self.username = None
self.password = None
+ self._ssh_key = None
self._test_executor = None
self._agent_count = 1
- self._agent_image = "Ubuntu 14.04"
- self._agent_flavor = "storperf"
+ self._agent_image = None
+ self._agent_flavor = None
self._availability_zone = None
self._public_network = None
self._volume_count = 1
self._volume_size = 1
self._volume_type = None
- self._cached_stack_id = None
- self._last_snaps_check_time = None
self._slave_addresses = []
- self._thread_pool = worker_pool(20)
self._filename = None
self._deadline = None
self._steady_state_samples = 10
@@ -83,6 +90,9 @@ class StorPerfMaster(object):
self._custom_workloads = []
self._subnet_CIDR = '172.16.0.0/16'
self.slave_info = {}
+ self.stackless = False
+ self.mkfs = None
+ self.mount_device = None
@property
def volume_count(self):
@@ -126,10 +136,14 @@ class StorPerfMaster(object):
@stack_name.setter
def stack_name(self, value):
- self._stack_name = value
- self.stack_settings.name = self.stack_name
- self.stack_id = None
- self._last_snaps_check_time = None
+ if value is None:
+ self.stackless = True
+ else:
+ self.stackless = False
+ self._stack_name = value
+ self.stack_settings.name = self.stack_name
+ self.stack_id = None
+ self._last_snaps_check_time = None
@property
def subnet_CIDR(self):
@@ -194,6 +208,10 @@ class StorPerfMaster(object):
def slave_addresses(self):
return self._slave_addresses
+ @slave_addresses.setter
+ def slave_addresses(self, value):
+ self._slave_addresses = value
+
@property
def stack_id(self):
self._get_stack_info()
@@ -204,6 +222,10 @@ class StorPerfMaster(object):
self._cached_stack_id = value
def _get_stack_info(self):
+ if self.stackless:
+ self._cached_stack_id = None
+ return None
+
if self._last_snaps_check_time is not None:
time_since_check = datetime.now() - self._last_snaps_check_time
if time_since_check.total_seconds() < 60:
@@ -216,7 +238,7 @@ class StorPerfMaster(object):
cinder_cli = cinder_utils.cinder_client(self.os_creds)
glance_cli = glance_utils.glance_client(self.os_creds)
- router_worker = self._thread_pool.apply_async(
+ router_worker = self._snaps_pool.apply_async(
self.heat_stack.get_router_creators)
vm_inst_creators = self.heat_stack.get_vm_inst_creators()
@@ -234,7 +256,7 @@ class StorPerfMaster(object):
server = vm1.get_vm_inst()
- image_worker = self._thread_pool.apply_async(
+ image_worker = self._snaps_pool.apply_async(
glance_utils.get_image_by_id, (glance_cli, server.image_id))
self._volume_count = len(server.volume_ids)
@@ -340,6 +362,19 @@ class StorPerfMaster(object):
self._custom_workloads = value
@property
+ def ssh_key(self):
+ if self._ssh_key is None:
+ return None
+ key = StringIO.StringIO(self._ssh_key)
+ pkey = paramiko.RSAKey.from_private_key(key)
+ key.close()
+ return pkey
+
+ @ssh_key.setter
+ def ssh_key(self, value):
+ self._ssh_key = value
+
+ @property
def is_stack_created(self):
return (self.stack_id is not None and
(self.heat_stack.get_status() == u'CREATE_COMPLETE' or
@@ -363,6 +398,8 @@ class StorPerfMaster(object):
return logs
def create_stack(self):
+ self.stackless = False
+
self.stack_settings.resource_files = [
'storperf/resources/hot/storperf-agent.yaml',
'storperf/resources/hot/storperf-volume.yaml']
@@ -422,7 +459,8 @@ class StorPerfMaster(object):
raise Exception("ERROR: Job {} is already running".format(
self._test_executor.job_id))
- if (self.stack_id is None):
+ if (not self.stackless and
+ self.stack_id is None):
raise ParameterError("ERROR: Stack %s does not exist" %
self.stack_name)
@@ -438,20 +476,23 @@ class StorPerfMaster(object):
slaves = self._slave_addresses
- setup_threads = []
+ setup_pool = ThreadPool(processes=len(slaves))
+ workers = []
for slave in slaves:
- t = Thread(target=self._setup_slave, args=(slave,))
- setup_threads.append(t)
- t.start()
+ worker = setup_pool.apply_async(
+ self._setup_slave, (slave,))
+ workers.append(worker)
- for thread in setup_threads:
- thread.join()
+ for worker in workers:
+ worker.get()
+
+ setup_pool.close()
self._test_executor.slaves = slaves
self._test_executor.volume_count = self.volume_count
params = metadata
- params['agent_count'] = self.agent_count
+ params['agent_count'] = len(slaves)
params['agent_flavor'] = self.agent_flavor
params['agent_image'] = self.agent_image
params['agent_info'] = json.dumps(self.slave_info)
@@ -466,9 +507,12 @@ class StorPerfMaster(object):
params['volume_count'] = self.volume_count
params['volume_size'] = self.volume_size
params['volume_type'] = self.volume_type
- if self.username and self.password:
+ if self.username:
params['username'] = self.username
+ if self.password:
params['password'] = self.password
+ if self.ssh_key:
+ params['ssh_key'] = self.ssh_key
job_id = self._test_executor.execute(params)
self.slave_info = {}
@@ -552,13 +596,23 @@ class StorPerfMaster(object):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.username and self.password:
- ssh.connect(slave,
- username=self.username,
- password=self.password)
+ ssh.connect(
+ slave,
+ username=self.username,
+ password=self.password,
+ timeout=2)
+ elif self.username and self.ssh_key:
+ ssh.connect(
+ slave,
+ username=self.username,
+ pkey=self.ssh_key,
+ timeout=2)
else:
- ssh.connect(slave, username='storperf',
- key_filename='storperf/resources/ssh/storperf_rsa',
- timeout=2)
+ ssh.connect(
+ slave,
+ username='storperf',
+ key_filename='storperf/resources/ssh/storperf_rsa',
+ timeout=2)
uname = self._get_uname(ssh)
logger.debug("Slave uname is %s" % uname)
@@ -582,6 +636,12 @@ class StorPerfMaster(object):
logger.debug("Transferring fio to %s" % slave)
scp.put('/usr/local/bin/fio', '~/')
+ if self.mkfs is not None:
+ self._mkfs(ssh, logger)
+
+ if self.mount_device is not None:
+ self._mount(ssh, logger)
+
def _get_uname(self, ssh):
(_, stdout, _) = ssh.exec_command("uname -a")
return stdout.readline()
@@ -594,6 +654,59 @@ class StorPerfMaster(object):
available = lines[3]
return int(available)
+ def _mkfs(self, ssh, logger):
+ command = "sudo umount %s" % (self.mount_device)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+
+ command = "sudo mkfs.%s %s" % (self.mkfs, self.mount_device)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ rc = stdout.channel.recv_exit_status()
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ error_messages = ""
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+ error_messages += line.rstrip()
+
+ if rc != 0:
+ raise Exception(
+ "Error executing on {0}: {1}".format(
+ command, error_messages))
+
+ def _mount(self, ssh, logger):
+ command = "sudo mkdir -p %s" % (self.filename)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+
+ command = "sudo mount %s %s" % (self.mount_device, self.filename)
+ logger.info("Attempting %s" % command)
+ (_, stdout, stderr) = ssh.exec_command(command)
+ rc = stdout.channel.recv_exit_status()
+ for line in iter(stdout.readline, b''):
+ logger.info(line)
+ error_messages = ""
+ for line in iter(stderr.readline, b''):
+ logger.error(line)
+ error_messages += line.rstrip()
+
+ if rc != 0:
+ raise Exception(
+ "Could not mount {0}: {1}".format(
+ self.mount_device, error_messages))
+
def _resize_root_fs(self, ssh, logger):
command = "sudo /usr/sbin/resize2fs /dev/vda1"
logger.info("Attempting %s" % command)
diff --git a/docker/storperf-master/storperf/test_executor.py b/docker/storperf-master/storperf/test_executor.py
index 53832b4..4b5bbd4 100644
--- a/docker/storperf-master/storperf/test_executor.py
+++ b/docker/storperf-master/storperf/test_executor.py
@@ -217,18 +217,19 @@ class TestExecutor(object):
def execute(self, metadata):
self.job_db.create_job_id()
+ self._setup_metadata(metadata)
try:
self.test_params()
except Exception as e:
self.terminate()
raise e
- self._setup_metadata(metadata)
- self.job_db.record_workload_params(metadata)
+ stripped_metadata = metadata.copy()
+ stripped_metadata.pop('ssh_key', None)
+ self.job_db.record_workload_params(stripped_metadata)
self._workload_thread = Thread(target=self.execute_workloads,
args=(),
name="Workload thread")
self._workload_thread.start()
- # seems to be hanging here
return self.job_db.job_id
def terminate(self):
@@ -362,12 +363,17 @@ class TestExecutor(object):
if self._custom_workloads:
for workload_name in self._custom_workloads.iterkeys():
- if not workload_name.isalnum():
+ real_name = workload_name
+ if real_name.startswith('_'):
+ real_name = real_name.replace('_', '')
+ self.logger.info("--- real_name: %s" % real_name)
+
+ if not real_name.isalnum():
raise InvalidWorkloadName(
"Workload name must be alphanumeric only: %s" %
- workload_name)
+ real_name)
workload = _custom_workload()
- workload.options['name'] = workload_name
+ workload.options['name'] = real_name
workload.name = workload_name
if (self.filename is not None):
workload.filename = self.filename
diff --git a/docker/storperf-master/storperf/utilities/data_handler.py b/docker/storperf-master/storperf/utilities/data_handler.py
index 6e87781..98ae640 100644
--- a/docker/storperf-master/storperf/utilities/data_handler.py
+++ b/docker/storperf-master/storperf/utilities/data_handler.py
@@ -157,9 +157,11 @@ class DataHandler(object):
test_db = os.environ.get('TEST_DB_URL')
if test_db is not None:
self.logger.info("Pushing results to %s" % (test_db))
+ stripped_metadata = executor.metadata
+ stripped_metadata.pop("ssh_key", None)
response = test_results_db.push_results_to_db(
test_db,
- executor.metadata,
+ stripped_metadata,
self.logger)
if response:
self.logger.info("Results reference: %s" % response['href'])
diff --git a/docker/storperf-master/storperf/workloads/_base_workload.py b/docker/storperf-master/storperf/workloads/_base_workload.py
index 9b04314..7468fea 100644
--- a/docker/storperf-master/storperf/workloads/_base_workload.py
+++ b/docker/storperf-master/storperf/workloads/_base_workload.py
@@ -44,17 +44,24 @@ class _base_workload(object):
self.options['size'] = "100%"
self.logger.debug(
"Profiling a device, using 100% of " + self.filename)
+ self.options['filename'] = self.filename
else:
- self.options['size'] = self.default_filesize
+ if 'size' not in self.options:
+ self.options['size'] = self.default_filesize
self.logger.debug("Profiling a filesystem, using " +
- self.default_filesize + " file")
-
- self.options['filename'] = self.filename
+ self.options['size'] + " file")
+ if not self.filename.endswith('/'):
+ self.filename = self.filename + "/"
+ self.options['directory'] = self.filename
+ self.options['filename_format'] = "'storperf.$jobnum.$filenum'"
self.setup()
for key, value in self.options.iteritems():
- args.append('--' + key + "=" + value)
+ if value is not None:
+ args.append('--' + key + "=" + str(value))
+ else:
+ args.append('--' + key)
if parse_only:
args.append('--parse-only')
diff --git a/docker/storperf-master/storperf/workloads/_custom_workload.py b/docker/storperf-master/storperf/workloads/_custom_workload.py
index 9e0100d..5cd37b3 100644
--- a/docker/storperf-master/storperf/workloads/_custom_workload.py
+++ b/docker/storperf-master/storperf/workloads/_custom_workload.py
@@ -18,12 +18,12 @@ class _custom_workload(_base_workload._base_workload):
self.default_filesize = "1G"
self.filename = '/dev/vdb'
self.fixed_options = {
- 'loops': '200',
'output-format': 'json',
'status-interval': '60'
}
self.options = {
'ioengine': 'libaio',
+ 'loops': '200',
'direct': '1',
'numjobs': '1',
'rw': 'read',
diff --git a/docs/testing/user/installation.rst b/docs/testing/user/installation.rst
index c129fee..7f56244 100755
--- a/docs/testing/user/installation.rst
+++ b/docs/testing/user/installation.rst
@@ -8,11 +8,20 @@ StorPerf Installation Guide
OpenStack Prerequisites
===========================
-If you do not have an Ubuntu 16.04 image in Glance, you will need to add one.
-You also need to create the StorPerf flavor, or choose one that closely
-matches. For Ubuntu 16.04, it must have a minimum of a 4 GB disk. It should
-also have about 8 GB RAM to support FIO's memory mapping of written data blocks
-to ensure 100% coverage of the volume under test.
+StorPerf can be instructed to use OpenStack APIs in order to manage a
+Heat stack of virtual machines and Cinder volumes, or it can be run in
+stackless mode, where it does not need to know anything about OpenStack.
+
+When running in OpenStack mode, there will need to be an external network
+with floating IPs available to assign to the VMs, as well as a Glance image
+that can be used to boot the VMs. This can be almost any Linux based
+image, as long as it can either accept OpenStack metadata for injecting
+the SSH key, or it has known SSH credentials as part of the base image.
+
+The flavor for the image should provide enough disk space for the initial
+boot, along with additional space if profiling of the Glance backing is
+desired. It should also provide at least 8 GB RAM to support FIO's memory
+mapping of written data blocks.
There are scripts in storperf/ci directory to assist, or you can use the follow
code snippets:
@@ -34,9 +43,10 @@ code snippets:
OpenStack Credentials
~~~~~~~~~~~~~~~~~~~~~
-You must have your OpenStack Controller environment variables defined and passed to
-the StorPerf container. The easiest way to do this is to put the rc file contents
-into a clean file called admin.rc that looks similar to this for V2 authentication:
+Unless running in stackless mode, the OpenStack Controller environment
+variables must be defined and passed to the StorPerf container. The easiest
+way to do this is to put the rc file contents into a clean file called
+admin.rc that looks similar to this for V2 authentication:
.. code-block:: console
@@ -89,7 +99,7 @@ Requirements:
* Host has access to the OpenStack Controller API
* Host must have internet connectivity for downloading docker image
* Enough OpenStack floating IPs must be available to match your agent count
-* A local directory for holding the Carbon DB Whisper files
+* Optionally, a local directory for holding the Carbon DB Whisper files
Local disk used for the Carbon DB storage as the default size of the docker
container is only 10g. Here is an example of how to create a local storage
@@ -117,7 +127,7 @@ http://storperf:5000/graphite
Running StorPerf Container
==========================
-**As of Euphrates (development) release (June 2017), StorPerf has
+**As of Euphrates release (June 2017), StorPerf has
changed to use docker-compose in order to start its services.**
Docker compose requires a local file to be created in order to define the
@@ -146,8 +156,12 @@ which should result in:
To run, you must specify two environment variables:
-* ENV_FILE, which points to your OpenStack admin.rc as noted above.
-* CARBON_DIR, which points to a directory that will be mounted to store the raw metrics.
+* ENV_FILE, which points to your OpenStack admin.rc as noted above. If running
+ in stackless mode only, it is possible to remove the ENV_FILE reference from
+ the docker-compose.yaml file.
+* CARBON_DIR, which points to a directory that will be mounted to store the
+ raw metrics. If desired, the CARBON_DIR can be removed from the
+ docker-compose.yaml file, causing metrics to be kept in the container only.
+* TAG, which specifies the Docker tag for the build (e.g.: latest, danube.3.0, etc).
The following command will start all the StorPerf services:
diff --git a/docs/testing/user/introduction.rst b/docs/testing/user/introduction.rst
index 0099c39..c864edc 100644
--- a/docs/testing/user/introduction.rst
+++ b/docs/testing/user/introduction.rst
@@ -27,16 +27,16 @@ How Does StorPerf Work?
Once launched, StorPerf presents a ReST interface, along with a
`Swagger UI <https://swagger.io/swagger-ui/>`_ that makes it easier to
-form HTTP ReST requests. Issuing an HTTP POST to the configurations API
-causes StorPerf to talk to OpenStack's heat service to create a new stack
-with as many agent VMs and attached Cinder volumes as specified.
+form HTTP ReST requests.
-After the stack is created, we can issue one or more jobs by issuing a POST
-to the jobs ReST API. The job is the smallest unit of work that StorPerf
-can use to measure the disk's performance.
+StorPerf enables us to run FIO on multiple VMs, containers or bare
+metal servers by providing a recent release of FIO, copying it to the
+target system and running I/O workloads specified. It also provides a
+simple API to initialize the target device and fill it with random data
+to ensure that performance is measured against real data, not blank media.
-While the job is running, StorPerf collects the performance metrics from each
-of the disks under test every minute. Once the trend of metrics match the
+While an FIO job is running, StorPerf collects the performance metrics from
+each of the jobs every minute. Once the trend of metrics match the
criteria specified in the SNIA methodology, the job automatically terminates
and the valid set of metrics are available for querying.
@@ -45,6 +45,29 @@ measured start to "flat line" and stay within that range for the specified
amount of time, then the metrics are considered to be indicative of a
repeatable level of performance.
+With OpenStack Heat
+~~~~~~~~~~~~~~~~~~~
+
+StorPerf provides an API to interact with OpenStack Heat to automatically
+create a set of target VMs and Cinder volumes. The Configurations API is
+used to specify how many VMs and volumes to create, as well as the size of
+each Cinder volume.
+
+Without OpenStack Heat
+~~~~~~~~~~~~~~~~~~~~~~
+
+StorPerf can also use IP addresses or DNS names to connect to systems that
+have already been provisioned by any external provider, including OpenStack.
+By specifying a stack name of 'null' in the JSON payload, StorPerf will look
+for a list of IP addresses and credentials to use in order to SSH to the
+target systems. In this way, StorPerf can be used to profile bare metal,
+containers that have SSH enabled, or VMs running under OpenStack, VMware ESXi,
+VIO, Microsoft Hyper-V, or anything else. The only requirement is that
+the target be capable of accepting and authenticating SSH connections, and that
+it is Linux based, as currently the FIO supplied by StorPerf is not compiled
+to run under Microsoft Windows or other non-Linux operating systems.
+
+
StorPerf Testing Guidelines
===========================
diff --git a/docs/testing/user/test-usage.rst b/docs/testing/user/test-usage.rst
index 41cbbbd..5dfd048 100644
--- a/docs/testing/user/test-usage.rst
+++ b/docs/testing/user/test-usage.rst
@@ -31,8 +31,15 @@ The typical test execution follows this pattern:
#. Execute one or more performance runs
#. Delete the environment
-Configure The Environment
-=========================
+OpenStack or Stackless
+======================
+StorPerf provides the option of controlling the OpenStack environment
+via a Heat Stack, or it can run in stackless mode, where it connects
+directly to the IP addresses supplied, regardless of how the slave
+was created or even if it is an OpenStack VM.
+
+Configure The Environment for OpenStack Usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following pieces of information are required to prepare the environment:
@@ -51,12 +58,12 @@ The following pieces of information are required to prepare the environment:
VMs from properly attaching Cinder volumes. There are two known workarounds:
#. Create the environment with 0 Cinder volumes attached, and after the VMs
- have finished booting, modify the stack to have 1 or more Cinder volumes.
- See section on Changing Stack Parameters later in this guide.
+ have finished booting, modify the stack to have 1 or more Cinder volumes.
+ See section on Changing Stack Parameters later in this guide.
#. Add the following image metadata to Glance. This will cause the Cinder
- volume to be mounted as a SCSI device, and therefore your target will be
- /dev/sdb, etc, instead of /dev/vdb. You will need to specify this in your
- warm up and workload jobs.
+ volume to be mounted as a SCSI device, and therefore your target will be
+ /dev/sdb, etc, instead of /dev/vdb. You will need to specify this in your
+ warm up and workload jobs.
.. code-block:
--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
@@ -83,6 +90,26 @@ takes a JSON payload as follows.
This call will block until the stack is created, at which point it will return
the OpenStack heat stack id as well as the IP addresses of the slave agents.
+
+Configure The Environment for Stackless Usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To configure the environment for stackless usage, the slaves must be
+fully operational (i.e.: a Linux operating system is running and they are
+reachable via a TCP/IP address or hostname).
+
+It is not necessary to use the Configurations API, but instead define the
+stack name as 'null' in any of the other APIs. This instructs StorPerf not to
+gather information about the stack from OpenStack, and to simply use the
+supplied IP addresses and credentials to communicate with the slaves.
+
+A slave can be a container (provided we can SSH to it), a VM running in any
+hypervisor, or even a bare metal server. In the bare metal case, it even
+allows for performing RADOS or RBD performance tests using the appropriate
+FIO engine.
+
+
+
Initialize the Target Volumes
=============================
Before executing a test run for the purpose of measuring performance, it is
@@ -120,6 +147,137 @@ This will return a job ID as follows.
This job ID can be used to query the state to determine when it has completed.
See the section on querying jobs for more information.
+Authentication and Slave Selection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It is possible to run the Initializations API against a subset of the slaves
+known to the stack, or to run it in stackless mode, where StorPerf
+connects directly to the IP addresses supplied via SSH. The following
+keys are available:
+
+slave_addresses
+ (optional) A list of IP addresses or hostnames to use as targets. If
+ omitted, and StorPerf is not running in stackless mode, the full list of
+ IP addresses from the OpenStack Heat stack is used.
+
+stack_name
+ (optional) Either the name of the stack in Heat to use, or null if running
+ in stackless mode.
+
+username
+ (optional) The username to supply to SSH when logging in. This defaults to
+ 'storperf' if not supplied.
+
+password
+ (optional) The password to supply to SSH when logging in. If omitted, the
+ SSH key is used instead.
+
+ssh_private_key
+ (optional) The SSH private key to supply to SSH when logging in. If omitted,
+ the default StorPerf private key is used.
+
+This shows an example of stackless mode going against a single bare metal
+server reachable by IP address:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ }
+
+
+Filesystems and Mounts
+~~~~~~~~~~~~~~~~~~~~~~
+
+It is also possible to instruct StorPerf to create a file system on a device
+and mount that as the target directory. The filesystem can be anything
+supported by the target slave OS and it is possible to pass specific arguments
+to the mkfs command. The following additional keys are available in the
+Initializations API for file system control:
+
+mkfs
+ The type and arguments to pass for creating a filesystem
+
+mount_device
+ The target device on which to make the file system. The file system will
+ be mounted on the target specified.
+
+The following example shows the forced creation (-f) of an XFS filesystem
+on device /dev/sdb, and mounting that device on /storperf/filesystem.
+
+**Note** If any of the commands (mkfs, mount) fail for any reason, the
+Initializations API will return with a 400 code and the body of the response
+will contain the error message.
+
+.. code-block:: json
+
+ {
+ "target": "/storperf/filesystem",
+ "mkfs": "xfs -f",
+ "mount_device": "/dev/sdb",
+ }
+
+
+Initializing Filesystems
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Just like we need to fill Cinder volumes with data, if we want to profile
+files on a mounted file system, we need to initialize the file sets with
+random data prior to starting a performance run. The Initializations API
+can also be used to create test data sets.
+
+**Note** Be sure to use the same parameters for the number of files, sizes
+and jobs in both the Initializations API and the Jobs API, or you will end
+up with possibly incorrect results in the Job performance run.
+
+The following keys are available in the Initializations API for file creation:
+
+filesize
+ The size of each file to be created and filled with random data.
+
+nrfiles
+ The number of files per job to create.
+
+numjobs
+ The number of independent instances of FIO to launch.
+
+Example:
+
+.. code-block:: json
+
+ {
+ "target": "/storperf/filesystem",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+This would create 100 (10 nrfiles x 10 numjobs) 2G files in the directory
+/storperf/filesystem.
+
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "mkfs": "ext4",
+ "mount_device": "/dev/sdb",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+
Execute a Performance Run
=========================
Performance runs can execute either a single workload, or iterate over a matrix
@@ -221,6 +379,63 @@ StorPerf will also do a verification of the arguments given prior to returning
a Job ID from the ReST call. If an argument fails validation, the error
will be returned in the payload of the response.
+File System Profiling
+~~~~~~~~~~~~~~~~~~~~~
+
+As noted in the Initializations API, files in a file system should be
+initialized prior to executing a performance run, and the number of jobs,
+files and size of files should match the initialization. Given the following
+Initializations API call:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "mkfs": "ext4",
+ "mount_device": "/dev/sdb",
+ "filesize": "2G",
+ "nrfiles": 10,
+ "numjobs": 10
+ }
+
+The corresponding call to the Jobs API would appear as follows:
+
+.. code-block:: json
+
+ {
+ "username": "labadmin",
+ "ssh_private_key": "-----BEGIN RSA PRIVATE KEY----- \nMIIE...X0=\n-----END RSA PRIVATE KEY-----",
+ "slave_addresses": [
+ "172.17.108.44"
+ ],
+ "stack_name": null,
+ "target": "/storperf/filesystem",
+ "block_sizes": "4k",
+ "queue_depths": "8",
+ "workloads": {
+ "readwritemix": {
+ "rw": "rw",
+ "filesize": "2G",
+ "nrfiles": "10",
+ "numjobs": "10"
+ }
+ }
+ }
+
+**Note** the queue depths and block sizes as well as the I/O pattern (rw)
+can change, but the filesize, nrfiles, numjobs and slave addresses must
+match the initialization, or the performance run could contain skewed results
+due to incomplete disk initialization. StorPerf explicitly allows for the mismatch
+of these so that it is possible to visualize performance when the files
+or disks have not been properly initialized.
+
+
Block Sizes
~~~~~~~~~~~
A comma delimited list of the different block sizes to use when reading and