summaryrefslogtreecommitdiffstats
path: root/docker/storperf-master
diff options
context:
space:
mode:
authorAmeed Ashour <Ameed.Ashour.ext@nokia.com>2018-07-18 19:45:26 +0300
committermbeierl <mark.beierl@dell.com>2018-07-18 15:46:55 -0400
commit6441c0c76523201e62768087b9a0255cdddb6756 (patch)
tree82c1cfffc0b7d0d3e9dc10733482c148aed824c3 /docker/storperf-master
parent3375ff5646ea9daf944bc7207a19b4db4b44334b (diff)
Allow user to specify stack name on stack create
This allows StorPerf to run multiple times concurrently against the same OpenStack environment. Adds stack_name to all /configurations API requests as an optional parameter. Adds stack_name to the /jobs API POST as an optional parameter. Adds stack_name to the /initializations API as an optional parameter. Adds all stack info to the metadata for later reporting. JIRA: STORPERF-256 Change-Id: Ief69c69472e08ee821555f3a006ab9edc7ec177f Signed-off-by: Ameed Ashour <Ameed.Ashour.ext@nokia.com>
Diffstat (limited to 'docker/storperf-master')
-rw-r--r--docker/storperf-master/rest_server.py91
-rw-r--r--docker/storperf-master/storperf/resources/hot/agent-group.yaml7
-rw-r--r--docker/storperf-master/storperf/storperf_master.py37
-rw-r--r--docker/storperf-master/storperf/test_executor.py3
4 files changed, 117 insertions, 21 deletions
diff --git a/docker/storperf-master/rest_server.py b/docker/storperf-master/rest_server.py
index 1e61693..020517a 100644
--- a/docker/storperf-master/rest_server.py
+++ b/docker/storperf-master/rest_server.py
@@ -104,6 +104,7 @@ class ConfigurationRequestModel:
'volume_type': fields.String,
'availability_zone': fields.String,
'subnet_CIDR': fields.String,
+ 'stack_name': fields.String,
'username': fields.String,
'password': fields.String
}
@@ -123,6 +124,7 @@ class ConfigurationResponseModel:
'volume_type': fields.String,
'availability_zone': fields.String,
'subnet_CIDR': fields.String,
+ 'stack_name': fields.String,
'slave_addresses': fields.Nested
}
@@ -136,9 +138,23 @@ class Configure(Resource):
@swagger.operation(
notes='Fetch the current agent configuration',
+ parameters=[
+ {
+ "name": "stack_name",
+ "description": "The name of the stack to use, defaults to " +
+ "StorPerfAgentGroup or the last stack named",
+ "required": False,
+ "type": "string",
+ "allowMultiple": False,
+ "paramType": "query"
+ }],
type=ConfigurationResponseModel.__name__
)
def get(self):
+ stack_name = request.args.get('stack_name')
+ if stack_name:
+ storperf.stack_name = stack_name
+
return jsonify({'agent_count': storperf.agent_count,
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
@@ -149,6 +165,7 @@ class Configure(Resource):
'stack_created': storperf.is_stack_created,
'availability_zone': storperf.availability_zone,
'subnet_CIDR': storperf.subnet_CIDR,
+ 'stack_name': storperf.stack_name,
'slave_addresses': storperf.slave_addresses,
'stack_id': storperf.stack_id})
@@ -174,6 +191,10 @@ class Configure(Resource):
abort(400, "ERROR: No data specified")
try:
+ # Note this must be first in order to be able to create
+ # more than one stack in the same StorPerf instance.
+ if ('stack_name' in request.json):
+ storperf.stack_name = request.json['stack_name']
if ('agent_count' in request.json):
storperf.agent_count = request.json['agent_count']
if ('agent_flavor' in request.json):
@@ -208,9 +229,22 @@ class Configure(Resource):
abort(400, str(e))
@swagger.operation(
- notes='Deletes the agent configuration and the stack'
+ notes='Deletes the agent configuration and the stack',
+ parameters=[
+ {
+ "name": "stack_name",
+ "description": "The name of the stack to delete, defaults to " +
+ "StorPerfAgentGroup or the last stack named",
+ "required": False,
+ "type": "string",
+ "allowMultiple": False,
+ "paramType": "query"
+ }]
)
def delete(self):
+ stack_name = request.args.get('stack_name')
+ if stack_name:
+ storperf.stack_name = stack_name
try:
return jsonify({'stack_id': storperf.delete_stack()})
except Exception as e:
@@ -226,7 +260,8 @@ class WorkloadModel:
"steady_state_samples": fields.Integer,
'workload': fields.String,
'queue_depths': fields.String,
- 'block_sizes': fields.String
+ 'block_sizes': fields.String,
+ 'stack_name': fields.String
}
@@ -319,6 +354,9 @@ following parameters:
for any single test iteration.
"workload":if specified, the workload to run. Defaults to all.
+
+"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named.
""",
"required": True,
"type": "WorkloadModel",
@@ -344,6 +382,8 @@ for any single test iteration.
self.logger.info(request.json)
try:
+ if ('stack_name' in request.json):
+ storperf.stack_name = request.json['stack_name']
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
@@ -390,15 +430,36 @@ for any single test iteration.
@swagger.model
+class WorkloadsBodyModel:
+ resource_fields = {
+ "rw": fields.String(default="randrw")
+ }
+ required = ['rw']
+
+
+@swagger.model
+@swagger.nested(
+ name=WorkloadsBodyModel.__name__)
+class WorkloadsNameModel:
+ resource_fields = {
+ "name": fields.Nested(WorkloadsBodyModel.resource_fields)
+ }
+
+
+@swagger.model
+@swagger.nested(
+ workloads=WorkloadsNameModel.__name__)
class WorkloadV2Model:
resource_fields = {
'target': fields.String,
'deadline': fields.Integer,
"steady_state_samples": fields.Integer,
- 'workloads': fields.Nested,
+ 'workloads': fields.Nested(WorkloadsNameModel.resource_fields),
'queue_depths': fields.String,
- 'block_sizes': fields.String
+ 'block_sizes': fields.String,
+ 'stack_name': fields.String
}
+ required = ['workloads']
class Job_v2(Resource):
@@ -420,7 +481,10 @@ following parameters:
"deadline": if specified, the maximum duration in minutes
for any single test iteration.
-"workloads":if specified, the workload to run. Defaults to all.
+"workloads": A JSON formatted map of workload names and parameters for FIO.
+
+"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named.
""",
"required": True,
"type": "WorkloadV2Model",
@@ -446,6 +510,8 @@ for any single test iteration.
self.logger.info(request.json)
try:
+ if ('stack_name' in request.json):
+ storperf.stack_name = request.json['stack_name']
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
@@ -478,6 +544,7 @@ for any single test iteration.
@swagger.model
class WarmUpModel:
resource_fields = {
+ 'stack_name': fields.String,
'target': fields.String
}
@@ -496,7 +563,10 @@ class Initialize(Resource):
"description": """Fill the target with random data. If no
target is specified, it will default to /dev/vdb
-"target": The target device or file to fill with random data",
+"target": The target device or file to fill with random data.
+
+"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named.
""",
"required": False,
"type": "WarmUpModel",
@@ -523,10 +593,13 @@ target is specified, it will default to /dev/vdb
self.logger.info(request.json)
try:
- if (request.json and 'target' in request.json):
- storperf.filename = request.json['target']
+ if request.json:
+ if 'target' in request.json:
+ storperf.filename = request.json['target']
+ if 'stack_name' in request.json:
+ storperf.stack_name = request.json['stack_name']
storperf.queue_depths = "8"
- storperf.block_sizes = "8192"
+ storperf.block_sizes = "16k"
storperf.workloads = "_warm_up"
storperf.custom_workloads = None
job_id = storperf.execute_workloads()
diff --git a/docker/storperf-master/storperf/resources/hot/agent-group.yaml b/docker/storperf-master/storperf/resources/hot/agent-group.yaml
index b4e3b6f..c82ae17 100644
--- a/docker/storperf-master/storperf/resources/hot/agent-group.yaml
+++ b/docker/storperf-master/storperf/resources/hot/agent-group.yaml
@@ -48,6 +48,9 @@ parameters:
subnet_CIDR:
type: string
default: '172.16.0.0/16'
+ keypair_name:
+ type: string
+ default: storperf_agent_keypair
resources:
slaves:
@@ -74,8 +77,6 @@ resources:
storperf_network:
type: OS::Neutron::Net
- properties:
- name: storperf-network
storperf_subnet:
type: OS::Neutron::Subnet
@@ -99,7 +100,7 @@ resources:
type: OS::Nova::KeyPair
properties:
save_private_key: true
- name: storperf_agent_keypair
+ name: { get_param: keypair_name}
storperf_open_security_group:
type: OS::Neutron::SecurityGroup
diff --git a/docker/storperf-master/storperf/storperf_master.py b/docker/storperf-master/storperf/storperf_master.py
index a50dcc6..0c7e559 100644
--- a/docker/storperf-master/storperf/storperf_master.py
+++ b/docker/storperf-master/storperf/storperf_master.py
@@ -25,6 +25,7 @@ from snaps.thread_utils import worker_pool
from storperf.db.job_db import JobDB
from storperf.test_executor import TestExecutor
import json
+import uuid
class ParameterError(Exception):
@@ -37,9 +38,9 @@ class StorPerfMaster(object):
self.logger = logging.getLogger(__name__)
self.job_db = JobDB()
-
+ self._stack_name = 'StorPerfAgentGroup'
self.stack_settings = StackConfig(
- name='StorPerfAgentGroup',
+ name=self.stack_name,
template_path='storperf/resources/hot/agent-group.yaml')
self.os_creds = OSCreds(
@@ -120,6 +121,17 @@ class StorPerfMaster(object):
self._volume_type = value
@property
+ def stack_name(self):
+ return self._stack_name
+
+ @stack_name.setter
+ def stack_name(self, value):
+ self._stack_name = value
+ self.stack_settings.name = self.stack_name
+ self.stack_id = None
+ self._last_snaps_check_time = None
+
+ @property
def subnet_CIDR(self):
return self._subnet_CIDR
@@ -411,7 +423,8 @@ class StorPerfMaster(object):
self._test_executor.job_id))
if (self.stack_id is None):
- raise ParameterError("ERROR: Stack does not exist")
+ raise ParameterError("ERROR: Stack %s does not exist" %
+ self.stack_name)
self._test_executor = TestExecutor()
self._test_executor.register(self.executor_event)
@@ -437,16 +450,22 @@ class StorPerfMaster(object):
self._test_executor.slaves = slaves
self._test_executor.volume_count = self.volume_count
-
params = metadata
params['agent_count'] = self.agent_count
+ params['agent_flavor'] = self.agent_flavor
+ params['agent_image'] = self.agent_image
+ params['agent_info'] = json.dumps(self.slave_info)
+ params['availability_zone'] = self.availability_zone
+ params['block_sizes'] = self.block_sizes
+ params['deadline'] = self.deadline
params['public_network'] = self.public_network
+ params['stack_name'] = self.stack_name
+ params['steady_state_samples'] = self.steady_state_samples
+ params['subnet_CIDR'] = self.subnet_CIDR
+ params['target'] = self.filename
params['volume_count'] = self.volume_count
params['volume_size'] = self.volume_size
- params['subnet_CIDR'] = self.subnet_CIDR
- params['agent_info'] = json.dumps(self.slave_info)
- if self.volume_type is not None:
- params['volume_type'] = self.volume_type
+ params['volume_type'] = self.volume_type
if self.username and self.password:
params['username'] = self.username
params['password'] = self.password
@@ -586,11 +605,13 @@ class StorPerfMaster(object):
logger.error(line)
def _make_parameters(self):
+ random_str = uuid.uuid4().hex[:6].upper()
heat_parameters = {}
heat_parameters['public_network'] = self.public_network
heat_parameters['agent_count'] = self.agent_count
heat_parameters['volume_count'] = self.volume_count
heat_parameters['volume_size'] = self.volume_size
+ heat_parameters['keypair_name'] = 'storperf_agent_keypair' + random_str
heat_parameters['subnet_CIDR'] = self.subnet_CIDR
if self.volume_type is not None:
heat_parameters['volume_type'] = self.volume_type
diff --git a/docker/storperf-master/storperf/test_executor.py b/docker/storperf-master/storperf/test_executor.py
index 0ab5698..9e1736d 100644
--- a/docker/storperf-master/storperf/test_executor.py
+++ b/docker/storperf-master/storperf/test_executor.py
@@ -222,8 +222,8 @@ class TestExecutor(object):
except Exception as e:
self.terminate()
raise e
- self.job_db.record_workload_params(metadata)
self._setup_metadata(metadata)
+ self.job_db.record_workload_params(metadata)
self._workload_thread = Thread(target=self.execute_workloads,
args=(),
name="Workload thread")
@@ -263,6 +263,7 @@ class TestExecutor(object):
def _execute_workload(self, current_workload, workload, parse_only=False):
workload.options['iodepth'] = str(current_workload['queue-depth'])
workload.options['bs'] = str(current_workload['blocksize'])
+ self._workload_executors = []
slave_threads = []
thread_pool = ThreadPool(processes=len(self.slaves) *
self.volume_count)