summaryrefslogtreecommitdiffstats
path: root/docker/storperf-master/rest_server.py
diff options
context:
space:
mode:
Diffstat (limited to 'docker/storperf-master/rest_server.py')
-rw-r--r--  docker/storperf-master/rest_server.py  161
1 files changed, 136 insertions, 25 deletions
diff --git a/docker/storperf-master/rest_server.py b/docker/storperf-master/rest_server.py
index 92b6c85..7606eca 100644
--- a/docker/storperf-master/rest_server.py
+++ b/docker/storperf-master/rest_server.py
@@ -10,7 +10,6 @@
import json
import logging.config
import os
-import sys
from flask import abort, Flask, request, jsonify
from flask_cors import CORS
@@ -18,6 +17,7 @@ from flask_restful import Resource, Api, fields
from flask_restful_swagger import swagger
from storperf.storperf_master import StorPerfMaster
+import flask
class ReverseProxied(object):
@@ -137,7 +137,9 @@ class Configure(Resource):
self.logger = logging.getLogger(__name__)
@swagger.operation(
- notes='Fetch the current agent configuration',
+ notes='''Fetch the current agent configuration.
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "stack_name",
@@ -155,7 +157,7 @@ class Configure(Resource):
if stack_name:
storperf.stack_name = stack_name
- return jsonify({'agent_count': storperf.agent_count,
+ json = jsonify({'agent_count': storperf.agent_count,
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
'public_network': storperf.public_network,
@@ -168,10 +170,15 @@ class Configure(Resource):
'stack_name': storperf.stack_name,
'slave_addresses': storperf.slave_addresses,
'stack_id': storperf.stack_id})
+ response = flask.make_response(json)
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
@swagger.operation(
notes='''Set the current agent configuration and create a stack in
- the controller. Returns once the stack create is completed.''',
+ the controller. Returns once the stack create is completed.
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "configuration",
@@ -229,7 +236,9 @@ class Configure(Resource):
abort(400, str(e))
@swagger.operation(
- notes='Deletes the agent configuration and the stack',
+ notes='''Deletes the agent configuration and the stack
+
+ This API is in sunset until the next OPNFV release.''',
parameters=[
{
"name": "stack_name",
@@ -246,7 +255,10 @@ class Configure(Resource):
if stack_name:
storperf.stack_name = stack_name
try:
- return jsonify({'stack_id': storperf.delete_stack()})
+ json = jsonify({'stack_id': storperf.delete_stack()})
+ response = flask.make_response(json)
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
except Exception as e:
self.logger.exception(e)
abort(400, str(e))
@@ -355,7 +367,8 @@ for any single test iteration.
"workload":if specified, the workload to run. Defaults to all.
-"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
the last stack named.
""",
"required": True,
@@ -379,11 +392,13 @@ the last stack named.
if not request.json:
abort(400, "ERROR: Missing configuration data")
+ storperf.reset_values()
self.logger.info(request.json)
try:
if ('stack_name' in request.json):
storperf.stack_name = request.json['stack_name']
+ storperf.stackless = False
if ('target' in request.json):
storperf.filename = request.json['target']
if ('deadline' in request.json):
@@ -422,7 +437,6 @@ the last stack named.
]
)
def delete(self):
- self.logger.info("Threads: %s" % sys._current_frames())
try:
return jsonify({'Slaves': storperf.terminate_workloads()})
except Exception as e:
@@ -439,7 +453,7 @@ class WorkloadsBodyModel:
@swagger.model
@swagger.nested(
- name=WorkloadsBodyModel.__name__)
+ name=WorkloadsBodyModel.__name__)
class WorkloadsNameModel:
resource_fields = {
"name": fields.Nested(WorkloadsBodyModel.resource_fields)
@@ -448,7 +462,7 @@ class WorkloadsNameModel:
@swagger.model
@swagger.nested(
- workloads=WorkloadsNameModel.__name__)
+ workloads=WorkloadsNameModel.__name__)
class WorkloadV2Model:
resource_fields = {
'target': fields.String,
@@ -457,7 +471,11 @@ class WorkloadV2Model:
'workloads': fields.Nested(WorkloadsNameModel.resource_fields),
'queue_depths': fields.String,
'block_sizes': fields.String,
- 'stack_name': fields.String
+ 'stack_name': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List
}
required = ['workloads']
@@ -483,8 +501,21 @@ for any single test iteration.
"workloads": A JSON formatted map of workload names and parameters for FIO.
-"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
-the last stack named.
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
""",
"required": True,
"type": "WorkloadV2Model",
@@ -505,9 +536,10 @@ the last stack named.
)
def post(self):
if not request.json:
- abort(400, "ERROR: Missing configuration data")
+ abort(400, "ERROR: Missing job data")
self.logger.info(request.json)
+ storperf.reset_values()
try:
if ('stack_name' in request.json):
@@ -534,6 +566,15 @@ the last stack named.
else:
metadata = {}
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+
job_id = storperf.execute_workloads(metadata)
return jsonify({'job_id': job_id})
@@ -547,7 +588,16 @@ the last stack named.
class WarmUpModel:
resource_fields = {
'stack_name': fields.String,
- 'target': fields.String
+ 'target': fields.String,
+ 'username': fields.String,
+ 'password': fields.String,
+ 'ssh_private_key': fields.String,
+ 'slave_addresses': fields.List,
+ 'mkfs': fields.String,
+ 'mount_point': fields.String,
+ 'file_size': fields.String,
+ 'nrfiles': fields.String,
+ 'numjobs': fields.String,
}
@@ -565,10 +615,36 @@ class Initialize(Resource):
"description": """Fill the target with random data. If no
target is specified, it will default to /dev/vdb
-"target": The target device or file to fill with random data.
+"target": The target device to use.
-"stack_name": The target stack to use. Defaults to StorPerfAgentGroup, or
-the last stack named.
+"stack_name": This field is in sunset until the next OPNFV release.
+The target stack to use. Defaults to StorPerfAgentGroup, or
+the last stack named. Explicitly specifying null will bypass all Heat Stack
+operations and go directly against the IP addresses specified.
+
+"username": if specified, the username to use when logging into the slave.
+
+"password": if specified, the password to use when logging into the slave.
+
+"ssh_private_key": if specified, the ssh private key to use when logging
+into the slave.
+
+"slave_addresses": if specified, a list of IP addresses to use instead of
+looking all of them up from the stack.
+
+"mkfs": if specified, the command to execute in order to create a filesystem
+on the target device (eg: mkfs.ext4)
+
+"mount_point": if specified, the directory to use when mounting the device.
+
+"filesize": if specified, the size of the files to create when profiling
+a filesystem.
+
+"nrfiles": if specified, the number of files to create when profiling
+a filesystem
+
+"numjobs": if specified, the number of jobs for when profiling
+a filesystem
""",
"required": False,
"type": "WarmUpModel",
@@ -593,17 +669,46 @@ the last stack named.
)
def post(self):
self.logger.info(request.json)
+ storperf.reset_values()
try:
+ warm_up_args = {
+ 'rw': 'randwrite',
+ 'direct': "1",
+ 'loops': "1"
+ }
+ storperf.queue_depths = "8"
+ storperf.block_sizes = "16k"
+
if request.json:
if 'target' in request.json:
storperf.filename = request.json['target']
if 'stack_name' in request.json:
storperf.stack_name = request.json['stack_name']
- storperf.queue_depths = "8"
- storperf.block_sizes = "16k"
- storperf.workloads = "_warm_up"
- storperf.custom_workloads = None
+ if 'username' in request.json:
+ storperf.username = request.json['username']
+ if 'password' in request.json:
+ storperf.password = request.json['password']
+ if 'ssh_private_key' in request.json:
+ storperf.ssh_key = request.json['ssh_private_key']
+ if 'slave_addresses' in request.json:
+ storperf.slave_addresses = request.json['slave_addresses']
+ if 'mkfs' in request.json:
+ storperf.mkfs = request.json['mkfs']
+ if 'mount_device' in request.json:
+ storperf.mount_device = request.json['mount_device']
+ if 'filesize' in request.json:
+ warm_up_args['filesize'] = str(request.json['filesize'])
+ if 'nrfiles' in request.json:
+ warm_up_args['nrfiles'] = str(request.json['nrfiles'])
+ if 'numjobs' in request.json:
+ warm_up_args['numjobs'] = str(request.json['numjobs'])
+
+ storperf.workloads = None
+ storperf.custom_workloads = {
+ '_warm_up': warm_up_args
+ }
+ self.logger.info(storperf.custom_workloads)
job_id = storperf.execute_workloads()
return jsonify({'job_id': job_id})
@@ -628,12 +733,18 @@ class Quota(Resource):
notes='''Fetch the current Cinder volume quota. This value limits
the number of volumes that can be created, and by extension, defines
the maximum number of agents that can be created for any given test
- scenario''',
+ scenario
+
+
+ This API is in sunset until the next OPNFV release.''',
type=QuotaModel.__name__
)
def get(self):
- quota = storperf.volume_quota
- return jsonify({'quota': quota})
+ quota = [] # storperf.volume_quota
+ # return jsonify({'quota': quota})
+ response = flask.make_response(jsonify({'quota': quota}))
+ response.headers['Sunset'] = "Tue, 31 Mar 2020 23:59:59 GMT"
+ return response
def setup_logging(default_path='logging.json',