Diffstat (limited to 'rest_server.py')
-rw-r--r-- | rest_server.py | 89 |
1 file changed, 88 insertions(+), 1 deletion(-)
diff --git a/rest_server.py b/rest_server.py
index f0a817b..72f849a 100644
--- a/rest_server.py
+++ b/rest_server.py
@@ -7,7 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from storperf.storperf_master import StorPerfMaster
+import io
 import json
 import logging
 import logging.config
@@ -15,7 +15,12 @@ import os
 
 from flask import abort, Flask, request, jsonify, send_from_directory
 from flask_restful import Resource, Api, fields
+
 from flask_restful_swagger import swagger
 
+from storperf.db.job_db import JobDB
+from storperf.plot.barchart import Barchart
+from storperf.storperf_master import StorPerfMaster
+
 app = Flask(__name__, static_url_path="")
 api = swagger.docs(Api(app), apiVersion='1.0')
@@ -28,6 +33,82 @@ def send_swagger(path):
     return send_from_directory('storperf/resources/html/swagger', path)
 
 
+@app.route('/results/<path:job_id>')
+def results_page(job_id):
+
+    job_db = JobDB()
+    params = {}
+
+    params = job_db.fetch_workload_params(job_id)
+
+    results = storperf.fetch_results(job_id)
+    workloads = []
+    block_sizes = []
+    queue_depths = []
+
+    for key, value in results.iteritems():
+        workload = key.split('.')[0]
+        queue_depth = int(key.split('.')[2])
+        block_size = int(key.split('.')[4])
+        if workload not in workloads:
+            workloads.append(workload)
+        if queue_depth not in queue_depths:
+            queue_depths.append(queue_depth)
+        if block_size not in block_sizes:
+            block_sizes.append(block_size)
+
+    queue_depths.sort()
+    block_sizes.sort()
+
+    read_latencies = []
+    write_latencies = []
+#    for workload in workloads:
+    workload = "rw"
+
+    for queue_depth in queue_depths:
+        rlatencies = []
+        read_latencies.append(rlatencies)
+        wlatencies = []
+        write_latencies.append(wlatencies)
+        for block_size in block_sizes:
+
+            key = "%s.queue-depth.%s.block-size.%s.read.latency" % \
+                (workload, queue_depth, block_size)
+
+            if key in results:
+                print key + "=" + str(results[key])
+                rlatencies.append(results[key] / 1000)
+            key = "%s.queue-depth.%s.block-size.%s.write.latency" % \
+                (workload, queue_depth, block_size)
+            if key in results:
+                wlatencies.append(results[key] / 1000)
+
+    chart = Barchart()
+    chart.barchart3d(queue_depths, block_sizes, read_latencies, 'g',
+                     'Read Latency (ms)')
+    readchart = chart.to_base64_image()
+
+    chart.barchart3d(queue_depths, block_sizes, write_latencies, 'r',
+                     'Write Latency (ms)')
+    writechart = chart.to_base64_image()
+
+    html = """<html><body>%s <BR>
+    Number of VMs: %s <BR>
+    Cinder volume size per VM: %s (GB) <BR>
+    <center>Read Latency Report <BR>
+    <img src="data:image/png;base64,%s"/>
+    <center>Write Latency Report <BR>
+    <img src="data:image/png;base64,%s"/>
+    </body></html>""" % (job_id,
+                         params['agent_count'],
+                         params['volume_size'],
+                         readchart,
+                         writechart,
+                         )
+
+    return html
+
+
 @swagger.model
 class ConfigurationRequestModel:
     resource_fields = {
@@ -123,6 +204,8 @@ class WorkloadModel:
         'nossd': fields.String,
         'nowarm': fields.String,
         'workload': fields.String,
+        'queue_depths': fields.String,
+        'block_sizes': fields.String
     }
 
 
@@ -210,6 +293,10 @@ class Job(Resource):
                 storperf.precondition = False
             if ('nowarm' in request.json):
                 storperf.warm_up = False
+            if ('queue_depths' in request.json):
+                storperf.queue_depths = request.json['queue_depths']
+            if ('block_sizes' in request.json):
+                storperf.block_sizes = request.json['block_sizes']
             if ('workload' in request.json):
                 storperf.workloads = request.json['workload']
             else:
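
Taken together, the change exposes two client-facing pieces: the Job POST body now accepts 'queue_depths' and 'block_sizes' strings, and a new GET route at /results/<job_id> returns an HTML page with base64-embedded latency bar charts. A rough client-side sketch of how both might be exercised follows; the host/port, the job resource path and the shape of the POST response are assumptions for illustration and are not defined in this diff.

# Hypothetical client sketch for the API additions above.  The host/port,
# the job resource path and the response field names are assumptions;
# only the /results/<job_id> route and the 'queue_depths'/'block_sizes'
# request fields come from this change.
import requests

BASE = "http://127.0.0.1:5000"          # assumed address of the REST server

job = {
    "target": "/dev/vdb",               # example values only
    "workload": "rw",
    "queue_depths": "1,4,8",            # new field handled by Job.post
    "block_sizes": "512,4096,16384",    # new field handled by Job.post
}

# Assumed job resource path; check api.add_resource() in rest_server.py
# for the actual route registered for the Job resource.
resp = requests.post(BASE + "/api/v1.0/job", json=job)
job_id = resp.json().get("job_id")      # response shape is assumed

# The new results page renders read/write latency charts as inline
# base64 PNGs inside a single HTML document.
report = requests.get("%s/results/%s" % (BASE, job_id))
with open("report.html", "w") as out:
    out.write(report.text)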