Diffstat (limited to 'docker')
-rw-r--r--  docker/storperf-graphite/Dockerfile                            |   2
-rw-r--r--  docker/storperf-httpfrontend/Dockerfile                        |   2
-rw-r--r--  docker/storperf-master/Dockerfile                              |   2
-rw-r--r--  docker/storperf-master/rest_server.py                          |   8
-rw-r--r--  docker/storperf-master/storperf/db/test_results_db.py          |   5
-rw-r--r--  docker/storperf-master/storperf/fio/fio_invoker.py             |  29
-rw-r--r--  docker/storperf-master/storperf/storperf_master.py             |  75
-rw-r--r--  docker/storperf-master/storperf/test_executor.py               | 173
-rw-r--r--  docker/storperf-reporting/Dockerfile                           |   2
-rw-r--r--  docker/storperf-reporting/src/app.py                           |   8
-rw-r--r--  docker/storperf-reporting/src/static/testdata/local-multi.json | 831
-rw-r--r--  docker/storperf-reporting/src/templates/plot_multi_data.html   |  16
-rw-r--r--  docker/storperf-swaggerui/Dockerfile                           |   2
13 files changed, 1040 insertions(+), 115 deletions(-)
diff --git a/docker/storperf-graphite/Dockerfile b/docker/storperf-graphite/Dockerfile
index 2bea2c8..b566458 100644
--- a/docker/storperf-graphite/Dockerfile
+++ b/docker/storperf-graphite/Dockerfile
@@ -14,7 +14,7 @@
 # From https://github.com/SchweizerischeBundesbahnen/docker-graphite
 
-ARG ARCH
+ARG ARCH=x86_64
 ARG ALPINE_VERSION=v3.5
 FROM multiarch/alpine:$ARCH-$ALPINE_VERSION

diff --git a/docker/storperf-httpfrontend/Dockerfile b/docker/storperf-httpfrontend/Dockerfile
index b37943f..9b5b5f9 100644
--- a/docker/storperf-httpfrontend/Dockerfile
+++ b/docker/storperf-httpfrontend/Dockerfile
@@ -12,7 +12,7 @@
 # $ docker build -t opnfv/storperf-frontend:tag .
 ##
 
-ARG ARCH
+ARG ARCH=x86_64
 ARG ALPINE_VERSION=v3.6
 FROM multiarch/alpine:$ARCH-$ALPINE_VERSION

diff --git a/docker/storperf-master/Dockerfile b/docker/storperf-master/Dockerfile
index 38bd231..eaaf811 100644
--- a/docker/storperf-master/Dockerfile
+++ b/docker/storperf-master/Dockerfile
@@ -15,7 +15,7 @@
 # $ docker build -t opnfv/storperf-master:tag .
 #
 
-ARG ARCH
+ARG ARCH=x86_64
 ARG ALPINE_VERSION=v3.6
 FROM multiarch/alpine:$ARCH-$ALPINE_VERSION as storperf-builder

diff --git a/docker/storperf-master/rest_server.py b/docker/storperf-master/rest_server.py
index 6da2004..0634b8f 100644
--- a/docker/storperf-master/rest_server.py
+++ b/docker/storperf-master/rest_server.py
@@ -64,7 +64,9 @@ class ConfigurationRequestModel:
         'agent_image': fields.String,
         'public_network': fields.String,
         'volume_size': fields.Integer,
-        'availability_zone': fields.String
+        'availability_zone': fields.String,
+        'username': fields.String,
+        'password': fields.String
     }
 
@@ -137,6 +139,10 @@ class Configure(Resource):
             storperf.volume_size = request.json['volume_size']
         if ('availability_zone' in request.json):
             storperf.availabilty_zone = request.json['availability_zone']
+        if ('username' in request.json):
+            storperf.username = request.json['username']
+        if ('password' in request.json):
+            storperf.password = request.json['password']
 
         storperf.create_stack()
         if storperf.stack_id is None:
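The two new `ConfigurationRequestModel` fields above let a caller hand StorPerf login credentials for agent VMs whose images do not carry the bundled `storperf` SSH key. A minimal client sketch is shown below; it assumes the usual flask-restful configurations endpoint (`/api/v1.0/configurations`) and a reachable storperf-master, and the host, image name, and credentials are placeholders, not values taken from this change.

```python
import json

import requests

# Hypothetical endpoint/host; adjust for your storperf-master deployment.
BASE_URL = "http://storperf-master:5000/api/v1.0/configurations"

payload = {
    "agent_count": 2,
    "agent_image": "cirros-0.4.0",        # placeholder image name
    "public_network": "external",
    "volume_size": 2,
    # New fields from this change: password login instead of the baked-in key.
    "username": "cirros",
    "password": "cubswin:)",
}

response = requests.post(BASE_URL, json=payload)
response.raise_for_status()
print(json.dumps(response.json(), indent=2))
```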
diff --git a/docker/storperf-master/storperf/db/test_results_db.py b/docker/storperf-master/storperf/db/test_results_db.py
index d6aabee..9c87e32 100644
--- a/docker/storperf-master/storperf/db/test_results_db.py
+++ b/docker/storperf-master/storperf/db/test_results_db.py
@@ -35,7 +35,6 @@ def push_results_to_db(db_url, details, logger):
         logger.debug(r.content)
         return json.loads(r.content)
     except Exception:
-        if logger:
-            logger.exception("Error [push_results_to_db('%s', '%s', '%s')]:" %
-                             (db_url, params, details['details']))
+        logger.exception("Error [push_results_to_db('%s', '%s')]:" %
+                         (db_url, params))
         return None

diff --git a/docker/storperf-master/storperf/fio/fio_invoker.py b/docker/storperf-master/storperf/fio/fio_invoker.py
index 106696d..0360ea2 100644
--- a/docker/storperf-master/storperf/fio/fio_invoker.py
+++ b/docker/storperf-master/storperf/fio/fio_invoker.py
@@ -15,13 +15,14 @@ import paramiko
 
 class FIOInvoker(object):
 
-    def __init__(self):
+    def __init__(self, var_dict={}):
         self.logger = logging.getLogger(__name__)
         self.event_listeners = set()
         self.event_callback_ids = set()
         self._remote_host = None
         self.callback_id = None
         self.terminated = False
+        self.metadata = var_dict
 
     @property
     def remote_host(self):
@@ -90,11 +91,7 @@ class FIOInvoker(object):
             self.logger.debug("Finished")
 
     def execute(self, args=[]):
-        ssh = paramiko.SSHClient()
-        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        ssh.connect(self.remote_host, username='storperf',
-                    key_filename='storperf/resources/ssh/storperf_rsa',
-                    timeout=2)
+        ssh = self._ssh_client()
 
         command = "sudo ./fio " + ' '.join(args)
         self.logger.debug("Remote command: %s" % command)
@@ -133,11 +130,7 @@ class FIOInvoker(object):
         self.logger.debug("Terminating fio on " + self.remote_host)
         self.terminated = True
 
-        ssh = paramiko.SSHClient()
-        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        ssh.connect(self.remote_host, username='storperf',
-                    key_filename='storperf/resources/ssh/storperf_rsa',
-                    timeout=2)
+        ssh = self._ssh_client()
 
         command = "sudo killall fio"
 
@@ -151,3 +144,17 @@ class FIOInvoker(object):
 
         stdout.close()
         stderr.close()
+
+    def _ssh_client(self):
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        if 'username' in self.metadata and 'password' in self.metadata:
+            ssh.connect(self.remote_host,
+                        username=self.metadata['username'],
+                        password=self.metadata['password'])
+            return ssh
+        else:
+            ssh.connect(self.remote_host, username='storperf',
+                        key_filename='storperf/resources/ssh/storperf_rsa',
+                        timeout=2)
+            return ssh
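The duplicated paramiko setup in `execute()` and `terminate()` now goes through a single `_ssh_client()` helper that prefers password authentication whenever the job metadata carries both `username` and `password`, and otherwise falls back to the bundled `storperf` key. A standalone sketch of that selection logic is shown below; `open_ssh` and its arguments are illustrative, not part of the StorPerf API.

```python
import paramiko


def open_ssh(remote_host, metadata,
             key_file="storperf/resources/ssh/storperf_rsa"):
    """Mirror of FIOInvoker._ssh_client: password login when credentials were
    supplied in the job metadata, otherwise the bundled storperf key."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if 'username' in metadata and 'password' in metadata:
        ssh.connect(remote_host,
                    username=metadata['username'],
                    password=metadata['password'])
    else:
        ssh.connect(remote_host, username='storperf',
                    key_filename=key_file, timeout=2)
    return ssh


# Usage sketch: metadata comes from the /configurations payload via the
# test executor, e.g. {"username": "cirros", "password": "cubswin:)"}.
# ssh = open_ssh("192.0.2.10", {"username": "cirros", "password": "cubswin:)"})
```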
diff --git a/docker/storperf-master/storperf/storperf_master.py b/docker/storperf-master/storperf/storperf_master.py
index 4e99e57..8a67048 100644
--- a/docker/storperf-master/storperf/storperf_master.py
+++ b/docker/storperf-master/storperf/storperf_master.py
@@ -257,6 +257,36 @@ class StorPerfMaster(object):
             'workloads',
             str(self._test_executor.workload_modules))
 
+    @property
+    def username(self):
+        return self.configuration_db.get_configuration_value(
+            'stack',
+            'username'
+        )
+
+    @username.setter
+    def username(self, value):
+        self.configuration_db.set_configuration_value(
+            'stack',
+            'username',
+            value
+        )
+
+    @property
+    def password(self):
+        return self.configuration_db.get_configuration_value(
+            'stack',
+            'password'
+        )
+
+    @password.setter
+    def password(self, value):
+        self.configuration_db.set_configuration_value(
+            'stack',
+            'password',
+            value
+        )
+
     def get_logs(self, lines=None):
         LOG_DIR = './storperf.log'
 
@@ -354,6 +384,9 @@ class StorPerfMaster(object):
         params['agent_count'] = self.agent_count
         params['public_network'] = self.public_network
         params['volume_size'] = self.volume_size
+        if self.username and self.password:
+            params['username'] = self.username
+            params['password'] = self.password
         job_id = self._test_executor.execute(params)
 
         return job_id
@@ -424,14 +457,50 @@ class StorPerfMaster(object):
 
         ssh = paramiko.SSHClient()
         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-        ssh.connect(slave, username='storperf',
-                    key_filename='storperf/resources/ssh/storperf_rsa',
-                    timeout=2)
+        if self.username and self.password:
+            ssh.connect(slave,
+                        username=self.username,
+                        password=self.password)
+        else:
+            ssh.connect(slave, username='storperf',
+                        key_filename='storperf/resources/ssh/storperf_rsa',
+                        timeout=2)
+
+        available = self._check_root_fs(ssh)
+        logger.debug("Available space on / is %s" % available)
+        if available < 65536:
+            logger.warn("Root filesystem is too small, attemping resize")
+            self._resize_root_fs(ssh, logger)
+
+            available = self._check_root_fs(ssh)
+            logger.debug("Available space on / is now %s" % available)
+            if available < 65536:
+                logger.error("Cannot create enough space on /")
+                raise Exception("Root filesystem has only %s free" %
+                                available)
 
         scp = SCPClient(ssh.get_transport())
         logger.debug("Transferring fio to %s" % slave)
         scp.put('/usr/local/bin/fio', '~/')
 
+    def _check_root_fs(self, ssh):
+        (_, stdout, _) = ssh.exec_command("df /")
+        stdout.readline()
+        lines = stdout.readline().split()
+        if len(lines) > 4:
+            available = lines[3]
+            return int(available)
+
+    def _resize_root_fs(self, ssh, logger):
+        command = "sudo /usr/sbin/resize2fs /dev/vda1"
+        logger.info("Attempting %s" % command)
+        (_, stdout, stderr) = ssh.exec_command(command)
+        stdout.channel.recv_exit_status()
+        for line in iter(stdout.readline, b''):
+            logger.info(line)
+        for line in iter(stderr.readline, b''):
+            logger.error(line)
+
     def _make_parameters(self):
         heat_parameters = {}
         heat_parameters['public_network'] = self.public_network
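`_check_root_fs()` skips the `df /` header line and reads the Available column, which `df` reports in 1K blocks, so the 65536 threshold above corresponds to roughly 64 MiB of free space for the transferred fio binary. A rough sketch of that parsing step against canned output (no SSH channel) follows; the sample filesystem numbers are illustrative only.

```python
def parse_df_available(df_output):
    """Parse the 'Available' column (1K blocks) from `df /` output, the same
    way _check_root_fs does after skipping the header line."""
    lines = df_output.splitlines()
    fields = lines[1].split()   # second line describes the root filesystem
    if len(fields) > 4:
        return int(fields[3])   # Filesystem, 1K-blocks, Used, Available, ...
    return None


sample = (
    "Filesystem     1K-blocks   Used Available Use% Mounted on\n"
    "/dev/vda1          23797  18751      3817  84% /\n"   # illustrative numbers
)

available = parse_df_available(sample)
# Compare against the 65536 KiB (64 MiB) threshold used above before
# attempting the resize2fs fallback.
print(available, "KiB free on /")
```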
diff --git a/docker/storperf-master/storperf/test_executor.py b/docker/storperf-master/storperf/test_executor.py
index 3d1d9f2..4c2c972 100644
--- a/docker/storperf-master/storperf/test_executor.py
+++ b/docker/storperf-master/storperf/test_executor.py
@@ -235,25 +235,90 @@ class TestExecutor(object):
         self.start_time = time.time()
         self.workload_status = {}
-        # Prepare stats list
-        for workload_module in self.workload_modules:
-            workload_name = getattr(workload_module, "__name__")
-            blocksizes = self._block_sizes
-            iodepths = self._queue_depths
-            for blocksize in blocksizes:
-                for iodepth in iodepths:
-                    name = '%s.%s.queue-depth.%s.block-size.%s' % \
-                        (self.job_db.job_id, workload_name, iodepth, blocksize)
-                    self.workload_status[name] = "Pending"
+
+        workloads = self._create_workload_matrix()
+
+        for current_workload in workloads:
+            workload = current_workload['workload']
+            self._thread_gate = ThreadGate(len(self.slaves),
+                                           workload.options['status-interval'])
+
+            if self._terminated:
+                return
+            self.current_workload = current_workload['name']
+
+            self.logger.info("Starting run %s" % self.current_workload)
+            self.workload_status[self.current_workload] = "Running"
+
+            scheduler = sched.scheduler(time.time, time.sleep)
+            if self.deadline is not None \
+                    and not current_workload['workload_name'].startswith("_"):
+                event = scheduler.enter(self.deadline * 60, 1,
+                                        self.terminate_current_run,
+                                        ())
+                t = Thread(target=scheduler.run, args=())
+                t.start()
+
+            workload.options['iodepth'] = str(current_workload['queue-depth'])
+            workload.options['bs'] = str(current_workload['blocksize'])
+
+            slave_threads = []
+            for slave in self.slaves:
+                slave_workload = copy.copy(current_workload['workload'])
+                slave_workload.remote_host = slave
+
+                self._workload_executors.append(slave_workload)
+
+                t = Thread(target=self.execute_on_node,
+                           args=(slave_workload,),
+                           name="%s worker" % slave)
+                t.daemon = False
+                t.start()
+                slave_threads.append(t)
+
+            for slave_thread in slave_threads:
+                self.logger.debug("Waiting on %s" % slave_thread)
+                slave_thread.join()
+                self.logger.debug("Done waiting for %s" % slave_thread)
+
+            if not scheduler.empty():
+                try:
+                    scheduler.cancel(event)
+                except ValueError:
+                    pass
+
+            self.logger.info("Completed run %s"
+                             % self.current_workload)
+            self.workload_status[self.current_workload] = "Completed"
+            self._workload_executors = []
+            self.current_workload = None
+
+        self.logger.info("Completed job %s" % (self.job_db.job_id))
+
+        self.end_time = time.time()
+        self._terminated = True
+        self.broadcast_event()
+        self.unregister(data_handler.data_event)
+        report = {'report': json.dumps(self.metadata)}
+        self.job_db.record_workload_params(report)
+        self.job_db.job_id = None
+        if self.result_url is not None:
+            self.logger.info("Results can be found at %s" % self.result_url)
+
+    def _create_workload_matrix(self):
+        workloads = []
 
         for workload_module in self.workload_modules:
             workload_name = getattr(workload_module, "__name__")
-            self.logger.info("Starting workload %s" % (workload_name))
 
             constructorMethod = getattr(workload_module, workload_name)
             workload = constructorMethod()
             if (self.filename is not None):
                 workload.filename = self.filename
+            workload.id = self.job_db.job_id
+
+            if (self.filename is not None):
+                workload.filename = self.filename
 
             if (workload_name.startswith("_")):
                 iodepths = [8, ]
@@ -262,85 +327,29 @@ class TestExecutor(object):
                 iodepths = self._queue_depths
                 blocksizes = self._block_sizes
 
-            workload.id = self.job_db.job_id
-            self._thread_gate = ThreadGate(len(self.slaves),
-                                           workload.options['status-interval'])
-
             for blocksize in blocksizes:
                 for iodepth in iodepths:
-                    if self._terminated:
-                        return
-                    self.current_workload = (
-                        "%s.%s.queue-depth.%s.block-size.%s"
-                        % (self.job_db.job_id,
-                           workload_name,
-                           iodepth,
-                           blocksize))
-
-                    self.logger.info("Starting run %s" % self.current_workload)
-                    self.workload_status[self.current_workload] = "Running"
-
-                    scheduler = sched.scheduler(time.time, time.sleep)
-                    if self.deadline is not None \
-                            and not workload_name.startswith("_"):
-                        event = scheduler.enter(self.deadline * 60, 1,
-                                                self.terminate_current_run,
-                                                ())
-                        t = Thread(target=scheduler.run, args=())
-                        t.start()
-
-                    workload.options['iodepth'] = str(iodepth)
-                    workload.options['bs'] = str(blocksize)
-
-                    slave_threads = []
-                    for slave in self.slaves:
-                        slave_workload = copy.copy(workload)
-                        slave_workload.remote_host = slave
-
-                        self._workload_executors.append(slave_workload)
-
-                        t = Thread(target=self.execute_on_node,
-                                   args=(slave_workload,),
-                                   name="%s worker" % slave)
-                        t.daemon = False
-                        t.start()
-                        slave_threads.append(t)
-
-                    for slave_thread in slave_threads:
-                        self.logger.debug("Waiting on %s" % slave_thread)
-                        slave_thread.join()
-                        self.logger.debug("Done waiting for %s" % slave_thread)
-
-                    if not scheduler.empty():
-                        try:
-                            scheduler.cancel(event)
-                        except ValueError:
-                            pass
-
-                    self.logger.info("Completed run %s"
-                                     % self.current_workload)
-                    self.workload_status[self.current_workload] = "Completed"
-                    self._workload_executors = []
-                    self.current_workload = None
-
-            self.logger.info("Completed workload %s" % (workload_name))
-        self.logger.info("Completed job %s" % (self.job_db.job_id))
+                    name = '%s.%s.queue-depth.%s.block-size.%s' % \
+                        (self.job_db.job_id, workload_name, iodepth, blocksize)
+                    self.workload_status[name] = "Pending"
 
-        if self.result_url is not None:
-            self.logger.info("Results can be found at %s" % self.result_url)
+                    parameters = {'queue-depth': iodepth,
+                                  'blocksize': blocksize,
+                                  'name': name,
+                                  'workload_name': workload_name,
+                                  'status': 'Pending',
+                                  'workload': workload}
 
-        self.end_time = time.time()
-        self._terminated = True
-        self.broadcast_event()
-        self.unregister(data_handler.data_event)
-        report = {'report': json.dumps(self.metadata)}
-        self.job_db.record_workload_params(report)
-        self.job_db.job_id = None
+                    self.logger.info("Workload %s=%s" % (name, parameters))
+
+                    workloads.append(parameters)
+
+        return workloads
 
     def execute_on_node(self, workload):
 
-        invoker = FIOInvoker()
+        invoker = FIOInvoker(self.metadata)
         invoker.register(self.event)
         workload.invoker = invoker
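`_create_workload_matrix()` factors the old nested loops out of `execute()`: every workload module is expanded into one named entry per (queue depth, block size) combination, and the run loop then simply walks that list. A simplified sketch with plain dicts standing in for the workload objects is shown below; the fixed warm-up block size is an assumption taken from the sample results file added later in this change, since only the queue depth appears in the hunk above.

```python
def create_workload_matrix(job_id, workload_names, queue_depths, block_sizes):
    """Simplified view of TestExecutor._create_workload_matrix: expand every
    workload into one entry per (queue depth, block size) combination."""
    workloads = []
    for workload_name in workload_names:
        # Warm-up style workloads (leading underscore) run a single fixed
        # shape; 16384 matches the "_warm_up" run in the sample JSON below.
        iodepths = [8] if workload_name.startswith("_") else queue_depths
        blocksizes = [16384] if workload_name.startswith("_") else block_sizes
        for blocksize in blocksizes:
            for iodepth in iodepths:
                name = '%s.%s.queue-depth.%s.block-size.%s' % (
                    job_id, workload_name, iodepth, blocksize)
                workloads.append({'queue-depth': iodepth,
                                  'blocksize': blocksize,
                                  'name': name,
                                  'workload_name': workload_name,
                                  'status': 'Pending'})
    return workloads


# e.g. create_workload_matrix("job-1", ["_warm_up", "rw"], [1, 8], [2048, 16384])
# yields one warm-up entry plus four "rw" entries, using the same run-name
# pattern that appears in the sample results file below.
```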
diff --git a/docker/storperf-reporting/Dockerfile b/docker/storperf-reporting/Dockerfile
index 6f20e7b..ac507a6 100644
--- a/docker/storperf-reporting/Dockerfile
+++ b/docker/storperf-reporting/Dockerfile
@@ -15,7 +15,7 @@
 ##
 
-ARG ARCH
+ARG ARCH=x86_64
 ARG ALPINE_VERSION=v3.6
 FROM multiarch/alpine:$ARCH-$ALPINE_VERSION
 MAINTAINER Mark Beierl <mark.beierl@dell.com>

diff --git a/docker/storperf-reporting/src/app.py b/docker/storperf-reporting/src/app.py
index 8ee04b8..e2d889d 100644
--- a/docker/storperf-reporting/src/app.py
+++ b/docker/storperf-reporting/src/app.py
@@ -7,11 +7,15 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import json
+import urllib
+
 from flask import Flask, redirect, url_for, request, render_template, session
 from flask import send_from_directory, flash
-import urllib
+
 import validators
-import json
+
+
 app = Flask(__name__)
 app.secret_key = 'storperf_graphing_module'

diff --git a/docker/storperf-reporting/src/static/testdata/local-multi.json b/docker/storperf-reporting/src/static/testdata/local-multi.json
new file mode 100644
index 0000000..78797af
--- /dev/null
+++ b/docker/storperf-reporting/src/static/testdata/local-multi.json
@@ -0,0 +1,831 @@
+{
+  "results": [
+    {
+      "_id": "6cc1c6c5-f660-4ab0-bf91-9ae0324846c2",
+      "agent_count": 1,
+      "build_tag": "Unknown",
+      "case_name": "Unknown",
+      "criteria": "FAIL",
+      "details": {
+        "metrics": {
+          "_warm_up.queue-depth.8.block-size.16384.read.bw": 0.0,
+          "_warm_up.queue-depth.8.block-size.16384.read.iops": 0.0,
+          "_warm_up.queue-depth.8.block-size.16384.read.lat_ns.mean": 0.0,
+          "_warm_up.queue-depth.8.block-size.16384.write.bw": 4547.0,
+          "_warm_up.queue-depth.8.block-size.16384.write.iops": 284.27710866666666,
+          "_warm_up.queue-depth.8.block-size.16384.write.lat_ns.mean": 28133208.4602,
+          "rw.queue-depth.1.block-size.2048.read.bw": 325.1,
+          "rw.queue-depth.1.block-size.2048.read.iops": 162.77480539999996,
+          "rw.queue-depth.1.block-size.2048.read.lat_ns.mean": 757589.6947685999,
+          "rw.queue-depth.1.block-size.2048.write.bw": 138.6,
+          "rw.queue-depth.1.block-size.2048.write.iops": 69.55300399999999,
+          "rw.queue-depth.1.block-size.2048.write.lat_ns.mean": 12599603.595169999
+        },
+        "report_data": {
+          "_warm_up.queue-depth.8.block-size.16384": {
+            "bw": {
+              "read": {"average": 0.0, "range": 0.0, "series": [[1, 0.0], [2, 0.0], [3, 0.0]], "slope": -0.0, "steady_state": false},
+              "write": {"average": 4547.0, "range": 44.0, "series": [[1, 4529.0], [2, 4539.0], [3, 4573.0]], "slope": 22.0, "steady_state": false}
+            },
+            "iops": {
+              "read": {"average": 0.0, "range": 0.0, "series": [[1, 0.0], [2, 0.0], [3, 0.0]], "slope": -0.0, "steady_state": false},
+              "write": {"average": 284.27710866666666, "range": 2.618600000000015, "series": [[1, 283.205342], [2, 283.802042], [3, 285.823942]], "slope": 1.3092999999998938, "steady_state": false}
+            },
+            "lat_ns.mean": {
+              "read": {"average": 0.0, "range": 0.0, "series": [[1, 0.0], [2, 0.0], [3, 0.0]], "slope": -0.0, "steady_state": false},
+              "write": {"average": 28133208.4602, "range": 237517.34149999917, "series": [[1, 28224860.6161], [2, 28187421.4899], [3, 27987343.2746]], "slope": -118758.670750012, "steady_state": false}
+            }
+          },
+          "rw.queue-depth.1.block-size.2048": {
+            "bw": {
+              "read": {"average": 325.1, "range": 23.0, "series": [[0, 311.0], [1, 321.0], [2, 330.0], [3, 320.0], [4, 318.0], [5, 324.0], [6, 331.0], [7, 330.0], [8, 332.0], [9, 334.0]], "slope": 1.9575757575757575, "steady_state": true},
+              "write": {"average": 138.6, "range": 8.0, "series": [[0, 134.0], [1, 136.0], [2, 141.0], [3, 136.0], [4, 136.0], [5, 138.0], [6, 140.0], [7, 141.0], [8, 142.0], [9, 142.0]], "slope": 0.7757575757575758, "steady_state": true}
+            },
+            "iops": {
+              "read": {"average": 162.77480539999996, "range": 11.549119999999988, "series": [[0, 155.735105], [1, 160.742878], [2, 165.048328], [3, 160.166051], [4, 159.205937], [5, 162.22321], [6, 165.548626], [7, 165.358843], [8, 166.434851], [9, 167.284225]], "slope": 0.9969906909091073, "steady_state": true},
+              "write": {"average": 69.55300399999999, "range": 4.193179999999998, "series": [[0, 67.175267], [1, 68.425178], [2, 70.538081], [3, 68.458608], [4, 68.110134], [5, 69.288473], [6, 70.459194], [7, 70.559996], [8, 71.146662], [9, 71.368447]], "slope": 0.3883556363636418, "steady_state": true}
+            },
+            "lat_ns.mean": {
+              "read": {"average": 757589.6947685999, "range": 57973.07178999996, "series": [[0, 728154.347518], [1, 720469.143139], [2, 736655.887692], [3, 741840.50458], [4, 769845.856565], [5, 777245.805254], [6, 771668.148592], [7, 778442.214929], [8, 775110.161108], [9, 776464.878309]], "slope": 6806.644968557574, "steady_state": true},
+              "write": {"average": 12599603.595169999, "range": 1007779.9737999998, "series": [[0, 13189011.2609], [1, 12911322.2831], [2, 12442518.5818], [3, 12862580.5707], [4, 12871629.818], [5, 12601602.6661], [6, 12368342.1886], [7, 12336719.4401], [8, 12231077.8553], [9, 12181231.2871]], "slope": -97657.38645757502, "steady_state": true}
+            }
+          }
+        },
+        "steady_state": {
+          "_warm_up.queue-depth.8.block-size.16384": false,
+          "rw.queue-depth.1.block-size.2048": true
+        }
+      },
+      "duration": 831.1383128166199,
+      "end_date": "2017-09-01 18:17:52",
+      "end_time": "2017-09-01 18:17:52",
+      "installer": "apex",
+      "password": "cubswin:)",
+      "pod_name": "Unknown",
+      "project_name": "storperf",
+      "public_network": "external",
+      "scenario": "Unknown",
+      "start_date": "2017-09-01 18:04:01",
+      "start_time": "2017-09-01 18:04:01",
+      "test_case": "Unknown",
+      "timestart": 1504289041.533451,
+      "username": "cirros",
+      "version": "Unknown",
+      "volume_size": 1
+    },
+    {
+      "_id": "09669733-f8cd-41fe-b914-5c7bd2d9be2a",
+      "agent_count": 1,
+      "build_tag": "Unknown",
+      "case_name": "Unknown",
+      "criteria": "PASS",
+      "details": {
+        "metrics": {
+          "rw.queue-depth.8.block-size.2048.read.bw": 1216.6363636363637,
+          "rw.queue-depth.8.block-size.2048.read.iops": 608.6309282727273,
+          "rw.queue-depth.8.block-size.2048.read.lat_ns.mean": 764497.1991469089,
+          "rw.queue-depth.8.block-size.2048.write.bw": 522.2727272727273,
+          "rw.queue-depth.8.block-size.2048.write.iops": 261.4501354545455,
+          "rw.queue-depth.8.block-size.2048.write.lat_ns.mean": 28815276.88039091
+        },
+        "report_data": {
+          "rw.queue-depth.8.block-size.2048": {
+            "bw": {
+              "read": {"average": 1216.6363636363637, "range": 27.0, "series": [[4, 1199.0], [5, 1208.0], [6, 1219.0], [7, 1221.0], [8, 1226.0], [9, 1225.0], [10, 1221.0], [11, 1212.0], [12, 1213.0], [13, 1218.0], [14, 1221.0]], "slope": 0.990909090909091, "steady_state": true},
+              "write": {"average": 522.2727272727273, "range": 13.0, "series": [[4, 514.0], [5, 519.0], [6, 523.0], [7, 524.0], [8, 526.0], [9, 527.0], [10, 524.0], [11, 520.0], [12, 521.0], [13, 523.0], [14, 524.0]], "slope": 0.45454545454545453, "steady_state": true}
+            },
+            "iops": {
+              "read": {"average": 608.6309282727273, "range": 13.495996999999988, "series": [[4, 599.818618], [5, 604.079727], [6, 609.527778], [7, 610.975561], [8, 613.314615], [9, 612.93468], [10, 610.557626], [11, 606.445349], [12, 606.979571], [13, 609.426323], [14, 610.880363]], "slope": 0.5203006818181496, "steady_state": true},
+              "write": {"average": 261.4501354545455, "range": 6.084462999999971, "series": [[4, 257.462848], [5, 259.786734], [6, 261.688853], [7, 262.425017], [8, 263.456851], [9, 263.547311], [10, 262.429939], [11, 260.32799], [12, 260.663981], [13, 261.654327], [14, 262.507639]], "slope": 0.22180677272724797, "steady_state": true}
+            },
+            "lat_ns.mean": {
+              "read": {"average": 764497.1991469089, "range": 140375.23192300007, "series": [[4, 848927.686926], [5, 797544.078184], [6, 757363.259077], [7, 728089.195358], [8, 708552.455003], [9, 754690.727534], [10, 793235.998106], [11, 780364.49709], [12, 762791.580243], [13, 745990.131378], [14, 731919.581717]], "slope": -5324.88366549084, "steady_state": true},
+              "write": {"average": 28815276.88039091, "range": 496288.23420000076, "series": [[4, 29090778.9543], [5, 28933952.7311], [6, 28802775.6777], [7, 28784924.3952], [8, 28709411.4788], [9, 28594490.7201], [10, 28634150.1367], [11, 28907630.8908], [12, 28909931.4169], [13, 28832309.7042], [14, 28767689.5785]], "slope": -13912.728362724998, "steady_state": true}
+            }
+          }
+        },
+        "steady_state": {
+          "rw.queue-depth.8.block-size.2048": true
+        }
+      },
+      "duration": 900.983824968338,
+      "end_date": "2017-09-01 18:34:36",
+      "end_time": "2017-09-01 18:34:36",
+      "installer": "apex",
+      "password": "cubswin:)",
+      "pod_name": "Unknown",
+      "project_name": "storperf",
+      "public_network": "external",
+      "scenario": "Unknown",
+      "start_date": "2017-09-01 18:19:35",
+      "start_time": "2017-09-01 18:19:35",
+      "test_case": "Unknown",
+      "timestart": 1504289975.091599,
+      "username": "cirros",
+      "version": "Unknown",
+      "volume_size": 1
+    }
+  ]
+}
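The fixture above mirrors what the reporting module receives from the results API: each entry is keyed by a unique `_id` and carries per-run `report_data` series plus `steady_state` verdicts, which is what the template change below switches to. A short sketch for inspecting the file locally follows; the relative path assumes the working directory is `docker/storperf-reporting/src`.

```python
import json

# Path assumes the working directory is docker/storperf-reporting/src.
with open("static/testdata/local-multi.json") as f:
    results = json.load(f)["results"]

for result in results:
    details = result["details"]
    print(result["_id"], result["criteria"])
    for run_name, steady in sorted(details["steady_state"].items()):
        # Each run exposes bw/iops/lat_ns.mean read and write series.
        series = details["report_data"][run_name]["bw"]["read"]["series"]
        print("  %-45s steady=%s samples=%d" % (run_name, steady, len(series)))
```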
\ No newline at end of file

diff --git a/docker/storperf-reporting/src/templates/plot_multi_data.html b/docker/storperf-reporting/src/templates/plot_multi_data.html
index 2edae9e..4205fd6 100644
--- a/docker/storperf-reporting/src/templates/plot_multi_data.html
+++ b/docker/storperf-reporting/src/templates/plot_multi_data.html
@@ -84,27 +84,27 @@ ul {
     function init(){
         page = [];
         report_data = results[0]["details"]["report_data"];
-        build_tag = [];
+        ids = [];
         text = "<a href='/reporting/'><button type='button' class='btn btn-default btn-lg'>Go back</button></a><br><br>";
         for ( var i = 0; i < results.length ; i++ ){
-            build_tag.push(results[i]["build_tag"]);
-            text += "<div class='row well' id='row-" + build_tag[i] + "'>";
+            ids.push(results[i]["_id"]);
+            text += "<div class='row well' id='row-" + ids[i] + "'>";
             text += "<h4> ID : " + results[i]["_id"] + "</h4>";
             text += "<h4> Start Date : " + results[i]["start_date"] + "</h4>";
             text += "<h4> Criteria : " + results[i]["criteria"] + "</h4>";
             text += "<h4> Build Tag : " + results[i]["build_tag"] + "</h4>";
-            text += "<button type='button' class='btn btn-default btn-lg' id='para-"+ build_tag[i] +
-                "' onclick=add_info('" + build_tag[i] + "')> Click here to view details </button>";
+            text += "<button type='button' class='btn btn-default btn-lg' id='para-"+ ids[i] +
+                "' onclick=add_info('" + ids[i] + "')> Click here to view details </button>";
             text += "</div>";
         }
         $("#content").html(text);
     }
-    function add_info(build_tag){
+    function add_info(ids){
         report_data = {};
         keys = [];
         var i = 0;
         for ( i = 0; i < results.length ; i++ ){
-            if( results[i]["build_tag"] == build_tag ){
+            if( results[i]["_id"] == ids ){
                 report_data = results[i]["details"]["report_data"];
                 break;
             }
@@ -113,7 +113,7 @@ ul {
             keys.push(k);
         }
         text = "";
-        text += "<div class='row well' id='row-" + build_tag[i] + "'>";
+        text += "<div class='row well' id='row-" + results[i]["_id"] + "'>";
         text += "<h4> ID : " + results[i]["_id"] + "</h4>";
         text += "<h4> Start Date : " + results[i]["start_date"] + "</h4>";
         text += "<h4> Criteria : " + results[i]["criteria"] + "</h4>";

diff --git a/docker/storperf-swaggerui/Dockerfile b/docker/storperf-swaggerui/Dockerfile
index 9795c47..14c9fe1 100644
--- a/docker/storperf-swaggerui/Dockerfile
+++ b/docker/storperf-swaggerui/Dockerfile
@@ -12,7 +12,7 @@
 # $ docker build -t opnfv/storperf-swaggerui:tag .
 ##
 
-ARG ARCH
+ARG ARCH=x86_64
 ARG ALPINE_VERSION=v3.6
 FROM multiarch/alpine:$ARCH-$ALPINE_VERSION