author     Mark Beierl <mark.beierl@emc.com>   2016-05-04 22:53:07 -0400
committer  Mark Beierl <mark.beierl@emc.com>   2016-05-05 14:58:39 -0400
commit     05e863781ce6746fabec176d1fc5f7454f2cdd73 (patch)
tree       0ff7f2aa9e55b33c3f95c0521bbd3991a9e4e2c0
parent     1e0544d70dabed4f33e0624cb4a7cde4c8c6b691 (diff)
Add Stats report and Swagger UI
Add Swagger web UI at /swagger.
Add ability to fetch read/write latency stats via the ReST API.
Can now delete a stack that was removed from OpenStack but not from the StorPerf DB.
Change to use floating IPs instead of private IPs.
Fix delete bug where there was no dependency on resources in the resource group.

JIRA: STORPERF-19
JIRA: STORPERF-20

Change-Id: I0a4b3386789c38d6745906ba896b8ff851dc122f
Signed-off-by: Mark Beierl <mark.beierl@emc.com>
-rwxr-xr-x  ci/verify.sh                                             |  7
-rw-r--r--  cli.py                                                   |  2
-rw-r--r--  docker/requirements.pip                                  |  6
-rw-r--r--  rest_server.py                                           | 98
-rw-r--r--  storperf/carbon/converter.py                             |  4
-rw-r--r--  storperf/db/configuration_db.py                          |  2
-rw-r--r--  storperf/db/graphite_db.py                               | 81
-rw-r--r--  storperf/db/job_db.py                                    | 47
-rw-r--r--  storperf/resources/hot/agent-group.yaml                  | 47
-rw-r--r--  storperf/resources/hot/storperf-agent.yaml               | 28
-rw-r--r--  storperf/storperf_master.py                              | 46
-rw-r--r--  storperf/test_executor.py                                | 14
-rw-r--r--  storperf/tests/__init__.py                               |  8
-rw-r--r--  storperf/tests/carbon_tests/emitter_test.py              |  4
-rw-r--r--  storperf/tests/carbon_tests/json_to_carbon_test.py       | 31
-rw-r--r--  storperf/tests/db_tests/__init__.py                      |  8
-rw-r--r--  storperf/tests/db_tests/graphite_db_test.py              | 47
-rw-r--r--  storperf/tests/storperf_master_test.py                   |  4
-rw-r--r--  storperf/tests/workload_tests/__init__.py                |  8
-rw-r--r--  storperf/tests/workload_tests/workload_subclass_test.py  | 12
-rw-r--r--  storperf/workloads/_base_workload.py                     | 11
21 files changed, 388 insertions(+), 127 deletions(-)
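
As a rough usage sketch (not part of this commit): with the patched master running, the new latency report and the Swagger UI can be exercised roughly as below. The host, port, and the Job resource's route are assumptions here, since the api.add_resource() calls live in the unchanged part of rest_server.py.

import requests

# Assumed base URL; the actual host/port depend on how rest_server.py is launched.
BASE = "http://127.0.0.1:5000"

# Fetch averaged read/write latency for a finished run (GET handler added below).
resp = requests.get(BASE + "/job",
                    params={"id": "32d31724-fac1-44f3-9033-ca8e00066a36"})
print(resp.json())

# The interactive API documentation added by this commit is served under /swagger.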
diff --git a/ci/verify.sh b/ci/verify.sh
index 70ecb6a..416d9e4 100755
--- a/ci/verify.sh
+++ b/ci/verify.sh
@@ -19,17 +19,18 @@ virtualenv $WORKSPACE/storperf_venv
source $WORKSPACE/storperf_venv/bin/activate
pip install setuptools
-pip install autoflake=00.6.6
+pip install autoflake==0.6.6
pip install autopep8==1.2.2
pip install coverage==4.0.3
pip install flask==0.10
pip install flask-restful==0.3.5
+pip install flask-restful-swagger==0.19
+pip install flask-swagger==0.2.12
pip install funcsigs==0.4
pip install flake8==2.5.4
pip install html2text==2016.1.8
pip install mock==1.3.0
pip install nose==1.3.7
-pip install pysqlite==2.8.2
pip install python-cinderclient==1.6.0
pip install python-glanceclient==1.1.0
pip install python-heatclient==0.8.0
@@ -53,4 +54,4 @@ rc=$?
deactivate
-exit $rc
\ No newline at end of file
+exit $rc
diff --git a/cli.py b/cli.py
index f2fa178..fad275c 100644
--- a/cli.py
+++ b/cli.py
@@ -154,7 +154,7 @@ def main(argv=None):
raise Usage(content['message'])
if (report is not None):
- print storperf.fetch_results(report, workload)
+ print storperf.fetch_results(report)
else:
print "Calling start..."
response = requests.post(
diff --git a/docker/requirements.pip b/docker/requirements.pip
index c6f262c..89f19ae 100644
--- a/docker/requirements.pip
+++ b/docker/requirements.pip
@@ -5,6 +5,8 @@ python-novaclient==2.28.1
python-glanceclient==1.1.0
python-cinderclient==1.6.0
python-keystoneclient==1.6.0
-flask>=0.10
-flask-restful>=0.3.5
+flask==0.10
+flask-restful==0.3.5
+flask-restful-swagger==0.19
+flask-swagger==0.2.12
html2text==2016.1.8
\ No newline at end of file
diff --git a/rest_server.py b/rest_server.py
index 1194ab5..c5fe99b 100644
--- a/rest_server.py
+++ b/rest_server.py
@@ -13,15 +13,22 @@ import logging
import logging.config
import os
-from flask import abort, Flask, request, jsonify
-from flask_restful import Resource, Api
+from flask import abort, Flask, request, jsonify, send_from_directory
+from flask_restful import Resource, Api, fields
+from flask_restful_swagger import swagger
+app = Flask(__name__, static_url_path="")
+api = swagger.docs(Api(app), apiVersion='1.0')
-app = Flask(__name__)
-api = Api(app)
storperf = StorPerfMaster()
+@app.route('/swagger/<path:path>')
+def send_swagger(path):
+ print "called! storperf/resources/html/swagger/" + path
+ return send_from_directory('storperf/resources/html/swagger', path)
+
+
class Configure(Resource):
def __init__(self):
@@ -29,7 +36,7 @@ class Configure(Resource):
def get(self):
return jsonify({'agent_count': storperf.agent_count,
- 'agent_network': storperf.agent_network,
+ 'public_network': storperf.public_network,
'volume_size': storperf.volume_size,
'stack_created': storperf.is_stack_created,
'stack_id': storperf.stack_id})
@@ -41,8 +48,8 @@ class Configure(Resource):
try:
if ('agent_count' in request.json):
storperf.agent_count = request.json['agent_count']
- if ('agent_network' in request.json):
- storperf.agent_network = request.json['agent_network']
+ if ('public_network' in request.json):
+ storperf.public_network = request.json['public_network']
if ('volume_size' in request.json):
storperf.volume_size = request.json['volume_size']
@@ -50,7 +57,7 @@ class Configure(Resource):
storperf.create_stack()
return jsonify({'agent_count': storperf.agent_count,
- 'agent_network': storperf.agent_network,
+ 'public_network': storperf.public_network,
'volume_size': storperf.volume_size,
'stack_id': storperf.stack_id})
@@ -64,11 +71,77 @@ class Configure(Resource):
abort(400, str(e))
+@swagger.model
+class WorkloadModel:
+ resource_fields = {
+ 'target': fields.String,
+ 'nossd': fields.String,
+ 'nowarm': fields.String,
+ 'workload': fields.String,
+ }
+
+
class Job(Resource):
def __init__(self):
self.logger = logging.getLogger(__name__)
+ @swagger.operation(
+ notes='Fetch the average latency of the specified workload',
+ parameters=[
+ {
+ "name": "id",
+ "description": "The UUID of the workload in the format "
+ "NNNNNNNN-NNNN-NNNN-NNNN-NNNNNNNNNNNN",
+ "required": True,
+ "type": "string",
+ "allowMultiple": False,
+ "paramType": "query"
+ }
+ ],
+ responseMessages=[
+ {
+ "code": 200,
+ "message": "Wordload ID found, response in JSON format"
+ },
+ {
+ "code": 404,
+ "message": "Workload ID not found"
+ }
+ ]
+ )
+ def get(self):
+ workload_id = request.args.get('id')
+ print workload_id
+ return jsonify(storperf.fetch_results(workload_id))
+
+ @swagger.operation(
+ parameters=[
+ {
+ "name": "body",
+ "description": 'Start execution of a workload with the '
+ 'following parameters: "target": The target device to '
+ 'profile", "nossd": Do not fill the target with random '
+ 'data prior to running the test, "nowarm": Do not '
+ 'refill the target with data '
+ 'prior to running any further tests, "workload": if specified, '
+ 'the workload to run. Defaults to all.',
+ "required": True,
+ "type": "WorkloadModel",
+ "paramType": "body"
+ }
+ ],
+ responseMessages=[
+ {
+ "code": 200,
+ "message": "Wordload ID found, response in JSON format"
+ },
+ {
+ "code": 400,
+ "message": "Missing configuration data"
+ }
+ ]
+ )
def post(self):
if not request.json:
abort(400, "ERROR: Missing configuration data")
@@ -97,6 +170,15 @@ class Job(Resource):
except Exception as e:
abort(400, str(e))
+ @swagger.operation(
+ notes='Cancels the currently running workload',
+ responseMessages=[
+ {
+ "code": 200,
+ "message": "Wordload ID found, response in JSON format"
+ },
+ ]
+ )
def delete(self):
try:
storperf.terminate_workloads()
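
For reference, flask-restful-swagger also publishes a machine-readable spec for the operations documented above; unless api_spec_url is overridden somewhere outside this diff, it defaults to /api/spec. A hedged sketch of reading it:

import requests

# Assumed host/port; /api/spec.json is flask-restful-swagger's default spec location.
spec = requests.get("http://127.0.0.1:5000/api/spec.json").json()
print(spec.get("apiVersion"))        # '1.0', as passed to swagger.docs() above
for entry in spec.get("apis", []):
    print(entry.get("path"))         # resource paths documented via @swagger.operation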
diff --git a/storperf/carbon/converter.py b/storperf/carbon/converter.py
index d551822..623c144 100644
--- a/storperf/carbon/converter.py
+++ b/storperf/carbon/converter.py
@@ -11,12 +11,12 @@ import logging
import time
-class JSONToCarbon(object):
+class Converter(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
- def convert_to_dictionary(self, json_object, prefix=None):
+ def convert_json_to_flat(self, json_object, prefix=None):
# Use the timestamp reported by fio, or current time if
# not present.
if 'timestamp' in json_object:
diff --git a/storperf/db/configuration_db.py b/storperf/db/configuration_db.py
index b12394e..5b996c7 100644
--- a/storperf/db/configuration_db.py
+++ b/storperf/db/configuration_db.py
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from _sqlite3 import OperationalError
+from sqlite3 import OperationalError
from threading import Lock
import logging
import sqlite3
diff --git a/storperf/db/graphite_db.py b/storperf/db/graphite_db.py
new file mode 100644
index 0000000..c62340c
--- /dev/null
+++ b/storperf/db/graphite_db.py
@@ -0,0 +1,81 @@
+from storperf.db.job_db import JobDB
+import json
+import logging
+
+import requests
+
+
+class GraphiteDB(object):
+
+ def __init__(self):
+ """
+ """
+ self._job_db = JobDB()
+ self.logger = logging.getLogger(__name__)
+
+ def fetch_averages(self, workload):
+ workload_executions = self._job_db.fetch_workloads(workload)
+
+ # Create a map of job runs
+ workload_names = {}
+ for workload_execution in workload_executions:
+ name = '.'.join(workload_execution[0].split('.')[0:6])
+ if name in workload_names:
+ workload_record = workload_names[name]
+ start = workload_record[0]
+ end = workload_record[1]
+ else:
+ start = None
+ end = None
+
+ if start is None or workload_execution[1] < start:
+ start = workload_execution[1]
+
+ if end is None or workload_execution[2] > end:
+ end = workload_execution[2]
+
+ workload_names[name] = [start, end]
+
+ averages = {}
+
+ for io_type in ['read', 'write']:
+ for workload_name, times in workload_names.iteritems():
+ workload_pattern = self.make_fullname_pattern(workload_name)
+ request = ("http://127.0.0.1:8000/render/?target="
+ "averageSeries(%s.jobs.1.%s.lat.mean)"
+ "&format=json"
+ "&from=%s"
+ "&until=%s" %
+ (workload_pattern, io_type, times[0], times[1]))
+ self.logger.debug("Calling %s" % (request))
+
+ response = requests.get(request)
+ if (response.status_code == 200):
+ short_name = '.'.join(workload_name.split('.')[1:6])
+ averages[short_name + "." + io_type] = \
+ self._average_results(json.loads(response.content))
+
+ return averages
+
+ def _average_results(self, results):
+
+ for item in results:
+ datapoints = item['datapoints']
+
+ total = 0
+ count = 0
+
+ for datapoint in datapoints:
+ if datapoint[0] is not None:
+ total += datapoint[0]
+ count += 1
+
+ average = total / count
+
+ return average
+
+ def make_fullname_pattern(self, workload):
+ parts = workload.split('.')
+ wildcards_needed = 7 - len(parts)
+ fullname = workload + (".*" * wildcards_needed)
+ return fullname
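
For context, fetch_averages() queries the Graphite render API with format=json and hands the payload to _average_results(); that payload is a list of series, each carrying [value, timestamp] datapoints where the value may be null. A small self-contained illustration of the averaging (sample numbers invented):

# Shape of a Graphite /render/?format=json response as consumed by
# _average_results(): a list of series with [value, timestamp] datapoints.
sample = [{
    "target": "averageSeries(job.*.rw.*.read.lat.mean)",   # illustrative target only
    "datapoints": [[1200.5, 1462379898], [None, 1462379908], [1300.0, 1462379918]],
}]

values = [dp[0] for dp in sample[0]["datapoints"] if dp[0] is not None]
print(sum(values) / len(values))   # 1250.25, the same average the method computes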
diff --git a/storperf/db/job_db.py b/storperf/db/job_db.py
index 0e94358..d42568a 100644
--- a/storperf/db/job_db.py
+++ b/storperf/db/job_db.py
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from _sqlite3 import OperationalError
+from sqlite3 import OperationalError
from threading import Lock
import calendar
import logging
@@ -15,8 +15,6 @@ import sqlite3
import time
import uuid
-import requests
-
db_mutex = Lock()
@@ -172,22 +170,14 @@ class JobDB(object):
db.commit()
db.close()
- def fetch_results(self, workload_prefix=""):
- if (workload_prefix is None):
- workload_prefix = ""
-
- workload_prefix = workload_prefix + "%"
-
- start_time = str(calendar.timegm(time.gmtime()))
- end_time = "0"
-
- self.logger.debug("Workload like: " + workload_prefix)
+ def fetch_workloads(self, workload):
+ workload_prefix = workload + "%"
+ workload_executions = []
with db_mutex:
-
db = sqlite3.connect(JobDB.db_name)
cursor = db.cursor()
- cursor.execute("""select start, end, workload
+ cursor.execute("""select workload, start, end
from jobs where workload like ?""",
(workload_prefix,))
@@ -195,27 +185,8 @@ class JobDB(object):
row = cursor.fetchone()
if (row is None):
break
-
- start_time = str(row[0])
- end_time = str(row[1])
- workload = str(row[2])
-
- # for most of these stats, we just want the final one
- # as that is cumulative average or whatever for the whole
- # run
-
- self.logger.info("workload=" + workload +
- "start=" + start_time + " end=" + end_time)
-
- request = ("http://127.0.0.1:8000/render/?target="
- "*.%s.%s.jobs.1.*.clat.mean"
- "&format=json&from=%s&until=%s"
- % (self.job_id, workload, start_time, end_time))
- response = requests.get(request)
-
- if (response.status_code == 200):
- data = response.json()
- print data
- else:
- pass
+ workload_execution = [row[0], row[1], row[2]]
+ workload_executions.append(workload_execution)
db.close()
+
+ return workload_executions
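
A quick sketch of the new return shape (the sample rows match the fixture used in graphite_db_test.py further down): fetch_workloads() now hands back raw rows and leaves the Graphite lookups to GraphiteDB.

from storperf.db.job_db import JobDB

# Hypothetical call; rows come back as [workload-name, start, end] triples.
rows = JobDB().fetch_workloads("32d31724-fac1-44f3-9033-ca8e00066a36")
for workload, start, end in rows:
    print(workload, start, end)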
diff --git a/storperf/resources/hot/agent-group.yaml b/storperf/resources/hot/agent-group.yaml
index 315ecf3..4a1df8e 100644
--- a/storperf/resources/hot/agent-group.yaml
+++ b/storperf/resources/hot/agent-group.yaml
@@ -10,13 +10,13 @@
heat_template_version: 2013-05-23
parameters:
- agent_network:
+ public_network:
type: string
constraints:
- custom_constraint: neutron.network
flavor:
type: string
- default: "StorPerf Agent"
+ default: "m1.small"
key_name:
type: string
default: StorPerf
@@ -38,18 +38,59 @@ parameters:
resources:
slaves:
type: OS::Heat::ResourceGroup
+ depends_on: [storperf_subnet, storperf_network_router_interface, storperf_open_security_group]
properties:
count: {get_param: agent_count}
resource_def: {
type: "storperf-agent.yaml",
properties: {
- agent_network: {get_param: agent_network},
+ public_network: {get_param: public_network},
+ agent_network: {get_resource: storperf_network},
flavor: {get_param: flavor},
+ storperf_open_security_group: {get_resource: storperf_open_security_group},
key_name: {get_param: key_name},
volume_size: {get_param: volume_size}
}
}
+ storperf_network:
+ type: OS::Neutron::Net
+ properties:
+ name: storperf-network
+
+ storperf_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: storperf_network }
+ cidr: 172.16.0.0/16
+ gateway_ip: 172.16.0.1
+
+ storperf_network_router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_network }
+
+ storperf_network_router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: storperf_network_router }
+ subnet_id: { get_resource: storperf_subnet }
+
+ storperf_open_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: An open security group to allow all access to the StorPerf slaves
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+
+
outputs:
slave_ips: {
description: "Slave addresses",
diff --git a/storperf/resources/hot/storperf-agent.yaml b/storperf/resources/hot/storperf-agent.yaml
index ffda9c4..5d99a26 100644
--- a/storperf/resources/hot/storperf-agent.yaml
+++ b/storperf/resources/hot/storperf-agent.yaml
@@ -22,6 +22,8 @@ parameters:
username:
type: string
default: storperf
+ storperf_open_security_group:
+ type: string
volume_size:
type: number
description: Size of the volume to be created.
@@ -33,7 +35,10 @@ parameters:
type: string
constraints:
- custom_constraint: neutron.network
-
+ public_network:
+ type: string
+ constraints:
+ - custom_constraint: neutron.network
resources:
storperf_agent:
@@ -69,20 +74,13 @@ resources:
properties:
network_id: { get_param: agent_network }
security_groups:
- - { get_resource: storperf_security_group }
+ - { get_param: storperf_open_security_group }
- storperf_security_group:
- type: OS::Neutron::SecurityGroup
+ storperf_floating_ip:
+ type: OS::Neutron::FloatingIP
properties:
- description: Neutron security group rules
- name: StorPerf-Security-Group
- rules:
- - remote_ip_prefix: 0.0.0.0/0
- protocol: tcp
- direction: ingress
- - remote_ip_prefix: 0.0.0.0/0
- protocol: icmp
- direction: ingress
+ floating_network_id: { get_param: public_network }
+ port_id: { get_resource: storperf_agent_port }
agent_volume:
type: OS::Cinder::Volume
@@ -97,5 +95,5 @@ resources:
outputs:
storperf_agent_ip:
- description: The IP address of the agent on the StorPerf network
- value: { get_attr: [ storperf_agent, first_address ] }
\ No newline at end of file
+ description: The floating IP address of the agent on the public network
+ value: { get_attr: [ storperf_floating_ip, floating_ip_address ] }
\ No newline at end of file
diff --git a/storperf/storperf_master.py b/storperf/storperf_master.py
index c684ce6..2a03753 100644
--- a/storperf/storperf_master.py
+++ b/storperf/storperf_master.py
@@ -7,7 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from storperf.db.graphite_db import GraphiteDB
from threading import Thread
+from time import sleep
import logging
import os
import subprocess
@@ -97,20 +99,20 @@ class StorPerfMaster(object):
value)
@property
- def agent_network(self):
+ def public_network(self):
return self.configuration_db.get_configuration_value(
'stack',
- 'agent_network')
+ 'public_network')
- @agent_network.setter
- def agent_network(self, value):
+ @public_network.setter
+ def public_network(self, value):
if (self.stack_id is not None):
raise ParameterError(
- "ERROR: Cannot change agent network after stack is created")
+ "ERROR: Cannot change public network after stack is created")
self.configuration_db.set_configuration_value(
'stack',
- 'agent_network',
+ 'public_network',
value)
@property
@@ -190,7 +192,8 @@ class StorPerfMaster(object):
raise ParameterError("ERROR: Stack has already been created")
self._attach_to_openstack()
- if (self.agent_count > self.volume_quota):
+ volume_quota = self.volume_quota
+ if (volume_quota > 0 and self.agent_count > volume_quota):
message = "ERROR: Volume quota too low: " + \
str(self.agent_count) + " > " + str(self.volume_quota)
raise ParameterError(message)
@@ -205,7 +208,8 @@ class StorPerfMaster(object):
def validate_stack(self):
self._attach_to_openstack()
- if (self.agent_count > self.volume_quota):
+ volume_quota = self.volume_quota
+ if (volume_quota > 0 and self.agent_count > volume_quota):
message = "ERROR: Volume quota too low: " + \
str(self.agent_count) + " > " + str(self.volume_quota)
self.logger.error(message)
@@ -218,18 +222,24 @@ class StorPerfMaster(object):
parameters=self._make_parameters())
return True
- def wait_for_stack_creation(self):
-
- pass
-
def delete_stack(self):
if (self.stack_id is None):
raise ParameterError("ERROR: Stack does not exist")
self._attach_to_openstack()
-
- self._heat_client.stacks.delete(stack_id=self.stack_id)
- self.stack_id = None
+ while True:
+ stack = self._heat_client.stacks.get(self.stack_id)
+ status = getattr(stack, 'stack_status')
+ self.logger.debug("Stack status=%s" % (status,))
+ if (status == u'CREATE_COMPLETE'):
+ self._heat_client.stacks.delete(stack_id=self.stack_id)
+ if (status == u'DELETE_COMPLETE'):
+ self.stack_id = None
+ return True
+ if (status == u'DELETE_FAILED'):
+ sleep(5)
+ self._heat_client.stacks.delete(stack_id=self.stack_id)
+ sleep(2)
def execute_workloads(self):
if (self.stack_id is None):
@@ -257,6 +267,10 @@ class StorPerfMaster(object):
def terminate_workloads(self):
return self._test_executor.terminate()
+ def fetch_results(self, job_id):
+ graphite_db = GraphiteDB()
+ return graphite_db.fetch_averages(job_id)
+
def _setup_slave(self, slave):
logger = logging.getLogger(__name__ + ":" + slave)
@@ -316,7 +330,7 @@ class StorPerfMaster(object):
def _make_parameters(self):
heat_parameters = {}
- heat_parameters['agent_network'] = self.agent_network
+ heat_parameters['public_network'] = self.public_network
heat_parameters['agent_count'] = self.agent_count
heat_parameters['volume_size'] = self.volume_size
return heat_parameters
diff --git a/storperf/test_executor.py b/storperf/test_executor.py
index aa8a415..c0ea295 100644
--- a/storperf/test_executor.py
+++ b/storperf/test_executor.py
@@ -9,7 +9,7 @@
from os import listdir
from os.path import isfile, join
-from storperf.carbon.converter import JSONToCarbon
+from storperf.carbon.converter import Converter
from storperf.carbon.emitter import CarbonMetricTransmitter
from storperf.db.job_db import JobDB
from storperf.fio.fio_invoker import FIOInvoker
@@ -33,7 +33,7 @@ class TestExecutor(object):
self.precondition = True
self.warm = True
self.event_listeners = set()
- self.metrics_converter = JSONToCarbon()
+ self.metrics_converter = Converter()
self.metrics_emitter = CarbonMetricTransmitter()
self.prefix = None
self.job_db = JobDB()
@@ -58,7 +58,7 @@ class TestExecutor(object):
self.event_listeners.discard(event_listener)
def event(self, callback_id, metric):
- carbon_metrics = self.metrics_converter.convert_to_dictionary(
+ carbon_metrics = self.metrics_converter.convert_json_to_flat(
metric,
callback_id)
@@ -135,10 +135,10 @@ class TestExecutor(object):
workload.filename = self.filename
if (workload_name.startswith("_")):
- iodepths = [2, ]
+ iodepths = [32, ]
blocksizes = [8192, ]
else:
- iodepths = [1, 16, 128]
+ iodepths = [128, 16, 1]
blocksizes = [8192, 4096, 512]
workload.id = self.job_db.job_id
@@ -183,6 +183,6 @@ class TestExecutor(object):
self.logger.info("Ended " + workload.fullname)
- def fetch_results(self, job, workload_name=""):
+ def fetch_workloads(self, job, workload_name=""):
self.job_db.job_id = job
- return self.job_db.fetch_results(workload_name)
+ return self.job_db.fetch_workloads(workload_name)
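
The executor change above flips real runs to descend from the heaviest queue depth (128, 16, 1) across block sizes 8192/4096/512, while warm-up workloads (names starting with "_") get a single pass at depth 32 and 8 KB blocks. The resulting matrix per workload:

# Nine combinations per regular workload, walked heaviest-first.
iodepths = [128, 16, 1]
blocksizes = [8192, 4096, 512]
for depth in iodepths:
    for bs in blocksizes:
        print("queue-depth.%s.block-size.%s" % (depth, bs))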
diff --git a/storperf/tests/__init__.py b/storperf/tests/__init__.py
new file mode 100644
index 0000000..73334c7
--- /dev/null
+++ b/storperf/tests/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
diff --git a/storperf/tests/carbon_tests/emitter_test.py b/storperf/tests/carbon_tests/emitter_test.py
index f3ff57e..fe19ed2 100644
--- a/storperf/tests/carbon_tests/emitter_test.py
+++ b/storperf/tests/carbon_tests/emitter_test.py
@@ -44,9 +44,9 @@ class CarbonMetricTransmitterTest(unittest.TestCase):
def test_transmit_metrics(self):
- testconv = converter.JSONToCarbon()
+ testconv = converter.Converter()
json_object = json.loads("""{"timestamp" : "12345", "key":"value" }""")
- result = testconv.convert_to_dictionary(json_object, "host.run-name")
+ result = testconv.convert_json_to_flat(json_object, "host.run-name")
emitter = CarbonMetricTransmitter()
emitter.carbon_port = self.listen_port
diff --git a/storperf/tests/carbon_tests/json_to_carbon_test.py b/storperf/tests/carbon_tests/json_to_carbon_test.py
index d309b48..523ff77 100644
--- a/storperf/tests/carbon_tests/json_to_carbon_test.py
+++ b/storperf/tests/carbon_tests/json_to_carbon_test.py
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from storperf.carbon.converter import JSONToCarbon
+from storperf.carbon.converter import Converter
import json
import unittest
@@ -58,45 +58,45 @@ class JSONToCarbonTest(unittest.TestCase):
pass
def test_to_string(self):
- testconv = JSONToCarbon()
+ testconv = Converter()
json_object = json.loads(self.simple_fio_json)
- result = testconv.convert_to_dictionary(json_object, "host.run-name")
+ result = testconv.convert_json_to_flat(json_object, "host.run-name")
self.assertEqual("7116", result[
"host.run-name.jobs.1.read.io_bytes"],
result["host.run-name.jobs.1.read.io_bytes"])
def test_single_text_element_no_prefix(self):
- testconv = JSONToCarbon()
- result = testconv.convert_to_dictionary(
+ testconv = Converter()
+ result = testconv.convert_json_to_flat(
json.loads(self.single_json_text_element))
self.assertEqual("value", result["key"], result["key"])
def test_single_numeric_element_no_prefix(self):
- testconv = JSONToCarbon()
- result = testconv.convert_to_dictionary(
+ testconv = Converter()
+ result = testconv.convert_json_to_flat(
json.loads(self.single_json_numeric_element))
self.assertEqual("123", result["key"], result["key"])
def test_single_text_key_space_element_no_prefix(self):
- testconv = JSONToCarbon()
- result = testconv.convert_to_dictionary(
+ testconv = Converter()
+ result = testconv.convert_json_to_flat(
json.loads(self.single_json_key_with_spaces))
self.assertEqual(
"value", result["key_with_spaces"], result["key_with_spaces"])
def test_single_text_value_space_element_no_prefix(self):
- testconv = JSONToCarbon()
- result = testconv.convert_to_dictionary(
+ testconv = Converter()
+ result = testconv.convert_json_to_flat(
json.loads(self.single_json_value_with_spaces))
self.assertEqual("value_with_spaces", result["key"], result["key"])
def test_map_name_with_space_no_prefix(self):
- testconv = JSONToCarbon()
- result = testconv.convert_to_dictionary(
+ testconv = Converter()
+ result = testconv.convert_json_to_flat(
json.loads(self.json_map_name_with_spaces))
self.assertEqual(
@@ -104,14 +104,13 @@ class JSONToCarbonTest(unittest.TestCase):
result["map_with_spaces.key"])
def test_list_name_with_space_no_prefix(self):
- testconv = JSONToCarbon()
- result = testconv.convert_to_dictionary(
+ testconv = Converter()
+ result = testconv.convert_json_to_flat(
json.loads(self.json_list_name_with_spaces))
self.assertEqual(
"value", result["list_with_spaces.1.key"],
result["list_with_spaces.1.key"])
-
if __name__ == '__main__':
unittest.main()
diff --git a/storperf/tests/db_tests/__init__.py b/storperf/tests/db_tests/__init__.py
new file mode 100644
index 0000000..73334c7
--- /dev/null
+++ b/storperf/tests/db_tests/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
diff --git a/storperf/tests/db_tests/graphite_db_test.py b/storperf/tests/db_tests/graphite_db_test.py
new file mode 100644
index 0000000..ce970bb
--- /dev/null
+++ b/storperf/tests/db_tests/graphite_db_test.py
@@ -0,0 +1,47 @@
+from storperf.db.graphite_db import GraphiteDB
+import this
+import unittest
+
+
+class GraphiteDBTest(unittest.TestCase):
+
+ def setUp(self):
+ self.graphdb = GraphiteDB()
+ self.graphdb._job_db = self
+
+ def test_wilcard_pattern(self):
+ workload = "job_id"
+ expected = "job_id.*.*.*.*.*.*"
+ actual = self.graphdb.make_fullname_pattern(workload)
+ self.assertEqual(expected, actual, actual)
+
+ def test_no_wilcard_pattern(self):
+ workload = "job_id.workload.host.queue-depth.1.block-size.16"
+ actual = self.graphdb.make_fullname_pattern(workload)
+ self.assertEqual(workload, actual, actual)
+
+ def test_fetch_averages(self):
+ # self.graphdb.fetch_averages(u'32d31724-fac1-44f3-9033-ca8e00066a36')
+ pass
+
+ def fetch_workloads(self, workload):
+ workloads = [[u'32d31724-fac1-44f3-9033-ca8e00066a36.'
+ u'_warm_up.queue-depth.32.block-size.8192.10-9-15-151',
+ u'1462379653', u'1462379893'],
+ [u'32d31724-fac1-44f3-9033-ca8e00066a36.'
+ u'_warm_up.queue-depth.32.block-size.8192.10-9-15-150',
+ u'1462379653', u'1462379898'],
+ [u'32d31724-fac1-44f3-9033-ca8e00066a36'
+ u'.rw.queue-depth.128.block-size.8192.10-9-15-151',
+ u'1462379898', u'1462380028'],
+ [u'32d31724-fac1-44f3-9033-ca8e00066a36'
+ u'.rw.queue-depth.128.block-size.8192.10-9-15-150',
+ u'1462379898', u'1462380032'],
+ [u'32d31724-fac1-44f3-9033-ca8e00066a36'
+ u'.rw.queue-depth.16.block-size.8192.10-9-15-151',
+ u'1462380032', u'1462380312'],
+ [u'32d31724-fac1-44f3-9033-ca8e00066a36'
+ u'.rw.queue-depth.16.block-size.8192.10-9-15-150',
+ u'1462380032', u'1462380329'],
+ ]
+ return workloads
diff --git a/storperf/tests/storperf_master_test.py b/storperf/tests/storperf_master_test.py
index ff85fb0..33c1699 100644
--- a/storperf/tests/storperf_master_test.py
+++ b/storperf/tests/storperf_master_test.py
@@ -44,8 +44,8 @@ class StorPerfMasterTest(unittest.TestCase):
def test_agent_network(self):
expected = "ABCDEF"
- self.storperf.agent_network = expected
- actual = self.storperf.agent_network
+ self.storperf.public_network = expected
+ actual = self.storperf.public_network
self.assertEqual(
expected, actual, "Did not expect: " + str(actual))
diff --git a/storperf/tests/workload_tests/__init__.py b/storperf/tests/workload_tests/__init__.py
new file mode 100644
index 0000000..73334c7
--- /dev/null
+++ b/storperf/tests/workload_tests/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
diff --git a/storperf/tests/workload_tests/workload_subclass_test.py b/storperf/tests/workload_tests/workload_subclass_test.py
index 97b6b46..e9e47f3 100644
--- a/storperf/tests/workload_tests/workload_subclass_test.py
+++ b/storperf/tests/workload_tests/workload_subclass_test.py
@@ -6,12 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
from storperf.workloads.rr import rr
from storperf.workloads.rs import rs
from storperf.workloads.rw import rw
from storperf.workloads.wr import wr
from storperf.workloads.ws import ws
+import unittest
class WorkloadSubclassTest(unittest.TestCase):
@@ -22,33 +22,33 @@ class WorkloadSubclassTest(unittest.TestCase):
def test_local_name(self):
workload = rr()
self.assertEqual(workload.fullname,
- "None.rr.None.queue-depth.1.block-size.64k",
+ "None.rr.queue-depth.1.block-size.64k.None",
workload.fullname)
def test_remote_name(self):
workload = rw()
workload.remote_host = "192.168.0.1"
self.assertEqual(workload.fullname,
- "None.rw.192-168-0-1.queue-depth.1.block-size.64k",
+ "None.rw.queue-depth.1.block-size.64k.192-168-0-1",
workload.fullname)
def test_blocksize(self):
workload = rs()
workload.options["bs"] = "4k"
self.assertEqual(workload.fullname,
- "None.rs.None.queue-depth.1.block-size.4k",
+ "None.rs.queue-depth.1.block-size.4k.None",
workload.fullname)
def test_queue_depth(self):
workload = wr()
workload.options["iodepth"] = "8"
self.assertEqual(workload.fullname,
- "None.wr.None.queue-depth.8.block-size.64k",
+ "None.wr.queue-depth.8.block-size.64k.None",
workload.fullname)
def test_id(self):
workload = ws()
workload.id = "workloadid"
self.assertEqual(workload.fullname,
- "workloadid.ws.None.queue-depth.1.block-size.64k",
+ "workloadid.ws.queue-depth.1.block-size.64k.None",
workload.fullname)
diff --git a/storperf/workloads/_base_workload.py b/storperf/workloads/_base_workload.py
index 050a15c..dc448fd 100644
--- a/storperf/workloads/_base_workload.py
+++ b/storperf/workloads/_base_workload.py
@@ -74,8 +74,9 @@ class _base_workload(object):
@property
def fullname(self):
- return str(self.id) + "." + \
- self.__class__.__name__ + "." + \
- str(self.remote_host).replace(".", "-") + \
- ".queue-depth." + str(self.options['iodepth']) + \
- ".block-size." + str(self.options['bs'])
+ return ("%s.%s.queue-depth.%s.block-size.%s.%s" %
+ (str(self.id),
+ self.__class__.__name__,
+ str(self.options['iodepth']),
+ str(self.options['bs']),
+ str(self.remote_host).replace(".", "-")))
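
Net effect of the fullname change (the expected strings in workload_subclass_test.py above use the same layout): the host now trails the name, so metrics group by job, workload, queue depth, and block size first. A short illustration, assuming the ws workload's default iodepth of 1 and block size of 64k shown in the tests:

from storperf.workloads.ws import ws

# Layout: <job id>.<workload>.queue-depth.<iodepth>.block-size.<bs>.<host, dots -> dashes>
workload = ws()
workload.id = "workloadid"
workload.remote_host = "192.168.0.1"
print(workload.fullname)   # workloadid.ws.queue-depth.1.block-size.64k.192-168-0-1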