diff options
author | Mark Beierl <mark.beierl@emc.com> | 2016-05-04 22:53:07 -0400 |
---|---|---|
committer | Mark Beierl <mark.beierl@emc.com> | 2016-05-05 14:58:39 -0400 |
commit | 05e863781ce6746fabec176d1fc5f7454f2cdd73 (patch) | |
tree | 0ff7f2aa9e55b33c3f95c0521bbd3991a9e4e2c0 /storperf/db/graphite_db.py | |
parent | 1e0544d70dabed4f33e0624cb4a7cde4c8c6b691 (diff) |
Add Stats report and Swagger UI
Add Swagger web ui at /swagger
Add ability to fetch read/write latency status via ReST ui
Can now delete where stack was removed from OpenStack but not from the
storperf DB
Change to use Floating IPs instead of private IP
Fix delete bug where there was no dependency on resources in
the resource group.
JIRA: STORPERF-19
JIRA: STORPERF-20
Change-Id: I0a4b3386789c38d6745906ba896b8ff851dc122f
Signed-off-by: Mark Beierl <mark.beierl@emc.com>
Diffstat (limited to 'storperf/db/graphite_db.py')
-rw-r--r-- | storperf/db/graphite_db.py | 81 |
1 file changed, 81 insertions, 0 deletions
from storperf.db.job_db import JobDB
import json
import logging

import requests


class GraphiteDB(object):
    """Compute per-workload latency averages from Graphite time series.

    Combines workload execution records from the local job database
    (JobDB) with 'lat.mean' series fetched from a local Graphite render
    endpoint, and reduces each series to a single average value.
    """

    def __init__(self):
        """Create a GraphiteDB backed by the local JobDB."""
        self._job_db = JobDB()
        self.logger = logging.getLogger(__name__)

    def fetch_averages(self, workload):
        """Return average read/write latency per workload run.

        :param workload: workload id prefix used to look up executions
            in the job database.
        :return: dict mapping "<short workload name>.<io type>" to the
            averaged 'lat.mean' value reported by Graphite.  Entries are
            only added for HTTP 200 responses from Graphite.
        """
        workload_executions = self._job_db.fetch_workloads(workload)

        # Collapse individual executions into one [start, end] time window
        # per distinct workload name (first 6 dot-separated components).
        # Each execution row is (full_name, start_time, end_time).
        workload_names = {}
        for workload_execution in workload_executions:
            name = '.'.join(workload_execution[0].split('.')[0:6])
            if name in workload_names:
                start, end = workload_names[name]
            else:
                start = None
                end = None

            if start is None or workload_execution[1] < start:
                start = workload_execution[1]

            if end is None or workload_execution[2] > end:
                end = workload_execution[2]

            workload_names[name] = [start, end]

        averages = {}

        for io_type in ['read', 'write']:
            # items() instead of the Python-2-only iteritems() so this
            # module also runs under Python 3 (identical behavior on Py2).
            for workload_name, times in workload_names.items():
                workload_pattern = self.make_fullname_pattern(workload_name)
                request = ("http://127.0.0.1:8000/render/?target="
                           "averageSeries(%s.jobs.1.%s.lat.mean)"
                           "&format=json"
                           "&from=%s"
                           "&until=%s" %
                           (workload_pattern, io_type, times[0], times[1]))
                self.logger.debug("Calling %s" % (request))

                response = requests.get(request)
                if (response.status_code == 200):
                    short_name = '.'.join(workload_name.split('.')[1:6])
                    averages[short_name + "." + io_type] = \
                        self._average_results(json.loads(response.content))

        return averages

    def _average_results(self, results):
        """Average the non-null datapoints of the series in *results*.

        As in the original, when several series are present the average
        of the last series with usable datapoints wins.

        :param results: decoded Graphite JSON response: a list of series
            dicts, each with a 'datapoints' list of [value, timestamp]
            pairs where value may be None.
        :return: arithmetic mean (float) of the non-None values, or 0
            when there are no series or no usable datapoints.  (The
            previous code raised ZeroDivisionError on an all-null series
            and UnboundLocalError on an empty response.)
        """
        average = 0
        for item in results:
            datapoints = item['datapoints']

            # Float accumulator: avoids Python 2 integer floor division
            # when Graphite happens to return integral values.
            total = 0.0
            count = 0

            for datapoint in datapoints:
                if datapoint[0] is not None:
                    total += datapoint[0]
                    count += 1

            # Guard against an all-null series (previously divided by zero).
            if count > 0:
                average = total / count

        return average

    def make_fullname_pattern(self, workload):
        """Pad *workload* with wildcards to a 7-part Graphite pattern.

        :param workload: dot-separated (possibly partial) metric name.
        :return: the name extended with '.*' components until it has 7
            parts; names with 7 or more parts are returned unchanged
            (a negative repeat count yields an empty string).
        """
        parts = workload.split('.')
        wildcards_needed = 7 - len(parts)
        fullname = workload + (".*" * wildcards_needed)
        return fullname