Diffstat (limited to 'yardstick/benchmark/scenarios/storage/storperf.py')
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 134
 1 file changed, 93 insertions(+), 41 deletions(-)
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index f0b2361d6..5b8b00075 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -8,15 +8,16 @@
##############################################################################
from __future__ import absolute_import
-import os
import logging
+import os
import time
-import requests
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios import base
+
LOG = logging.getLogger(__name__)
@@ -43,12 +44,6 @@ class StorPerf(base.Scenario):
        wr: 100% Write, random access
        rw: 70% Read / 30% write, random access
-    nossd (Optional):
-    Do not perform SSD style preconditioning.
-
-    nowarm (Optional):
-    Do not perform a warmup prior to measurements.
-
    report = [job_id] (Optional):
    Query the status of the supplied job_id and report on metrics.
    If a workload is supplied, will report on only that subset.
@@ -79,17 +74,22 @@ class StorPerf(base.Scenario):
        setup_query_content = jsonutils.loads(
            setup_query.content)
-        if setup_query_content["stack_created"]:
-            self.setup_done = True
+        if ("stack_created" in setup_query_content and
+                setup_query_content["stack_created"]):
            LOG.debug("stack_created: %s",
                      setup_query_content["stack_created"])
+            return True
+
+        return False

    def setup(self):
        """Set the configuration."""
        env_args = {}
        env_args_payload_list = ["agent_count", "agent_flavor",
                                 "public_network", "agent_image",
-                                 "volume_size"]
+                                 "volume_size", "volume_type",
+                                 "volume_count", "availability_zone",
+                                 "stack_name", "subnet_CIDR"]
        for env_argument in env_args_payload_list:
            try:
@@ -102,30 +102,36 @@ class StorPerf(base.Scenario):
        setup_res = requests.post('http://%s:5000/api/v1.0/configurations'
                                  % self.target, json=env_args)
-        setup_res_content = jsonutils.loads(
-            setup_res.content)
        if setup_res.status_code != 200:
-            raise RuntimeError("Failed to create a stack, error message:",
-                               setup_res_content["message"])
+            LOG.error("Failed to create stack. %s: %s",
+                      setup_res.status_code, setup_res.content)
+            raise RuntimeError("Failed to create stack. %s: %s" %
+                               (setup_res.status_code, setup_res.content))
        elif setup_res.status_code == 200:
+            setup_res_content = jsonutils.loads(setup_res.content)
            LOG.info("stack_id: %s", setup_res_content["stack_id"])
-            while not self.setup_done:
-                self._query_setup_state()
-                time.sleep(self.query_interval)
+            while not self._query_setup_state():
+                time.sleep(self.query_interval)
+
+        # We do not want to load the results of the disk initialization,
+        # so it is not added to the results here.
+        self.initialize_disks()
+        self.setup_done = True

    def _query_job_state(self, job_id):
        """Query the status of the supplied job_id and report on metrics"""
        LOG.info("Fetching report for %s...", job_id)
-        report_res = requests.get('http://{}:5000/api/v1.0/jobs'.format
-                                  (self.target),
+        report_res = requests.get('http://%s:5000/api/v1.0/jobs' % self.target,
                                  params={'id': job_id, 'type': 'status'})
        report_res_content = jsonutils.loads(
            report_res.content)
        if report_res.status_code != 200:
+            LOG.error("Failed to fetch report, error message: %s",
+                      report_res_content["message"])
            raise RuntimeError("Failed to fetch report, error message:",
                               report_res_content["message"])
        else:
@@ -149,7 +155,8 @@ class StorPerf(base.Scenario):
        if not self.setup_done:
            self.setup()
-        metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+        metadata = {"build_tag": "latest",
+                    "test_case": "opnfv_yardstick_tc074"}
        metadata_payload_dict = {"pod_name": "NODE_NAME",
                                 "scenario_name": "DEPLOY_SCENARIO",
                                 "version": "YARDSTICK_BRANCH"}
@@ -162,7 +169,9 @@ class StorPerf(base.Scenario):
job_args = {"metadata": metadata}
job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
- "target", "nossd", "nowarm", "workload"]
+ "target", "workload", "workloads",
+ "agent_count", "steady_state_samples"]
+ job_args["deadline"] = self.options["timeout"]
for job_argument in job_args_payload_list:
try:
@@ -170,16 +179,24 @@ class StorPerf(base.Scenario):
            except KeyError:
                pass

-        LOG.info("Starting a job with parameters %s", job_args)
-        job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
-                                json=job_args)
+        api_version = "v1.0"

-        job_res_content = jsonutils.loads(job_res.content)
+ if ("workloads" in job_args and
+ job_args["workloads"] is not None and
+ len(job_args["workloads"])) > 0:
+ api_version = "v2.0"
+
+ LOG.info("Starting a job with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+ api_version), json=job_args)
if job_res.status_code != 200:
- raise RuntimeError("Failed to start a job, error message:",
- job_res_content["message"])
+ LOG.error("Failed to start job. %s: %s",
+ job_res.status_code, job_res.content)
+ raise RuntimeError("Failed to start job. %s: %s" %
+ (job_res.status_code, job_res.content))
elif job_res.status_code == 200:
+ job_res_content = jsonutils.loads(job_res.content)
job_id = job_res_content["job_id"]
LOG.info("Started job id: %s...", job_id)
@@ -187,15 +204,6 @@ class StorPerf(base.Scenario):
            self._query_job_state(job_id)
            time.sleep(self.query_interval)

-        terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
-                                        self.target)
-
-        if terminate_res.status_code != 200:
-            terminate_res_content = jsonutils.loads(
-                terminate_res.content)
-            raise RuntimeError("Failed to start a job, error message:",
-                               terminate_res_content["message"])
-
        # TODO: Support using ETA to polls for completion.
        # Read ETA, next poll in 1/2 ETA time slot.
        # If ETA is greater than the maximum allowed job time,
@@ -209,21 +217,65 @@ class StorPerf(base.Scenario):
        #     else:
        #         time.sleep(int(esti_time)/2)

+        result_res = requests.get('http://%s:5000/api/v1.0/jobs?type='
+                                  'metadata&id=%s' % (self.target, job_id))
+        result_res_content = jsonutils.loads(result_res.content)
+        if 'report' in result_res_content and \
+                'steady_state' in result_res_content['report']['details']:
+            res = result_res_content['report']['details']['steady_state']
+            steady_state = list(res.values())[0]
+ LOG.info("Job %s completed with steady state %s",
+ job_id, steady_state)
+
result_res = requests.get('http://%s:5000/api/v1.0/jobs?id=%s' %
(self.target, job_id))
result_res_content = jsonutils.loads(
result_res.content)
-
result.update(result_res_content)
+ def initialize_disks(self):
+ """Fills the target with random data prior to executing workloads"""
+
+ job_args = {}
+ job_args_payload_list = ["target"]
+
+ for job_argument in job_args_payload_list:
+ try:
+ job_args[job_argument] = self.options[job_argument]
+ except KeyError:
+ pass
+
+ LOG.info("Starting initialization with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+ self.target, json=job_args)
+
+
+        if job_res.status_code != 200:
+            LOG.error("Failed to start initialization job. %s: %s",
+                      job_res.status_code, job_res.content)
+            raise RuntimeError("Failed to start initialization job. %s: %s" %
+                               (job_res.status_code, job_res.content))
+        elif job_res.status_code == 200:
+            job_res_content = jsonutils.loads(job_res.content)
+            job_id = job_res_content["job_id"]
+            LOG.info("Started initialization as job id: %s...", job_id)
+
+        while not self.job_completed:
+            self._query_job_state(job_id)
+            time.sleep(self.query_interval)
+
+        self.job_completed = False
+
+    def teardown(self):
        """Deletes the agent configuration and the stack"""
-        teardown_res = requests.delete('http://%s:5000/api/v1.0/\
-            configurations' % self.target)
+        teardown_res = requests.delete(
+            'http://%s:5000/api/v1.0/configurations' % self.target)

        if teardown_res.status_code == 400:
            teardown_res_content = jsonutils.loads(
-                teardown_res.content)
+                teardown_res.json_data)
+            LOG.error("Failed to reset environment, error message: %s",
+                      teardown_res_content['message'])
            raise RuntimeError("Failed to reset environment, error message:",
                               teardown_res_content['message'])
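
The patch drives StorPerf entirely through its REST API: POST /configurations builds the
agent stack, POST /initializations pre-fills the target volumes, POST /jobs (v1.0, or v2.0
when a "workloads" dict is supplied) starts a run, GET /jobs polls status and fetches the
report, and DELETE /configurations tears the stack down. The sketch below replays that
sequence outside of Yardstick. It is a minimal illustration, not part of the patch: the
host address, network name, volume size, block size, queue depth and target device are
hypothetical, and the "Status"/"Completed" fields checked while polling are assumed rather
than shown in this diff.

    import time

    import requests

    TARGET = "192.168.10.5"  # hypothetical StorPerf API host
    BASE = "http://%s:5000/api/v1.0" % TARGET

    # 1. Create the agent stack (what StorPerf.setup() posts to /configurations).
    env_args = {"agent_count": 1, "public_network": "ext-net", "volume_size": 4}
    res = requests.post("%s/configurations" % BASE, json=env_args)
    res.raise_for_status()
    print("stack_id:", res.json()["stack_id"])

    # 2. Fill the target volume with random data (what initialize_disks() does);
    #    the scenario polls this job to completion before starting measurements.
    res = requests.post("%s/initializations" % BASE, json={"target": "/dev/vdb"})
    res.raise_for_status()
    print("initialization job:", res.json()["job_id"])

    # 3. Start a measurement job (what run() does). The scenario switches to
    #    /api/v2.0/jobs when a "workloads" dict replaces the legacy "workload" string.
    job_args = {"block_sizes": "4096", "queue_depths": "4", "deadline": 15,
                "workload": "rw", "metadata": {"build_tag": "latest"}}
    res = requests.post("%s/jobs" % BASE, json=job_args)
    res.raise_for_status()
    job_id = res.json()["job_id"]

    # 4. Poll until the job finishes, then pull the full metrics report.
    #    The "Status" key and "Completed" value are an assumption; the patch only
    #    shows that type=status is polled until the scenario marks job_completed.
    while True:
        status = requests.get("%s/jobs" % BASE,
                              params={"id": job_id, "type": "status"}).json()
        if status.get("Status") == "Completed":
            break
        time.sleep(5)
    report = requests.get("%s/jobs" % BASE, params={"id": job_id}).json()
    print(report)

    # 5. Delete the agent configuration and the stack (what teardown() does).
    requests.delete("%s/configurations" % BASE)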