From f76a4912d312712f06e2ad4b86ee339dea38d4cd Mon Sep 17 00:00:00 2001 From: mbeierl Date: Wed, 29 Mar 2017 21:00:50 -0400 Subject: Use tagged version of graphite Changes the docker build procedure to use a pip install of a specific version of graphite so that it doesn't break when upstream changes occur. Change-Id: I2316912fc04568c441212087665e384075e409a6 JIRA: STORPERF-110 Signed-off-by: mbeierl (cherry picked from commit 1bcbe0bac328e26341ba68341d4b809c32b8242f) --- build-dev-docker.sh | 15 ++++--- ci/daily.sh | 5 ++- ci/start_job.sh | 2 - docker/Dockerfile | 35 +++++++++++++--- docker/graphite/carbon.conf | 80 ++++++++++++++++++++++++++++++++++++ docker/graphite/initial_data.json | 20 +++++++++ docker/graphite/local_settings.py | 42 +++++++++++++++++++ docker/graphite/nginx.conf | 69 +++++++++++++++++++++++++++++++ docker/graphite/storage-schemas.conf | 7 ++++ docker/storperf.pp | 19 --------- docker/supervisord.conf | 17 +++++--- 11 files changed, 272 insertions(+), 39 deletions(-) create mode 100644 docker/graphite/carbon.conf create mode 100644 docker/graphite/initial_data.json create mode 100644 docker/graphite/local_settings.py create mode 100644 docker/graphite/nginx.conf create mode 100644 docker/graphite/storage-schemas.conf delete mode 100644 docker/storperf.pp diff --git a/build-dev-docker.sh b/build-dev-docker.sh index 131d8ef..3a565f3 100755 --- a/build-dev-docker.sh +++ b/build-dev-docker.sh @@ -8,12 +8,17 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +cd `dirname $0` + echo "Creating a docker image from the current working directory..." -sed "s|RUN git clone https://gerrit.opnfv.org/gerrit/storperf.*$|COPY . 
\${repos_dir}/storperf|" docker/Dockerfile > Dockerfile -sed -i "s|COPY storperf.pp|COPY docker/storperf.pp|" Dockerfile -sed -i "s|COPY supervisord.conf|COPY docker/supervisord.conf|" Dockerfile +cp docker/Dockerfile Dockerfile.dev +sed -i "s|COPY |COPY docker\/|" Dockerfile.dev +sed -i "s|ADD |ADD docker\/|" Dockerfile.dev +sed -i "s|RUN git clone.*https://gerrit.opnfv.org/gerrit/storperf.*$|COPY . \${repos_dir}/storperf|" Dockerfile.dev + +diff docker/Dockerfile Dockerfile.dev -docker build -t opnfv/storperf:dev . +docker build -t opnfv/storperf:dev -f Dockerfile.dev . -rm -f Dockerfile +rm -f Dockerfile.dev diff --git a/ci/daily.sh b/ci/daily.sh index e12964d..b69506a 100755 --- a/ci/daily.sh +++ b/ci/daily.sh @@ -122,11 +122,12 @@ done echo "Deleting stack for cleanup" curl -s -X DELETE --header 'Accept: application/json' 'http://127.0.0.1:5000/api/v1.0/configurations' -sudo chmod 777 -R $WORKSPACE/ci/job/carbon - curl -s -X GET "http://127.0.0.1:5000/api/v1.0/jobs?id=$JOB&type=metadata" \ -o $WORKSPACE/ci/job/report.json +docker rm -f storperf +sudo rm -rf $WORKSPACE/ci/job/carbon + echo ========================================================================== echo Final report echo ========================================================================== diff --git a/ci/start_job.sh b/ci/start_job.sh index 31bfddb..85652d0 100755 --- a/ci/start_job.sh +++ b/ci/start_job.sh @@ -11,8 +11,6 @@ cat << EOF > body.json { "block_sizes": "${BLOCK_SIZE}", - "nowarm": "string", - "nossd": "string", "deadline": 30, "queue_depths": "${QUEUE_DEPTH}", "workload": "${WORKLOAD}", diff --git a/docker/Dockerfile b/docker/Dockerfile index ce1f853..96ed58f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -26,6 +26,7 @@ LABEL version="0.1" description="OPNFV Storperf Docker container" ARG BRANCH=master ENV repos_dir /home/opnfv/repos +ENV DEBIAN_FRONTEND noninteractive WORKDIR /home/opnfv @@ -65,6 +66,35 @@ RUN mkdir -p ${repos_dir} RUN mkdir -p /root/.ssh RUN chmod 
700 /root/.ssh +# Graphite installation +RUN echo 'deb http://us.archive.ubuntu.com/ubuntu/ trusty universe' >> /etc/apt/sources.list +RUN apt-get -y update + +# Install required packages +RUN apt-get -y install python-ldap python-cairo python-django python-twisted python-django-tagging python-simplejson python-memcache python-pysqlite2 python-support python-tz python-pip gunicorn supervisor nginx-light +RUN pip install whisper==0.9.15 +RUN pip install --install-option="--prefix=/var/lib/graphite" --install-option="--install-lib=/var/lib/graphite/lib" carbon==0.9.15 +RUN pip install --install-option="--prefix=/var/lib/graphite" --install-option="--install-lib=/var/lib/graphite/webapp" graphite-web==0.9.15 + +# Add system service config +ADD graphite/nginx.conf /etc/nginx/nginx.conf +ADD graphite/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +# Add graphite config +ADD graphite/initial_data.json /var/lib/graphite/webapp/graphite/initial_data.json +ADD graphite/local_settings.py /var/lib/graphite/webapp/graphite/local_settings.py +ADD graphite/carbon.conf /var/lib/graphite/conf/carbon.conf +ADD graphite/storage-schemas.conf /var/lib/graphite/conf/storage-schemas.conf +RUN mkdir -p /opt/graphite/storage +RUN ln -s /var/lib/graphite/storage/whisper /opt/graphite/storage/whisper +RUN touch /var/lib/graphite/storage/graphite.db /var/lib/graphite/storage/index +RUN chown -R www-data /var/lib/graphite/storage +RUN chmod 0775 /var/lib/graphite/storage /var/lib/graphite/storage/whisper +RUN chmod 0664 /var/lib/graphite/storage/graphite.db +RUN cd /var/lib/graphite/webapp/graphite && python manage.py syncdb --noinput + +# Git configuration + RUN git config --global http.sslVerify false RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${repos_dir}/storperf RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${repos_dir}/releng @@ -79,17 +109,12 @@ RUN git clone http://git.kernel.dk/fio.git ${repos_dir}/fio RUN cd 
${repos_dir}/fio && git checkout tags/fio-2.2.10 RUN cd ${repos_dir}/fio && make -j 6 install -RUN puppet module install gdsoperations-graphite RUN chmod 600 ${repos_dir}/storperf/storperf/resources/ssh/storperf_rsa RUN pip install --upgrade setuptools==33.1.1 RUN pip install -r ${repos_dir}/storperf/docker/requirements.pip -COPY storperf.pp /etc/puppet/manifests/storperf.pp -RUN puppet apply /etc/puppet/manifests/storperf.pp - -RUN ln -s /opt/graphite/lib/whisper* /usr/lib/python2.7/ # Open access to SSH if desired EXPOSE 22 diff --git a/docker/graphite/carbon.conf b/docker/graphite/carbon.conf new file mode 100644 index 0000000..13088dd --- /dev/null +++ b/docker/graphite/carbon.conf @@ -0,0 +1,80 @@ +[cache] +LOCAL_DATA_DIR = /var/lib/graphite/storage/whisper/ + +# Specify the user to drop privileges to +# If this is blank carbon runs as the user that invokes it +# This user must have write access to the local data directory +USER = + +# Limit the size of the cache to avoid swapping or becoming CPU bound. +# Sorts and serving cache queries gets more expensive as the cache grows. +# Use the value "inf" (infinity) for an unlimited cache size. +MAX_CACHE_SIZE = inf + +# Limits the number of whisper update_many() calls per second, which effectively +# means the number of write requests sent to the disk. This is intended to +# prevent over-utilizing the disk and thus starving the rest of the system. +# When the rate of required updates exceeds this, then carbon's caching will +# take effect and increase the overall throughput accordingly. +MAX_UPDATES_PER_SECOND = 1000 + +# Softly limits the number of whisper files that get created each minute. +# Setting this value low (like at 50) is a good way to ensure your graphite +# system will not be adversely impacted when a bunch of new metrics are +# sent to it. The trade off is that it will take much longer for those metrics' +# database files to all get created and thus longer until the data becomes usable. 
+# Setting this value high (like "inf" for infinity) will cause graphite to create +# the files quickly but at the risk of slowing I/O down considerably for a while. +MAX_CREATES_PER_MINUTE = inf + +LINE_RECEIVER_INTERFACE = 0.0.0.0 +LINE_RECEIVER_PORT = 2003 + +ENABLE_UDP_LISTENER = True +UDP_RECEIVER_INTERFACE = 0.0.0.0 +UDP_RECEIVER_PORT = 2003 + +PICKLE_RECEIVER_INTERFACE = 0.0.0.0 +PICKLE_RECEIVER_PORT = 2004 + +CACHE_QUERY_INTERFACE = 0.0.0.0 +CACHE_QUERY_PORT = 7002 + +LOG_UPDATES = False + +# Enable AMQP if you want to receve metrics using an amqp broker +# ENABLE_AMQP = False + +# Verbose means a line will be logged for every metric received +# useful for testing +# AMQP_VERBOSE = False + +# AMQP_HOST = localhost +# AMQP_PORT = 5672 +# AMQP_VHOST = / +# AMQP_USER = guest +# AMQP_PASSWORD = guest +# AMQP_EXCHANGE = graphite + +# Patterns for all of the metrics this machine will store. Read more at +# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings +# +# Example: store all sales, linux servers, and utilization metrics +# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization +# +# Example: store everything +# BIND_PATTERNS = # + +# NOTE: you cannot run both a cache and a relay on the same server +# with the default configuration, you have to specify a distinict +# interfaces and ports for the listeners. 
+ +[relay] +LINE_RECEIVER_INTERFACE = 0.0.0.0 +LINE_RECEIVER_PORT = 2003 + +PICKLE_RECEIVER_INTERFACE = 0.0.0.0 +PICKLE_RECEIVER_PORT = 2004 + +CACHE_SERVERS = server1, server2, server3 +MAX_QUEUE_SIZE = 10000 diff --git a/docker/graphite/initial_data.json b/docker/graphite/initial_data.json new file mode 100644 index 0000000..b3ac9b1 --- /dev/null +++ b/docker/graphite/initial_data.json @@ -0,0 +1,20 @@ +[ + { + "pk": 1, + "model": "auth.user", + "fields": { + "username": "admin", + "first_name": "", + "last_name": "", + "is_active": true, + "is_superuser": true, + "is_staff": true, + "last_login": "2011-09-20 17:02:14", + "groups": [], + "user_permissions": [], + "password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236", + "email": "root@example.com", + "date_joined": "2011-09-20 17:02:14" + } + } +] diff --git a/docker/graphite/local_settings.py b/docker/graphite/local_settings.py new file mode 100644 index 0000000..177d674 --- /dev/null +++ b/docker/graphite/local_settings.py @@ -0,0 +1,42 @@ +# Edit this file to override the default graphite settings, do not edit settings.py + +# Turn on debugging and restart apache if you ever see an "Internal Server Error" page +#DEBUG = True + +# Set your local timezone (django will try to figure this out automatically) +TIME_ZONE = 'UTC' + +# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely +#MEMCACHE_HOSTS = ['127.0.0.1:11211'] + +# Sometimes you need to do a lot of rendering work but cannot share your storage mount +#REMOTE_RENDERING = True +#RENDERING_HOSTS = ['fastserver01','fastserver02'] +#LOG_RENDERING_PERFORMANCE = True +#LOG_CACHE_PERFORMANCE = True + +# If you've got more than one backend server they should all be listed here +#CLUSTER_SERVERS = [] + +# Override this if you need to provide documentation specific to your graphite deployment +#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite" + +# Enable email-related features +#SMTP_SERVER = "mail.mycompany.com" + +# LDAP / 
ActiveDirectory authentication setup +#USE_LDAP_AUTH = True +#LDAP_SERVER = "ldap.mycompany.com" +#LDAP_PORT = 389 +#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com" +#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com" +#LDAP_BASE_PASS = "readonly_account_password" +#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)" + +# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!) +#DATABASE_ENGINE = 'mysql' # or 'postgres' +#DATABASE_NAME = 'graphite' +#DATABASE_USER = 'graphite' +#DATABASE_PASSWORD = 'graphite-is-awesome' +#DATABASE_HOST = 'mysql.mycompany.com' +#DATABASE_PORT = '3306' diff --git a/docker/graphite/nginx.conf b/docker/graphite/nginx.conf new file mode 100644 index 0000000..8a11e94 --- /dev/null +++ b/docker/graphite/nginx.conf @@ -0,0 +1,69 @@ +daemon off; +user www-data; +worker_processes 1; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + server_tokens off; + + server_names_hash_bucket_size 32; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + gzip on; + gzip_disable "msie6"; + + server { + listen 8000 default_server; + server_name _; + + open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m; + + location / { + proxy_pass http://127.0.0.1:8080; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Forwarded-Host $http_host; + proxy_set_header Host $http_host; + + client_max_body_size 10m; + client_body_buffer_size 128k; + + proxy_connect_timeout 90; + proxy_send_timeout 90; + proxy_read_timeout 90; + + proxy_buffer_size 4k; + proxy_buffers 4 32k; + 
proxy_busy_buffers_size 64k; + proxy_temp_file_write_size 64k; + } + + add_header Access-Control-Allow-Origin "*"; + add_header Access-Control-Allow-Methods "GET, OPTIONS"; + add_header Access-Control-Allow-Headers "origin, authorization, accept"; + + location /content { + alias /var/lib/graphite/webapp/content; + } + + location /media { + alias /usr/share/pyshared/django/contrib/admin/media; + } + } +} diff --git a/docker/graphite/storage-schemas.conf b/docker/graphite/storage-schemas.conf new file mode 100644 index 0000000..855a9e4 --- /dev/null +++ b/docker/graphite/storage-schemas.conf @@ -0,0 +1,7 @@ +[carbon] +pattern = ^carbon\..* +retentions = 1m:31d,10m:1y,1h:5y + +[default] +pattern = .* +retentions = 10s:8d,1m:31d,10m:1y,1h:5y diff --git a/docker/storperf.pp b/docker/storperf.pp deleted file mode 100644 index 7de1024..0000000 --- a/docker/storperf.pp +++ /dev/null @@ -1,19 +0,0 @@ -############################################################################## -# Copyright (c) 2015 EMC and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -class { 'python': - pip => true, - dev => true, - virtualenv => true, -} - -class { 'graphite': - port => 8080, - bind_address => '0.0.0.0', -} diff --git a/docker/supervisord.conf b/docker/supervisord.conf index 0a78928..834e23c 100644 --- a/docker/supervisord.conf +++ b/docker/supervisord.conf @@ -1,19 +1,25 @@ [supervisord] nodaemon = true -environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf' +environment = GRAPHITE_STORAGE_DIR='/var/lib/graphite/storage',GRAPHITE_CONF_DIR='/var/lib/graphite/conf' + +[program:nginx] +command = /usr/sbin/nginx +stdout_logfile = /var/log/supervisor/%(program_name)s.log +stderr_logfile = /var/log/supervisor/%(program_name)s.log +autorestart = true [program:carbon-cache] user = www-data -command = /opt/graphite/bin/carbon-cache.py --debug start +command = /var/lib/graphite/bin/carbon-cache.py --debug start stdout_logfile = /var/log/supervisor/%(program_name)s.log stderr_logfile = /var/log/supervisor/%(program_name)s.log autorestart = true [program:graphite-webapp] user = www-data -directory = /opt/graphite/webapp -environment = PYTHONPATH='/opt/graphite/webapp' -command = /opt/graphite/bin/gunicorn_django -b0.0.0.0:8000 -w2 graphite/settings.py +directory = /var/lib/graphite/webapp +environment = PYTHONPATH='/var/lib/graphite/webapp' +command = /usr/bin/gunicorn_django -b127.0.0.1:8080 -w2 graphite/settings.py stdout_logfile = /var/log/supervisor/%(program_name)s.log stderr_logfile = /var/log/supervisor/%(program_name)s.log autorestart = true @@ -32,4 +38,3 @@ command = /usr/bin/python /home/opnfv/repos/storperf/rest_server.py stdout_logfile = /var/log/supervisor/%(program_name)s.log 
stderr_logfile = /var/log/supervisor/%(program_name)s.log autorestart = true - -- cgit 1.2.3-korg From 36a0eb1588467230967613bc871d99faeb3ee166 Mon Sep 17 00:00:00 2001 From: mbeierl Date: Thu, 30 Mar 2017 10:05:27 -0400 Subject: Fixed typo Change-Id: I03b8df65a13a0c2ddeee6eb40e6071c8843ad5ca JIRA: STORPERF-110 Signed-off-by: mbeierl (cherry picked from commit 6eb2c8d8a8971ee3ae0693feebe509d4ec30a7a8) --- docker/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 96ed58f..69a6e3b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -78,7 +78,6 @@ RUN pip install --install-option="--prefix=/var/lib/graphite" --install-option=" # Add system service config ADD graphite/nginx.conf /etc/nginx/nginx.conf -ADD graphite/supervisord.conf /etc/supervisor/conf.d/supervisord.conf # Add graphite config ADD graphite/initial_data.json /var/lib/graphite/webapp/graphite/initial_data.json -- cgit 1.2.3-korg From 77348164208df28ca8545e552cc9c06eefd05256 Mon Sep 17 00:00:00 2001 From: mbeierl Date: Fri, 31 Mar 2017 03:01:17 -0400 Subject: Fixed paths to match standard Change-Id: I733d34eb8bbdf0201b06a21d0ceeda50a06dd49f Signed-off-by: mbeierl --- docs/index.rst | 6 +- docs/release/installation/index.rst | 15 -- docs/release/installation/installation.rst | 129 --------------- docs/release/userguide/index.rst | 16 -- docs/release/userguide/introduction.rst | 101 ------------ docs/release/userguide/test-usage.rst | 244 ----------------------------- docs/testing/user/index.rst | 17 ++ docs/testing/user/installation.rst | 129 +++++++++++++++ docs/testing/user/introduction.rst | 101 ++++++++++++ docs/testing/user/test-usage.rst | 244 +++++++++++++++++++++++++++++ 10 files changed, 494 insertions(+), 508 deletions(-) delete mode 100644 docs/release/installation/index.rst delete mode 100755 docs/release/installation/installation.rst delete mode 100644 docs/release/userguide/index.rst delete mode 100644 
docs/release/userguide/introduction.rst delete mode 100644 docs/release/userguide/test-usage.rst create mode 100644 docs/testing/user/index.rst create mode 100755 docs/testing/user/installation.rst create mode 100644 docs/testing/user/introduction.rst create mode 100644 docs/testing/user/test-usage.rst diff --git a/docs/index.rst b/docs/index.rst index 60e9c21..93bf62e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -23,7 +23,7 @@ StorPerf Installation Guide :maxdepth: 5 :numbered: 5 - ./release/installation/installation.rst + ./user/installation.rst ****************************** StorPerf User Guide @@ -34,8 +34,8 @@ StorPerf User Guide :maxdepth: 5 :numbered: 5 - ./release/userguide/introduction.rst - ./release/userguide/test-usage.rst + ./user/introduction.rst + ./user/test-usage.rst Indices diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst deleted file mode 100644 index 10296dd..0000000 --- a/docs/release/installation/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. _storperf-installation: - -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV, Dell EMC and others. - -====================== -StorPerf Installation Guide -====================== - -.. toctree:: - :maxdepth: 2 - - installation.rst diff --git a/docs/release/installation/installation.rst b/docs/release/installation/installation.rst deleted file mode 100755 index ae3b3f8..0000000 --- a/docs/release/installation/installation.rst +++ /dev/null @@ -1,129 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV, Dell EMC and others. - -=========================== -StorPerf Installation Guide -=========================== - -OpenStack Prerequisites -=========================== -If you do not have an Ubuntu 16.04 image in Glance, you will need to add one. 
-There are scripts in storperf/ci directory to assist, or you can use the follow -code snippets: - -.. code-block:: bash - - # Put an Ubuntu Image in glance - wget -q https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img - openstack image create "Ubuntu 16.04 x86_64" --disk-format qcow2 --public \ - --container-format bare --file ubuntu-16.04-server-cloudimg-amd64-disk1.img - - # Create StorPerf flavor - openstack flavor create storperf \ - --id auto \ - --ram 8192 \ - --disk 4 \ - --vcpus 2 - - -Planning -=========================== - -StorPerf is delivered as a `Docker container -`__. There are two possible -methods for installation in your environment: - 1. Run container on Jump Host - 2. Run container in a VM - - -Running StorPerf on Jump Host -============================= - -Requirements: - - * Docker must be installed - * Jump Host must have access to the OpenStack Controller API - * Jump Host must have internet connectivity for downloading docker image - * Enough floating IPs must be available to match your agent count - -Running StorPerf in a VM -======================== - -Requirements: - - * VM has docker installed - * VM has OpenStack Controller credentials and can communicate with the Controller API - * VM has internet connectivity for downloading the docker image - * Enough floating IPs must be available to match your agent count - -VM Creation -~~~~~~~~~~~ - -The following procedure will create the VM in your environment - -.. code-block:: console - - # Create the StorPerf VM itself. Here we use the network ID generated by OPNFV FUEL. - ADMIN_NET_ID=`neutron net-list | grep 'admin_internal_net ' | awk '{print $2}'` - - nova boot --nic net-id=$ADMIN_NET_ID --flavor m1.small --key-name=StorPerf --image 'Ubuntu 14.04' 'StorPerf Master' - -At this point, you may associate a floating IP with the StorPerf master VM. 
- -VM Docker Installation -~~~~~~~~~~~~~~~~~~~~~~ - -The following procedure will install Docker on Ubuntu 14.04. - -.. code-block:: console - - sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D - cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list - deb https://apt.dockerproject.org/repo ubuntu-trusty main - EOF - - sudo apt-get update - sudo apt-get install -y docker-engine - sudo usermod -aG docker ubuntu - -Pulling StorPerf Container -========================== - -Danube -~~~~~~ - -The tag for the latest stable Danube will be: - -.. code-block:: bash - - docker pull opnfv/storperf:danube.0.1 - -Colorado -~~~~~~~~ - -The tag for the latest stable Colorado release is: - -.. code-block:: bash - - docker pull opnfv/storperf:colorado.0.1 - -Brahmaputra -~~~~~~~~~~~ - -The tag for the latest stable Brahmaputra release is: - -.. code-block:: bash - - docker pull opnfv/storperf:brahmaputra.1.2 - -Development -~~~~~~~~~~~ - -The tag for the latest development version is: - -.. code-block:: bash - - docker pull opnfv/storperf:master - - diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst deleted file mode 100644 index e2f076a..0000000 --- a/docs/release/userguide/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _storperf-userguide: - -.. This work is licensed under a Creative Commons Attribution 4.0 International -.. License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV, Dell EMC and others. - -====================== -StorPerf User Guide -====================== - -.. toctree:: - :maxdepth: 2 - - introduction.rst - test-usage.rst diff --git a/docs/release/userguide/introduction.rst b/docs/release/userguide/introduction.rst deleted file mode 100644 index a40750f..0000000 --- a/docs/release/userguide/introduction.rst +++ /dev/null @@ -1,101 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. 
http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV, Dell EMC and others. - -================================== -StorPerf Container Execution Guide -================================== - -Planning -======== - -There are some ports that the container can expose: - - * 22 for SSHD. Username and password are root/storperf. This is used for CLI access only - * 5000 for StorPerf ReST API. - * 8000 for StorPerf's Graphite Web Server - -OpenStack Credentials -~~~~~~~~~~~~~~~~~~~~~ - -You must have your OpenStack Controller environment variables defined and passed to -the StorPerf container. The easiest way to do this is to put the rc file contents -into a clean file the looks similar to this for V2 authentication: - -.. code-block:: console - - OS_AUTH_URL=http://10.13.182.243:5000/v2.0 - OS_TENANT_ID=e8e64985506a4a508957f931d1800aa9 - OS_TENANT_NAME=admin - OS_PROJECT_NAME=admin - OS_USERNAME=admin - OS_PASSWORD=admin - OS_REGION_NAME=RegionOne - -For V3 authentication, use the following: - -.. code-block:: console - - OS_AUTH_URL=http://10.13.182.243:5000/v3 - OS_PROJECT_ID=32ae78a844bc4f108b359dd7320463e5 - OS_PROJECT_NAME=admin - OS_USER_DOMAIN_NAME=Default - OS_USERNAME=admin - OS_PASSWORD=admin - OS_REGION_NAME=RegionOne - OS_INTERFACE=public - OS_IDENTITY_API_VERSION=3 - -Additionally, if you want your results published to the common OPNFV Test Results - DB, add the following: - -.. code-block:: console - - TEST_DB_URL=http://testresults.opnfv.org/testapi - -Running StorPerf Container -========================== - -You might want to have the local disk used for storage as the default size of the docker -container is only 10g. This is done with the -v option, mounting under -/opt/graphite/storage/whisper - -.. code-block:: console - - mkdir -p ~/carbon - sudo chown 33:33 ~/carbon - -The recommended method of running StorPerf is to expose only the ReST and Graphite -ports. 
The command line below shows how to run the container with local disk for -the carbon database. - -.. code-block:: console - - docker run -t --env-file admin-rc -p 5000:5000 -p 8000:8000 -v ~/carbon:/opt/graphite/storage/whisper --name storperf opnfv/storperf - - -Docker Exec -~~~~~~~~~~~ - -Instead of exposing port 5022 externally, you can use the exec method in docker. This -provides a slightly more secure method of running StorPerf container without having to -expose port 22. - -If needed, the container can be entered with docker exec. This is not normally required. - -.. code-block:: console - - docker exec -it storperf bash - -Container with SSH -~~~~~~~~~~~~~~~~~~ - -Running the StorPerf Container with all ports open and a local disk for the result -storage. This is not recommended as the SSH port is open. - -.. code-block:: console - - docker run -t --env-file admin-rc -p 5022:22 -p 5000:5000 -p 8000:8000 -v ~/carbon:/opt/graphite/storage/whisper --name storperf opnfv/storperf - -This will then permit ssh to localhost port 5022 for CLI access. - diff --git a/docs/release/userguide/test-usage.rst b/docs/release/userguide/test-usage.rst deleted file mode 100644 index 2beae69..0000000 --- a/docs/release/userguide/test-usage.rst +++ /dev/null @@ -1,244 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV, Dell EMC and others. - -============================= -StorPerf Test Execution Guide -============================= - -Prerequisites -============= - -This guide requires StorPerf to be running and have its ReST API accessible. If -the ReST API is not running on port 5000, adjust the commands provided here as -needed. - -Interacting With StorPerf -========================= - -Once the StorPerf container has been started and the ReST API exposed, you can -interact directly with it using the ReST API. 
StorPerf comes with a Swagger -interface that is accessible through the exposed port at: - -.. code-block:: console - - http://StorPerf:5000/swagger/index.html - -The typical test execution follows this pattern: - -#. Configure the environment -#. Initialize the cinder volumes -#. Execute one or more performance runs -#. Delete the environment - -Configure The Environment -========================= - -The following pieces of information are required to prepare the environment: - -- The number of VMs/Cinder volumes to create -- The Glance image that holds the VM operating system to use. StorPerf has - only been tested with Ubuntu 16.04 -- The name of the public network that agents will use -- The size, in gigabytes, of the Cinder volumes to create - -The ReST API is a POST to http://StorPerf:5000/api/v1.0/configurations and -takes a JSON payload as follows. - -.. code-block:: json - - { - "agent_count": int, - "agent_image": string, - "public_network": string, - "volume_size": int - } - -This call will block until the stack is created, at which point it will return -the OpenStack heat stack id. - -Initialize the Cinder Volumes -============================= -Before executing a test run for the purpose of measuring performance, it is -necessary to fill the Cinder volume with random data. Failure to execute this -step can result in meaningless numbers, especially for read performance. Most -Cinder drivers are smart enough to know what blocks contain data, and which do -not. Uninitialized blocks return "0" immediately without actually reading from -the volume. - -Initiating the data fill looks the same as a regular performance test, but uses -the special workload called "_warm_up". StorPerf will never push _warm_up -data to the OPNFV Test Results DB, nor will it terminate the run on steady state. -It is guaranteed to run to completion, which fills 100% of the volume with -random data. 
- -The ReST API is a POST to http://StorPerf:5000/api/v1.0/jobs and -takes a JSON payload as follows. - -.. code-block:: json - - { - "workload": "_warm_up" - } - -This will return a job ID as follows. - -.. code-block:: json - - { - "job_id": "edafa97e-457e-4d3d-9db4-1d6c0fc03f98" - } - -This job ID can be used to query the state to determine when it has completed. -See the section on querying jobs for more information. - -Execute a Performance Run -========================= -Performance runs can execute either a single workload, or iterate over a matrix -of workload types, block sizes and queue depths. - -Workload Types -~~~~~~~~~~~~~~ -rr - Read, Random. 100% read of random blocks -rs - Read, Sequential. 100% read of sequential blocks of data -rw - Read / Write Mix, Random. 70% random read, 30% random write -wr - Write, Random. 100% write of random blocks -ws - Write, Sequential. 100% write of sequential blocks. - -Block Sizes -~~~~~~~~~~~ -A comma delimited list of the different block sizes to use when reading and -writing data. Note: Some Cinder drivers (such as Ceph) cannot support block -sizes larger than 16k (16384). - -Queue Depths -~~~~~~~~~~~~ -A comma delimited list of the different queue depths to use when reading and -writing data. The queue depth parameter causes FIO to keep this many I/O -requests outstanding at one time. It is used to simulate traffic patterns -on the system. For example, a queue depth of 4 would simulate 4 processes -constantly creating I/O requests. - -Deadline -~~~~~~~~ -The deadline is the maximum amount of time in minutes for a workload to run. If -steady state has not been reached by the deadline, the workload will terminate -and that particular run will be marked as not having reached steady state. Any -remaining workloads will continue to execute in order. - -.. 
code-block:: json - - { - "block_sizes": "2048,16384, - "deadline": 20, - "queue_depths": "2,4", - "workload": "wr,rr,rw", - } - -Metadata -~~~~~~~~ -A job can have metadata associated with it for tagging. The following metadata -is required in order to push results to the OPNFV Test Results DB: - -.. code-block:: json - - "metadata": { - "disk_type": "HDD or SDD", - "pod_name": "OPNFV Pod Name", - "scenario_name": string, - "storage_node_count": int, - "version": string, - "build_tag": string, - "test_case": "snia_steady_state" - } - - - -Query Jobs Information -====================== - -By issuing a GET to the job API http://StorPerf:5000/api/v1.0/jobs?job_id=, -you can fetch information about the job as follows: - -- &type=status: to report on the status of the job. -- &type=metrics: to report on the collected metrics. -- &type=metadata: to report back any metadata sent with the job ReST API - -Status -~~~~~~ -The Status field can be: -- Running to indicate the job is still in progress, or -- Completed to indicate the job is done. This could be either normal completion - or manually terminated via HTTP DELETE call. - -Workloads can have a value of: -- Pending to indicate the workload has not yet started, -- Running to indicate this is the active workload, or -- Completed to indicate this workload has completed. - -This is an example of a type=status call. - -.. 
code-block:: json - - { - "Status": "Running", - "TestResultURL": null, - "Workloads": { - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.1.block-size.16384": "Pending", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.1.block-size.4096": "Pending", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.1.block-size.512": "Pending", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.4.block-size.16384": "Running", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.4.block-size.4096": "Pending", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.4.block-size.512": "Pending", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.8.block-size.16384": "Completed", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.8.block-size.4096": "Pending", - "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.8.block-size.512": "Pending" - } - } - -Metrics -~~~~~~~ -Metrics can be queried at any time during or after the completion of a run. -Note that the metrics show up only after the first interval has passed, and -are subject to change until the job completes. - -This is a sample of a type=metrics call. - -.. code-block:: json - - { - "rw.queue-depth.1.block-size.512.read.bw": 52.8, - "rw.queue-depth.1.block-size.512.read.iops": 106.76199999999999, - "rw.queue-depth.1.block-size.512.read.lat.mean": 93.176, - "rw.queue-depth.1.block-size.512.write.bw": 22.5, - "rw.queue-depth.1.block-size.512.write.iops": 45.760000000000005, - "rw.queue-depth.1.block-size.512.write.lat.mean": 21764.184999999998 - } - -Abort a Job -=========== -Issuing an HTTP DELETE to the job api http://StorPerf:5000/api/v1.0/jobs will -force the termination of the whole job, regardless of how many workloads -remain to be executed. - -.. 
code-block:: bash - - curl -X DELETE --header 'Accept: application/json' http://StorPerf:5000/api/v1.0/jobs - -Delete the Environment -====================== -After you are done testing, you can have StorPerf delete the Heat stack by -issuing an HTTP DELETE to the configurations API. - -.. code-block:: bash - - curl -X DELETE --header 'Accept: application/json' http://StorPerf:5000/api/v1.0/configurations - -You may also want to delete an environment, and then create a new one with a -different number of VMs/Cinder volumes to test the impact of the number of VMs -in your environment. diff --git a/docs/testing/user/index.rst b/docs/testing/user/index.rst new file mode 100644 index 0000000..e9054dc --- /dev/null +++ b/docs/testing/user/index.rst @@ -0,0 +1,17 @@ +.. _storperf-userguide: + +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Dell EMC and others. + +====================== +StorPerf User Guide +====================== + +.. toctree:: + :maxdepth: 2 + + introduction.rst + installation.rst + test-usage.rst diff --git a/docs/testing/user/installation.rst b/docs/testing/user/installation.rst new file mode 100755 index 0000000..0effb2f --- /dev/null +++ b/docs/testing/user/installation.rst @@ -0,0 +1,129 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Dell EMC and others. + +=========================== +StorPerf Installation Guide +=========================== + +OpenStack Prerequisites +=========================== +If you do not have an Ubuntu 16.04 image in Glance, you will need to add one. +There are scripts in the storperf/ci directory to assist, or you can use the following +code snippets: + +.. 
code-block:: bash + + # Put an Ubuntu Image in glance + wget -q https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img + openstack image create "Ubuntu 16.04 x86_64" --disk-format qcow2 --public \ + --container-format bare --file ubuntu-16.04-server-cloudimg-amd64-disk1.img + + # Create StorPerf flavor + openstack flavor create storperf \ + --id auto \ + --ram 8192 \ + --disk 4 \ + --vcpus 2 + + +Planning +=========================== + +StorPerf is delivered as a `Docker container +`__. There are two possible +methods for installation in your environment: + 1. Run container on Jump Host + 2. Run container in a VM + + +Running StorPerf on Jump Host +============================= + +Requirements: + + * Docker must be installed + * Jump Host must have access to the OpenStack Controller API + * Jump Host must have internet connectivity for downloading docker image + * Enough floating IPs must be available to match your agent count + +Running StorPerf in a VM +======================== + +Requirements: + + * VM has docker installed + * VM has OpenStack Controller credentials and can communicate with the Controller API + * VM has internet connectivity for downloading the docker image + * Enough floating IPs must be available to match your agent count + +VM Creation +~~~~~~~~~~~ + +The following procedure will create the VM in your environment + +.. code-block:: console + + # Create the StorPerf VM itself. Here we use the network ID generated by OPNFV FUEL. + ADMIN_NET_ID=`neutron net-list | grep 'admin_internal_net ' | awk '{print $2}'` + + nova boot --nic net-id=$ADMIN_NET_ID --flavor m1.small --key-name=StorPerf --image 'Ubuntu 14.04' 'StorPerf Master' + +At this point, you may associate a floating IP with the StorPerf master VM. + +VM Docker Installation +~~~~~~~~~~~~~~~~~~~~~~ + +The following procedure will install Docker on Ubuntu 14.04. + +.. 
code-block:: console + + sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D + cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list + deb https://apt.dockerproject.org/repo ubuntu-trusty main + EOF + + sudo apt-get update + sudo apt-get install -y docker-engine + sudo usermod -aG docker ubuntu + +Pulling StorPerf Container +========================== + +Danube +~~~~~~ + +The tag for the latest stable Danube will be: + +.. code-block:: bash + + docker pull opnfv/storperf:danube.1.0 + +Colorado +~~~~~~~~ + +The tag for the latest stable Colorado release is: + +.. code-block:: bash + + docker pull opnfv/storperf:colorado.0.1 + +Brahmaputra +~~~~~~~~~~~ + +The tag for the latest stable Brahmaputra release is: + +.. code-block:: bash + + docker pull opnfv/storperf:brahmaputra.1.2 + +Development +~~~~~~~~~~~ + +The tag for the latest development version is: + +.. code-block:: bash + + docker pull opnfv/storperf:master + + diff --git a/docs/testing/user/introduction.rst b/docs/testing/user/introduction.rst new file mode 100644 index 0000000..a40750f --- /dev/null +++ b/docs/testing/user/introduction.rst @@ -0,0 +1,101 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Dell EMC and others. + +================================== +StorPerf Container Execution Guide +================================== + +Planning +======== + +There are some ports that the container can expose: + + * 22 for SSHD. Username and password are root/storperf. This is used for CLI access only + * 5000 for StorPerf ReST API. + * 8000 for StorPerf's Graphite Web Server + +OpenStack Credentials +~~~~~~~~~~~~~~~~~~~~~ + +You must have your OpenStack Controller environment variables defined and passed to +the StorPerf container. 
The easiest way to do this is to put the rc file contents +into a clean file that looks similar to this for V2 authentication: + +.. code-block:: console + + OS_AUTH_URL=http://10.13.182.243:5000/v2.0 + OS_TENANT_ID=e8e64985506a4a508957f931d1800aa9 + OS_TENANT_NAME=admin + OS_PROJECT_NAME=admin + OS_USERNAME=admin + OS_PASSWORD=admin + OS_REGION_NAME=RegionOne + +For V3 authentication, use the following: + +.. code-block:: console + + OS_AUTH_URL=http://10.13.182.243:5000/v3 + OS_PROJECT_ID=32ae78a844bc4f108b359dd7320463e5 + OS_PROJECT_NAME=admin + OS_USER_DOMAIN_NAME=Default + OS_USERNAME=admin + OS_PASSWORD=admin + OS_REGION_NAME=RegionOne + OS_INTERFACE=public + OS_IDENTITY_API_VERSION=3 + +Additionally, if you want your results published to the common OPNFV Test Results + DB, add the following: + +.. code-block:: console + + TEST_DB_URL=http://testresults.opnfv.org/testapi + +Running StorPerf Container +========================== + +You might want to have the local disk used for storage as the default size of the docker +container is only 10g. This is done with the -v option, mounting under +/opt/graphite/storage/whisper + +.. code-block:: console + + mkdir -p ~/carbon + sudo chown 33:33 ~/carbon + +The recommended method of running StorPerf is to expose only the ReST and Graphite +ports. The command line below shows how to run the container with local disk for +the carbon database. + +.. code-block:: console + + docker run -t --env-file admin-rc -p 5000:5000 -p 8000:8000 -v ~/carbon:/opt/graphite/storage/whisper --name storperf opnfv/storperf + + +Docker Exec +~~~~~~~~~~~ + +Instead of exposing port 5022 externally, you can use the exec method in docker. This +provides a slightly more secure method of running StorPerf container without having to +expose port 22. + +If needed, the container can be entered with docker exec. This is not normally required. + +.. 
code-block:: console + + docker exec -it storperf bash + +Container with SSH +~~~~~~~~~~~~~~~~~~ + +Running the StorPerf Container with all ports open and a local disk for the result +storage. This is not recommended as the SSH port is open. + +.. code-block:: console + + docker run -t --env-file admin-rc -p 5022:22 -p 5000:5000 -p 8000:8000 -v ~/carbon:/opt/graphite/storage/whisper --name storperf opnfv/storperf + +This will then permit ssh to localhost port 5022 for CLI access. + diff --git a/docs/testing/user/test-usage.rst b/docs/testing/user/test-usage.rst new file mode 100644 index 0000000..2beae69 --- /dev/null +++ b/docs/testing/user/test-usage.rst @@ -0,0 +1,244 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Dell EMC and others. + +============================= +StorPerf Test Execution Guide +============================= + +Prerequisites +============= + +This guide requires StorPerf to be running and have its ReST API accessible. If +the ReST API is not running on port 5000, adjust the commands provided here as +needed. + +Interacting With StorPerf +========================= + +Once the StorPerf container has been started and the ReST API exposed, you can +interact directly with it using the ReST API. StorPerf comes with a Swagger +interface that is accessible through the exposed port at: + +.. code-block:: console + + http://StorPerf:5000/swagger/index.html + +The typical test execution follows this pattern: + +#. Configure the environment +#. Initialize the cinder volumes +#. Execute one or more performance runs +#. Delete the environment + +Configure The Environment +========================= + +The following pieces of information are required to prepare the environment: + +- The number of VMs/Cinder volumes to create +- The Glance image that holds the VM operating system to use. 
StorPerf has + only been tested with Ubuntu 16.04 +- The name of the public network that agents will use +- The size, in gigabytes, of the Cinder volumes to create + +The ReST API is a POST to http://StorPerf:5000/api/v1.0/configurations and +takes a JSON payload as follows. + +.. code-block:: json + + { + "agent_count": int, + "agent_image": string, + "public_network": string, + "volume_size": int + } + +This call will block until the stack is created, at which point it will return +the OpenStack heat stack id. + +Initialize the Cinder Volumes +============================= +Before executing a test run for the purpose of measuring performance, it is +necessary to fill the Cinder volume with random data. Failure to execute this +step can result in meaningless numbers, especially for read performance. Most +Cinder drivers are smart enough to know what blocks contain data, and which do +not. Uninitialized blocks return "0" immediately without actually reading from +the volume. + +Initiating the data fill looks the same as a regular performance test, but uses +the special workload called "_warm_up". StorPerf will never push _warm_up +data to the OPNFV Test Results DB, nor will it terminate the run on steady state. +It is guaranteed to run to completion, which fills 100% of the volume with +random data. + +The ReST API is a POST to http://StorPerf:5000/api/v1.0/jobs and +takes a JSON payload as follows. + +.. code-block:: json + + { + "workload": "_warm_up" + } + +This will return a job ID as follows. + +.. code-block:: json + + { + "job_id": "edafa97e-457e-4d3d-9db4-1d6c0fc03f98" + } + +This job ID can be used to query the state to determine when it has completed. +See the section on querying jobs for more information. + +Execute a Performance Run +========================= +Performance runs can execute either a single workload, or iterate over a matrix +of workload types, block sizes and queue depths. + +Workload Types +~~~~~~~~~~~~~~ +rr + Read, Random. 
100% read of random blocks +rs + Read, Sequential. 100% read of sequential blocks of data +rw + Read / Write Mix, Random. 70% random read, 30% random write +wr + Write, Random. 100% write of random blocks +ws + Write, Sequential. 100% write of sequential blocks. + +Block Sizes +~~~~~~~~~~~ +A comma delimited list of the different block sizes to use when reading and +writing data. Note: Some Cinder drivers (such as Ceph) cannot support block +sizes larger than 16k (16384). + +Queue Depths +~~~~~~~~~~~~ +A comma delimited list of the different queue depths to use when reading and +writing data. The queue depth parameter causes FIO to keep this many I/O +requests outstanding at one time. It is used to simulate traffic patterns +on the system. For example, a queue depth of 4 would simulate 4 processes +constantly creating I/O requests. + +Deadline +~~~~~~~~ +The deadline is the maximum amount of time in minutes for a workload to run. If +steady state has not been reached by the deadline, the workload will terminate +and that particular run will be marked as not having reached steady state. Any +remaining workloads will continue to execute in order. + +.. code-block:: json + + { + "block_sizes": "2048,16384", + "deadline": 20, + "queue_depths": "2,4", + "workload": "wr,rr,rw" + } + +Metadata +~~~~~~~~ +A job can have metadata associated with it for tagging. The following metadata +is required in order to push results to the OPNFV Test Results DB: + +.. code-block:: json + + "metadata": { + "disk_type": "HDD or SSD", + "pod_name": "OPNFV Pod Name", + "scenario_name": string, + "storage_node_count": int, + "version": string, + "build_tag": string, + "test_case": "snia_steady_state" + } + + + +Query Jobs Information +====================== + +By issuing a GET to the job API http://StorPerf:5000/api/v1.0/jobs?job_id=, +you can fetch information about the job as follows: + +- &type=status: to report on the status of the job. 
+- &type=metrics: to report on the collected metrics. +- &type=metadata: to report back any metadata sent with the job ReST API + +Status +~~~~~~ +The Status field can be: +- Running to indicate the job is still in progress, or +- Completed to indicate the job is done. This could be either normal completion + or manually terminated via HTTP DELETE call. + +Workloads can have a value of: +- Pending to indicate the workload has not yet started, +- Running to indicate this is the active workload, or +- Completed to indicate this workload has completed. + +This is an example of a type=status call. + +.. code-block:: json + + { + "Status": "Running", + "TestResultURL": null, + "Workloads": { + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.1.block-size.16384": "Pending", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.1.block-size.4096": "Pending", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.1.block-size.512": "Pending", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.4.block-size.16384": "Running", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.4.block-size.4096": "Pending", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.4.block-size.512": "Pending", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.8.block-size.16384": "Completed", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.8.block-size.4096": "Pending", + "eeb2e587-5274-4d2f-ad95-5c85102d055e.ws.queue-depth.8.block-size.512": "Pending" + } + } + +Metrics +~~~~~~~ +Metrics can be queried at any time during or after the completion of a run. +Note that the metrics show up only after the first interval has passed, and +are subject to change until the job completes. + +This is a sample of a type=metrics call. + +.. 
code-block:: json + + { + "rw.queue-depth.1.block-size.512.read.bw": 52.8, + "rw.queue-depth.1.block-size.512.read.iops": 106.76199999999999, + "rw.queue-depth.1.block-size.512.read.lat.mean": 93.176, + "rw.queue-depth.1.block-size.512.write.bw": 22.5, + "rw.queue-depth.1.block-size.512.write.iops": 45.760000000000005, + "rw.queue-depth.1.block-size.512.write.lat.mean": 21764.184999999998 + } + +Abort a Job +=========== +Issuing an HTTP DELETE to the job api http://StorPerf:5000/api/v1.0/jobs will +force the termination of the whole job, regardless of how many workloads +remain to be executed. + +.. code-block:: bash + + curl -X DELETE --header 'Accept: application/json' http://StorPerf:5000/api/v1.0/jobs + +Delete the Environment +====================== +After you are done testing, you can have StorPerf delete the Heat stack by +issuing an HTTP DELETE to the configurations API. + +.. code-block:: bash + + curl -X DELETE --header 'Accept: application/json' http://StorPerf:5000/api/v1.0/configurations + +You may also want to delete an environment, and then create a new one with a +different number of VMs/Cinder volumes to test the impact of the number of VMs +in your environment. -- cgit 1.2.3-korg