Diffstat (limited to 'src/ceph/qa/standalone/erasure-code')
-rwxr-xr-x  src/ceph/qa/standalone/erasure-code/test-erasure-code-plugins.sh  117
-rwxr-xr-x  src/ceph/qa/standalone/erasure-code/test-erasure-code.sh          339
-rwxr-xr-x  src/ceph/qa/standalone/erasure-code/test-erasure-eio.sh           323
3 files changed, 0 insertions, 779 deletions
diff --git a/src/ceph/qa/standalone/erasure-code/test-erasure-code-plugins.sh b/src/ceph/qa/standalone/erasure-code/test-erasure-code-plugins.sh
deleted file mode 100755
index 26aff64..0000000
--- a/src/ceph/qa/standalone/erasure-code/test-erasure-code-plugins.sh
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/bin/bash -x
-
-source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
-
-arch=$(uname -m)
-
-case $arch in
- i[3456]86*|x86_64*|amd64*)
- legacy_jerasure_plugins=(jerasure_generic jerasure_sse3 jerasure_sse4)
- legacy_shec_plugins=(shec_generic shec_sse3 shec_sse4)
- plugins=(jerasure shec lrc isa)
- ;;
- aarch64*|arm*)
- legacy_jerasure_plugins=(jerasure_generic jerasure_neon)
- legacy_shec_plugins=(shec_generic shec_neon)
- plugins=(jerasure shec lrc)
- ;;
- *)
- echo "unsupported platform ${arch}."
- exit 1
- ;;
-esac
-
-function run() {
- local dir=$1
- shift
-
- export CEPH_MON="127.0.0.1:17110" # git grep '\<17110\>' : there must be only one
- export CEPH_ARGS
- CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
- CEPH_ARGS+="--mon-host=$CEPH_MON "
-
- local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
- for func in $funcs ; do
- $func $dir || return 1
- done
-}
-
-function TEST_preload_warning() {
- local dir=$1
-
- for plugin in ${legacy_jerasure_plugins[*]} ${legacy_shec_plugins[*]}; do
- setup $dir || return 1
- run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1
- run_mgr $dir x || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
- grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/mon.a.log || return 1
- grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/osd.0.log || return 1
- teardown $dir || return 1
- done
- return 0
-}
-
-function TEST_preload_no_warning() {
- local dir=$1
-
- for plugin in ${plugins[*]}; do
- setup $dir || return 1
- run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1
- run_mgr $dir x || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
- ! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/mon.a.log || return 1
- ! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/osd.0.log || return 1
- teardown $dir || return 1
- done
-
- return 0
-}
-
-function TEST_preload_no_warning_default() {
- local dir=$1
-
- setup $dir || return 1
- run_mon $dir a || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- run_mgr $dir x || return 1
- run_osd $dir 0 || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
- ! grep "WARNING: osd_erasure_code_plugins" $dir/mon.a.log || return 1
- ! grep "WARNING: osd_erasure_code_plugins" $dir/osd.0.log || return 1
- teardown $dir || return 1
-
- return 0
-}
-
-function TEST_ec_profile_warning() {
- local dir=$1
-
- setup $dir || return 1
- run_mon $dir a || return 1
- run_mgr $dir x || return 1
- for id in $(seq 0 2) ; do
- run_osd $dir $id || return 1
- done
- create_rbd_pool || return 1
- wait_for_clean || return 1
-
- for plugin in ${legacy_jerasure_plugins[*]}; do
- ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd technique=reed_sol_van plugin=${plugin} || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1
- done
-
- for plugin in ${legacy_shec_plugins[*]}; do
- ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd plugin=${plugin} || return 1
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1
- done
-
- teardown $dir || return 1
-}
-
-main test-erasure-code-plugins "$@"
diff --git a/src/ceph/qa/standalone/erasure-code/test-erasure-code.sh b/src/ceph/qa/standalone/erasure-code/test-erasure-code.sh
deleted file mode 100755
index 6dd5833..0000000
--- a/src/ceph/qa/standalone/erasure-code/test-erasure-code.sh
+++ /dev/null
@@ -1,339 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
-# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
-#
-# Author: Loic Dachary <loic@dachary.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-
-source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
-
-function run() {
- local dir=$1
- shift
-
- export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one
- export CEPH_ARGS
- CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
- CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false"
-
- setup $dir || return 1
- run_mon $dir a || return 1
- run_mgr $dir x || return 1
- # check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
- for id in $(seq 0 10) ; do
- run_osd $dir $id || return 1
- done
- create_rbd_pool || return 1
- wait_for_clean || return 1
- # check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
- grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
- create_erasure_coded_pool ecpool || return 1
-
- local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
- for func in $funcs ; do
- $func $dir || return 1
- done
-
- delete_pool ecpool || return 1
- teardown $dir || return 1
-}
-
-function create_erasure_coded_pool() {
- local poolname=$1
-
- ceph osd erasure-code-profile set myprofile \
- crush-failure-domain=osd || return 1
- create_pool $poolname 12 12 erasure myprofile \
- || return 1
- wait_for_clean || return 1
-}
-
-function delete_pool() {
- local poolname=$1
-
- ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
-}
-
-function rados_put_get() {
- local dir=$1
- local poolname=$2
- local objname=${3:-SOMETHING}
-
-
- for marker in AAA BBB CCCC DDDD ; do
- printf "%*s" 1024 $marker
- done > $dir/ORIGINAL
-
- #
- # put and get an object and check that they are equal
- #
- rados --pool $poolname put $objname $dir/ORIGINAL || return 1
- rados --pool $poolname get $objname $dir/COPY || return 1
- diff $dir/ORIGINAL $dir/COPY || return 1
- rm $dir/COPY
-
- #
- # take out an OSD used to store the object and
- # check the object can still be retrieved, which implies
- # recovery
- #
- local -a initial_osds=($(get_osds $poolname $objname))
- local last=$((${#initial_osds[@]} - 1))
- ceph osd out ${initial_osds[$last]} || return 1
- ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
- rados --pool $poolname get $objname $dir/COPY || return 1
- diff $dir/ORIGINAL $dir/COPY || return 1
- ceph osd in ${initial_osds[$last]} || return 1
-
- rm $dir/ORIGINAL
-}
-
-function rados_osds_out_in() {
- local dir=$1
- local poolname=$2
- local objname=${3:-SOMETHING}
-
-
- for marker in FFFF GGGG HHHH IIII ; do
- printf "%*s" 1024 $marker
- done > $dir/ORIGINAL
-
- #
- # put and get an object and check that they are equal
- #
- rados --pool $poolname put $objname $dir/ORIGINAL || return 1
- rados --pool $poolname get $objname $dir/COPY || return 1
- diff $dir/ORIGINAL $dir/COPY || return 1
- rm $dir/COPY
-
- #
- # take out two OSDs used to store the object, then wait for the
- # cluster to be clean (i.e. all PGs active+clean) again, which
- # implies the PGs have been moved to use the remaining OSDs. Check
- # that the object can still be retrieved.
- #
- wait_for_clean || return 1
- local osds_list=$(get_osds $poolname $objname)
- local -a osds=($osds_list)
- for osd in 0 1 ; do
- ceph osd out ${osds[$osd]} || return 1
- done
- wait_for_clean || return 1
- #
- # verify the object is no longer mapped to the osds that are out
- #
- for osd in 0 1 ; do
- ! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
- done
- rados --pool $poolname get $objname $dir/COPY || return 1
- diff $dir/ORIGINAL $dir/COPY || return 1
- #
- # bring the OSDs back in, then wait for the cluster
- # to be clean (i.e. all PGs active+clean) again, which
- # implies the PGs go back to using the same OSDs as before
- #
- for osd in 0 1 ; do
- ceph osd in ${osds[$osd]} || return 1
- done
- wait_for_clean || return 1
- test "$osds_list" = "$(get_osds $poolname $objname)" || return 1
- rm $dir/ORIGINAL
-}
-
-function TEST_rados_put_get_lrc_advanced() {
- local dir=$1
- local poolname=pool-lrc-a
- local profile=profile-lrc-a
-
- ceph osd erasure-code-profile set $profile \
- plugin=lrc \
- mapping=DD_ \
- crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
- layers='[ [ "DDc", "" ] ]' || return 1
- create_pool $poolname 12 12 erasure $profile \
- || return 1
-
- rados_put_get $dir $poolname || return 1
-
- delete_pool $poolname
- ceph osd erasure-code-profile rm $profile
-}
-
-function TEST_rados_put_get_lrc_kml() {
- local dir=$1
- local poolname=pool-lrc
- local profile=profile-lrc
-
- ceph osd erasure-code-profile set $profile \
- plugin=lrc \
- k=4 m=2 l=3 \
- crush-failure-domain=osd || return 1
- create_pool $poolname 12 12 erasure $profile \
- || return 1
-
- rados_put_get $dir $poolname || return 1
-
- delete_pool $poolname
- ceph osd erasure-code-profile rm $profile
-}
-
-function TEST_rados_put_get_isa() {
- if ! erasure_code_plugin_exists isa ; then
- echo "SKIP because plugin isa has not been built"
- return 0
- fi
- local dir=$1
- local poolname=pool-isa
-
- ceph osd erasure-code-profile set profile-isa \
- plugin=isa \
- crush-failure-domain=osd || return 1
- create_pool $poolname 1 1 erasure profile-isa \
- || return 1
-
- rados_put_get $dir $poolname || return 1
-
- delete_pool $poolname
-}
-
-function TEST_rados_put_get_jerasure() {
- local dir=$1
-
- rados_put_get $dir ecpool || return 1
-
- local poolname=pool-jerasure
- local profile=profile-jerasure
-
- ceph osd erasure-code-profile set $profile \
- plugin=jerasure \
- k=4 m=2 \
- crush-failure-domain=osd || return 1
- create_pool $poolname 12 12 erasure $profile \
- || return 1
-
- rados_put_get $dir $poolname || return 1
- rados_osds_out_in $dir $poolname || return 1
-
- delete_pool $poolname
- ceph osd erasure-code-profile rm $profile
-}
-
-function TEST_rados_put_get_shec() {
- local dir=$1
-
- local poolname=pool-shec
- local profile=profile-shec
-
- ceph osd erasure-code-profile set $profile \
- plugin=shec \
- k=2 m=1 c=1 \
- crush-failure-domain=osd || return 1
- create_pool $poolname 12 12 erasure $profile \
- || return 1
-
- rados_put_get $dir $poolname || return 1
-
- delete_pool $poolname
- ceph osd erasure-code-profile rm $profile
-}
-
-function TEST_alignment_constraints() {
- local payload=ABC
- echo "$payload" > $dir/ORIGINAL
- #
- # Verify that the rados command enforces alignment constraints
- # imposed by the stripe width
- # See http://tracker.ceph.com/issues/8622
- #
- local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
- eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
- local block_size=$((stripe_unit * k - 1))
- dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
- rados --block-size=$block_size \
- --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
- rm $dir/ORIGINAL
-}
-
-function chunk_size() {
- echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
-}
-
-#
-# By default an object will be split in two (k=2) with the first part
-# of the object in the first OSD of the up set and the second part in
-# the next OSD in the up set. This layout is defined by the mapping
-# parameter and this function helps verify that the first and second
-# parts of the object are located on the OSDs where they should be.
-#
-function verify_chunk_mapping() {
- local dir=$1
- local poolname=$2
- local first=$3
- local second=$4
-
- local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
- echo -n "$payload" > $dir/ORIGINAL
-
- rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
- rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
- local -a osds=($(get_osds $poolname SOMETHING$poolname))
- for (( i = 0; i < ${#osds[@]}; i++ )) ; do
- ceph daemon osd.${osds[$i]} flush_journal
- done
- diff $dir/ORIGINAL $dir/COPY || return 1
- rm $dir/COPY
-
- local -a osds=($(get_osds $poolname SOMETHING$poolname))
- grep --quiet --recursive --text FIRST$poolname $dir/${osds[$first]} || return 1
- grep --quiet --recursive --text SECOND$poolname $dir/${osds[$second]} || return 1
-}
-
-function TEST_chunk_mapping() {
- local dir=$1
-
- #
- # mapping=DD_ is the default:
- # first OSD (i.e. 0) in the up set has the first part of the object
- # second OSD (i.e. 1) in the up set has the second part of the object
- #
- verify_chunk_mapping $dir ecpool 0 1 || return 1
-
- ceph osd erasure-code-profile set remap-profile \
- plugin=lrc \
- layers='[ [ "_DD", "" ] ]' \
- mapping='_DD' \
- crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
- ceph osd erasure-code-profile get remap-profile
- create_pool remap-pool 12 12 erasure remap-profile \
- || return 1
-
- #
- # mapping=_DD
- # second OSD (i.e. 1) in the up set has the first part of the object
- # third OSD (i.e. 2) in the up set has the second part of the object
- #
- verify_chunk_mapping $dir remap-pool 1 2 || return 1
-
- delete_pool remap-pool
- ceph osd erasure-code-profile rm remap-profile
-}
-
-main test-erasure-code "$@"
-
-# Local Variables:
-# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh"
-# End:
diff --git a/src/ceph/qa/standalone/erasure-code/test-erasure-eio.sh b/src/ceph/qa/standalone/erasure-code/test-erasure-eio.sh
deleted file mode 100755
index b788016..0000000
--- a/src/ceph/qa/standalone/erasure-code/test-erasure-eio.sh
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 Red Hat <contact@redhat.com>
-#
-#
-# Author: Kefu Chai <kchai@redhat.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-
-source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
-
-function run() {
- local dir=$1
- shift
-
- export CEPH_MON="127.0.0.1:7112" # git grep '\<7112\>' : there must be only one
- export CEPH_ARGS
- CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
- CEPH_ARGS+="--mon-host=$CEPH_MON "
-
- local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
- for func in $funcs ; do
- setup $dir || return 1
- run_mon $dir a || return 1
- run_mgr $dir x || return 1
- create_rbd_pool || return 1
-
- # check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
- grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
- $func $dir || return 1
- teardown $dir || return 1
- done
-}
-
-function setup_osds() {
- for id in $(seq 0 3) ; do
- run_osd $dir $id || return 1
- done
- wait_for_clean || return 1
-
- # check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
- grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
-}
-
-function create_erasure_coded_pool() {
- local poolname=$1
-
- ceph osd erasure-code-profile set myprofile \
- plugin=jerasure \
- k=2 m=1 \
- crush-failure-domain=osd || return 1
- create_pool $poolname 1 1 erasure myprofile \
- || return 1
- wait_for_clean || return 1
-}
-
-function delete_pool() {
- local poolname=$1
-
- ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
- ceph osd erasure-code-profile rm myprofile
-}
-
-function rados_put() {
- local dir=$1
- local poolname=$2
- local objname=${3:-SOMETHING}
-
- for marker in AAA BBB CCCC DDDD ; do
- printf "%*s" 1024 $marker
- done > $dir/ORIGINAL
- #
- # put an object; rados_get will compare it with $dir/ORIGINAL later
- #
- rados --pool $poolname put $objname $dir/ORIGINAL || return 1
-}
-
-function rados_get() {
- local dir=$1
- local poolname=$2
- local objname=${3:-SOMETHING}
- local expect=${4:-ok}
-
- #
- # Expect a failure to get object
- #
- if [ $expect = "fail" ];
- then
- ! rados --pool $poolname get $objname $dir/COPY
- return
- fi
- #
- # get an object, compare with $dir/ORIGINAL
- #
- rados --pool $poolname get $objname $dir/COPY || return 1
- diff $dir/ORIGINAL $dir/COPY || return 1
- rm $dir/COPY
-}
-
-function rados_put_get() {
- local dir=$1
- local poolname=$2
- local objname=${3:-SOMETHING}
- local recovery=$4
-
- #
- # put and get an object and check that they are equal
- #
- rados_put $dir $poolname $objname || return 1
- # We can read even though caller injected read error on one of the shards
- rados_get $dir $poolname $objname || return 1
-
- if [ -n "$recovery" ];
- then
- #
- # take out the last OSD used to store the object,
- # bring it back, and check for clean PGs which means
- # recovery didn't crash the primary.
- #
- local -a initial_osds=($(get_osds $poolname $objname))
- local last=$((${#initial_osds[@]} - 1))
- # Kill OSD
- kill_daemons $dir TERM osd.${initial_osds[$last]} >&2 < /dev/null || return 1
- ceph osd out ${initial_osds[$last]} || return 1
- ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
- ceph osd in ${initial_osds[$last]} || return 1
- run_osd $dir ${initial_osds[$last]} || return 1
- wait_for_clean || return 1
- fi
-
- rm $dir/ORIGINAL
-}
-
-function rados_get_data_eio() {
- local dir=$1
- shift
- local shard_id=$1
- shift
- local recovery=$1
- shift
-
- # inject eio into the specified shard
- #
- local poolname=pool-jerasure
- local objname=obj-eio-$$-$shard_id
- inject_eio ec data $poolname $objname $dir $shard_id || return 1
- rados_put_get $dir $poolname $objname $recovery || return 1
-
- shard_id=$(expr $shard_id + 1)
- inject_eio ec data $poolname $objname $dir $shard_id || return 1
- # Now 2 out of 3 shards get EIO, so should fail
- rados_get $dir $poolname $objname fail || return 1
-}
-
-# Change the size of the specified shard
-#
-function set_size() {
- local objname=$1
- shift
- local dir=$1
- shift
- local shard_id=$1
- shift
- local bytes=$1
- shift
- local mode=${1}
-
- local poolname=pool-jerasure
- local -a initial_osds=($(get_osds $poolname $objname))
- local osd_id=${initial_osds[$shard_id]}
- ceph osd set noout
- if [ "$mode" = "add" ];
- then
- objectstore_tool $dir $osd_id $objname get-bytes $dir/CORRUPT || return 1
- dd if=/dev/urandom bs=$bytes count=1 >> $dir/CORRUPT
- elif [ "$bytes" = "0" ];
- then
- touch $dir/CORRUPT
- else
- dd if=/dev/urandom bs=$bytes count=1 of=$dir/CORRUPT
- fi
- objectstore_tool $dir $osd_id $objname set-bytes $dir/CORRUPT || return 1
- rm -f $dir/CORRUPT
- ceph osd unset noout
-}
-
-function rados_get_data_bad_size() {
- local dir=$1
- shift
- local shard_id=$1
- shift
- local bytes=$1
- shift
- local mode=${1:-set}
-
- local poolname=pool-jerasure
- local objname=obj-size-$$-$shard_id-$bytes
- rados_put $dir $poolname $objname || return 1
-
- # Change the size of the specified shard
- #
- set_size $objname $dir $shard_id $bytes $mode || return 1
-
- rados_get $dir $poolname $objname || return 1
-
- # Leave objname and modify another shard
- shard_id=$(expr $shard_id + 1)
- set_size $objname $dir $shard_id $bytes $mode || return 1
- rados_get $dir $poolname $objname fail || return 1
-}
-
-#
-# These two test cases try to validate the following behavior:
-# For an object on an EC pool, if one shard (either the primary or a
-# replica) returns a read error, the client can still read the object.
-#
-# If 2 shards have read errors, the client will get an error.
-#
-function TEST_rados_get_subread_eio_shard_0() {
- local dir=$1
- setup_osds || return 1
-
- local poolname=pool-jerasure
- create_erasure_coded_pool $poolname || return 1
- # inject eio on primary OSD (0) and replica OSD (1)
- local shard_id=0
- rados_get_data_eio $dir $shard_id || return 1
- delete_pool $poolname
-}
-
-function TEST_rados_get_subread_eio_shard_1() {
- local dir=$1
- setup_osds || return 1
-
- local poolname=pool-jerasure
- create_erasure_coded_pool $poolname || return 1
- # inject eio into replica OSDs (1) and (2)
- local shard_id=1
- rados_get_data_eio $dir $shard_id || return 1
- delete_pool $poolname
-}
-
-#
-# These two test cases try to validate the following behavior:
-# For an object on an EC pool, if one shard has an incorrect size
-# (which causes an internal read error), the client can still read the object.
-#
-# If 2 shards have incorrect sizes, the client will get an error.
-#
-function TEST_rados_get_bad_size_shard_0() {
- local dir=$1
- setup_osds || return 1
-
- local poolname=pool-jerasure
- create_erasure_coded_pool $poolname || return 1
- # Set an incorrect size on the primary OSD (0) and replica OSD (1)
- local shard_id=0
- rados_get_data_bad_size $dir $shard_id 10 || return 1
- rados_get_data_bad_size $dir $shard_id 0 || return 1
- rados_get_data_bad_size $dir $shard_id 256 add || return 1
- delete_pool $poolname
-}
-
-function TEST_rados_get_bad_size_shard_1() {
- local dir=$1
- setup_osds || return 1
-
- local poolname=pool-jerasure
- create_erasure_coded_pool $poolname || return 1
- # Set an incorrect size on replica OSDs (1) and (2)
- local shard_id=1
- rados_get_data_bad_size $dir $shard_id 10 || return 1
- rados_get_data_bad_size $dir $shard_id 0 || return 1
- rados_get_data_bad_size $dir $shard_id 256 add || return 1
- delete_pool $poolname
-}
-
-function TEST_rados_get_with_subreadall_eio_shard_0() {
- local dir=$1
- local shard_id=0
-
- setup_osds || return 1
-
- local poolname=pool-jerasure
- create_erasure_coded_pool $poolname || return 1
- # inject eio on primary OSD (0)
- local shard_id=0
- rados_get_data_eio $dir $shard_id recovery || return 1
-
- delete_pool $poolname
-}
-
-function TEST_rados_get_with_subreadall_eio_shard_1() {
- local dir=$1
- local shard_id=0
-
- setup_osds || return 1
-
- local poolname=pool-jerasure
- create_erasure_coded_pool $poolname || return 1
- # inject eio on replica OSD (1)
- local shard_id=1
- rados_get_data_eio $dir $shard_id recovery || return 1
-
- delete_pool $poolname
-}
-
-main test-erasure-eio "$@"
-
-# Local Variables:
-# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-eio.sh"
-# End:
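
All three deleted scripts follow the same standalone-test convention: they source qa/standalone/ceph-helpers.sh, define TEST_* functions, and let run() discover and dispatch them (main comes from the sourced helpers). The sketch below is a minimal, self-contained illustration of that dispatch idiom in plain bash; it has no Ceph dependency, and the setup, teardown, and TEST_example functions here are illustrative stand-ins, not the real ceph-helpers.sh implementations.

#!/bin/bash
# Minimal sketch of the TEST_* dispatch pattern used by these scripts.
# setup/teardown/TEST_example are stand-ins, not ceph-helpers.sh code.

function setup() {
    # create a fresh scratch directory for one test
    local dir=$1
    rm -rf "$dir" && mkdir -p "$dir"
}

function teardown() {
    # remove the scratch directory after the test
    local dir=$1
    rm -rf "$dir"
}

function run() {
    local dir=$1
    shift
    # With no arguments, discover every TEST_* function defined in this
    # file; otherwise run only the functions named on the command line
    # (same idiom as the deleted scripts).
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    local func
    for func in $funcs ; do
        setup "$dir" || return 1
        $func "$dir" || return 1
        teardown "$dir" || return 1
    done
}

function TEST_example() {
    # trivial round-trip check against the scratch directory
    local dir=$1
    echo hello > "$dir/ORIGINAL"
    grep -q hello "$dir/ORIGINAL" || return 1
}

run "$(mktemp -d)" "$@"

Run it with no arguments to execute every TEST_* function, or pass specific function names (e.g. "bash sketch.sh TEST_example", assuming it is saved as sketch.sh) to run only those, exactly as the deleted scripts allow.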