diff options
author | Qiaowei Ren <qiaowei.ren@intel.com> | 2018-01-04 13:43:33 +0800 |
---|---|---|
committer | Qiaowei Ren <qiaowei.ren@intel.com> | 2018-01-05 11:59:39 +0800 |
commit | 812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch) | |
tree | 04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/erasure-code | |
parent | 15280273faafb77777eab341909a3f495cf248d9 (diff) |
initial code repo
This patch creates initial code repo.
For ceph, the luminous stable release will be used as the base code,
and subsequent changes and optimizations for ceph will be added on top of it.
For opensds, currently any changes can be upstreamed into the original
opensds repo (https://github.com/opensds/opensds), so stor4nfv
will directly clone the opensds code to deploy the stor4nfv environment.
And the scripts for deployment based on ceph and opensds will be
put into 'ci' directory.
Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
Diffstat (limited to 'src/ceph/qa/erasure-code')
11 files changed, 412 insertions, 0 deletions
diff --git a/src/ceph/qa/erasure-code/ec-feature-plugins-v2.yaml b/src/ceph/qa/erasure-code/ec-feature-plugins-v2.yaml new file mode 100644 index 0000000..f2d374d --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-feature-plugins-v2.yaml @@ -0,0 +1,98 @@ +# +# Test the expected behavior of the +# +# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 +# +# feature. +# +roles: +- - mon.a + - mon.b + - osd.0 + - osd.1 +- - osd.2 + - mon.c + - mgr.x +tasks: +# +# Install firefly +# +- install: + branch: firefly +- ceph: + fs: xfs +# +# We don't need mon.c for now: it will be used later to make sure an old +# mon cannot join the quorum once the feature has been activated +# +- ceph.stop: + daemons: [mon.c] +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set WRONG plugin=WRONG + ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG" +# +# Partial upgrade, osd.2 is not upgraded +# +- install.upgrade: + osd.0: +# +# a is the leader +# +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: the monitor cluster" +- ceph.restart: + daemons: [mon.b, osd.1, osd.0] + wait-for-healthy: false + wait-for-osds-up: true +# +# The lrc plugin cannot be used because osd.2 is not upgraded yet +# and would crash. 
+# +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: osd.2" +# +# Taking osd.2 out, the rest of the cluster is upgraded +# +- ceph.stop: + daemons: [osd.2] +- sleep: + duration: 60 +# +# Creating an erasure code profile using the lrc plugin now works +# +- exec: + mon.a: + - "ceph osd erasure-code-profile set profile-lrc plugin=lrc" +# +# osd.2 won't be able to join the because is does not support the feature +# +- ceph.restart: + daemons: [osd.2] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + osd.2: + - |- + grep "protocol feature.*missing 100000000000" /var/log/ceph/ceph-osd.2.log +# +# mon.c won't be able to join the because it does not support the feature +# +- ceph.restart: + daemons: [mon.c] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + mon.c: + - |- + grep "missing.*feature" /var/log/ceph/ceph-mon.c.log diff --git a/src/ceph/qa/erasure-code/ec-feature-plugins-v3.yaml b/src/ceph/qa/erasure-code/ec-feature-plugins-v3.yaml new file mode 100644 index 0000000..332b944 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-feature-plugins-v3.yaml @@ -0,0 +1,98 @@ +# +# Test the expected behavior of the +# +# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 +# +# feature. 
+# +roles: +- - mon.a + - mon.b + - osd.0 + - osd.1 +- - osd.2 + - mon.c + - mgr.x +tasks: +# +# Install hammer +# +- install: + branch: hammer +- ceph: + fs: xfs +# +# We don't need mon.c for now: it will be used later to make sure an old +# mon cannot join the quorum once the feature has been activated +# +- ceph.stop: + daemons: [mon.c] +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set WRONG plugin=WRONG + ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG" +# +# Partial upgrade, osd.2 is not upgraded +# +- install.upgrade: + osd.0: +# +# a is the leader +# +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster" +- ceph.restart: + daemons: [mon.b, osd.1, osd.0] + wait-for-healthy: false + wait-for-osds-up: true +# +# The shec plugin cannot be used because osd.2 is not upgraded yet +# and would crash. 
+# +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2" +# +# Taking osd.2 out, the rest of the cluster is upgraded +# +- ceph.stop: + daemons: [osd.2] +- sleep: + duration: 60 +# +# Creating an erasure code profile using the shec plugin now works +# +- exec: + mon.a: + - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec" +# +# osd.2 won't be able to join the because is does not support the feature +# +- ceph.restart: + daemons: [osd.2] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + osd.2: + - |- + grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log +# +# mon.c won't be able to join the because it does not support the feature +# +- ceph.restart: + daemons: [mon.c] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + mon.c: + - |- + grep "missing.*feature" /var/log/ceph/ceph-mon.c.log diff --git a/src/ceph/qa/erasure-code/ec-rados-default.yaml b/src/ceph/qa/erasure-code/ec-rados-default.yaml new file mode 100644 index 0000000..cc62371 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-default.yaml @@ -0,0 +1,19 @@ +tasks: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/src/ceph/qa/erasure-code/ec-rados-parallel.yaml b/src/ceph/qa/erasure-code/ec-rados-parallel.yaml new file mode 100644 index 0000000..0f01d84 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-parallel.yaml @@ -0,0 +1,20 @@ +workload: + parallel: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + 
- print: "**** done rados ec parallel" diff --git a/src/ceph/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml b/src/ceph/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml new file mode 100644 index 0000000..64b5970 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml @@ -0,0 +1,26 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + min_size: 2 + write_append_excl: false + erasure_code_profile: + name: isaprofile + plugin: isa + k: 2 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml b/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml new file mode 100644 index 0000000..d61b1c8 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure21profile + plugin: jerasure + k: 2 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml b/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml new file mode 100644 index 0000000..2ca53a7 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml @@ -0,0 +1,31 @@ +# +# k=3 implies a stripe_width of 1376*3 = 4128 which is different from +# the default value of 4096 It is also not a multiple of 1024*1024 and +# creates situations where rounding rules during recovery becomes +# necessary. 
+# +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure31profile + plugin: jerasure + k: 3 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml b/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml new file mode 100644 index 0000000..dfcc616 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure21profile + plugin: jerasure + k: 4 + m: 2 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/src/ceph/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/src/ceph/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml new file mode 100644 index 0000000..86ae056 --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 400 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: lrcprofile + plugin: lrc + k: 4 + m: 2 + l: 3 + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/src/ceph/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/src/ceph/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml new file mode 100644 index 0000000..ee74c6e --- /dev/null +++ 
b/src/ceph/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 400 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: shecprofile + plugin: shec + k: 4 + m: 3 + c: 2 + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/src/ceph/qa/erasure-code/ec-rados-sequential.yaml b/src/ceph/qa/erasure-code/ec-rados-sequential.yaml new file mode 100644 index 0000000..90536ee --- /dev/null +++ b/src/ceph/qa/erasure-code/ec-rados-sequential.yaml @@ -0,0 +1,20 @@ +workload: + sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec sequential" |