author     Qiaowei Ren <qiaowei.ren@intel.com>    2018-01-04 13:43:33 +0800
committer  Qiaowei Ren <qiaowei.ren@intel.com>    2018-01-05 11:59:39 +0800
commit     812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree       04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/rbd/qemu/pool
parent     15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo
This patch creates the initial code repo.
For Ceph, the Luminous stable release will be used as the base code,
and subsequent changes and optimizations for Ceph will be added on
top of it.
For OpenSDS, any changes can currently be upstreamed into the
original opensds repo (https://github.com/opensds/opensds), so
stor4nfv will directly clone the opensds code to deploy the stor4nfv
environment.
The scripts for deployment based on Ceph and OpenSDS will be put
into the 'ci' directory.
Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
Diffstat (limited to 'src/ceph/qa/suites/rbd/qemu/pool')
 src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml        | 21 ++++++++++++++
 src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml         | 24 ++++++++++++++
 src/ceph/qa/suites/rbd/qemu/pool/none.yaml                 |  0
 src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml | 11 +++++++
 src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml     | 17 ++++++++++
5 files changed, 73 insertions(+), 0 deletions(-)
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
new file mode 100644
index 0000000..c75e6fd
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NEAR_FULL\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+      - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
+      - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
+      - rbd pool init rbd
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
new file mode 100644
index 0000000..f39a5bb
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml
@@ -0,0 +1,24 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+      - sudo ceph osd pool set datapool allow_ec_overwrites true
+      - rbd pool init datapool
+
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd default data pool: datapool
+      osd: # force bluestore since it's required for ec overwrites
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        enable experimental unrecoverable data corrupting features: "*"
+        osd debug randomize hobject sort order: false
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/none.yaml b/src/ceph/qa/suites/rbd/qemu/pool/none.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/none.yaml
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
new file mode 100644
index 0000000..c5647db
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create datapool 4
+      - rbd pool init datapool
+
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default data pool: datapool
diff --git a/src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml b/src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml
new file mode 100644
index 0000000..1b50565
--- /dev/null
+++ b/src/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml
@@ -0,0 +1,17 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NEAR_FULL\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd pool create cache 4
+      - sudo ceph osd tier add rbd cache
+      - sudo ceph osd tier cache-mode cache writeback
+      - sudo ceph osd tier set-overlay rbd cache
+      - sudo ceph osd pool set cache hit_set_type bloom
+      - sudo ceph osd pool set cache hit_set_count 8
+      - sudo ceph osd pool set cache hit_set_period 60
+      - sudo ceph osd pool set cache target_max_objects 250
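
Note on the cache-pool fragments: both ec-cache-pool.yaml and small-cache-pool.yaml place a small 'cache' pool in front of 'rbd' as a writeback tier, bounded by a bloom hit set and a 250-object cap. A minimal sketch of how the resulting tier could be checked on a test cluster; the expected values are read off the YAML above, not taken from an actual run:

  # Sketch: inspect the writeback tier created by ec-cache-pool.yaml.
  sudo ceph osd pool ls detail                       # 'rbd' should list 'cache' as its tier/overlay
  sudo ceph osd pool get cache hit_set_type          # expected: bloom (per the YAML)
  sudo ceph osd pool get cache target_max_objects    # expected: 250 (per the YAML)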
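
ec-data-pool.yaml forces bluestore because erasure-coded overwrites (allow_ec_overwrites) are only supported on bluestore OSDs. A minimal sketch for confirming the profile and the overwrite flag, assuming the pool and profile names from the YAML above:

  # Sketch: confirm the k=2/m=1 profile and the EC-overwrite flag.
  sudo ceph osd erasure-code-profile get teuthologyprofile   # expect k=2, m=1, crush-failure-domain=osd
  sudo ceph osd pool get datapool allow_ec_overwrites        # expected: true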
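
Both data-pool fragments rely on the client option 'rbd default data pool: datapool', which keeps image metadata in 'rbd' while data objects go to 'datapool'. A minimal sketch of the explicit per-image equivalent, assuming a Luminous-or-later rbd CLI; the image name 'qemu-test' is illustrative only:

  # Sketch: pin the data pool per image instead of via the client option.
  rbd create --size 1G --data-pool datapool rbd/qemu-test
  rbd info rbd/qemu-test    # should report 'data_pool: datapool'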