From 7da45d65be36d36b880cc55c5036e96c24b53f00 Mon Sep 17 00:00:00 2001
From: Qiaowei Ren
Date: Thu, 1 Mar 2018 14:38:11 +0800
Subject: remove ceph code

This patch removes initial ceph code, due to license issue.

Change-Id: I092d44f601cdf34aed92300fe13214925563081c
Signed-off-by: Qiaowei Ren
---
 src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml  | 27 ----------------------
 src/ceph/qa/suites/rbd/cli/pool/none.yaml          |  0
 .../suites/rbd/cli/pool/replicated-data-pool.yaml  | 11 ---------
 .../qa/suites/rbd/cli/pool/small-cache-pool.yaml   | 17 --------------
 4 files changed, 55 deletions(-)
 delete mode 100644 src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
 delete mode 100644 src/ceph/qa/suites/rbd/cli/pool/none.yaml
 delete mode 100644 src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
 delete mode 100644 src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml

(limited to 'src/ceph/qa/suites/rbd/cli/pool')

diff --git a/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
deleted file mode 100644
index 376bf08..0000000
--- a/src/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
-      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
-      - sudo ceph osd pool set datapool allow_ec_overwrites true
-      - rbd pool init datapool
-
-overrides:
-  thrashosds:
-    bdev_inject_crash: 2
-    bdev_inject_crash_probability: .5
-  ceph:
-    fs: xfs
-    log-whitelist:
-      - overall HEALTH_
-      - \(CACHE_POOL_NO_HIT_SET\)
-    conf:
-      client:
-        rbd default data pool: datapool
-      osd: # force bluestore since it's required for ec overwrites
-        osd objectstore: bluestore
-        bluestore block size: 96636764160
-        enable experimental unrecoverable data corrupting features: "*"
-        osd debug randomize hobject sort order: false
-# this doesn't work with failures bc the log writes are not atomic across the two backends
-#        bluestore bluefs env mirror: true
diff --git a/src/ceph/qa/suites/rbd/cli/pool/none.yaml b/src/ceph/qa/suites/rbd/cli/pool/none.yaml
deleted file mode 100644
index e69de29..0000000
diff --git a/src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
deleted file mode 100644
index c5647db..0000000
--- a/src/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create datapool 4
-      - rbd pool init datapool
-
-overrides:
-  ceph:
-    conf:
-      client:
-        rbd default data pool: datapool
diff --git a/src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml b/src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml
deleted file mode 100644
index 1b50565..0000000
--- a/src/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-overrides:
-  ceph:
-    log-whitelist:
-      - overall HEALTH_
-      - \(CACHE_POOL_NEAR_FULL\)
-      - \(CACHE_POOL_NO_HIT_SET\)
-tasks:
-- exec:
-    client.0:
-      - sudo ceph osd pool create cache 4
-      - sudo ceph osd tier add rbd cache
-      - sudo ceph osd tier cache-mode cache writeback
-      - sudo ceph osd tier set-overlay rbd cache
-      - sudo ceph osd pool set cache hit_set_type bloom
-      - sudo ceph osd pool set cache hit_set_count 8
-      - sudo ceph osd pool set cache hit_set_period 60
-      - sudo ceph osd pool set cache target_max_objects 250
--
cgit 1.2.3-korg
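
Note: for reference, the erasure-coded data-pool setup exercised by the removed
ec-data-pool.yaml boils down to the CLI sequence below. This is a minimal sketch
assembled from the deleted lines; the profile name (teuthologyprofile), pool name
(datapool), PG count (4), and k=2/m=1 layout are the test suite's own values, not
requirements for a real deployment.

    #!/usr/bin/env bash
    # Sketch of the EC data-pool setup removed in this patch (from ec-data-pool.yaml).
    set -euo pipefail

    # EC profile: 2 data chunks + 1 coding chunk, failure domain at the OSD level
    sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2

    # Create the erasure-coded pool and allow partial overwrites,
    # which RBD needs when its data objects live on an EC pool
    sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
    sudo ceph osd pool set datapool allow_ec_overwrites true

    # Initialize the pool for RBD use
    rbd pool init datapool

The suite fragment then pointed clients at this pool via the
"rbd default data pool: datapool" client option, so image data went to the EC pool
while metadata stayed in the base rbd pool.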