path: root/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering
author     Qiaowei Ren <qiaowei.ren@intel.com>    2018-01-04 13:43:33 +0800
committer  Qiaowei Ren <qiaowei.ren@intel.com>    2018-01-05 11:59:39 +0800
commit     812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree       04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering
parent     15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo

This patch creates the initial code repo. For Ceph, the luminous stable release is used as the base code, and subsequent Ceph changes and optimizations will be added on top of it. For OpenSDS, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv clones the opensds code directly to deploy the stor4nfv environment. The deployment scripts based on ceph and opensds are placed in the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
Diffstat (limited to 'src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering')
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%                                                                   0
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml                                               17
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml         13
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%                                             0
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml         6
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml 5
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml                      14
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml                                                      52
8 files changed, 107 insertions(+), 0 deletions(-)
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
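Note: the empty file named '%' is the teuthology suite "convolution" marker. It tells the suite builder to combine the numbered facet directories in this tree (0-cluster, 1-install-hammer-and-upgrade-to-jewel, 2-setup-cache-tiering, 3-upgrade.yaml) as a cross product rather than picking only one of them. A minimal sketch of previewing the jobs this directory expands to, assuming a typical teuthology-suite invocation (the machine type here is illustrative, not taken from this commit):

    # Assumed invocation; adjust machine type and branches to the local lab setup.
    teuthology-suite --dry-run \
        --suite upgrade/hammer-jewel-x/tiering \
        --machine-type smithi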
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
new file mode 100644
index 0000000..9cd743c
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
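The fragment above lays out a three-node cluster (mon.a with osd.0/osd.1, mon.b and mon.c with osd.2/osd.3, plus a separate client) and whitelists log lines that are expected while daemons restart during the upgrade. The mon override suppresses the warning newer monitors raise about legacy CRUSH tunables left over from the hammer install. A hedged way to confirm the override took effect once the cluster is up (admin-socket query run on the mon.a host; the option is the same config key spelled with underscores):

    ceph daemon mon.a config get mon_warn_on_legacy_crush_tunables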
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000..7485dce
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,13 @@
+tasks:
+- install:
+ branch: hammer
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+ fs: xfs
+ skip_mgr_daemons: true
+ add_osds_to_crush: true
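This step installs the hammer packages and then starts the cluster from them; ceph-mgr and the libcephfs2 packages are excluded because they only exist in later releases, and skip_mgr_daemons/add_osds_to_crush keep the pre-luminous bootstrap path working. A quick, assumed sanity check that the cluster really is running hammer before the tiering setup begins:

    ceph --version            # build installed by the install task
    ceph tell osd.0 version   # version reported by a running OSD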
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
new file mode 100644
index 0000000..f0e22bf
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ client.0:
+ - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
+ - ceph osd pool create base-pool 4 4 erasure t-profile
+ - ceph osd pool application enable base-pool rados
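This variant creates the base tier as a k=2, m=1 erasure-coded pool with 4 placement groups; the sibling create-replicated-pool.yaml is the alternative facet, so each generated job uses one or the other. A hedged verification of what the commands above should have produced:

    ceph osd erasure-code-profile get t-profile   # shows k=2, m=1 and the failure domain
    ceph osd dump | grep base-pool                # pool listed as erasure with 4 pgs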
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
new file mode 100644
index 0000000..36dc06d
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create base-pool 4
+ - ceph osd pool application enable base-pool rados
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
new file mode 100644
index 0000000..d9cc348
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create cache-pool 4
+ - ceph osd tier add base-pool cache-pool
+ - ceph osd tier cache-mode cache-pool writeback
+ - ceph osd tier set-overlay base-pool cache-pool
+ - ceph osd pool set cache-pool hit_set_type bloom
+ - ceph osd pool set cache-pool hit_set_count 8
+ - ceph osd pool set cache-pool hit_set_period 5
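These commands layer cache-pool over base-pool in writeback mode and configure bloom-filter hit-set tracking; the whitelist entry covers the expected "must scrub before tier agent can activate" cluster log message. A hedged check of the resulting tiering state:

    ceph osd dump | grep -E 'base-pool|cache-pool'   # base-pool lists cache-pool as its tier/overlay; cache-pool shows cache_mode writeback
    ceph osd pool get cache-pool hit_set_type        # should return bloom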
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
new file mode 100644
index 0000000..b2fc171
--- /dev/null
+++ b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,52 @@
+tasks:
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+
+workload:
+ sequential:
+ - rados:
+ clients: [client.0]
+ pools: [base-pool]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ - print: "**** done rados"
+
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ exclude_packages:
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ osd.0:
+ branch: jewel
+ osd.2:
+ branch: jewel
+ - print: "*** done install.upgrade osd.0 and osd.2"
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart do not wait for healthy"
+ - exec:
+ mon.a:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set sortbitwise
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - print: "**** done ceph.healthy"
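The upgrade sequence runs the RADOS cache-tier workload in parallel with upgrading the osd.0 and osd.2 hosts to jewel, restarts all OSDs and monitors without waiting for HEALTH_OK, and only then sets the sortbitwise and require_jewel_osds flags (the sleep works around http://tracker.ceph.com/issues/17808). A hedged post-run spot check that the flags and daemon versions ended up where the suite expects:

    ceph osd dump | grep flags   # should include sortbitwise,require_jewel_osds
    ceph tell osd.0 version      # a jewel (10.2.x) build after the restart
    ceph health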