Diffstat (limited to 'src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering')
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%                                                                    |  0
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml                                                 | 17
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml           | 13
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%                                              |  0
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml         |  6
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml |  5
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml                       | 14
-rw-r--r--  src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml                                                       | 52
8 files changed, 0 insertions, 107 deletions
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
deleted file mode 100644
index e69de29..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/%
+++ /dev/null
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
deleted file mode 100644
index 9cd743c..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - but it is still running
- - wrongly marked me down
-roles:
-- - mon.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
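
The start.yaml above silences the legacy CRUSH tunables warning on the mons, whitelists two log messages that are expected during daemon restarts, and lays out two mon/osd hosts plus a client. For reference, a minimal sketch of the hand-written equivalent of that override and a standard command to inspect the tunables that would otherwise trigger the warning; the conf fragment is an illustration, not part of the suite:

    # ceph.conf fragment mirroring the override in start.yaml
    [mon]
        mon warn on legacy crush tunables = false

    # Inspect the CRUSH tunables currently in effect on the cluster
    ceph osd crush show-tunables
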
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
deleted file mode 100644
index 7485dce..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-tasks:
-- install:
- branch: hammer
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
-- print: '**** done hammer'
-- ceph:
- fs: xfs
- skip_mgr_daemons: true
- add_osds_to_crush: true
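
The hammer-to-jewel.yaml step installs the hammer packages (excluding ceph-mgr and the libcephfs2 packages, which only exist in later releases) and then starts the cluster on xfs without mgr daemons. A small sketch, assuming a shell on one of the test nodes, of how the installed and running versions could be confirmed after this step; osd.0 comes from the roles in start.yaml:

    # Version of the locally installed ceph CLI
    ceph --version

    # Version reported by a running daemon
    ceph tell osd.0 version
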
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/% b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
deleted file mode 100644
index e69de29..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
+++ /dev/null
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
deleted file mode 100644
index f0e22bf..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-tasks:
-- exec:
- client.0:
- - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
- - ceph osd pool create base-pool 4 4 erasure t-profile
- - ceph osd pool application enable base-pool rados
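
create-ec-pool.yaml builds the base tier as an erasure-coded pool: profile t-profile uses k=2 data chunks and m=1 coding chunk with an osd failure domain, so the pool survives the loss of one OSD. A quick verification sketch using standard ceph commands and the names from the YAML above:

    # Show the erasure-code profile base-pool was created with
    ceph osd erasure-code-profile get t-profile

    # Confirm the pool exists, is erasure-coded, and has 4 PGs
    ceph osd pool ls detail | grep base-pool
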
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
deleted file mode 100644
index 36dc06d..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-tasks:
-- exec:
- client.0:
- - ceph osd pool create base-pool 4
- - ceph osd pool application enable base-pool rados
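
The replicated variant creates the same base-pool with 4 PGs and whatever replication size the cluster defaults to. The effective settings can be read back with standard ceph commands, as in this sketch:

    # Replica count and PG count actually applied to the pool
    ceph osd pool get base-pool size
    ceph osd pool get base-pool pg_num
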
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
deleted file mode 100644
index d9cc348..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - ceph osd pool create cache-pool 4
- - ceph osd tier add base-pool cache-pool
- - ceph osd tier cache-mode cache-pool writeback
- - ceph osd tier set-overlay base-pool cache-pool
- - ceph osd pool set cache-pool hit_set_type bloom
- - ceph osd pool set cache-pool hit_set_count 8
- - ceph osd pool set cache-pool hit_set_period 5
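
1-create-cache-tier.yaml layers cache-pool over base-pool in writeback mode and configures a bloom hit set (8 sets, 5-second period) so the tier agent has recency data to act on; the whitelisted "must scrub before tier agent can activate" message is expected while that state settles. A sketch of how the tiering state could be checked afterwards with standard ceph commands, pool names as above:

    # Overlay and cache mode show up in the pool entries of the osdmap
    ceph osd dump | grep -E 'base-pool|cache-pool'

    # Hit-set parameters set above
    ceph osd pool get cache-pool hit_set_type
    ceph osd pool get cache-pool hit_set_count
    ceph osd pool get cache-pool hit_set_period
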
diff --git a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
deleted file mode 100644
index b2fc171..0000000
--- a/src/ceph/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-tasks:
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-
-workload:
- sequential:
- - rados:
- clients: [client.0]
- pools: [base-pool]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- - print: "**** done rados"
-
-upgrade-sequence:
- sequential:
- - install.upgrade:
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
- osd.0:
- branch: jewel
- osd.2:
- branch: jewel
- - print: "*** done install.upgrade osd.0 and osd.2"
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart do not wait for healthy"
- - exec:
- mon.a:
- - sleep 300 # http://tracker.ceph.com/issues/17808
- - ceph osd set sortbitwise
- - ceph osd set require_jewel_osds
- - ceph.healthy:
- - print: "**** done ceph.healthy"